Columns:
  query      string  (lengths 9 to 9.05k)
  document   string  (lengths 10 to 222k)
  negatives  list    (lengths 19 to 20)
  metadata   dict
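A minimal sketch of loading and iterating rows of this dataset with the Hugging Face datasets library; the repository id below is a placeholder, not the real name:

from datasets import load_dataset

ds = load_dataset("your-org/code-retrieval-triplets", split="train")  # hypothetical repo id

row = ds[0]
print(row["query"])           # natural-language description of the code
print(row["document"])        # the matching (positive) code snippet
print(len(row["negatives"]))  # 19-20 hard-negative snippets per row
print(row["metadata"])        # objective: (query, document, negatives) triplets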
Returns an alphabet that is consistent with the provided list of words in sorted order. Assumes there is at least one possible alphabet for the sequence of words (i.e. no cycles)
def parse_alphabet(words):
    letters = Graph()
    for i in range(len(words) - 1):
        for l in words[i]:
            letters.add_vertex(l)  # make sure all the letters are in the graph
        let_idx = first_uncommon_letter(words[i], words[i+1])
        if let_idx != -1:
            letters.add_edge(words[i][let_idx], words[i+1][let_idx])
    for l in words[-1]:
        letters.add_vertex(l)
    return letters.top_sort()
[ "def adv_alpha_sort_by_word_length(words):\n\n # Alternate, pre-sort method\n #\n # d = {}\n # for w in words:\n # d.setdefault(len(w), []).append(w)\n # for v in d.values():\n # v.sort()\n # return sorted(d.items())\n\n d = {}\n for w in words:\n d.setdefault(len(w), []).append(w)\n out = []\n for k, v in sorted(d.items()):\n out.append((k, sorted(v)))\n return out", "def get_letter_swaps(words):\n\n letter_swaps = defaultdict(list)\n\n for word in words:\n for index, letter in enumerate(word):\n for new_letter in ascii_lowercase[ascii_lowercase.index(letter) + 1:]:\n new_word = word[:index] + new_letter + word[index + 1:]\n if new_word in words:\n letter_swaps[letter + new_letter].append((word, new_word))\n\n return letter_swaps", "def get_letter_to_letter_swaps(words):\n\n letter_swaps = { letter: defaultdict(list) for letter in ascii_lowercase }\n\n for word in words:\n for index, letter in enumerate(word):\n for new_letter in ascii_lowercase[ascii_lowercase.index(letter) + 1:]:\n new_word = word[:index] + new_letter + word[index + 1:]\n if new_word in words:\n letter_swaps[letter][new_letter].append((word, new_word))\n letter_swaps[new_letter][letter].append((word, new_word))\n\n return letter_swaps", "def get_alphabet() -> List:\n return list(string.ascii_lowercase)", "def sort_words_case_insensitively(words):\r\n pass", "def find_anagrams(words):\n anagrams = {}\n\n for word in words:\n anagrams.setdefault(alphabetize(word), [word])\n if word not in anagrams[alphabetize(word)]:\n anagrams[alphabetize(word)].append(word)\n\n return anagrams", "def scrabble_helper(word_list, char_set_list):\n alpha_word_list = []\n max_word_len = 0\n # sorts words into tuple: (word with letters in alphabetical order, original word, original position)\n # e.g. apple becomes aelpp\n for i in range(len(word_list)):\n if len(word_list[i]) > max_word_len:\n max_word_len = len(word_list[i])\n alpha_word_list.append((singleword_radix_sort(word_list[i]), word_list[i], i))\n\n # Sorts char set list into tuples: (chars in alphabetical order, original char_set_list tileset, original position)\n # Does not include tilesets that are longer than the longest word in word_list\n alpha_char_set_list = []\n for i in range(len(char_set_list)):\n if len(char_set_list[i]) <= max_word_len:\n alpha_char_set_list.append((singleword_radix_sort(char_set_list[i]), char_set_list[i], i))\n\n # Sorts all words into alphabetical order (Each word is currently all the letters in alphabetical order)\n sorted_alpha_word_list = multiword_radix_sort(alpha_word_list, max_word_len)\n\n # The output from multiword_radix_sort is a list of numbers (ascii values of each letter) so this part turns\n # the numbers back into letters\n for i in range(len(sorted_alpha_word_list)):\n word = []\n for j in range(max_word_len):\n if sorted_alpha_word_list[i][0][j] != 0:\n word.append(chr(sorted_alpha_word_list[i][0][j]+96))\n sorted_alpha_word_list[i][0] = ''.join(word)\n\n # Combine all of the same anagrams into one list. E.g. 'apple' and 'aeppl' would combine into one list as they are\n # anagrams of each other\n grouped_anagrams = [[sorted_alpha_word_list[0]]]\n group_counter = 0\n for i in range(1, len(sorted_alpha_word_list)):\n if sorted_alpha_word_list[i][0] == sorted_alpha_word_list[i-1][0]:\n grouped_anagrams[group_counter].append(sorted_alpha_word_list[i])\n else:\n grouped_anagrams.append([sorted_alpha_word_list[i]])\n group_counter += 1\n\n # Prepares lists in a format for output. 
Just the word without the alphabetical order letters or original position\n output_grouped_anagrams = []\n for i in range(len(grouped_anagrams)):\n group = []\n for j in range(len(grouped_anagrams[i])):\n group.append(grouped_anagrams[i][j][1])\n output_grouped_anagrams.append(group)\n\n # This chunk sorts each list of anagrams into alphabetical order so the final output is in alphabetical order\n for i in range(len(output_grouped_anagrams)):\n if len(output_grouped_anagrams[i]) > 1:\n max_len = len(output_grouped_anagrams[i][0])\n output_grouped_anagrams[i] = multiword_radix_sort2(output_grouped_anagrams[i], max_len)\n\n # This turns the words back into their string representation rather than list of ascii values\n for k in range(len(output_grouped_anagrams[i])):\n word = []\n for j in range(max_len):\n if output_grouped_anagrams[i][k][0][j] != 0:\n word.append(chr(output_grouped_anagrams[i][k][0][j] + 96))\n output_grouped_anagrams[i][k] = ''.join(word)\n\n # Finds the words that can be made using each tileset in char_set_list using binary search\n # Only compares to the groups of anagrams rather than the whole list\n final_output_list = []\n for i in range(len(char_set_list)):\n final_output_list.append([])\n for i in range(len(alpha_char_set_list)):\n index = binary_search_anagram_groups(grouped_anagrams, alpha_char_set_list[i][0])\n if index is not None:\n final_output_list[alpha_char_set_list[i][2]] = output_grouped_anagrams[index]\n return final_output_list", "def alphabetical_sort():\r\n\r\n input_text = input(\"Enter a string:\\n\")\r\n\r\n words = input_text.split() # split text into individual words\r\n\r\n words.sort() # sort the list\r\n\r\n print(\"\\nThe sorted words are:\")\r\n for word in words:\r\n print(word)\r\n\r\n return 0", "def problem4_1(wordlist):\n print(wordlist)\n wordlist.sort(key = str.lower)\n print(wordlist)", "def _alphabet_generator():\n for i in itertools.count():\n for t in itertools.product(string.ascii_lowercase, repeat=i):\n yield ''.join(t)", "def get_letter_mapping(all_words):\n mapping = defaultdict(set)\n\n for word in all_words:\n mapping[(0, word[0])].add(word)\n mapping[(2, word[2])].add(word)\n mapping[(4, word[4])].add(word)\n\n return mapping", "def get_substitution_alphabet():\n alphabet = []\n\n for ch in get_key().lower() + string.ascii_lowercase:\n if ch not in alphabet:\n alphabet.append(ch)\n\n if len(alphabet) != len(string.ascii_lowercase):\n die('key must consist solely of ASCII letters')\n\n return alphabet", "def generate_alphabet() -> Alphabet:\n alpha = []\n for letter in string.ascii_lowercase:\n alpha.append(dict([(letter, random.randint(0, 100))]))\n return alpha", "def choose_letter(words, pattern):\n # Generate dictionary of occurrences per each valid letter\n histogram = dict(zip(VALID_LETTERS_LIST, [0]*len(VALID_LETTERS_LIST)))\n for word in words:\n for letter in word:\n histogram[letter] += 1\n \n # Remove all the letters which already appeared in the pattern by setting\n # their value to -1 (which is smaller than 0).\n NOT_IN_HISTOGRAM = -1\n for letter in pattern: \n if (letter in VALID_LETTERS_LIST):\n histogram[letter] = NOT_IN_HISTOGRAM\n \n # Return the letter with the largest corresponding number of occurences\n return max(histogram, key=histogram.get)", "def permute(letter_list):\n results = []\n re_str = '^'+''.join(map(lambda w: w+'?', sorted(letter_list)))+'$'\n for word in WORDS:\n letters = ''.join(sorted(word))\n if re.search(re_str, letters):\n results.append(word)\n return map_results(sorted(results, 
key=len, reverse=True))", "def alphabetize(word_key, encrypted_matrix):\n\n\t#the reordered string\n\tchar_order = list()\n\n\tfor char in word_key:\n\t\t#print char\n\t\tchar_order.append(char)\n\tchar_order.sort()\n\tspot_dict = create_dict_order(word_key)\n\n\t#Creates a new matrix\n\tfinal = np.copy(encrypted_matrix)\n\tfor spot in range(len(word_key)):\n\t\told_col = spot_dict[char_order[spot]]\n\t\tfinal = swap_col(encrypted_matrix,final,old_col,spot)\n\treturn final", "def make_alphabetic(hits, processname, sortnames=False, lang=\"sv\"):\n def fix_lastname(name):\n vonaf_pattern = re.compile(r\"^(%s) \" % \"|\".join(VONAV_LIST))\n name = re.sub(vonaf_pattern, r\"\", name)\n return name.replace(\" \", \"z\")\n\n results = []\n for hit in hits:\n processname(hit, results)\n\n letter_results = {}\n # Split the result into start letters\n for first_letter, result in results:\n if first_letter == \"Ø\":\n first_letter = \"Ö\"\n if first_letter == \"Æ\":\n first_letter = \"Ä\"\n if first_letter == \"Ü\":\n first_letter = \"Y\"\n if lang == \"en\" and first_letter == \"Ö\":\n first_letter = \"O\"\n if lang == \"en\" and first_letter in \"ÄÅ\":\n first_letter = \"A\"\n if first_letter not in letter_results:\n letter_results[first_letter] = [result]\n else:\n letter_results[first_letter].append(result)\n\n # Sort result dictionary alphabetically into list\n if lang == \"en\":\n collator = icu.Collator.createInstance(icu.Locale(\"en_EN.UTF-8\"))\n else:\n collator = icu.Collator.createInstance(icu.Locale(\"sv_SE.UTF-8\"))\n for _n, items in list(letter_results.items()):\n if sortnames:\n items.sort(key=lambda x: collator.getSortKey(fix_lastname(x[0]) + \" \" + x[1]))\n else:\n items.sort(key=lambda x: collator.getSortKey(x[0]))\n\n letter_results = sorted(list(letter_results.items()), key=lambda x: collator.getSortKey(x[0]))\n return letter_results", "def sort_words(array):\n sortedwords = []\n for word in wordarray:\n rev_w = ' '.join(reversed(word.split(' ')))\n last_letter = ''\n for i, l in enumerate(rev_w):\n if l == last_letter:\n listed_word = list(rev_w)\n listed_word[i-1] = '*'\n listed_word[i] = '*'\n rev_w = ''.join(listed_word)\n last_letter = l\n sortedwords.append(rev_w)\n return sortedwords", "def radix_sort_str(strlist):\n offset = ord('a') - 1 # We want a placeholder space before 'a', chr(96)\n max_length = 0\n for word in strlist:\n max_length = max(max_length, len(word))\n\n # Add placeholders so all words are max length\n for i, word in enumerate(strlist[:]):\n strlist[i] = word + chr(96) * (max_length - len(word))\n\n buckets = [[] for j in xrange(ord('z') - offset)]\n for i in xrange(1, max_length + 1):\n for word in strlist:\n buckets[ord(word[-i].lower()) - offset].append(word)\n strlist[:] = []\n for bucket in buckets:\n strlist.extend(bucket)\n bucket[:] = []\n\n strlist[:] = [word.strip(chr(96)) for word in strlist]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Auxiliary function that checks that one letter comes before another one in a given alphabet. Assumes alphabet is a List.
def check_order(self, alphabet, let1, let2): return alphabet.index(let1) < alphabet.index(let2)
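Note that list.index is a linear scan, so repeated order checks with check_order cost O(n) each; precomputing a rank map makes them O(1). A small illustrative variant:

alphabet = ['w', 'e', 'r', 't', 'f']  # example alphabet
rank = {letter: i for i, letter in enumerate(alphabet)}
print(rank['e'] < rank['t'])  # True: 'e' precedes 't'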
[ "def less_letter(self,a,b):\n return (self.rank(a)<=self.rank(b))", "def is_alphabetized(roster, ordering):\n for i, person in enumerate(roster):\n if i < (len(roster)-1):\n #ignore if same person 2x\n if person == roster[i+1]:\n continue\n #if roster is not alphabetized\n elif not ordering(person, roster[i+1]):\n return False\n #if roster is alphabetized\n return True", "def alphabet_position(letter):\n\tletter = letter.lower()\n\talphabet = string.ascii_lowercase\n\treturn alphabet.find(letter)", "def is_alphabetized(roster, ordering):\n i = 0\n while i < (len(roster)-1):\n x = ((ordering(roster[i], roster[i+1])))\n if roster[i] == roster[i+1]:\n i += 1\n continue\n elif x:\n i += 1\n continue\n return False\n return True", "def covers_alphabet(sentence: str) -> bool:\n # greater than or equal to include , ; ! etc.\n return set(sentence.lower()) >= set(\"abcdefghijklmnopqrstuvwxyz\")", "def whole_alphabet(input_str):\n\n alphabet_set = set(string.ascii_lowercase)\n check_set = set()\n\n for letter in input_str:\n letter = letter.lower()\n if letter.isalpha():\n check_set.add(letter)\n\n if alphabet_set == check_set:\n return 1\n else:\n return 0", "def is_abecedarian(word):\n # Make it all lower case, just in case\n word = word.lower()\n for i in range(len(word)-1):\n # if this letter is greater than (further in the alphabet) the next,\n # it's not in alphabetical order so just return False now\n if word[i]>word[i+1]:\n return False\n return True # Nothing broke the rules, so return True", "def test_isInAlphabet(self):\n sEncodings = StringUtils.stringEncodings()\n lEncodings = StringUtils.languageEncodings()\n \n self.assertTrue(isInAlphabet(\"Howdy\", sEncodings['ASCII'],lEncodings['ENGLISH']))\n self.assertTrue(isInAlphabet(\"First\", sEncodings['ASCII'], lEncodings['ENGLISH']))\n self.assertFalse(isInAlphabet(\"0123456789\", sEncodings['ASCII'], lEncodings['ENGLISH']))\n self.assertTrue(isInAlphabet(\"g\", sEncodings['ASCII'], lEncodings['ENGLISH']))", "def rearrange_alphabet(self, ordered_letters):\n for active_letter, next_letter in zip(ordered_letters, ordered_letters[1:]):\n\n # Find active and next letters' index in list, or add it to the end of alphabet\n active_letter_index = self.get_index(active_letter)\n next_letter_index = self.get_index(next_letter)\n\n # If current_letter is after next_letter in alphabet, move it in front.\n if active_letter_index > next_letter_index:\n self.remove(active_letter)\n self.insert(next_letter_index,active_letter)", "def from_alphabet_only(s, alphabet):\n return [c for c in s if c in alphabet]", "def _is_in_alphabet(self, char: str):\n in_alphabet = False\n for rotor in self.rotors:\n in_alphabet = rotor._is_char_in_alphabet(char)\n if in_alphabet:\n break\n \n return in_alphabet", "def excludes_least():\n\talphabet_string = 'abcdefghijklmnopqrstuwxyz'\n\tfor letter in alphabet_string:\n\t\tprint letter\n\t\tavoids(letter)", "def alphabet_position(letter):\n\n #create index base\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n #uniform to get index of any letter\n letter = letter.lower()\n return alphabet.index(letter)", "def find_missing_letter(chars):\n alphabet = 'abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n \n for i, lett in enumerate(alphabet):\n if lett in chars:\n if alphabet[i+1] not in chars:\n return alphabet[i+1]", "def checkAlphabet(self, sequence):\n ok = [ch for ch in sequence if ch in self.E]\n if len(ok) < len(sequence):\n return False \n return True", "def alphabet_position(letter):\n\talphabet = 
string.ascii_uppercase\n\tif letter.upper() in alphabet:\n\t\treturn alphabet.index(letter.upper())\n\t\t\n\treturn -1", "def check_whether_letter_was_prev_mentioned(self):\n if self.letter in self.given_letters:\n\n if self.given_letters.count(self.letter) < self.num_letter_occurence:\n self.word_dict[self.letter] += 1\n else:\n self.inputs_wrong_letter()", "def first_letter(self, letter):\n return self[0] == letter", "def is_lower(a, b):\n for idx, a_value in enumerate(a):\n if a[idx] > b[idx]:\n return False\n\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks that there are no repeating letters in the alphabet
def check_unique(self, alphabet):
    letters_set = set()
    for let in alphabet:
        if let in letters_set:
            return False
        else:
            letters_set.add(let)
    return True
[ "def letter_repeated(word: string) -> bool:\n for _, letter_seq in groupby(sorted(word)):\n if len(list(letter_seq)) > 1:\n return True\n return False", "def test_3_duplicate_alphabet_count():\n word = \"mississippi\"\n print(word)\n alphabets = list(word)\n for i in alphabets:\n if alphabets.count(i) > 1:\n print(\"count of '{0}' is:{1}\".format(i, alphabets.count(i)))\n # Removing the letter\n for j in alphabets:\n if str(i) == str(j):\n alphabets.remove(j)", "def whole_alphabet(input_str):\n\n alphabet_set = set(string.ascii_lowercase)\n check_set = set()\n\n for letter in input_str:\n letter = letter.lower()\n if letter.isalpha():\n check_set.add(letter)\n\n if alphabet_set == check_set:\n return 1\n else:\n return 0", "def checkAlphabet(self, sequence):\n ok = [ch for ch in sequence if ch in self.E]\n if len(ok) < len(sequence):\n return False \n return True", "def is_unique_chars(text):\n for i in range(len(text) - 1):\n if text[i] in text[i + 1:]:\n return False\n\n return True", "def contains_all_letters(text):\n\t\n\t# use a flag to hold our return value, to support having only one return\n\treturn_value = True\n \n # use a set to get the unique values from the input text into a \n # quickly searchable data structure, force everything to be lowercase\n # so that we don't have to search for upper and lower\n\ts = set(text.lower())\n\n\t# if the number of unique characters in the string is less than the\n # size of the alphabet, it cannot contain the full alphabet\n\tif len(s) >= 26:\n\t\t\n\t # the .ascii_lowercase method returns a string containing the lowercase\n\t # alphabet, iterate through looking for each of the letters\n\t\tfor a in string.ascii_lowercase:\n\t\t\t# if at any time we cannot find a letter, we can stop searching\n\t\t\tif not a in s:\n\t\t\t\treturn_value = False\n\t\t\t\tbreak\n\n\telse:\n\t\treturn_value = False\n\n\treturn return_value", "def is_in_alphabet(self, cur_ngram):\r\n for letter in cur_ngram:\r\n if letter not in self.alphabet:\r\n return False\r\n\r\n return True", "def find_missing_letter(chars):\n alphabet = 'abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n \n for i, lett in enumerate(alphabet):\n if lett in chars:\n if alphabet[i+1] not in chars:\n return alphabet[i+1]", "def is_isogram(word):\n alphabet_freq = Counter()\n for c in word.lower():\n if c.isalpha():\n alphabet_freq[c] += 1\n if alphabet_freq[c] > 1:\n return False\n\n return True", "def repeat_letter(text):\n \n return len(re.findall(r\"([A-Za-z!\\*])\\1{3,}\", text))", "def is_unique_chars(in_str):\n checker = 0\n if len(in_str) > 128:\n return False\n for c in in_str:\n val = ord(c)\n if checker & 1 << val > 0:\n return False\n checker |= 1 << val\n return True", "def is_unique(a_string):\n charactors = {}\n for char in a_string:\n if char in charactors:\n return False\n charactors[char] = True\n return True", "def valid_word(word, chosen_letters):\n\tletter_count = Counter(chosen_letters)\n\tfor letter in word.upper():\n\t\tif letter not in chosen_letters:\n\t\t\treturn False\n\t\tif not letter_count[letter]:\n\t\t\treturn False\n\t\tletter_count[letter] -= 1\n\treturn True", "def not_letter(character: str) -> bool:\n return character not in LETTERS", "def isAnagram(self, s: str, t: str) -> bool:\n if len(s) != len(t):\n return False\n letter_counter = Counter(s)\n for ch in t:\n if ch not in letter_counter:\n return False\n else:\n if letter_counter[ch] <= 0:\n return False\n else:\n letter_counter[ch] -= 1\n return True", "def is_unique_chars(s):\n if len(s) > 
256:\n return False\n char_set = [False] * 256\n for c in s:\n if char_set[ord(c)]:\n return False\n char_set[ord(c)] = True\n return True", "def contains_three_letters(word):\n return contains_dup_letters(word, 3)", "def missing_letters(string):\n missing_letters_string = str()\n histogram_dict = histogram(string)\n for letter in alphabet:\n frequency_of_letter = histogram_dict.get(letter)\n if frequency_of_letter is None:\n missing_letters_string += letter\n return missing_letters_string", "def is_unique(cls, chars: list):\n\n compare_list = []\n for char in chars:\n if char in compare_list:\n return False\n compare_list.append(char)\n\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the contents of a text file into a list and return the list. Each element in the list will contain one line of text from the text file.
def read_list(filename):
    # Create an empty list named text_list.
    text_list = []

    # Open the text file for reading and store a reference
    # to the opened file in a variable named text_file.
    with open(filename, "rt") as text_file:

        # Read the contents of the text
        # file one line at a time.
        for line in text_file:

            # Remove white space, if there is any,
            # from the beginning and end of the line.
            clean_line = line.strip()

            # Append the clean line of text
            # onto the end of the list.
            text_list.append(clean_line)

    # Return the list that contains the lines of text.
    return text_list
[ "def read_txt(path):\n with open(path) as f:\n lines = f.readlines()\n lines = [x.strip() for x in lines]\n return lines", "def file_to_list(file_name):\n lines = []\n with open(file_name) as f:\n lines = f.read().splitlines()\n return lines", "def read_file_line_to_list(file_path):\n files = []\n with open(file_path, encoding='utf8') as infile:\n for line in infile:\n files.append(line.strip())\n return files", "def read_file_into_list(filename):\n\tf = open(filename)\n\tlines = [line.rstrip('\\n') for line in f]\n\tf.close()\n\treturn lines", "def file_lines(file_path):\n with open(file_path) as f:\n return [line.rstrip() for line in f.readlines()]", "def get_file_as_list_of_lines(fname):\r\n with open(fname, 'r') as f:\r\n return [\r\n l.replace('\\n','').replace('\\r','')\r\n for l in f.readlines()\r\n ]", "def get_lines(filename: str) -> List[str]:\n try:\n f = open(filename, \"r\")\n return f.read().splitlines()\n except IOError:\n return\n finally:\n f.close()", "def get_list_from_text_file(config):\n\n kwfile = None\n\n #Open keyword file\n try:\n kwfile = open(config.keyword_file, 'r')\n except IOError:\n print 'Unable to open keyword file'\n exit()\n\n lst = []\n\n for line in kwfile:\n lst.append(line.rstrip('\\r''\\n'))\n\n return lst", "def readFile(_file_obj) :\n\t\n\t# list for text's tokens\n\tf_list = []\n\t\t\t \n\twhile 1:\n\t\t# read text line by line\n\t\tline = _file_obj.readline();\n\t\tif not line: break\n\t\t# tokenize each line\n\t\tf_list += tokenize_line(line);\n\n\t\t\n\treturn f_list", "def dump_text( filename ):\n line_list = open( filename ).readlines()\n\n output_lines = []\n for line in line_list:\n output_lines.append( line.split() )\n\n return output_lines", "def readFile(filename):\n filehandle = open(filename, \"r\")\n contents = filehandle.read()\n stringList = contents.split()\n filehandle.close()\n return stringList", "def get_lines(file_name):\n with open(file_name, 'r') as f_in:\n return f_in.readlines()", "def get_lines_from_file(file_path, file_encoding='utf-8'):\n with open(file_path, mode='r', encoding=file_encoding) as f:\n return f.readlines()", "def extract_lines(file):\n with open(file) as fp:\n return fp.readlines()", "def get_list_from(file_name):\n with open(file_name, \"r\") as file:\n lines = file.readlines()\n current_list = [element.replace(\"\\n\", \"\").split(\";\") for element in lines]\n return current_list", "def read_file():\n data = []\n with open(tempfile) as fin:\n for line in fin.readlines():\n data.append(line.strip())\n return data", "def read_file_lines(file_name):\n reading_file = io.open(file_name, 'r', encoding='utf8')\n\n lines = reading_file.readlines()\n reading_file.close()\n return lines", "def read_file(filename):\n\n infile = open(filename, 'r')\n lines = infile.readlines()\n infile.close()\n \n return lines", "def load_text_file(self):\n with open(self.file_name, \"r\") as filino:\n data = filino.readlines()\n\n return data", "def read_input(file_name):\n with open(file_name, 'r') as f:\n lines = f.readlines()\n return lines" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test bcrypt maximum password length. The bcrypt algorithm has a maximum password length of 72 bytes, and ignores any bytes beyond that.
def test_long_password(self):
    # Create a password with a 72 bytes length
    password = 'A' * 72
    pw_hash = self.flask_bcrypt.generate_password_hash(password)
    # Ensure that a longer password yields the same hash
    self.assertTrue(self.flask_bcrypt.check_password_hash(pw_hash, 'A' * 80))
[ "def test_long_password(self):\n\n # Create a password with a 72 bytes length\n password = 'A' * 72\n pw_hash = self.flask_bcrypt.generate_password_hash(password)\n # Ensure that a longer password **do not** yield the same hash\n self.assertFalse(self.flask_bcrypt.check_password_hash(pw_hash, 'A' * 80))", "def test_password_generation_length(self):\n\n # +10 for the minimum password length\n for pw_length in [10+int(100*random.random()) for i in range(100)]:\n password = generate_password(pw_length)\n self.assertTrue(len(password), pw_length)", "def test_users_new_password_is_not_less_than_6_characters_long(self):\n user_data = {\n \"email\": \"meshmbuvi@gmail.com\",\n \"username\": \"musyoka\",\n \"password\": \"mbuv\",\n \"confirm password\": \"mbuv\"\n }\n response = self.app.put('/api/v1/auth/reset_password',\n data=json.dumps(user_data),\n content_type='application/json')\n self.assertEqual(response.status_code, 400)\n response_data = json.loads(response.get_data().decode('utf-8'))\n self.assertEqual(response_data['message'], 'passwords should be 6 characters or more.')", "def test_password_generation_minlength(self):\n\n self.assertIsNone(generate_password(6))", "def test_generate_with_different_length(self):\n pg = PasswordGenerator()\n length = 16\n pg.minlen = length\n pg.maxlen = length\n self.assertEqual(len(pg.generate()), length)", "def check_pwd_len(password, current_user):\n while len(password) < 8:\n print('Password too short')\n password = getpass.getpass('New password: ')\n current_user.set_password(password)", "def length(self):\n return len(self.password) >= 12", "def test_password_maxlength_exceed(self, data):\n self.assertEqual(User.objects.count(), 0)\n\n response = self.client.post(reverse('account:sign_up'), data=data)\n\n self.assertEqual(User.objects.count(), 0)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['password'][0].code, 'password_too_long')", "def testLoginPassword128Long(self):\n self.assertEquals(UserModel.ERR_BAD_PASSWORD, self.users.login(\"user1\", \"abcdefghijklmnopqrstuvwxyz\n abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy\"))", "def test_maxlength(db_conn):\n assert has_max_length('abcd1234', 2)\n assert has_max_length('a', 2) is None", "def test_check_password(self) -> None:\n self.assertTrue(check_pwd('2AbhhE'))\n self.assertTrue(check_pwd('0hj454@6hBH'))\n self.assertTrue(check_pwd('1ja!2AB'))\n # less than 4\n self.assertFalse(check_pwd('4XY'))\n self.assertFalse(check_pwd('aa'))\n self.assertFalse(check_pwd(''))", "def test_generate_new_password(self):\n\n gen_password = self.new_cred.generate_new_password()\n self.assertEqual(len(gen_password),10)", "def validate_password(self, password):\n while True:\n if not re.search(\"[A-Z]\", password):\n break\n elif not re.search(\"[a-z]\", password):\n break\n elif len(password) > 8 or len(password) < 6:\n break\n elif not re.search(\"[0-9]\", password):\n break\n else:\n return True\n return jsonify({\"message\": \"Please use a valid password format\",\n \"details\": \"Password len(6-8), must have lower case,\\\n uppercase and number\",\n \"status\": 400})", "def get_pwd_length() -> int:\r\n num = 1\r\n while True:\r\n query = f\"SELECT username FROM users WHERE username='administrator' AND length(password)={num}\"\r\n if not is_resultset_extant(query):\r\n num = num + 1\r\n else:\r\n return num", "def clean_password(self):\n password = self.cleaned_data['password']\n if len(password) 
< 6:\n raise forms.ValidationError(_(\"Password must be at least 6 characters\"))\n return password", "def passwordcheck(password):\n\t\tif len(password) >= 5:\n\t\t\treturn True\n\t\telse:\n\t\t\tprint(\"Invalid password: Must be at least 5 characters.\\n\")\n\t\t\treturn False", "def verifyPlaintextPassword(password):", "def acceptable_password(password):\n LOG.debug(\"PASS\")\n LOG.debug(password)\n\n if password is not None:\n LOG.debug(len(password))\n\n if password is None:\n return False\n\n if len(password) < 3:\n return False\n\n return True", "def test_encrypt_long_input(self):\n s = scrypt.encrypt(self.longinput, self.password, 0.1)\n self.assertEqual(len(s), 128 + len(self.longinput))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the work around bcrypt maximum password length.
def test_long_password(self):
    # Create a password with a 72 bytes length
    password = 'A' * 72
    pw_hash = self.flask_bcrypt.generate_password_hash(password)
    # Ensure that a longer password **does not** yield the same hash
    self.assertFalse(self.flask_bcrypt.check_password_hash(pw_hash, 'A' * 80))
[ "def test_long_password(self):\n\n # Create a password with a 72 bytes length\n password = 'A' * 72\n pw_hash = self.flask_bcrypt.generate_password_hash(password)\n # Ensure that a longer password yields the same hash\n self.assertTrue(self.flask_bcrypt.check_password_hash(pw_hash, 'A' * 80))", "def test_password_generation_length(self):\n\n # +10 for the minimum password length\n for pw_length in [10+int(100*random.random()) for i in range(100)]:\n password = generate_password(pw_length)\n self.assertTrue(len(password), pw_length)", "def test_password_generation_minlength(self):\n\n self.assertIsNone(generate_password(6))", "def test_users_new_password_is_not_less_than_6_characters_long(self):\n user_data = {\n \"email\": \"meshmbuvi@gmail.com\",\n \"username\": \"musyoka\",\n \"password\": \"mbuv\",\n \"confirm password\": \"mbuv\"\n }\n response = self.app.put('/api/v1/auth/reset_password',\n data=json.dumps(user_data),\n content_type='application/json')\n self.assertEqual(response.status_code, 400)\n response_data = json.loads(response.get_data().decode('utf-8'))\n self.assertEqual(response_data['message'], 'passwords should be 6 characters or more.')", "def check_pwd_len(password, current_user):\n while len(password) < 8:\n print('Password too short')\n password = getpass.getpass('New password: ')\n current_user.set_password(password)", "def length(self):\n return len(self.password) >= 12", "def test_generate_with_different_length(self):\n pg = PasswordGenerator()\n length = 16\n pg.minlen = length\n pg.maxlen = length\n self.assertEqual(len(pg.generate()), length)", "def test_password_maxlength_exceed(self, data):\n self.assertEqual(User.objects.count(), 0)\n\n response = self.client.post(reverse('account:sign_up'), data=data)\n\n self.assertEqual(User.objects.count(), 0)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['password'][0].code, 'password_too_long')", "def testLoginPassword128Long(self):\n self.assertEquals(UserModel.ERR_BAD_PASSWORD, self.users.login(\"user1\", \"abcdefghijklmnopqrstuvwxyz\n abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy\"))", "def test_check_password(self) -> None:\n self.assertTrue(check_pwd('2AbhhE'))\n self.assertTrue(check_pwd('0hj454@6hBH'))\n self.assertTrue(check_pwd('1ja!2AB'))\n # less than 4\n self.assertFalse(check_pwd('4XY'))\n self.assertFalse(check_pwd('aa'))\n self.assertFalse(check_pwd(''))", "def test_generate_new_password(self):\n\n gen_password = self.new_cred.generate_new_password()\n self.assertEqual(len(gen_password),10)", "def test_maxlength(db_conn):\n assert has_max_length('abcd1234', 2)\n assert has_max_length('a', 2) is None", "def clean_password(self):\n password = self.cleaned_data['password']\n if len(password) < 6:\n raise forms.ValidationError(_(\"Password must be at least 6 characters\"))\n return password", "def validate_password(self, password):\n while True:\n if not re.search(\"[A-Z]\", password):\n break\n elif not re.search(\"[a-z]\", password):\n break\n elif len(password) > 8 or len(password) < 6:\n break\n elif not re.search(\"[0-9]\", password):\n break\n else:\n return True\n return jsonify({\"message\": \"Please use a valid password format\",\n \"details\": \"Password len(6-8), must have lower case,\\\n uppercase and number\",\n \"status\": 400})", "def acceptable_password(password):\n LOG.debug(\"PASS\")\n LOG.debug(password)\n\n if password is not None:\n LOG.debug(len(password))\n\n if password 
is None:\n return False\n\n if len(password) < 3:\n return False\n\n return True", "def passwordcheck(password):\n\t\tif len(password) >= 5:\n\t\t\treturn True\n\t\telse:\n\t\t\tprint(\"Invalid password: Must be at least 5 characters.\\n\")\n\t\t\treturn False", "def validate_password(self, value):\n validate_password(value)\n return value", "def verifyPlaintextPassword(password):", "def get_pwd_length() -> int:\r\n num = 1\r\n while True:\r\n query = f\"SELECT username FROM users WHERE username='administrator' AND length(password)={num}\"\r\n if not is_resultset_extant(query):\r\n num = num + 1\r\n else:\r\n return num" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a graph and a namespace for a local context.
def __init__(self, name, globalContext, propertiesDict):
    self.__name = name
    self.__globalContext = globalContext
    self.__localNS = Namespace(self.__globalContext[1][0:self.__globalContext[1].find("#")] + "/context#")[self.__name]
    self.__graph = Graph(self.__globalContext[0].store, self.__localNS)
    self.__propertiesDict = propertiesDict
    print("Creating new local context: %s\n%s\n" % (name.upper(), self))
[ "def create_graph(self, graph_name):", "def Graph(*av, **kw):\n g = rdflib.Graph(*av, **kw)\n import krdf\n if krdf.namespace_manager is None:\n krdf.namespace_manager = g.namespace_manager\n else:\n g.namespace_manager = krdf.namespace_manager\n return g", "def __init__(self):\n\t\tctx = _new_local()\n\t\tsuper(LocalContext, self).__init__(ctx)", "def root_name_space(graph=None):\n if graph is None:\n graph = tf.get_default_graph()\n with graph.as_default(), graph.name_scope(None) as scope:\n yield scope", "def create_graph(self):\n self.my_graph = eval_or_exec(self.program)\n self.parse_graph()", "async def make_graph(self, args):\n return self._graph", "def localcontext(ctx=None):\r\n # The string below can't be included in the docstring until Python 2.6\r\n # as the doctest module doesn't understand __future__ statements\r\n \"\"\"\r\n >>> from __future__ import with_statement\r\n >>> print getcontext().prec\r\n 28\r\n >>> with localcontext():\r\n ... ctx = getcontext()\r\n ... ctx.prec += 2\r\n ... print ctx.prec\r\n ...\r\n 30\r\n >>> with localcontext(ExtendedContext):\r\n ... print getcontext().prec\r\n ...\r\n 9\r\n >>> print getcontext().prec\r\n 28\r\n \"\"\"\r\n if ctx is None: ctx = getcontext()\r\n return _ContextManager(ctx)", "def _init_graph(self):\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n self._init_network_variables()\n self._init_network_functions()", "def create_graph(self):\n # C reates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\n self.graph_file), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def create(self, stage):\n return nx.Graph()", "def getNamespaceFromName(*args, **kwargs):\n \n pass", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(infile), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def create_context(self):\n flask.g.context = self._context_class()", "def build_local_course_graph():\r\n g = Graph()\r\n for raw_course in parse_local_courses():\r\n course_subject = raw_course.get(\"Subject\", None)\r\n course_number = raw_course.get(\"Number\", None)\r\n if course_subject is None or course_number is None:\r\n continue\r\n\r\n # Add course to graph\r\n short_name = f\"{course_subject.upper()}_{course_number}\"\r\n course = URIRef(FOCUDATA + short_name)\r\n g.add((course, RDF.type, VIVO.Course))\r\n g.add((course, DBP.number, Literal(course_number)))\r\n\r\n # Add course URL if its in the known urls\r\n if short_name in COURSE_URLS:\r\n g.add((course, RDFS.seeAlso, URIRef(COURSE_URLS[short_name])))\r\n\r\n # Add course outline\r\n add_uris_to_graph(g, course, FOCU.outlines,\r\n raw_course.get(\"Outlines\", None))\r\n\r\n # Add lectures to graph\r\n raw_lectures = raw_course.get(\"Lectures\", None)\r\n if raw_lectures is not None:\r\n course_lectures = build_lecture_graph(raw_lectures, short_name)\r\n g += course_lectures\r\n\r\n # Create link from this course to each of its lectures\r\n for s, _, _ in course_lectures.triples((None, RDF.type, FOCU.Lecture)):\r\n g.add((course, FOCU.lectures, s))\r\n return g", "def init_graph():\r\n graph = nx.Graph()\r\n graph.add_node(1)\r\n graph.add_edge(1, 1)\r\n return graph", "def initialNamespaceContext (self):\n\n if self.__initialNamespaceContext is None:\n isn = { }\n if self.__contextInScopeNamespaces is not None:\n for (k, v) in 
six.iteritems(self.__contextInScopeNamespaces):\n isn[k] = self.__identifyNamespace(v)\n kw = { 'target_namespace' : self\n , 'default_namespace' : self.__identifyNamespace(self.__contextDefaultNamespace)\n , 'in_scope_namespaces' : isn }\n self.__initialNamespaceContext = resolution.NamespaceContext(None, **kw)\n return self.__initialNamespaceContext", "def new_graph(self):\n graph = GraphWrapper.blank_graph()\n graph.graph['graph_builder'] = self\n return graph", "def create_namespace(self):\n name = 'namespace-{random_string}'.format(random_string=random_str(5))\n\n namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=name))\n\n self.core_api.create_namespace(namespace)\n\n logger.info(\"Creating namespace: %s\", name)\n\n # save all namespaces created with this backend\n self.managed_namespaces.append(name)\n\n # wait for namespace to be ready\n Probe(timeout=30, pause=5, expected_retval=True,\n fnc=self._namespace_ready, namespace=name).run()\n\n return name", "def create(self, context):\n values = self.obj_get_changes()\n db_network = dbapi.create_network(context, values)\n self._from_db_object(self, db_network)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets all the important information related to a local context.
def getInfo(self): return (self.__name, self.__graph, self.__globalContext[1], self.__localNS)
[ "def context(self):\n return self.__service.context()", "def determine_contexts(self):\n return []", "def inject_into_context():\n return dict(\n dev_server = running_local # Variable dev_server is True if running on the GAE development server\n )", "def localcontext(ctx=None):\r\n # The string below can't be included in the docstring until Python 2.6\r\n # as the doctest module doesn't understand __future__ statements\r\n \"\"\"\r\n >>> from __future__ import with_statement\r\n >>> print getcontext().prec\r\n 28\r\n >>> with localcontext():\r\n ... ctx = getcontext()\r\n ... ctx.prec += 2\r\n ... print ctx.prec\r\n ...\r\n 30\r\n >>> with localcontext(ExtendedContext):\r\n ... print getcontext().prec\r\n ...\r\n 9\r\n >>> print getcontext().prec\r\n 28\r\n \"\"\"\r\n if ctx is None: ctx = getcontext()\r\n return _ContextManager(ctx)", "def get():\n global context\n return context", "def get_extra_context():\n extra_context_f = Config.extra_context_func\n if extra_context_f is None:\n return {}\n extra_context = extra_context_f() # pylint: disable=E1120\n if not isinstance(extra_context, dict):\n print(\"bad extra_context (not a dict) => ignoring\", file=sys.stderr)\n return {}\n return extra_context", "def currentCtx():\n pass", "def get_locals(self, ):\n\t\tpass", "def __init__(self):\n\t\tctx = _new_local()\n\t\tsuper(LocalContext, self).__init__(ctx)", "def getCurrentContexts(self):\n return self._currentContexts", "def each_context(self, request):\n\n return {\n 'site_title': self.site_title,\n # 'site_header': self.site_header,\n # 'site_url': self.site_url,\n # 'has_permission': self.has_permission(view),\n 'available_apps': self.get_available_apps(request),\n }", "def get_context(self):\n assert self.uid, \"The user needs to be logged-in to initialize his context\"\n self.context = request.env['res.users'].context_get() or {}\n self.context['uid'] = self.uid\n self._fix_lang(self.context)\n return self.context", "def contexts(self):\n return self._hist[\"contexts\"]", "def get_contexts(cls) -> List:\n # no race-condition here, cls.context is a thread-local object\n # be sure not to override contexts in a subclass however!\n if not hasattr(cls.context, \"stack\"):\n cls.context.stack = []\n return cls.context.stack", "def global_context(self):\n self._page_con = con.Pages()\n static_uri = join_uri(settings.INTERLINK_URI, 'static/')\n context = {'static': static_uri, \n 'libs': join_uri(static_uri, 'libs/'), \n 'todays_date': dtdt.now().strftime('%Y-%m-%d'),\n 'this_page': '[set during page generation]'}\n context['site_uri'] = settings.SITE_URI\n context['site_name'] = settings.SITE_NAME\n context['pages'] = []\n context['sitemap_pages'] = []\n for cf in self._page_con.cfiles.values():\n page_name = cf.info['name']\n if page_name == 'index':\n uri = settings.SITE_URI\n else:\n uri = join_uri(settings.INTERLINK_URI, page_name)\n context['pages'].append(uri)\n if 'sitemap' in cf.info and cf.info['sitemap'] != '':\n priority = 0.1\n if page_name != 'index':\n uri = join_uri(settings.SITE_URI, page_name)\n try:\n priority = '%0.1f' % float(cf.info['sitemap'])\n except:\n self._output('Failed to convert sitemap priority to float: %s' % cf.info['sitemap'])\n context['sitemap_pages'].append({'uri': uri, 'priority': priority})\n context['sitemap_pages'].sort(key=lambda u: -float(u['priority']))\n extra_context = con.GlobConFiles().get_entire_context()\n context.update(extra_context)\n return context", "def show_context(context=None, level=0):\n if context is None:\n 
libca.ca_client_status(level)\n else:\n libca.ca_context_status(context, level)", "def print_context_contents(context):\n return_type = \"NoneType\"\n\n header = \"\\n[ ** Context: '\" + context.context_name + \"' ** ] \\n\"\n txt = \"\\tDeclared variables:\\n\"\n sorted_keys = sorted(context.types_of.keys())\n for name in sorted_keys:\n value = context.types_of[name]\n if name == default_function_ret_var_name:\n return_type = print_type(value)\n continue\n\n type_to_print = print_type(value)\n\n if \"__stypy_auto_var\" not in name:\n txt += \"\\t\\t{0} = {1}\\n\".format(name, type_to_print)\n if len(context.globals) > 0:\n txt += \"\\tDeclared globals: [\"\n for name in context.globals:\n txt += name + \", \"\n txt = txt[:-2]\n txt += \"]\"\n\n return header + \"Return type: \" + return_type + \"\\n\\n\" + txt + \"\\n\"", "def mpi_futures_find_context():\n try:\n module = sys.modules['__main__']\n local_context = None\n for item_name in dir(module):\n if isinstance(getattr(module, item_name), Context):\n local_context = getattr(module, item_name)\n break\n except Exception:\n raise Exception('nested: MPIFuturesInterface: remote instance of Context not found in the remote __main__ '\n 'namespace')\n return local_context", "def _dump_context(self):\n assert self.loaded_context is not None\n dumped_context, errors = self.schema.process(self.loaded_context)\n dumped_context.update(self.related)\n return dumped_context" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Module to create a service ticket
def create_ticket(service_msg, user_name, pass_word, api_key):
    headers = {'Accept': 'application/vnd.pagerduty+json;version=2',
               'Authorization': 'Token token={token}'.format(token=api_key)}
    payload = {'note': {'content': "High CPU/Memory or Low Diskspace noticed. "
                                   "Status of the service is " + str(service_msg)}}
    session = None
    ticketed = None
    try:
        session = requests.get(BASE_URL + '/user', auth=HTTPBasicAuth(user_name, pass_word))
    except:
        logging.error("Authenticating Error")
    # Guard against session being None when the request above raised.
    if session is not None and session.status_code == 202:
        ticketed = requests.post(BASE_URL + "/createTicket",
                                 headers=headers, data=json.dumps(payload))
    else:
        logging.error("Ticket creation failed")
    return ticketed
[ "def create_ticket(self, **kwargs):\n\n all_params = ['ticket']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_ticket\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/tickets'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'ticket' in params:\n body_params = params['ticket']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='TicketInformation',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def newticket( self, summary, type, severity, **kwargs ) :\n t = self.client.newticket( self, summary, type, severity, **kwargs )\n self.tickets.append( t )\n return t", "def create_ticket(self, subject, message, additional_fields):\n result_ticketid = \"0\"\n url = self.get_parameter(\"otrsRestUrl\")\n user = self.get_parameter(\"otrsRestUser\")\n password = self.get_parameter(\"otrsRestPassword\")\n queue = self.get_parameter(\"otrsQueue\")\n\n # parse additional fields\n additional_fields_map = {}\n for data in filter(None, additional_fields.split(\";\")):\n match = re.match(\"(.*?)=(.*)\", data)\n if match:\n data_field = match.group(1)\n data_value = match.group(2)\n additional_fields_map[data_field] = data_value\n\n request_url = \"%s/TicketCreate?UserLogin=%s&Password=%s\" % (url, user, password)\n request_headers = {\n \"Content-Type\": \"application/json\"\n }\n request_data = {}\n request_data[\"Ticket\"] = {}\n request_data[\"Ticket\"][\"Title\"] = subject\n request_data[\"Ticket\"][\"Type\"] = \"Incident\"\n request_data[\"Ticket\"][\"Priority\"] = \"3 normal\"\n request_data[\"Ticket\"][\"Queue\"] = \"Raw\"\n request_data[\"Ticket\"][\"State\"] = \"open\"\n request_data[\"Ticket\"][\"CustomerUser\"] = self.get_parameter(\"otrsCustomerMail\")\n request_data[\"Article\"] = {}\n request_data[\"Article\"][\"Subject\"] = subject\n request_data[\"Article\"][\"Body\"] = message\n request_data[\"Article\"][\"ContentType\"] = \"text/plain; charset=utf8\"\n request_data[\"DynamicField\"] = []\n for fieldname in additional_fields_map:\n request_data[\"DynamicField\"].append({\n \"Name\": fieldname,\n \"Value\": additional_fields_map[fieldname]\n })\n request_data_json = json.dumps(request_data)\n\n try:\n requests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n response = requests.post(request_url, data=request_data_json, headers=request_headers,\n verify=False)\n # check response\n if response.status_code != 200:\n self._logger.error(\"Could not create ticket\")\n else:\n response_data = json.loads(response.text)\n try:\n result_ticketid = response_data[\"TicketID\"]\n self._logger.info(\"Ticket %s successfully created\", result_ticketid)\n except:\n self._logger.error(\"Could not create ticket\")\n 
except:\n self._logger.error(\"Could not connect to OTRS\")\n\n return result_ticketid", "def test_create_ticket(self):\n res = self.client().post(\"/api/v1/events/tickets\", json=self.ticket)\n self.assertEqual(res.status_code, 200)", "def create_ticket(self, ticket):\n ticket_url = self._zendesk_instance.create_ticket(data=ticket)\n return zendesk.get_id_from_url(ticket_url)", "def create(self, data):\n endpoint = f'{self.base_url}/tickets'\n response = self.make_http_request('post', endpoint, data)\n return response", "async def create_ticket(self, ticket_id, staff_id, name):\n await self.create_staff(staff_id, name)\n cursor = await self.get_data(\"SELECT * FROM `tickets` WHERE `ticket_id` = %s \",(ticket_id, ))\n if not cursor:\n await self.insert_data(\n \"UPDATE staff SET started = started+1, weekly_started = weekly_started+1 WHERE discordid = %s\",\n (staff_id, ))\n print(f\"Updated started by for {name}\")\n await self.insert_data(\n \"INSERT INTO `tickets`(`ticket_id`, `started_by`, `created_at`) VALUES (%s,%s,%s)\",\n (ticket_id, name, datetime.now()))\n\n print(f\"created ticket: {ticket_id}\")\n else:\n await self.insert_data(\n \"UPDATE tickets SET updated_at = null,updated_by = %s WHERE ticket_id =%s\",\n (name, ticket_id))\n print(f\"updated ticket: {ticket_id}\")", "def ticket_created(self, ticket):\n msg = \"Ticket: %d [%s %s] Created by %s: %s \" % (\n ticket.id, ticket['priority'], ticket['type'], \n ticket['reporter'], ticket['summary'])\n svc = NotifoSvc(self.nuser,self.nkey)\n response = svc.sendNotification({\n 'to':self.nuser,\n 'msg':msg\n })", "def handle_generate_ticket(msg, conn, state):\n #If the ticket has not been created or the current one has not been expired then creates a new one\n if state.isExpired:\n state.isExpired = False\n\n pubkey = msg['pubkey']\n pubkey = common.parse_point(pubkey)\n\n nonce = msg['nonce']\n print(\"A new ticket generation request has been received!\")\n print(\"Public Key: {}\\nnonce: {}\".format(pubkey, nonce))\n state.currentTicket = common.generate_ticket(pubkey, nonce)\n state.currentPubKey = pubkey\n network_handling.write_message(conn, RespGenTick(state.currentTicket))\n else:\n network_handling.write_message(conn, network_handling.RespError(\"The current ticket has not been expired yet!\"))", "def new_ticket(request):\n context = {}\n if request.method == \"POST\":\n form = forms.TicketSubmissionForm(request.POST)\n if form.is_valid():\n subject = request.POST['subject']\n description = request.POST['description'] + \"\\n\\n-----\"\n description += \"\\nVersion: %s\" % settings.GIT_RELEASE[:7]\n description += \"\\nSubmitted from: %s\" % request.META.get('REMOTE_ADDR')\n description += \"\\nDevice Info: %s\" % request.META.get('HTTP_USER_AGENT')\n attachments = request.FILES.getlist('attachments')\n requestor = request.user.email\n if request.user.first_name and request.user.last_name:\n requestor = request.user.name + \" <\" + request.user.email + \">\"\n resp = api.create_ticket(\"Database\", requestor, subject, description, attachments=attachments)\n if resp.get('id', None):\n ticket_id = resp['id']\n messages.success(request, \"Your ticket has been submitted. 
Thank you!\")\n\n # Send Slack notification\n slack_user = lookup_user(request.user.email)\n if not slack_user:\n slack_user = request.user.username\n ticket_info = {\n \"url\": 'https://lnl-rt.wpi.edu/rt/Ticket/Display.html?id=' + ticket_id,\n \"id\": ticket_id,\n \"subject\": subject,\n \"description\": request.POST['description'],\n \"status\": \"New\",\n \"assignee\": None,\n \"reporter\": slack_user\n }\n ticket = tfed_ticket(ticket_info)\n slack_post(settings.SLACK_TARGET_TFED_DB, text=request.POST['description'], content=ticket,\n username='Request Tracker')\n else:\n messages.add_message(\n request, messages.WARNING,\n 'Failed to open ticket: %s. Please contact the Webmaster.' % resp.get('message')\n )\n return HttpResponseRedirect(reverse(\"home\"))\n else:\n form = forms.TicketSubmissionForm()\n context['form'] = form\n context['title'] = 'Submit a Ticket'\n return render(request, 'form_semanticui.html', context)", "def map_service_ticket(sender, **kwargs):\n request = kwargs['request']\n ticket = request.GET.get('ticket', '')\n if ticket:\n session_key = request.session.session_key\n mapping, created = SessionServiceTicket.objects.get_or_create(service_ticket=ticket,\n session_key=session_key)\n mapping.save()", "def _ticket(self, ixf_member_data, subject, message, ix=False, net=False):\n\n subject = f\"{settings.EMAIL_SUBJECT_PREFIX}[IX-F] {subject}\"\n\n client = self.deskpro_client\n\n if not ixf_member_data.deskpro_id:\n old_ticket = DeskProTicket.objects.filter(\n subject=subject, deskpro_id__isnull=False\n ).first()\n if old_ticket:\n ixf_member_data.deskpro_id = old_ticket.deskpro_id\n ixf_member_data.deskpro_ref = old_ticket.deskpro_ref\n\n ticket = DeskProTicket.objects.create(\n subject=subject,\n body=message,\n user=self.ticket_user,\n deskpro_id=ixf_member_data.deskpro_id,\n deskpro_ref=ixf_member_data.deskpro_ref,\n )\n\n cc = []\n\n if ix:\n # if ix to be notified, cc suitable contacts\n cc += ixf_member_data.ix_contacts\n\n if net:\n # if net is to be notified, cc suitable contacts\n cc += ixf_member_data.net_contacts\n\n cc = list(set(cc))\n\n for email in cc:\n # we need to relate a name to the emails\n # since deskpro will make a person for the email address\n # we should attempt to supply the best possibly option\n\n ticket.cc_set.create(\n email=email,\n )\n\n try:\n client.create_ticket(ticket)\n ticket.published = datetime.datetime.now(datetime.timezone.utc)\n ticket.save()\n except Exception as exc:\n ticket.subject = f\"[FAILED]{ticket.subject}\"\n if hasattr(exc, \"data\"):\n # api error returned with validation error data\n ticket.body = f\"{ticket.body}\\n\\n{exc.data}\"\n else:\n # api error configuration issue\n ticket.body = f\"{ticket.body}\\n\\n{exc}\"\n ticket.save()\n return ticket", "def post_to_ticket(msg, author, tkt_id, env):\n from trac.ticket.notification import TicketNotifyEmail\n from trac.ticket import Ticket\n from trac.ticket.web_ui import TicketModule\n from trac.util.datefmt import utc\n\n now = datetime.now(utc)\n\n try:\n db = env.get_db_cnx()\n # Get the related trac ticket object\n ticket = Ticket(env, tkt_id, db)\n\n # determine sequence number...\n cnum = 0\n tm = TicketModule(env)\n for change in tm.grouped_changelog_entries(ticket, db):\n if change['permanent']:\n cnum += 1\n\n ticket.save_changes(author, msg, now, db, cnum + 1)\n db.commit()\n\n tn = TicketNotifyEmail(env)\n tn.notify(ticket, newticket=0, modtime=now)\n except Exception, e:\n msg = 'Unexpected error processing ticket ID %s: %s' % (tkt_id, e)\n print 
>>sys.stderr, msg", "def assign_ticket():\n if flask.request.method != 'POST':\n return flask.redirect(flask.request.referrer or\n flask.url_for('dashboard.dashboard_home'))\n\n # if not login.current_user.can_claim_ticket():\n # flask.flash('You are not eligible to claim a ticket.', 'error')\n # return flask.redirect(flask.request.referrer or\n # flask.url_for('dashboard.dashboard_home'))\n\n if (\n 'claim_code' not in flask.request.form or\n flask.request.form['claim_code'] == ''\n ):\n flask.flash('Invalid claim code.', 'error')\n return flask.redirect(flask.request.referrer or\n flask.url_for('dashboard.dashboard_home'))\n\n ticket = models.Ticket.get_by_claim_code(flask.request.form['claim_code'])\n\n if not ticket:\n flask.flash('No ticket with given claim code.', 'error')\n elif not ticket.can_be_claimed():\n flask.flash(\n flask.Markup(\n (\n 'That ticket can not be claimed. Please contact '\n '<a href=\"{0}\">the ticketing officer</a> for assistance.'\n ).format(\n APP.config['TICKETS_EMAIL_LINK']\n )\n ),\n 'error'\n )\n else:\n ticket.holder = login.current_user\n ticket.holder_name = flask.request.form['new_holder_name']\n ticket.claims_made += 1\n\n APP.log_manager.log_event(\n 'Assigned ticket',\n user=login.current_user,\n tickets=[ticket]\n )\n\n DB.session.commit()\n\n flask.flash('Ticket claimed.', 'success')\n\n return flask.redirect(flask.request.referrer or\n flask.url_for('dashboard.dashboard_home'))", "def get_ticket(controller, username, password):\n url = controller + \"ticket\"\n payload = {\"username\": username, \"password\": password}\n header = {\"content-type\": \"application/json\"}\n response = requests.post(url, data=json.dumps(payload), headers=header, verify=False)\n r_json = response.json()\n ticket = r_json[\"response\"][\"serviceTicket\"]\n return ticket", "def _create_story(props):\n return self.teh.create_ticket(Type.USER_STORY, props=props)", "def create(\n self, pipeline: str, stage: str, properties: dict = None, **options\n ) -> Dict:\n if not pipeline or not stage:\n raise Exception(\"pipeline and stage are required to create a ticket!\")\n if not properties:\n properties = {}\n ticket_data = [{\"name\": x, \"value\": y} for x, y in properties.items()]\n ticket_data.append({\"name\": \"hs_pipeline\", \"value\": pipeline})\n ticket_data.append({\"name\": \"hs_pipeline_stage\", \"value\": stage})\n return self._call(\"objects/tickets\", data=ticket_data, method=\"POST\", **options)", "def create_wrapper_for_ticket_creation(summary, description, **kw):\n return TicketWrapper(summary=summary,\n description=description,\n **kw)", "def create_tickets(order):\n if has_created_tickets(order):\n return # no need to create tickets\n \n if order.meta.get('users_allocate'):\n multi_users = order.meta.get('users_allocate')\n multi_users = multi_users.get('ids')\n multi_users.insert(0, order.user_id)\n else:\n multi_users = None\n \n amount = 0\n for ticketorder in order.get_ticketorders():\n for i in range(ticketorder.amount):\n if multi_users and multi_users[amount] :\n ticket = Ticket(ticketorder.id, multi_users[amount])\n else :\n ticket = Ticket(ticketorder.id, order.user_id)\n ticketorder.tickets.append(ticket)\n Session.flush()\n blogger.info(\"created ticket %s, code %s.\" % (ticket.id, ticket.get_code()))\n amount += 1\n \n log_crm(\"order\", order.id, dict(action=\"created tickets\",\n amount=amount))\n return amount" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get existing canvas or create a new canvas
>>> cnv = getCanvas ( 'glnewCanvas' , width = 1200 , height = 1000 )
def getCanvas ( name   = 'glCanvas'     ,   ## canvas name
                title  = 'Ostap'        ,   ## canvas title
                width  = canvas_width   ,   ## canvas width
                height = canvas_height  ) : ## canvas height
    cnv = _canvases.get ( name , None )
    if not cnv :
        ## create new canvas
        ## cnv = ROOT.TCanvas ( 'glCanvas', 'Ostap' , width , height )
        cnv = ROOT.TCanvas ( name , 'Ostap' , width , height )
        ## adjust newly created canvas
        ## @see http://root.cern.ch/root/html/TCanvas.html#TCanvas:TCanvas@4
        groot = ROOT.ROOT.GetROOT()
        if not groot.IsBatch() :
            dw = width  - cnv.GetWw()
            dh = height - cnv.GetWh()
            cnv.SetWindowSize ( width + dw , height + dh )
        ##
        _canvases [ name ] = cnv
    return cnv
[ "def getCanvas ( name = 'glCanvas' , ## canvas name \n title = 'Ostap' , ## canvas title\n width = 1000 , ## canvas width\n height = 800 ) : ## canvas height\n cnv = _canvases.get ( name , None )\n if not cnv :\n ## create new canvas \n cnv = ROOT.TCanvas ( 'glCanvas', 'Ostap' , width , height )\n ## adjust newly created canvas\n ## @see http://root.cern.ch/root/html/TCanvas.html#TCanvas:TCanvas@4\n if not ROOT.gROOT.IsBatch() :\n dw = width - cnv.GetWw()\n dh = height - cnv.GetWh()\n cnv.SetWindowSize ( width + dw , height + dh )\n \n ## \n _canvases [ name ] = cnv\n \n return cnv", "def getCanvas(h, w):\n canvas = np.zeros((h, w, 3), dtype=np.uint8)\n canvas = clearCanvas(canvas)\n return canvas", "def Create(*args, **kwargs):\n return _glcanvas.GLCanvas_Create(*args, **kwargs)", "async def get_canvas(self) -> Canvas:\n data = await self.request('GET', 'get_pixels')\n size = await self.get_canvas_size()\n return Canvas(size, data)", "def _get_canvas() -> skia.Canvas:\n return __canvas", "def better_canvas():\n return bc.BetterCanvas()", "def _make_canvas(self, dc):\n \n # use GCDC\n if 'wxMac' not in wx.PlatformInfo:\n dc = wx.GCDC(dc)\n \n # make canvas\n return WXCanvas(dc)", "def GLCanvasWithContext(*args, **kwargs):\n val = _glcanvas.new_GLCanvasWithContext(*args, **kwargs)\n val._setOORInfo(val)\n return val", "def make_context(window):\n glfw.make_context_current(window)", "def get_canvas(self):\r\n return self.sc.get_canvas()", "def get_opencl(verbose=False):\n\tplatform = cl.get_platforms()\n\tmy_gpu_devices = platform[1].get_devices()\t#cl.device_type.GPU\n\tif verbose:\n\t\tprint(\"Platforms: \" + str(platform))\n\t\tprint(\"OpenCL devices: \" + str(my_gpu_devices))\n\tctx = cl.Context([my_gpu_devices[0]])\n\tqueue = cl.CommandQueue(ctx)\n\treturn ctx, queue", "def test_glfw_canvas_render_custom_canvas():\n\n import glfw\n\n class CustomCanvas: # implements wgpu.WgpuCanvasInterface\n def __init__(self):\n glfw.window_hint(glfw.CLIENT_API, glfw.NO_API)\n glfw.window_hint(glfw.RESIZABLE, True)\n self.__window = glfw.create_window(300, 200, \"canvas\", None, None)\n\n def get_window_id(self):\n if sys.platform.startswith(\"win\"):\n return int(glfw.get_win32_window(self.__window))\n elif sys.platform.startswith(\"darwin\"):\n return int(glfw.get_cocoa_window(self.__window))\n elif sys.platform.startswith(\"linux\"):\n is_wayland = \"wayland\" in os.getenv(\"XDG_SESSION_TYPE\", \"\").lower()\n if is_wayland:\n return int(glfw.get_wayland_window(self.__window))\n else:\n return int(glfw.get_x11_window(self.__window))\n else:\n raise RuntimeError(f\"Cannot get GLFW window id on {sys.platform}.\")\n\n def get_display_id(self):\n return wgpu.WgpuCanvasInterface.get_display_id(self)\n\n def get_physical_size(self):\n psize = glfw.get_framebuffer_size(self.__window)\n return int(psize[0]), int(psize[1])\n\n canvas = CustomCanvas()\n\n adapter = wgpu.request_adapter(canvas=canvas, power_preference=\"high-performance\")\n device = adapter.request_device()\n draw_frame = _get_draw_function(device, canvas)\n\n for i in range(5):\n time.sleep(0.01)\n glfw.poll_events()\n draw_frame()", "def new_window(\n width: int,\n height: int,\n *,\n renderer: int | None = None,\n tileset: tcod.tileset.Tileset | None = None,\n vsync: bool = True,\n sdl_window_flags: int | None = None,\n title: str | None = None,\n) -> Context:\n return new(\n width=width,\n height=height,\n renderer=renderer,\n tileset=tileset,\n vsync=vsync,\n sdl_window_flags=sdl_window_flags,\n title=title,\n )", "def 
_create_context():\n platforms = cl.get_platforms() # Select the first platform [0]\n if not platforms:\n raise EnvironmentError('No openCL platform (or driver) available.')\n\n # Return first found device\n for platform in platforms:\n devices = platform.get_devices()\n if devices:\n return cl.Context([devices[0]])\n\n raise EnvironmentError('No openCL devices (or driver) available.')", "def new(\n *,\n x: int | None = None,\n y: int | None = None,\n width: int | None = None,\n height: int | None = None,\n columns: int | None = None,\n rows: int | None = None,\n renderer: int | None = None,\n tileset: tcod.tileset.Tileset | None = None,\n vsync: bool = True,\n sdl_window_flags: int | None = None,\n title: str | None = None,\n argv: Iterable[str] | None = None,\n console: tcod.console.Console | None = None,\n) -> Context:\n if renderer is not None:\n warnings.warn(\n \"The renderer parameter was deprecated and will likely be removed in a future version of libtcod. \"\n \"Remove the renderer parameter to fix this warning.\",\n FutureWarning,\n stacklevel=2,\n )\n renderer = RENDERER_SDL2\n if sdl_window_flags is None:\n sdl_window_flags = SDL_WINDOW_RESIZABLE\n if argv is None:\n argv = sys.argv\n if console is not None:\n columns = columns or console.width\n rows = rows or console.height\n argv_encoded = [ffi.new(\"char[]\", arg.encode(\"utf-8\")) for arg in argv] # Needs to be kept alive for argv_c.\n argv_c = ffi.new(\"char*[]\", argv_encoded)\n\n catch_msg: list[str] = []\n catch_handle = ffi.new_handle(catch_msg) # Keep alive.\n\n title_p = _handle_title(title) # Keep alive.\n\n params = ffi.new(\n \"struct TCOD_ContextParams*\",\n {\n \"tcod_version\": lib.TCOD_COMPILEDVERSION,\n \"window_x\": x if x is not None else lib.SDL_WINDOWPOS_UNDEFINED,\n \"window_y\": y if y is not None else lib.SDL_WINDOWPOS_UNDEFINED,\n \"pixel_width\": width or 0,\n \"pixel_height\": height or 0,\n \"columns\": columns or 0,\n \"rows\": rows or 0,\n \"renderer_type\": renderer,\n \"tileset\": _handle_tileset(tileset),\n \"vsync\": vsync,\n \"sdl_window_flags\": sdl_window_flags,\n \"window_title\": title_p,\n \"argc\": len(argv_c),\n \"argv\": argv_c,\n \"cli_output\": ffi.addressof(lib, \"_pycall_cli_output\"),\n \"cli_userdata\": catch_handle,\n \"window_xy_defined\": True,\n },\n )\n context_pp = ffi.new(\"TCOD_Context**\")\n error = lib.TCOD_context_new(params, context_pp)\n if error == lib.TCOD_E_REQUIRES_ATTENTION:\n raise SystemExit(catch_msg[0])\n _check_warn(error)\n return Context._claim(context_pp[0])", "def __init__(self, *args, **kwargs):\n _glcanvas.GLContext_swiginit(self,_glcanvas.new_GLContext(*args, **kwargs))", "def main():\n make_glut()", "def get_conf_canv(self, canvas, width, height):\n \n return lambda event: self.config_canvas(canvas, width, height)", "def _get_context(self):\n state = self.surface_changed\n if state & Node.SURFACE_CHANGED:\n if state == Node.SURFACE_CHANGED:\n self.reset_surface()\n cr = cairo.Context(self.surface)\n elif state & Node.SURFACE_SCALE and state & Node.SURFACE_ROTATE:\n # XXX: to be implemented\n self.create_surface_by_scale_rotate()\n elif state & Node.SURFACE_SCALE:\n self.create_surface_by_scale(self.sx, self.sy, \n self.scale_origin)\n cr = cairo.Context(self.surface)\n cr.scale(self.sx, self.sy)\n self.clear_context(cr)\n elif state & Node.SURFACE_ROTATE:\n self.create_surface_by_rotate(self.ang, self.rotate_origin)\n cr = cairo.Context(self.surface)\n delta = self.surface_width * 0.5\n cr.translate(delta, delta)\n cr.rotate(self.ang)\n 
cr.translate(-delta, -delta)\n cr.translate(-self.surface_x, -self.surface_y)\n self.clear_context(cr)\n else:\n cr = cairo.Context(self.surface)\n self.clear_context(cr)\n\n return cr" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A bit simplified version of TCanvas print. It allows creating several output file types at once:
- if the extension is equal to `tar` or `tgz`, a single (gzipped) tarfile is created
- if the extension is equal to `zip`, a single zip archive is created
>>> canvas.print_ ( 'A' )
>>> canvas.save ( 'A' )  ## ditto
>>> canvas >> 'fig'      ## ditto
def _cnv_print_ ( cnv , fname ,
                  exts = ( 'pdf' , 'png' , 'eps'  , 'C'   ,
                           'jpg' , 'gif' , 'json' , 'svg' ) ) :
    #
    cnv.Update ()
    from ostap.logger.utils import rootWarning
    n , e = os.path.splitext ( fname )
    el    = e.lower()
    if n and el in all_extensions :
        with rootWarning () :
            cnv.Update   ()
            cnv.Print    ( fname )
            logger.debug ( 'Canvas --> %s' % fname )
            return cnv

    if n and el in ( 'tgz' , 'gztar' , 'targz'  ,
                     'tar' ,
                     'zip' ,
                     'tbz' , 'tbz2'  , 'tarbz'  , 'tarbz2' , 'bztar' , 'bz2tar' ,
                     'txz' , 'tlz'   , 'tarxz'  , 'tarlz'  , 'xztar' , 'lztar'  ) :

        files = []
        for ext in exts :
            with rootWarning () :
                name = n + '.' + ext
                cnv.Print ( name )
                logger.debug ( 'Canvas --> %s' % name )
                if os.path.exists ( name ) and os.path.isfile ( name ) :
                    files.append ( name )

        if files and el in ( 'tgz' , 'targz' , 'gztar' ) :
            import tarfile
            with tarfile.open ( fname , "w:gz" ) as output :
                for f in files : output.add ( f )
            if os.path.exists ( fname ) : logger.debug ( 'tgz-archive created %s' % fname )
        elif files and el in ( 'tar' , ) :
            import tarfile
            with tarfile.open ( fname , "w" ) as output :
                for f in files : output.add ( f )
            if os.path.exists ( fname ) : logger.debug ( 'tar-archive created %s' % fname )
        elif files and el in ( 'tbz' , 'tarbz' , 'tarbz2' , 'tbz2' , 'bztar' , 'bz2tar' ) :
            import tarfile
            with tarfile.open ( fname , "w:bz2" ) as output :
                for f in files : output.add ( f )
            if os.path.exists ( fname ) : logger.debug ( 'tbz-archive created %s' % fname )
        elif files and el in ( 'txz' , 'tlz' , 'tarxz' , 'tarlz' , 'xztar' , 'lztar' ) and 3 <= python_version.major :
            import tarfile
            with tarfile.open ( fname , "w:xz" ) as output :
                for f in files : output.add ( f )
            if os.path.exists ( fname ) : logger.debug ( 'txz-archive created %s' % fname )
        elif files and el in ( 'zip' , ) :
            import zipfile
            with zipfile.ZipFile ( fname , "w" ) as output :
                for f in files : output.write ( f )
            if os.path.exists ( fname ) : logger.debug ( 'zip-archive created %s' % fname )

        for f in files :
            try    : os.remove ( f )
            except OSError : pass

        return cnv

    for ext in exts :
        with rootWarning () :
            name = fname + '.' + ext
            cnv.Print ( name )
            logger.debug ( 'Canvas --> %s' % name )

    return cnv
[ "def cli(ctx, version, path, save_dir, runtype, verbose, very_verbose, max_sites):\n if version:\n click.echo(\"xtal2png version: {}\".format(__version__))\n return\n if verbose:\n setup_logging(loglevel=logging.INFO)\n if very_verbose:\n setup_logging(loglevel=logging.DEBUG)\n\n if not runtype and (path or save_dir):\n raise UsageError(\"Please specify --encode or --decode.\")\n\n _logger.debug(\"Beginning conversion to PNG format\")\n\n if runtype == \"encode\":\n check_path(path, \"CIF\")\n check_save_dir(save_dir)\n\n files = check_files(path, \"cif\")\n\n xc = XtalConverter(save_dir=save_dir, max_sites=max_sites)\n xc.xtal2png(files, save=True)\n return\n\n elif runtype == \"decode\":\n check_path(path, \"PNG\")\n check_save_dir(save_dir)\n\n files = check_files(path, \"png\")\n\n xc = XtalConverter(save_dir=save_dir, max_sites=max_sites)\n xc.png2xtal(files, save=True)\n return\n\n click.echo(ctx.get_help())", "def export_cmd(filename, format, sloppy):\n def exporter(cm, sloppy):\n stdoutoutput = False\n if filename == 'stdout':\n stdoutoutput = True\n else:\n output = utils.verify_filename(filename)\n if output['dir']:\n os.makedirs(output['path'], exist_ok=True)\n input_filename = os.path.splitext(os.path.basename(cm.path))[0]\n output['path'] = os.path.join(output['path'], '{f}.{ext}'.format(\n f=input_filename, ext=format))\n else:\n os.makedirs(os.path.dirname(output['path']), exist_ok=True)\n #---------- OBJ ----------\n if format.lower() == 'obj':\n if stdoutoutput:\n buf = cm.export2obj(sloppy)\n buf.seek(0)\n for l in buf.readlines():\n sys.stdout.write(l)\n else:\n print_cmd_status(\"Exporting CityJSON to OBJ (%s)\" % (output['path']))\n try:\n with click.open_file(output['path'], mode='w') as fo:\n re = cm.export2obj(sloppy)\n fo.write(re.getvalue())\n except IOError as e:\n raise click.ClickException('Invalid output file: \"%s\".\\n%s' % (output['path'], e))\n #---------- STL ----------\n elif format.lower() == 'stl':\n if stdoutoutput:\n buf = cm.export2stl(sloppy)\n buf.seek(0)\n for l in buf.readlines():\n sys.stdout.write(l)\n else: \n print_cmd_status(\"Exporting CityJSON to STL (%s)\" % (output['path']))\n try:\n with click.open_file(output['path'], mode='w') as fo:\n re = cm.export2stl(sloppy)\n fo.write(re.getvalue())\n except IOError as e:\n raise click.ClickException('Invalid output file: \"%s\".\\n%s' % (output['path'], e))\n #---------- GLB ----------\n elif format.lower() == 'glb':\n #-- TODO: glb stdout necessary?\n fname = os.path.splitext(os.path.basename(output['path']))[0]\n bufferbin = \"{}.glb\".format(fname)\n binfile = os.path.join(os.path.dirname(output['path']), bufferbin)\n print_cmd_status(\"Exporting CityJSON to glb %s\" % binfile)\n glb = cm.export2glb()\n # TODO B: how many buffer can there be in the 'buffers'?\n try:\n glb.seek(0)\n with click.open_file(binfile, mode='wb') as bo:\n bo.write(glb.getvalue())\n except IOError as e:\n raise click.ClickException('Invalid output file: \"%s\".\\n%s' % (binfile, e))\n #---------- B3DM ----------\n elif format.lower() == 'b3dm':\n #-- TODO: b3dm stdout necessary?\n fname = os.path.splitext(os.path.basename(output['path']))[0]\n b3dmbin = \"{}.b3dm\".format(fname)\n binfile = os.path.join(os.path.dirname(output['path']), b3dmbin)\n b3dm = cm.export2b3dm()\n print_cmd_status(\"Exporting CityJSON to b3dm %s\" % binfile)\n print_cmd_warning(\"Although the conversion works, the output is probably incorrect.\")\n try:\n b3dm.seek(0)\n with click.open_file(binfile, mode='wb') as bo:\n 
bo.write(b3dm.getvalue())\n except IOError as e:\n raise click.ClickException('Invalid output file: \"%s\".\\n%s' % (binfile, e))\n #---------- JSONL ----------\n elif format.lower() == 'jsonl':\n if stdoutoutput:\n with warnings.catch_warnings(record=True) as w:\n buf = cm.export2jsonl()\n print_cmd_warning(w)\n buf.seek(0)\n for l in buf.readlines():\n sys.stdout.write(l)\n else:\n print_cmd_status(\"Exporting CityJSON to JSON Lines (%s)\" % (output['path']))\n try:\n with click.open_file(output['path'], mode='w') as fo:\n with warnings.catch_warnings(record=True) as w:\n re = cm.export2jsonl()\n print_cmd_warning(w)\n fo.write(re.getvalue())\n except IOError as e:\n raise click.ClickException('Invalid output file: \"%s\".\\n%s' % (output['path'], e))\n def processor(cm):\n if (format != 'jsonl') and (cityjson.MODULE_TRIANGLE_AVAILABLE == False):\n str = \"OBJ|glTF|b3dm export skipped: Python module 'triangle' missing (to triangulate faces)\"\n print_cmd_alert(str)\n str = \"Install it: https://pypi.org/project/triangle/\"\n print_cmd_warning(str)\n raise click.ClickException('Abort.')\n else:\n exporter(cm, sloppy)\n return cm\n return processor", "def mkOutput(hpath, aos, plot=None, special=None):\n output = ''\n\n if plot is not None:\n output += str(plot)\n\n if special is not None:\n output += \"\\n\"\n output += \"# BEGIN SPECIAL %s\\n\" % hpath\n output += special\n output += \"# END SPECIAL\\n\\n\"\n\n from io import StringIO\n sio = StringIO()\n yoda.writeFLAT(aos, sio)\n output += sio.getvalue()\n\n return output", "def save_tiff_output(data: dispim.base.Volume, path: str, name: str, b_8bit: bool = False) -> None:\n generate_output_dir(path)\n out_path = os.path.join(path, f\"{name}.tif\")\n\n save_tiff(data, out_path)", "def do_dump(self, *args): \n output_dir = self.root.PEC.config.get(\"GENERAL\", \"output_dir\")\n splitr = (self.loaded_dataflow.__name__).split(self.root.PEC.sep)\n if len(splitr) > 1:\n dname = splitr[1]\n else: dname = splitr[0]\n dname_withtime = dname + str(self.root._load_time).split('.')[0] \n if args[0] != '': #check if there is something in the string.\n #if so then we will use it as our filename\n fname = output_dir + self.root.PEC.sep+args[0]\n else:\n fname = output_dir + self.root.PEC.sep + dname_withtime +\\\n \"_\" + str(self.root.iter_count+1) + \".dump\"\n \n try:\n outf = open(fname, \"wb\")\n outf.write(self.root.get_bytes())\n outf.close()\n except IOError, msg:\n raise \"Proteus Dump Error %s\" % msg\n\n print \"Successfully write %s contents to %s.\" %\\\n (self.root.__class__.__name__ , fname)", "def PlotToFileName(self) -> str:", "def to_png(root, structure_type=\"bbt\", dot_path=\"tree.dot\", png_path=\"tree.png\"):\n to_dot(root, structure_type, dot_path)\n png.create_png(dot_path, png_path)", "def savePlot(self, canvas, plot_meta, printOut=True):\n subd = os.path.join(self.out_folder, plot_meta.subdir)\n try:\n os.makedirs(subd)\n logger.info(\"Made subdirectory %s\" % subd)\n except:\n pass\n logger.info(\"Plot subdirectory is \" + subd)\n\n fname = plot_meta.title\n\n formats = [\"pdf\"]\n if not self.skip_png:\n formats += [\"png\"]\n for format in formats:\n outpath = os.path.join(subd, fname + \".\" + format)\n logger.info(\"Saving plot to %s\" % outpath)\n\n if printOut:\n print \"Saving plot to %s\" % outpath\n #Can fail to export to PNG when libASImage is not available\n try:\n canvas.SaveAs(outpath)\n plot_meta.update(outpath)\n except Exception as e:\n logger.error(\"Couldn't save image: \" + str(e))", "def output(self, 
eta, percent, file, filenum=1, total_files=1):\n pass", "def tar_and_save(outfile, *args):\n with tarfile.open(outfile, 'w') as tf:\n for filename in args:\n tf.add(filename, arcname=os.path.basename(filename))", "def cli_tile_format(\n usage_help: str = \"File format to save image tiles, defaults = '.jpg'\",\n) -> callable:\n return click.option(\n \"--tile-format\",\n type=str,\n default=\".jpg\",\n help=usage_help,\n )", "def print_tikz(reporting_root, prediction_edges, gold_edges, edge_to_relation, words, split_name):\n words = list(words)\n for i, word in enumerate(words):\n word = word.replace(\"$\", \"\\$\").replace(\"&\", \"+\").replace(\"%\", \"\\%\")\n if has_numbers(word):\n word = f\"${word}$\"\n words[i] = word\n\n with open(os.path.join(reporting_root, \"visualize.tikz\"), \"a\") as fout:\n string = \"\\\\begin{figure}\"\n string += \"\\\\resizebox{\\\\textwidth}{!}{\" + \"\\n\"\n string += \"\"\"\\\\begin{dependency}[edge unit distance=5ex]\n\\\\begin{deptext}[column sep=2cm]\n\"\"\"\n string += \"\\\\& \".join([x for x in words]) + \" \\\\\\\\\" + \"\\n\"\n string += \"\\\\end{deptext}\" + \"\\n\"\n for i_index, j_index in gold_edges:\n string += \"\\\\depedge{{{}}}{{{}}}{{{}}}\\n\".format(\n i_index + 1, j_index + 1, edge_to_relation.get((i_index, j_index), \".\")\n )\n for i_index, j_index in prediction_edges:\n string += \"\\\\depedge[edge style={{red!60!}}, edge below]{{{}}}{{{}}}{{{}}}\\n\".format(\n i_index + 1, j_index + 1, edge_to_relation.get((i_index, j_index), \"wrong\")\n )\n string += \"\\\\end{dependency}\\n\"\n string += \"}\\n\"\n string += \"\\\\end{figure}\"\n string += \"\\\\clearpage\"\n fout.write(\"\\n\\n\")\n fout.write(string)", "def compose_display():\r\n print(\"### Compose a composition ###\\n\"\r\n \"Here you can choose a file with composing instruction.\\n\"\r\n \"Our function will compose it for you.\")", "def make_pdf(opt):\n\n img_count = len(glob(os.path.join(opt.dirname, opt.basename, \"%s-*.png\" % opt.basename)))\n\n # set all layers visible\n svg_file = os.path.join(opt.dirname, opt.basename, \"%s.svg\" % opt.basename)\n dom = ElementTree.parse(open(svg_file))\n changed = []\n for n in range(img_count):\n layer = dom.find(\".//{%s}g[@id='layer_%04d']\" % (SVG, (n+1)))\n layer.set('style', '')\n changed.append(len(layer) > 1)\n dom.write(svg_file)\n\n basepath = os.path.join(opt.basename, opt.basename)\n\n for n in range(img_count):\n if opt.changed_only and not changed[n]:\n cmd = \"rm -f %s_%04d.pdf\" % (basepath, n)\n else:\n cmd = (\"inkscape --without-gui --file=%s.svg --export-pdf=%s_%04d.pdf \"\n \"--export-id=%s --export-id-only\" % (\n basepath,\n basepath, n,\n 'layer_%04d' % (n+1)))\n print(cmd)\n os.system(cmd)\n\n cmd = (\"gs -sDEVICE=pdfwrite -dNOPAUSE -dBATCH -dSAFER \"\n \"-sOutputFile=%s_edit.pdf %s_????.pdf\" % (\n basepath,\n basepath\n ))\n print(cmd)\n os.system(cmd)", "def _choose_output(self, path_parts, fmt='html'):\n if len(path_parts) > 1 and not path_parts[-1]:\n path_parts.pop(-1)\n else:\n om = path_parts.pop(-1)\n if re.match(r'^[a-zA-Z0-9\\.!_-]+$', om):\n fn = om.split('!')[0] # Strip off !mode suffixes\n for suffix in self.OUTPUT_SUFFIXES:\n if fn.endswith(suffix) or suffix == ('.' 
+ fn):\n return self._command('output', [om], method=False)\n raise UsageError('Invalid output format: %s' % om)\n return self._command('output', [fmt], method=False)", "def export_as():\n\tglobal export_path\n\tfiles = [(\"Text files\",\"*.docx\"),\n\t\t\t (\"PDF files\",\"*.pdf\"),\n\t\t\t (\"all files\",\"*.*\")] \n\ttry:\n\t\texport_path = asksaveasfile(filetypes = files, defaultextension = files).name \n\texcept:\n\t\treturn\n\t\n\tget_file(export_path)", "def export(self, filename='test', ext='obj'):\n\t\tdeselect_all(True)\n\t\tif ext == 'obj':\n\t\t\tbpy.ops.export_scene.obj(filepath='{}.{}'.format(filename, ext))\n\t\telse:\n\t\t\traise NotImplementedError\n\t\tprint('File has been successfully saved as {}'.format(filename))", "def writexz(edges, bounds, filename, scale, space):\n #start = time.clock()\n file = open(filename, 'wb')\n inkscapeheader(file)\n figdata(file, edges, 'xz', bounds, scale, space)\n inkscapefooter(file)\n file.close()\n print 'Successfully exported ', Blender.sys.basename(filename)# + seconds", "def save_all(self, basename, ftype='png'):\n if basename is None:\n plt.ion()\n plt.show()\n return\n\n for key, val in self._fig_dict.items():\n fig = val['fig']\n fig.savefig(\"%s_%s.%s\" % (basename, key, ftype))\n plt.close(fig)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all known canvases
def getCanvases () : return _canvases.keys()
[ "def try_all_gpus():\r\n ctx_list = []\r\n try:\r\n for i in range(16):\r\n ctx = mx.gpu(i)\r\n _ = nd.array([0], ctx=ctx)\r\n ctx_list.append(ctx)\r\n except:\r\n pass\r\n if not ctx_list:\r\n ctx_list = [mx.cpu()]\r\n return ctx_list", "def canvas_items(self):\n return copy.copy(self.__canvas_items)", "def getCanvas ( name = 'glCanvas' , ## canvas name \n title = 'Ostap' , ## canvas title\n width = canvas_width , ## canvas width\n height = canvas_height ) : ## canvas height\n cnv = _canvases.get ( name , None )\n if not cnv :\n ## create new canvas \n ## cnv = ROOT.TCanvas ( 'glCanvas', 'Ostap' , width , height )\n cnv = ROOT.TCanvas ( name , 'Ostap' , width , height )\n ## adjust newly created canvas\n ## @see http://root.cern.ch/root/html/TCanvas.html#TCanvas:TCanvas@4\n groot = ROOT.ROOT.GetROOT() \n if not groot.IsBatch() :\n dw = width - cnv.GetWw()\n dh = height - cnv.GetWh()\n cnv.SetWindowSize ( width + dw , height + dh )\n \n ## \n _canvases [ name ] = cnv\n \n return cnv", "def getCanvas ( name = 'glCanvas' , ## canvas name \n title = 'Ostap' , ## canvas title\n width = 1000 , ## canvas width\n height = 800 ) : ## canvas height\n cnv = _canvases.get ( name , None )\n if not cnv :\n ## create new canvas \n cnv = ROOT.TCanvas ( 'glCanvas', 'Ostap' , width , height )\n ## adjust newly created canvas\n ## @see http://root.cern.ch/root/html/TCanvas.html#TCanvas:TCanvas@4\n if not ROOT.gROOT.IsBatch() :\n dw = width - cnv.GetWw()\n dh = height - cnv.GetWh()\n cnv.SetWindowSize ( width + dw , height + dh )\n \n ## \n _canvases [ name ] = cnv\n \n return cnv", "def test_all_captures(self):\n\n dir = os.path.join(os.path.dirname(__file__), \"../../../res/captures\")\n\n for c in os.listdir(dir):\n filename = \"../../../res/captures/{}\".format(c)\n try:\n img = cv2.imread(filename)\n except:\n continue\n\n if (img is None):\n continue\n\n playfield = capture.crop_to_playfield(img)", "def get_categories():\r\n return VIDEOS.iterkeys()", "def selected_gpencil_frames(self, context):\n ctrl_points = set()\n for o in context.selected_objects:\n if o.type == 'GPENCIL':\n for l in o.data.layers:\n for f in l.frames:\n if f.select:\n ctrl_points.add(f.frame_number)\n return sorted(ctrl_points)", "def remove_all_canvas_items(self):\n for canvas_item in reversed(copy.copy(self.__canvas_items)):\n self._remove_canvas_item(canvas_item)", "def get_all_channels(self):\r\n return self.all()", "def get_canvas(self):\r\n return self.sc.get_canvas()", "def allViews(self):\n shapeSel = self.findShapeSel()\n for each in shapeSel:\n cmds.imagePlane(each, e=True, showInAllViews=True)\n cmds.select(cl=True)", "def img_sets():\n return [\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train',\n 'tvmonitor']", "def get_ckpts(self):\n return self.checkpointing_environment_manager.get_objects()", "def _get_canvas() -> skia.Canvas:\n return __canvas", "def get_categories():\n return VIDEOS.keys()", "def canvas_items_at_point(self, x, y):\n return self._canvas_items_at_point(self.visible_canvas_items, x, y)", "def get_channels():", "def get_all(cls, connection):\n resp = connection._get('/1/tenants/%s/networkCameras' % connection.tenant.tenant_id)\n netcams = resp.json()['data']\n return [CogniacNetworkCamera(connection, netcam) for netcam in netcams]", "async def get_canvas(self) -> Canvas:\n data = await self.request('GET', 'get_pixels')\n size = await self.get_canvas_size()\n 
return Canvas(size, data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform partition of Canvas into pads with no inter-margins
canvas = ...
nx , ny = 3 , 2
pads = canvas.partition ( nx , ny )
def canvas_partition ( canvas ,
                       nx ,
                       ny ,
                       left_margin   = margin_left   ,
                       right_margin  = margin_right  ,
                       bottom_margin = margin_bottom ,
                       top_margin    = margin_right  ,
                       hSpacing      = 0.0 ,
                       vSpacing      = 0.0 ) :

    if not isinstance ( nx , int ) or nx <= 0 :
        raise AttributeError ( 'partition: invalid nx=%s' % nx )
    if not isinstance ( ny , int ) or ny <= 0 :
        raise AttributeError ( 'partition: invalid ny=%s' % ny )

    ## get the window size
    wsx = abs ( canvas.GetWindowWidth  () )
    wsy = abs ( canvas.GetWindowHeight () )

    #
    ## if parameters given in the absolute units, convert them into relative coordinates
    #
    if not 0 < left_margin   < 1 : left_margin   = abs ( left_margin   ) / wsx
    if not 0 < right_margin  < 1 : right_margin  = abs ( right_margin  ) / wsx
    if not 0 < bottom_margin < 1 : bottom_margin = abs ( bottom_margin ) / wsy
    if not 0 < top_margin    < 1 : top_margin    = abs ( top_margin    ) / wsy
    if not 0 < vSpacing      < 1 : vSpacing      = abs ( vSpacing      ) / wsy
    if not 0 < hSpacing      < 1 : hSpacing      = abs ( hSpacing      ) / wsx

    #
    ## check consistency
    #
    if 1 <= left_margin :
        raise AttributeError ( 'partition: invalid left margin=%f'   % left_margin   )
    if 1 <= right_margin :
        raise AttributeError ( 'partition: invalid right margin=%f'  % right_margin  )
    if 1 <= bottom_margin :
        raise AttributeError ( 'partition: invalid bottom margin=%f' % bottom_margin )
    if 1 <= top_margin :
        raise AttributeError ( 'partition: invalid top margin=%f'    % top_margin    )

    ## delete the pad dictionary
    del canvas.pads

    ## make new empty dictionary
    pads = {}

    vStep = ( 1.0 - bottom_margin - top_margin   - ( ny - 1 ) * vSpacing ) / ny
    if 0 > vStep : raise AttributeError ( 'partition: v-step=%f' % vStep )

    hStep = ( 1.0 - left_margin   - right_margin - ( nx - 1 ) * hSpacing ) / nx
    if 0 > hStep : raise AttributeError ( 'partition: h-step=%f' % hStep )

    hposr, hposl, hmarr, hmarl, hfactor = 0.,0.,0.,0.,0.
    vposr, vposd, vmard, vmaru, vfactor = 0.,0.,0.,0.,0.

    for ix in range ( nx ) :

        if 0 == ix :
            hposl   = 0
            hposr   = left_margin + hStep
            hfactor = hposr - hposl
            hmarl   = left_margin / hfactor
            hmarr   = 0.0
        elif nx == ix + 1 :
            hposl   = hposr + hSpacing
            hposr   = hposl + hStep + right_margin
            hfactor = hposr - hposl
            hmarl   = 0.0
            hmarr   = right_margin / hfactor
        else :
            hposl   = hposr + hSpacing
            hposr   = hposl + hStep
            hfactor = hposr - hposl
            hmarl   = 0.0
            hmarr   = 0.0

        for iy in range ( ny ) :
            if 0 == iy :
                vposd   = 0.0
                vposu   = bottom_margin + vStep
                vfactor = vposu - vposd
                vmard   = bottom_margin / vfactor
                vmaru   = 0.0
            elif ny == iy + 1 :
                vposd   = vposu + vSpacing
                vposu   = vposd + vStep + top_margin
                vfactor = vposu - vposd
                vmard   = 0.0
                vmaru   = top_margin / vfactor
            else :
                vposd   = vposu + vSpacing
                vposu   = vposd + vStep
                vfactor = vposu - vposd
                vmard   = 0.0
                vmaru   = 0.0

            canvas.cd(0)
            pname = 'glPad_%s_%d_%d' % ( canvas.GetName() , ix , iy )
            groot = ROOT.ROOT.GetROOT()
            pad   = groot.FindObject ( pname )
            if pad : del pad
            pad = ROOT.TPad ( pname , '' , hposl , vposd , hposr , vposu )

            logger.verbose ( ' Create pad[%d,%d]=(%f,%f,%f,%f),[%f,%f,%f,%f] %s ' % (
                ix    , iy    ,
                hposl , vposd , hposr , vposu ,
                hmarl , hmarr , vmard , vmaru , pad.GetName() ) )

            pad.SetLeftMargin   ( hmarl )
            pad.SetRightMargin  ( hmarr )
            pad.SetBottomMargin ( vmard )
            pad.SetTopMargin    ( vmaru )

            pad.SetFrameBorderMode ( 0 )
            pad.SetBorderMode      ( 0 )
            pad.SetBorderSize      ( 0 )

            ROOT.SetOwnership ( pad , True )

            if not hasattr ( canvas , 'pads' ) : canvas.pads = {}
            pads [ ( ix , iy ) ] = pad

    ## fill pads structure
    for iy in reversed ( range ( ny ) ) :
        for ix in range ( nx ) :
            key = ix , iy
            canvas.pads [ key ] = pads [ key ]

    return canvas.pads
[ "def partition_pixels(*args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def _do_partition(total, maxelements, around=None, maxdz=None):\n if (around is None) != (maxdz is None):\n raise ValueError(\"Cannot define center or radius alone.\")\n\n if maxelements == 1:\n if around is not None and maxdz < abs(total - around[-maxelements]):\n return []\n else:\n return [[total]]\n res = []\n\n # get range to cover\n if around is None:\n first = 0\n last = total\n limit = None\n else:\n first = max(0, around[-maxelements] - maxdz)\n last = min(total, around[-maxelements] + maxdz)\n for x in range(first, last + 1):\n if around is not None:\n limit = maxdz - abs(x - around[-maxelements])\n for p in IntegerPartitions._do_partition(\n total - x, maxelements - 1, around, limit\n ):\n res.append([x] + p)\n return res", "def test_partition2(self):\n\n inputShape = (50, 20, 4)\n inputLayer = NxInputLayer(inputShape)\n hiddenLayer = NxDepthwiseConv2D(3, strides=(2, 2), padding='same',\n validatePartitions=True)\n outputLayer = NxDepthwiseConv2D(2, padding='same',\n validatePartitions=True)\n model = NxModel(inputLayer.input,\n outputLayer(hiddenLayer(inputLayer.input)))\n\n model.partition()\n\n model.clearTemp()", "def window_partition(x: Tensor, window_size: int) -> tuple[Tensor, tuple[int, int]]:\n B, H, W, C = x.shape\n\n pad_h = (window_size - H % window_size) % window_size\n pad_w = (window_size - W % window_size) % window_size\n if pad_h > 0 or pad_w > 0:\n x = pad(x, (0, 0, 0, pad_w, 0, pad_h))\n Hp, Wp = H + pad_h, W + pad_w\n\n x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n return windows, (Hp, Wp)", "def _partition_cells(self):\n\n def label(l, y, x):\n # A center and some part of its subgrid may be out of bounds.\n if (x >= 0 and x < self.cols and y >= 0 and y < self.rows):\n self.labels[y, x] = l\n\n centers = [(0, 0), (1, 2), (2, 4), (3, 6), (4, 8), (3, -1), (4, 1), (5, 3),\n (6, 5), (7, 7), (-1, 5), (7, 0), (0, 7)]\n for center in centers:\n label(0, *center)\n for i, neigh in enumerate(self.neighbors1sparse(*center)):\n label(i + 1, *neigh)", "def partition(n):\n\tassert type(n) is int \n\tassert n > 0\n\n\treturn _partition(n, n)", "def test_partitionModel4(self):\n\n inputShape = (15, 15, 3)\n inputLayer = NxInputLayer(inputShape)\n outputLayer = NxConv2D(3, 3, strides=(2, 2), padding='same',\n validatePartitions=True)(inputLayer.input)\n\n model = NxModel(inputLayer.input, outputLayer,\n numCandidatesToCompute=10)\n\n model.partition()\n\n model.clearTemp()", "def partition(total, maxelements, around=None, maxdz=None):\n if around is not None:\n return IntegerPartitions._do_partition(\n total, maxelements, tuple(around), maxdz\n )\n else:\n return IntegerPartitions._do_partition(total, maxelements)", "def test_partition2(self):\n\n inputShape = (40, 32, 2)\n inputLayer = NxInputLayer(inputShape)\n flattenLayer = NxFlatten()(inputLayer.input)\n hiddenLayer = NxDense(100, validatePartitions=True)\n outputLayer = NxDense(10, validatePartitions=True)\n model = NxModel(inputLayer.input,\n outputLayer(hiddenLayer(flattenLayer)))\n\n model.partition()\n\n model.clearTemp()", "def test_partition1(self):\n\n inputShape = (5, 4)\n inputLayer = NxInputLayer(inputShape)\n outputLayer = NxConv1D(2, 3, validatePartitions=True)\n model = NxModel(inputLayer.input, outputLayer(inputLayer.input))\n\n model.partition()\n\n model.clearTemp()", "def 
test_partitionPooling2(self):\n\n inputShape = (30, 40, 4)\n inputLayer = NxInputLayer(inputShape)\n hiddenLayer = NxAveragePooling2D(2, padding='same',\n validatePartitions=True)\n outputLayer = NxAveragePooling2D(3, strides=(1, 1),\n validatePartitions=True)\n model = NxModel(inputLayer.input,\n outputLayer(hiddenLayer(inputLayer.input)))\n\n model.partition()\n\n model.clearTemp()", "def conv2d_partition_function(ref_call, new_args, ctx):\n data_cond, data = partition_expr_check(new_args[0])\n kernel_cond, kernel = partition_expr_check(new_args[1])\n\n assert not kernel_cond\n if data_cond:\n data = new_args[0].realize()\n ret = _forward_op(ref_call, [data, kernel])\n return QPartitionExpr(ret)", "def test_partitionPooling(self):\n\n inputShape = (5, 5, 4)\n inputLayer = NxInputLayer(inputShape)\n outputLayer = NxAveragePooling2D(2, validatePartitions=True)\n model = NxModel(inputLayer.input, outputLayer(inputLayer.input))\n\n model.partition()\n\n model.clearTemp()", "def a_partition(par):\n if par.m_q < 0:\n raise NotImplementedError(\"Q<0 not implemented.\")\n \n _parts = [_partition_gs, _partition_mq, _partition_left]\n for c_pairs in _parts:\n pairs = c_pairs(par)\n if is_valid(pairs, par) and not is_singular(pairs, par): \n return pairs\n\n # never get here\n raise RuntimeError(\"Failed to generate a_partition for %s\" % par)", "def partition(Py_ssize_t_n, seq, pad='__no__pad__'): # real signature unknown; restored from __doc__\n pass", "def __draw_grid(self):\n MARGIN = self.MARGIN\n for i in range(4):\n x0 = (4-i) * MARGIN + MARGIN\n y0 = i * MARGIN\n x1 = 160-(4-i)*MARGIN + MARGIN\n y1 = i * MARGIN\n self.canvas.create_line(x0, y0, x1, y1)\n\n for j in range(3-i, 5+i+1):\n x0 = j * MARGIN + MARGIN\n y0 = (i+1) * MARGIN\n x1 = j * MARGIN + MARGIN\n y1 = 80\n self.canvas.create_line(x0, y0, x1, y1)\n\n for i in range(4, 4+9):\n x0 = 0 + MARGIN\n y0 = i * MARGIN\n x1 = 160 + MARGIN\n y1 = i * MARGIN\n self.canvas.create_line(x0, y0, x1, y1)\n\n for i in range(9):\n x0 = i * MARGIN + MARGIN\n y0 = 80\n x1 = i * MARGIN + MARGIN\n y1 = 80 + MARGIN*8\n self.canvas.create_line(x0, y0, x1, y1)\n\n for i in range(3):\n x0 = (i+1) * MARGIN + MARGIN\n y0 = (i+13)* MARGIN\n x1 = 160-(i+1)*MARGIN + MARGIN\n y1 = (i+13) * MARGIN\n self.canvas.create_line(x0, y0, x1, y1)\n\n for j in range(7-i, i, -1):\n x0 = j * MARGIN + MARGIN\n y0 = 80 + MARGIN*8\n x1 = j * MARGIN + MARGIN\n y1 = (i+13) * MARGIN\n self.canvas.create_line(x0, y0, x1, y1)", "def partition_d1(start_value, end_value, partition_count):\n start_x = start_value\n dx = (end_value - start_value) / partition_count\n\n partitions = []\n for partition_i in range(1, partition_count + 1):\n if partition_i == partition_count:\n partitions.append((start_x, end_value))\n else:\n partitions.append((start_x, start_x + dx))\n\n start_x += dx\n return partitions", "def partitionMemories(ir):\n for mems in partitions:\n partition_name=mems[0]\n dimention_to_partition=int(mems[1])\n settings=mems[2:][0]\n dim, dataType = getArrayInfo(partition_name)\n\n # Settings for fully partitioning\n if settings[0]=='*':\n print(\"\\tFully partitioning array \"+partition_name)\n partition_values=[[x] for x in xrange(dim[dimention_to_partition])]\n\n # Settings for block and cyclic partitions\n elif settings[0]==\"b\" or settings[0]==\"c\":\n blocks=int(settings[1])\n if blocks<1:\n blocks=1\n elif blocks>dim[dimention_to_partition]:\n blocks=dim[dimention_to_partition]\n\n # Splitting the array in blocks\n if settings[0]==\"b\":\n print(\"\\tPartitioning 
array \"+partition_name+\" in \"+str(blocks)+\" blocks of memory\")\n k, m = divmod(dim[dimention_to_partition], blocks)\n partition_values = [range(i*k+min(i, m),(i+1)*k+min(i+1,m)) for i in xrange(blocks)]\n\n # Splitting the array cyclically\n elif settings[0]==\"c\":\n print(\"\\tPartitioning array \"+partition_name+\" in \"+str(blocks)+\" memories cyclically\")\n partition_values = [range(dim[dimention_to_partition])[i::blocks] for i in xrange(blocks)]\n\n partition_dim=[dim[:dimention_to_partition]+[len(x)]+dim[dimention_to_partition+1:] for x in partition_values]\n createPartitions(partition_name,partition_values,partition_dim,dataType)\n updateGEP(partition_name,partition_values,partition_dim,dataType,dimention_to_partition)", "def window_unpartition(windows: Tensor, window_size: int, pad_hw: tuple[int, int], hw: tuple[int, int]) -> Tensor:\n Hp, Wp = pad_hw\n H, W = hw\n B = windows.shape[0] // (Hp * Wp // window_size // window_size)\n x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)\n\n if Hp > H or Wp > W:\n x = x[:, :H, :W, :].contiguous()\n return x" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deleter for the created pad structure
def _cnv_del_pads_ ( self ) :
    while self.pads :
        key , pad = self.pads.popitem ()
        if pad :
            logger.verbose ( 'delete pad %s' % pad.GetName() )
            del pad
[ "def __del__(self):\n self.usb_port.close()", "def __del__( self ):\n\t\tLiFlame.degrid()", "def _destroy(self):\n self._dict._destroy()", "def __del__(self):\n self.cleanup()", "def deinit(self) -> None:\n self._is_deinited()\n self._pad.deinit()\n self._cursor.deinit()\n self._cursor = None\n self._event = None", "def del_kb(self):\n self.kb = None", "def delete(self):\n self.tap.delete()\n self.port.close()", "def __del__(self):\n self.clear()", "def delete(self):\n self.device_buffer.delete() # pytype: disable=attribute-error\n self.device_buffer = deleted_buffer\n self._npy_value = None", "def _default_deleter(self, obj):\n try:\n delattr(obj, self._name)\n except AttributeError:\n pass\n except TypeError:\n raise", "def deinit(self):\n self.ds2482.device_reset()", "def destroy(self, **kwargs):\n log_method_call(self, device=self.device,\n type=self.type, status=self.status)\n self.teardown()\n DeviceFormat.destroy(self, **kwargs)", "def onDeinit(self):", "def _DelObject(self, _id):\n if self._vbo is None:\n return\n index = self._indices[_id]\n num_values = type(self).__num_values\n del self._indices[_id]\n if not (index == self._max_index):\n self._empty_indices.append(index)\n else:\n self._max_index -= 1\n self._vbo[\n index * num_values:(index + 1) * num_values] = nzeros(num_values, \"f\")", "def __del__(self):\n self.__tag_registration.stop_tag_reading()", "def destroy(self):\n for item in self.__dict__:\n self.removeDevice(item)", "def delete(obj):", "def _del(self, _del):\n\n self.__del = _del", "def __del__(self):\n clear_c_mpz_t(self)", "def deleteDeagLayer(self):\n\n if self.deag_layer:\n # deregister a box select callback for deag zones\n self.pyslip.setBoxSelectCallback(self.deag_layer, None)\n\n self.pyslip.deleteLayer(self.deag_layer)\n self.deag_layer = None\n\n if self.deag_label_layer:\n self.pyslip.deleteLayer(self.deag_label_layer)\n self.deag_label_layer = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split canvas in y-direction into non-equal pads, proportionally to heights
>>> canvas = ...
>>> pads = canvas.vsplit ( [1,2,1] )
def canvas_vsplit ( canvas ,
                    heights ,
                    left_margin   = margin_left   ,
                    right_margin  = margin_right  ,
                    bottom_margin = margin_bottom ,
                    top_margin    = margin_top    ,
                    vSpacing      = 0.0 ) :

    ## get the window size
    wsx = abs ( canvas.GetWindowWidth  () )
    wsy = abs ( canvas.GetWindowHeight () )

    #
    ## if parameters given in the absolute units, convert them into relative coordinates
    #
    if not 0 < left_margin   < 1 : left_margin   = abs ( left_margin   ) / wsx
    if not 0 < right_margin  < 1 : right_margin  = abs ( right_margin  ) / wsx
    if not 0 < bottom_margin < 1 : bottom_margin = abs ( bottom_margin ) / wsy
    if not 0 < top_margin    < 1 : top_margin    = abs ( top_margin    ) / wsy
    if not 0 < vSpacing      < 1 : vSpacing      = abs ( vSpacing      ) / wsy

    hSpacing = 0

    hposr, hposl, hmarr, hmarl, hfactor = 0.,0.,0.,0.,0.
    vposr, vposd, vmard, vmaru, vfactor = 0.,0.,0.,0.,0.

    nx = 1
    ny = len ( heights )

    vSize = ( 1.0 - bottom_margin - top_margin   - ( ny - 1 ) * vSpacing )
    hSize = ( 1.0 - left_margin   - right_margin - ( nx - 1 ) * hSpacing )

    vStep = ( 1.0 - bottom_margin - top_margin   - ( ny - 1 ) * vSpacing ) / ny
    if 0 > vStep : raise AttributeError ( 'partition: v-step=%f' % vStep )

    hStep = ( 1.0 - left_margin   - right_margin - ( nx - 1 ) * hSpacing ) / nx
    if 0 > hStep : raise AttributeError ( 'partition: h-step=%f' % hStep )

    sumy = sum ( heights ) / vSize
    hy   = [ h * vSize / sum ( heights ) for h in reversed ( heights ) ]

    hposl   = 0
    hposr   = left_margin + hStep
    hfactor = hposr - hposl
    hmarl   = left_margin / hfactor
    hmarr   = 0.0

    del canvas.pads
    pads = {}

    ix = 0
    for iy , height in enumerate ( hy ) :

        if 0 == iy :
            vposd   = 0.0
            vposu   = bottom_margin + height
            vfactor = vposu - vposd
            vmard   = bottom_margin / vfactor
            vmaru   = 0.0
        elif ny == iy + 1 :
            vposd   = vposu + vSpacing
            vposu   = vposd + height + top_margin
            vfactor = vposu - vposd
            vmard   = 0.0
            vmaru   = top_margin / vfactor
        else :
            vposd   = vposu + vSpacing
            vposu   = vposd + height
            vfactor = vposu - vposd
            vmard   = 0.0
            vmaru   = 0.0

        canvas.cd ( 0 )
        pname = 'glPad_%s_%d_%d' % ( canvas.GetName() , ix , iy )
        groot = ROOT.ROOT.GetROOT()
        pad   = groot.FindObject ( pname )
        if pad : del pad
        pad = ROOT.TPad ( pname , '' , hposl , vposd , hposr , vposu )

        logger.verbose ( ' Create pad[%d,%d]=(%f,%f,%f,%f),[%f,%f,%f,%f] %s ' % (
            ix    , iy    ,
            hposl , vposd , hposr , vposu ,
            hmarl , hmarr , vmard , vmaru , pad.GetName() ) )

        pad.SetLeftMargin   ( hmarl )
        pad.SetRightMargin  ( hmarr )
        pad.SetBottomMargin ( vmard )
        pad.SetTopMargin    ( vmaru )

        pad.SetFrameBorderMode ( 0 )
        pad.SetBorderMode      ( 0 )
        pad.SetBorderSize      ( 0 )

        ROOT.SetOwnership ( pad , True )

        pads [ ( 0 , iy ) ] = pad

    ## fill pads structure
    for iy in reversed ( range ( ny ) ) :
        key = 0 , iy
        canvas.pads [ key ] = pads [ key ]

    return canvas.pads
[ "def canvas_partition ( canvas , \n nx ,\n ny ,\n left_margin = margin_left , \n right_margin = margin_right , \n bottom_margin = margin_bottom , \n top_margin = margin_right ,\n hSpacing = 0.0 ,\n vSpacing = 0.0 ) :\n\n if not isinstance ( nx , int ) or nx<= 0 :\n raise AttributeError('partition: invalid nx=%s' % nx )\n if not isinstance ( ny , int ) or ny<= 0 :\n raise AttributeError('partition: invalid ny=%s' % ny )\n\n ## get the window size\n wsx = abs ( canvas.GetWindowWidth () ) \n wsy = abs ( canvas.GetWindowHeight () ) \n\n #\n ## if parameters given in the absolute units, convert them into relative coordinates\n #\n \n if not 0 < left_margin < 1 : left_margin = abs ( left_margin ) / wsx\n if not 0 < right_margin < 1 : right_margin = abs ( right_margin ) / wsx\n if not 0 < bottom_margin < 1 : bottom_margin = abs ( bottom_margin ) / wsy\n if not 0 < top_margin < 1 : top_margin = abs ( top_margin ) / wsy \n if not 0 < vSpacing < 1 : vSpacing = abs ( vSpacing ) / wsy\n if not 0 < hSpacing < 1 : hSpacing = abs ( hSpacing ) / wsx\n\n #\n ## check consistency \n # \n if 1 <= left_margin :\n raise AttributeError('partition: invalid left margin=%f' % left_margin )\n if 1 <= right_margin :\n raise AttributeError('partition: invalid right margin=%f' % right_margin )\n if 1 <= bottom_margin :\n raise AttributeError('partition: invalid bottom margin=%f' % bottom_margin )\n if 1 <= top_margin :\n raise AttributeError('partition: invalid top margin=%f' % top_margin )\n\n ## delete the pad dictionary \n del canvas.pads \n \n ## make new empty dictionary \n pads = {} \n \n vStep = ( 1.0 - bottom_margin - top_margin - (ny-1) * vSpacing ) / ny\n if 0 > vStep : raise AttributeError('partition: v-step=%f' % vStep )\n \n hStep = ( 1.0 - left_margin - right_margin - (nx-1) * hSpacing ) / nx \n if 0 > hStep : raise AttributeError('partition: h-step=%f' % hStep )\n\n hposr, hposl, hmarr, hmarl, hfactor = 0.,0.,0.,0.,0.\n vposr, vposd, vmard, vmaru, vfactor = 0.,0.,0.,0.,0.\n \n for ix in range ( nx ) :\n \n if 0 == ix : \n hposl = 0\n hposr = left_margin + hStep\n hfactor = hposr - hposl\n hmarl = left_margin / hfactor\n hmarr = 0.0 \n elif nx == ix + 1 :\n hposl = hposr + hSpacing \n hposr = hposl + hStep + right_margin\n hfactor = hposr - hposl \n hmarl = 0.0\n hmarr = right_margin / hfactor \n else : \n hposl = hposr + hSpacing\n hposr = hposl + hStep\n hfactor = hposr - hposl\n hmarl = 0.0\n hmarr = 0.0\n\n for iy in range(ny) :\n if 0 == iy : \n vposd = 0.0\n vposu = bottom_margin + vStep\n vfactor = vposu - vposd\n vmard = bottom_margin / vfactor\n vmaru = 0.0 \n elif ny == iy + 1 : \n vposd = vposu + vSpacing\n vposu = vposd + vStep + top_margin\n vfactor = vposu - vposd;\n vmard = 0.0\n vmaru = top_margin / vfactor \n else :\n vposd = vposu + vSpacing\n vposu = vposd + vStep\n vfactor = vposu - vposd\n vmard = 0.0\n vmaru = 0.0\n\n canvas.cd(0)\n pname = 'glPad_%s_%d_%d' % ( canvas.GetName() , ix , iy )\n groot = ROOT.ROOT.GetROOT()\n pad = groot.FindObject ( pname )\n if pad : del pad\n pad = ROOT.TPad ( pname , '' , hposl , vposd , hposr , vposu )\n\n logger.verbose ( ' Create pad[%d,%d]=(%f,%f,%f,%f),[%f,%f,%f,%f] %s ' % (\n ix , iy ,\n hposl , vposd , hposr , vposu , \n hmarl , hmarr , vmard , vmaru , pad.GetName() ) ) \n \n pad.SetLeftMargin ( hmarl )\n pad.SetRightMargin ( hmarr )\n pad.SetBottomMargin ( vmard )\n pad.SetTopMargin ( vmaru )\n \n pad.SetFrameBorderMode ( 0 )\n pad.SetBorderMode ( 0 )\n pad.SetBorderSize ( 0 )\n\n ROOT.SetOwnership ( pad , True )\n \n if not hasattr ( 
canvas , 'pads' ) : canvas.pads = {}\n pads[ ( ix , iy ) ] = pad\n\n ## fill pads structure \n for iy in reversed ( range ( ny ) ) : \n for ix in range ( nx ) :\n key = ix , iy \n canvas.pads [ key ] = pads[ key ]\n \n return canvas.pads", "def split(head, x, y, dx, dy):\n covered_area = sum(head)\n if dx >= dy:\n width = covered_area / dy\n return (x, y, width, dy), (x + width, y, dx - width, dy)\n else:\n height = covered_area / dx\n return (x, y, dx, height), (x, y + height, dx, dy - height)", "def split(self):\n # split based on flow direction\n top = [p for p in self.panels if p.gamma<=0]\n bot = [p for p in self.panels if p.gamma>=0]\n return PanelArray(top),PanelArray(bot[::-1])", "def vertical_divider():\n divider = QtGui.QFrame()\n divider.setFrameShape(QtGui.QFrame.VLine)\n divider.setFrameShadow(QtGui.QFrame.Sunken)\n return divider", "def split_vi(x, y=None, ignore_x=[0], ignore_y=[0]):\n _, _, _ , hxgy, hygx, _, _ = vi_tables(x, y, ignore_x, ignore_y)\n # false merges, false splits\n return numpy.array([hygx.sum(), hxgy.sum()])", "def VerticalDivider():\r\n divider = QFrame()\r\n divider.setFrameShape(QFrame.VLine)\r\n divider.setFrameShadow(QFrame.Raised)\r\n return divider", "def createVerticalStripesList(self, count, maxWidth):\n width = self.matrix.shape[0]\n stripes = []\n\n for i in range(count):\n step = int(width / count)\n left = randint(i * step, (i + 1) * step - maxWidth)\n stripes.append(VerticalStripe(left, left + randint(1, maxWidth)))\n\n return stripes", "def cube_points(c,wid):\r\n p = []\r\n # bottom\r\n p.append([c[0]-wid,c[1]-wid,c[2]-wid])\r\n p.append([c[0]-wid,c[1]+wid,c[2]-wid])\r\n p.append([c[0]+wid,c[1]+wid,c[2]-wid])\r\n p.append([c[0]+wid,c[1]-wid,c[2]-wid])\r\n p.append([c[0]-wid,c[1]-wid,c[2]-wid]) #same as first to close plot\r\n \r\n # top\r\n p.append([c[0]-wid,c[1]-wid,c[2]+wid])\r\n p.append([c[0]-wid,c[1]+wid,c[2]+wid])\r\n p.append([c[0]+wid,c[1]+wid,c[2]+wid])\r\n p.append([c[0]+wid,c[1]-wid,c[2]+wid])\r\n p.append([c[0]-wid,c[1]-wid,c[2]+wid]) #same as first to close plot\r\n \r\n # vertical sides\r\n p.append([c[0]-wid,c[1]-wid,c[2]+wid])\r\n p.append([c[0]-wid,c[1]+wid,c[2]+wid])\r\n p.append([c[0]-wid,c[1]+wid,c[2]-wid])\r\n p.append([c[0]+wid,c[1]+wid,c[2]-wid])\r\n p.append([c[0]+wid,c[1]+wid,c[2]+wid])\r\n p.append([c[0]+wid,c[1]-wid,c[2]+wid])\r\n p.append([c[0]+wid,c[1]-wid,c[2]-wid])\r\n \r\n return array(p).T", "def MsesSplit(x, y):\n #Get index of leading edge on upper surface\n iLE = FindLE(x)\n #Split upper and lower surface, reverse order upper surface\n up = y[iLE::-1]\n lo = y[iLE+1:]\n return up, lo", "def panelize(x,y):\n if len(x)<2: # check input lengths\n raise ValueError(\"point arrays must have len>1\")\n if len(x)!=len(y): # check input lengths\n raise ValueError(\"x and y must be same length\")\n\n panels = [Panel(x[i], y[i], x[i+1], y[i+1]) for i in range(len(x)-1)]\n return PanelArray(panels)", "def draw_vertical(img, x, y_min, y_max, thickness=3, color=[255, 0, 0], copy_=True):\n new = img\n if copy_:\n new = copy.copy(img)\n\n if type(x) == list:\n for k in x:\n new[y_min:y_max + 1,\n k] = np.array([color] * (y_max - y_min + 1))\n else:\n for k in range(x, min(x + thickness, Window_size - 1)):\n new[y_min:y_max + 1,\n k] = np.array([color] * (y_max - y_min + 1))\n return new", "def split_frame_vertically(frame: Image, slices: int) -> List:\n split_frames = []\n width, height = frame.size\n left = 0\n slice_size = width / slices\n count = 1\n for s in range(slices):\n if count == slices:\n right = width\n 
else:\n right = int(count * slice_size)\n\n crop_box = (left, 0, right, height)\n split_frames.append(frame.crop(crop_box))\n left += slice_size\n count += 1\n return split_frames", "def splitShower( binnedPoints ):\n\n emptyBin = [ (1 if len(i) > 0 else 0) for i in binnedPoints ]\n\n nBins = len(emptyBin)\n\n sectionLengths = []\n secLen = 0\n for i in emptyBin:\n if i == 1:\n if secLen < 0:\n sectionLengths.append(secLen)\n secLen = 1\n else:\n secLen += 1\n else:\n if secLen > 0:\n sectionLengths.append(secLen)\n secLen = -1\n else:\n secLen -= 1\n sectionLengths.append(secLen)\n\n return sectionLengths", "def cube_points(c, wid):\r\n p = []\r\n # bottom\r\n p.append([c[0]-wid, c[1]-wid, c[2]-wid])\r\n p.append([c[0]-wid, c[1]+wid, c[2]-wid])\r\n p.append([c[0]+wid, c[1]+wid, c[2]-wid])\r\n p.append([c[0]+wid, c[1]-wid, c[2]-wid])\r\n p.append([c[0]-wid, c[1]-wid, c[2]-wid]) #same as first to close plot\r\n \r\n # top\r\n p.append([c[0]-wid, c[1]-wid, c[2]+wid])\r\n p.append([c[0]-wid, c[1]+wid, c[2]+wid])\r\n p.append([c[0]+wid, c[1]+wid, c[2]+wid])\r\n p.append([c[0]+wid, c[1]-wid, c[2]+wid])\r\n p.append([c[0]-wid, c[1]-wid, c[2]+wid]) #same as first to close plot\r\n \r\n # vertical sides\r\n p.append([c[0]-wid, c[1]-wid, c[2]+wid])\r\n p.append([c[0]-wid, c[1]+wid, c[2]+wid])\r\n p.append([c[0]-wid, c[1]+wid, c[2]-wid])\r\n p.append([c[0]+wid, c[1]+wid, c[2]-wid])\r\n p.append([c[0]+wid, c[1]+wid, c[2]+wid])\r\n p.append([c[0]+wid, c[1]-wid, c[2]+wid])\r\n p.append([c[0]+wid, c[1]-wid, c[2]-wid])\r\n \r\n return array(p).T", "def splitHeightmap(heightmap):\n img = heightmap.img\n size = n.array(heightmap.size)\n fac = n.array([0.8,0.8])\n sub = n.floor(size[::-1]*fac).astype(int)\n sub1 = img[:sub[0], :sub[1]]\n sub2 = img[:sub[0], -sub[1]:]\n sub3 = img[-sub[0]:,-sub[1]:]\n sub4 = img[-sub[0]:,:sub[1]]\n subs = [sub1, sub2, sub3, sub4]\n hs = [Heightmap(s, sub[::-1], heightmap.step) for s in subs]\n return hs", "def split_window(self, height):\n\n raise NotImplementedError()", "def calculate_sides_from_width_height(self):\n self.side = self.width/2", "def calculate_vertical_components(self, points, prev_horizontal, prev_vertical, px_size):\n\n if prev_horizontal is None:\n # It's a new species, so it starts at the bottom\n points[1] = 0\n points[3] = 0\n else:\n points[1] = prev_horizontal['quad'].points[7]\n points[3] = prev_horizontal['quad'].points[5]\n\n if prev_vertical is None:\n # It's the first on generation, so it starts at the bottom\n points[5] = 0\n points[7] = px_size\n else:\n points[5] = prev_vertical['quad'].points[7]\n points[7] = prev_vertical['quad'].points[7] + px_size", "def DrawVertical(self, canvas):\n self.icon.Draw(self.color, canvas)\n if self.connectedPoint:\n self.connectedIcon.Draw(self.color, canvas)\n for segment in self.verticalSegments:\n segment.Draw(canvas, self.color)", "def striped_line(klass, lane, surface, stripes_count, longitudinal, side):\n starts = longitudinal + np.arange(stripes_count) * klass._stripe_spacing\n ends = longitudinal + np.arange(stripes_count) * klass._stripe_spacing + klass._stripe_length\n lats = [(side - 0.5) * lane.width_at(s) for s in starts]\n klass.draw_stripes(lane, surface, starts, ends, lats)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform partition of Canvas into 1x2 non-equal pads with no inter-margins
>>> canvas = ...
>>> pads = canvas.pull_partition ( 4.0 ) ## top pad is 4 times larger
def canvas_pull ( canvas ,
                  ratio         = 4.0           ,
                  left_margin   = margin_left   ,
                  right_margin  = margin_right  ,
                  bottom_margin = margin_bottom ,
                  top_margin    = margin_top    ,
                  vSpacing      = 0.0 ) :
    return canvas_vsplit ( canvas ,
                           heights       = ( 1 , ratio ) ,
                           left_margin   = left_margin   ,
                           right_margin  = right_margin  ,
                           bottom_margin = bottom_margin ,
                           top_margin    = top_margin    ,
                           vSpacing      = vSpacing      )
[ "def canvas_partition ( canvas , \n nx ,\n ny ,\n left_margin = margin_left , \n right_margin = margin_right , \n bottom_margin = margin_bottom , \n top_margin = margin_right ,\n hSpacing = 0.0 ,\n vSpacing = 0.0 ) :\n\n if not isinstance ( nx , int ) or nx<= 0 :\n raise AttributeError('partition: invalid nx=%s' % nx )\n if not isinstance ( ny , int ) or ny<= 0 :\n raise AttributeError('partition: invalid ny=%s' % ny )\n\n ## get the window size\n wsx = abs ( canvas.GetWindowWidth () ) \n wsy = abs ( canvas.GetWindowHeight () ) \n\n #\n ## if parameters given in the absolute units, convert them into relative coordinates\n #\n \n if not 0 < left_margin < 1 : left_margin = abs ( left_margin ) / wsx\n if not 0 < right_margin < 1 : right_margin = abs ( right_margin ) / wsx\n if not 0 < bottom_margin < 1 : bottom_margin = abs ( bottom_margin ) / wsy\n if not 0 < top_margin < 1 : top_margin = abs ( top_margin ) / wsy \n if not 0 < vSpacing < 1 : vSpacing = abs ( vSpacing ) / wsy\n if not 0 < hSpacing < 1 : hSpacing = abs ( hSpacing ) / wsx\n\n #\n ## check consistency \n # \n if 1 <= left_margin :\n raise AttributeError('partition: invalid left margin=%f' % left_margin )\n if 1 <= right_margin :\n raise AttributeError('partition: invalid right margin=%f' % right_margin )\n if 1 <= bottom_margin :\n raise AttributeError('partition: invalid bottom margin=%f' % bottom_margin )\n if 1 <= top_margin :\n raise AttributeError('partition: invalid top margin=%f' % top_margin )\n\n ## delete the pad dictionary \n del canvas.pads \n \n ## make new empty dictionary \n pads = {} \n \n vStep = ( 1.0 - bottom_margin - top_margin - (ny-1) * vSpacing ) / ny\n if 0 > vStep : raise AttributeError('partition: v-step=%f' % vStep )\n \n hStep = ( 1.0 - left_margin - right_margin - (nx-1) * hSpacing ) / nx \n if 0 > hStep : raise AttributeError('partition: h-step=%f' % hStep )\n\n hposr, hposl, hmarr, hmarl, hfactor = 0.,0.,0.,0.,0.\n vposr, vposd, vmard, vmaru, vfactor = 0.,0.,0.,0.,0.\n \n for ix in range ( nx ) :\n \n if 0 == ix : \n hposl = 0\n hposr = left_margin + hStep\n hfactor = hposr - hposl\n hmarl = left_margin / hfactor\n hmarr = 0.0 \n elif nx == ix + 1 :\n hposl = hposr + hSpacing \n hposr = hposl + hStep + right_margin\n hfactor = hposr - hposl \n hmarl = 0.0\n hmarr = right_margin / hfactor \n else : \n hposl = hposr + hSpacing\n hposr = hposl + hStep\n hfactor = hposr - hposl\n hmarl = 0.0\n hmarr = 0.0\n\n for iy in range(ny) :\n if 0 == iy : \n vposd = 0.0\n vposu = bottom_margin + vStep\n vfactor = vposu - vposd\n vmard = bottom_margin / vfactor\n vmaru = 0.0 \n elif ny == iy + 1 : \n vposd = vposu + vSpacing\n vposu = vposd + vStep + top_margin\n vfactor = vposu - vposd;\n vmard = 0.0\n vmaru = top_margin / vfactor \n else :\n vposd = vposu + vSpacing\n vposu = vposd + vStep\n vfactor = vposu - vposd\n vmard = 0.0\n vmaru = 0.0\n\n canvas.cd(0)\n pname = 'glPad_%s_%d_%d' % ( canvas.GetName() , ix , iy )\n groot = ROOT.ROOT.GetROOT()\n pad = groot.FindObject ( pname )\n if pad : del pad\n pad = ROOT.TPad ( pname , '' , hposl , vposd , hposr , vposu )\n\n logger.verbose ( ' Create pad[%d,%d]=(%f,%f,%f,%f),[%f,%f,%f,%f] %s ' % (\n ix , iy ,\n hposl , vposd , hposr , vposu , \n hmarl , hmarr , vmard , vmaru , pad.GetName() ) ) \n \n pad.SetLeftMargin ( hmarl )\n pad.SetRightMargin ( hmarr )\n pad.SetBottomMargin ( vmard )\n pad.SetTopMargin ( vmaru )\n \n pad.SetFrameBorderMode ( 0 )\n pad.SetBorderMode ( 0 )\n pad.SetBorderSize ( 0 )\n\n ROOT.SetOwnership ( pad , True )\n \n if not hasattr ( 
canvas , 'pads' ) : canvas.pads = {}\n pads[ ( ix , iy ) ] = pad\n\n ## fill pads structure \n for iy in reversed ( range ( ny ) ) : \n for ix in range ( nx ) :\n key = ix , iy \n canvas.pads [ key ] = pads[ key ]\n \n return canvas.pads", "def partition_pixels(*args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def window_unpartition(windows: Tensor, window_size: int, pad_hw: tuple[int, int], hw: tuple[int, int]) -> Tensor:\n Hp, Wp = pad_hw\n H, W = hw\n B = windows.shape[0] // (Hp * Wp // window_size // window_size)\n x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)\n\n if Hp > H or Wp > W:\n x = x[:, :H, :W, :].contiguous()\n return x", "def _do_partition(total, maxelements, around=None, maxdz=None):\n if (around is None) != (maxdz is None):\n raise ValueError(\"Cannot define center or radius alone.\")\n\n if maxelements == 1:\n if around is not None and maxdz < abs(total - around[-maxelements]):\n return []\n else:\n return [[total]]\n res = []\n\n # get range to cover\n if around is None:\n first = 0\n last = total\n limit = None\n else:\n first = max(0, around[-maxelements] - maxdz)\n last = min(total, around[-maxelements] + maxdz)\n for x in range(first, last + 1):\n if around is not None:\n limit = maxdz - abs(x - around[-maxelements])\n for p in IntegerPartitions._do_partition(\n total - x, maxelements - 1, around, limit\n ):\n res.append([x] + p)\n return res", "def pads_adam(trim = [True,True,True], layer = 1):\n threePad = Device('cell')\n \n base = pg.straight(size = (150,250),layer=layer)\n post = pg.straight(size = (25,25),layer=layer)\n \n b1 = threePad.add_ref(base)\n b2 = threePad.add_ref(base)\n b2.movex(170)\n b3 = threePad.add_ref(base)\n b3.movex(-170)\n \n p1 = threePad.add_ref(post)\n p1.move(origin=p1.center,destination=b1.center).movey(225)\n p2 = threePad.add_ref(post)\n p2.move(origin=p2.center,destination=p1.center).movex(90)\n p3 = threePad.add_ref(post)\n p3.move(origin=p3.center,destination=p1.center).movex(-90)\n \n r1 = pr.route_basic(b1.ports[1],p1.ports[2],path_type='straight',width_type='straight',num_path_pts=50, width1 = 25,layer=layer)\n threePad.add_ref(r1)\n r2 = pr.route_basic(b2.ports[1],p2.ports[2],path_type='straight',width_type='straight',num_path_pts=50, width1 = 25,layer=layer)\n threePad.add_ref(r2)\n r3 = pr.route_basic(b3.ports[1],p3.ports[2],path_type='straight',width_type='straight',num_path_pts=50, width1 = 25,layer=layer)\n threePad.add_ref(r3)\n\n OUT = pg.outline(threePad,distance = 5,precision=0.0001,layer = layer)\n\n for i in range(len(trim)):\n if trim[i] == True:\n trimpoly = pg.rectangle(size=(35,5),layer = layer)\n t1 = OUT.add_ref(trimpoly)\n t1.move(origin=t1.center,destination=(-15+90*i,365))\n OUT = pg.boolean(OUT,t1,'A-B',precision=1e-4,layer=layer)\n OUT.add_port(name = 1, midpoint=(-15,360),width=25,orientation=90) \n OUT.add_port(name = 2, midpoint=(75,360),width=25,orientation=90)\n OUT.add_port(name = 3, midpoint=(165,360),width=25,orientation=90)\n return OUT", "def periodic_pad(tensor, size):\r\n\r\n padded = tf.concat([tensor, tf.zeros([4*batch_size,cropsize,size,channels])], axis=2)\r\n padded = tf.concat([tf.zeros([4*batch_size,cropsize,size,channels]), tensor], axis=2)\r\n\r\n padded = tf.concat([tensor, tensor[:, :, 0:size, :]], axis=1)\r\n padded = tf.concat([tensor[:, :, (cropsize-size-1):cropsize, :], tensor], axis=1)\r\n\r\n return padded", "def partition(Py_ssize_t_n, seq, 
pad='__no__pad__'): # real signature unknown; restored from __doc__\n pass", "def partition(total, maxelements, around=None, maxdz=None):\n if around is not None:\n return IntegerPartitions._do_partition(\n total, maxelements, tuple(around), maxdz\n )\n else:\n return IntegerPartitions._do_partition(total, maxelements)", "def split(image_path, out_name, outdir):\r\n img = Image.open(image_path)\r\n width, height = img.size\r\n upper = 0\r\n left = 0\r\n slice_size = width/4\r\n slices = 4\r\n\r\n count = 1\r\n for slice in range(slices):\r\n #if we are at the end, set the lower bound to be the bottom of the image\r\n if count == slices:\r\n right = width\r\n else:\r\n right = int(count * slice_size) \r\n\r\n bbox = (left, upper, right, height)\r\n working_slice = img.crop(bbox)\r\n left += slice_size\r\n #save the slice\r\n working_slice.save(os.path.join(outdir, out_name + \"_\" + str(count)+\".jpg\"))\r\n count +=1", "def draw_stripes(klass, lane, surface, starts, ends, lats):\n starts = np.clip(starts, 0, lane.length)\n ends = np.clip(ends, 0, lane.length)\n for k in range(len(starts)):\n if abs(starts[k] - ends[k]) > 0.5 * klass._stripe_length:\n pygame.draw.line(surface, surface.WHITE,\n (surface.vec2pix(lane.position(starts[k], lats[k]))),\n (surface.vec2pix(lane.position(ends[k], lats[k]))),\n max(surface.pix(klass.STRIPE_WIDTH), 1))", "def padded_slices_for_regions(markers, pad=5):\n for i, sl in enumerate(ndi.find_objects(markers)):\n region = slspad(sl, pad)\n yield region", "def draw_pads ( objects ,\n pads ,\n fontsize = 36 ,\n trim_left = False ,\n trim_right = False ) :\n\n assert isinstance ( fontsize , int ) and 5 < fontsize , 'Invalid fontsize %s [pixels] ' % fontsize\n \n for obj , pad_ in zip ( objects , pads ) : \n \n if isinstance ( pad_ , ROOT.TPad ) : pad = pad_\n else : pad = pads [ pad_ ] \n \n c = pad.GetCanvas()\n if c : c.cd(0)\n \n pad.draw ()\n pad.cd ()\n\n ## redefine the label font and size \n for attr in ( 'GetXaxis' , 'GetYaxis' , 'GetZaxis' ) :\n if not hasattr ( obj , attr ) : continue\n \n axis = getattr ( obj , attr )()\n if not axis : continue\n \n fnp = axis.GetLabelFont ()\n fn , prec = divmod ( fnp , 10 ) \n if 3 != prec :\n ## redefine label font \n fnp = fn * 10 + 3\n axis.SetLabelFont ( fnp )\n\n ## redefine label size \n axis.SetLabelSize ( fontsize )\n\n if ( trim_left or trim_right ) and hasattr ( obj , 'GetXaxis' ) :\n \n axis = obj.GetXaxis()\n xmin = axis.GetXmin()\n xmax = axis.GetXmax()\n delta = xmax - xmin\n \n if trim_left and isinstance ( trim_left , float ) :\n xmin += trim_left * delta\n elif trim_left :\n xmin += 0.001 * delta\n \n if trim_right and isinstance ( trim_right , float ) :\n xmax -= trim_right * delta\n elif trim_right :\n xmax -= 0.001 * delta \n \n axis.SetLimits ( xmin , xmax )\n\n ## draw object on the pad \n obj.draw ()\n \n if c : c.cd(0)", "def pad_basic(base_size=(200,200), port_size =10, taper_length=100, layer=1):\n P = Device('pad')\n \n base = pg.rectangle(size=base_size)\n taper = pg.taper(length=taper_length,width1=base_size[0],width2=port_size)\n taper.rotate(90)\n taper.move(destination=(base_size[0]/2,base_size[1]))\n \n P.add_ref([base,taper])\n P.flatten(single_layer=layer)\n P.add_port(name=1,midpoint=(base_size[0]/2,base_size[1]+taper_length),orientation=90,width=port_size)\n\n return P", "def partition_d1(start_value, end_value, partition_count):\n start_x = start_value\n dx = (end_value - start_value) / partition_count\n\n partitions = []\n for partition_i in range(1, partition_count + 1):\n if 
partition_i == partition_count:\n partitions.append((start_x, end_value))\n else:\n partitions.append((start_x, start_x + dx))\n\n start_x += dx\n return partitions", "def pads_adam_fill(style = 'right',layer = 1):\n pad_cover = Device('pad_cover')\n base = pg.straight(size = (140,240),layer=layer)\n post = pg.straight(size = (20,50),layer=layer)\n \n \n \n if style =='center':\n b1 = pad_cover.add_ref(base)\n \n p1 = pad_cover.add_ref(post)\n p1.move(origin=p1.center,destination=b1.center).movey(225)\n \n r1 = pr.route_basic(b1.ports[1],p1.ports[2],path_type='straight',width_type='straight',num_path_pts=50, width1 = 20,layer=layer)\n pad_cover.add_ref(r1)\n if style == 'right': \n b1 = pad_cover.add_ref(base)\n b1.movex(170)\n p1 = pad_cover.add_ref(post)\n p1.move(origin=p1.center,destination=b1.center).movex(90).movey(225)\n r1 = pr.route_basic(b1.ports[1],p1.ports[2],path_type='straight',width_type='straight',num_path_pts=50, width1 = 20,layer=layer)\n pad_cover.add_ref(r1)\n if style =='left': \n b1 = pad_cover.add_ref(base)\n b1.movex(-170)\n\n p1 = pad_cover.add_ref(post)\n p1.move(origin=p1.center,destination=b1.center).movex(-90).movey(225)\n\n r1 = pr.route_basic(b1.ports[1],p1.ports[2],path_type='straight',width_type='straight',num_path_pts=50, width1 = 20,layer=layer)\n pad_cover.add_ref(r1)\n\n# OUT = pg.outline(pad_cover,distance = 5,precision=0.0001,layer = layer)\n pad_cover.add_port(name=1,port=p1.ports[1])\n return pad_cover", "def striped_line(klass, lane, surface, stripes_count, longitudinal, side):\n starts = longitudinal + np.arange(stripes_count) * klass._stripe_spacing\n ends = longitudinal + np.arange(stripes_count) * klass._stripe_spacing + klass._stripe_length\n lats = [(side - 0.5) * lane.width_at(s) for s in starts]\n klass.draw_stripes(lane, surface, starts, ends, lats)", "def _divide(self, size, board):\n p_size = size // 2\n topleft, topright, bottomleft, bottomright = [], [], [], []\n for i in range(p_size):\n j = i + p_size\n topleft += board[size*i:size*i+p_size]\n topright += board[size*i+p_size:size*(i+1)]\n bottomleft += board[size*j:size*j+p_size]\n bottomright += board[size*j+p_size:size*(j+1)]\n\n return topleft, topright, bottomleft, bottomright", "def zeropad(self, padwidth, padheight):\n assert isinstance(padwidth, int) and isinstance(padheight, int) \n if not self.isloaded():\n self.shape(shape=(self.height()+2*padheight, self.width()+2*padwidth)) # manually set shape to avoid preview \n self._ffmpeg = self._ffmpeg.filter('pad', 'iw+%d' % (2*padwidth), 'ih+%d' % (2*padheight), '%d'%padwidth, '%d'%padheight)\n elif padwidth > 0 or padheight > 0:\n self.array( np.pad(self.array(), ((0,0), (padheight,padheight), (padwidth,padwidth), (0,0)), mode='constant'), copy=False) # this is very expensive, since np.pad() must copy (once in np.pad >=1.17) \n return self", "def window_partition(x: Tensor, window_size: int) -> tuple[Tensor, tuple[int, int]]:\n B, H, W, C = x.shape\n\n pad_h = (window_size - H % window_size) % window_size\n pad_w = (window_size - W % window_size) % window_size\n if pad_h > 0 or pad_w > 0:\n x = pad(x, (0, 0, 0, pad_w, 0, pad_h))\n Hp, Wp = H + pad_h, W + pad_w\n\n x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n return windows, (Hp, Wp)", "def __draw_grid(self):\n MARGIN = self.MARGIN\n for i in range(4):\n x0 = (4-i) * MARGIN + MARGIN\n y0 = i * MARGIN\n x1 = 160-(4-i)*MARGIN + MARGIN\n y1 = i * MARGIN\n 
self.canvas.create_line(x0, y0, x1, y1)\n\n for j in range(3-i, 5+i+1):\n x0 = j * MARGIN + MARGIN\n y0 = (i+1) * MARGIN\n x1 = j * MARGIN + MARGIN\n y1 = 80\n self.canvas.create_line(x0, y0, x1, y1)\n\n for i in range(4, 4+9):\n x0 = 0 + MARGIN\n y0 = i * MARGIN\n x1 = 160 + MARGIN\n y1 = i * MARGIN\n self.canvas.create_line(x0, y0, x1, y1)\n\n for i in range(9):\n x0 = i * MARGIN + MARGIN\n y0 = 80\n x1 = i * MARGIN + MARGIN\n y1 = 80 + MARGIN*8\n self.canvas.create_line(x0, y0, x1, y1)\n\n for i in range(3):\n x0 = (i+1) * MARGIN + MARGIN\n y0 = (i+13)* MARGIN\n x1 = 160-(i+1)*MARGIN + MARGIN\n y1 = (i+13) * MARGIN\n self.canvas.create_line(x0, y0, x1, y1)\n\n for j in range(7-i, i, -1):\n x0 = j * MARGIN + MARGIN\n y0 = 80 + MARGIN*8\n x1 = j * MARGIN + MARGIN\n y1 = (i+13) * MARGIN\n self.canvas.create_line(x0, y0, x1, y1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw a sequence of objects on a sequence of pads; the label font size is adjusted to be uniform (in pixels) >>> pads = ... >>> frames = ... >>> draw_pads ( frames , pads , fontsize = 25 )
def draw_pads ( objects , pads , fontsize = 36 , trim_left = False , trim_right = False ) : assert isinstance ( fontsize , int ) and 5 < fontsize , 'Invalid fontsize %s [pixels] ' % fontsize for obj , pad_ in zip ( objects , pads ) : if isinstance ( pad_ , ROOT.TPad ) : pad = pad_ else : pad = pads [ pad_ ] c = pad.GetCanvas() if c : c.cd(0) pad.draw () pad.cd () ## redefine the label font and size for attr in ( 'GetXaxis' , 'GetYaxis' , 'GetZaxis' ) : if not hasattr ( obj , attr ) : continue axis = getattr ( obj , attr )() if not axis : continue fnp = axis.GetLabelFont () fn , prec = divmod ( fnp , 10 ) if 3 != prec : ## redefine label font fnp = fn * 10 + 3 axis.SetLabelFont ( fnp ) ## redefine label size axis.SetLabelSize ( fontsize ) if ( trim_left or trim_right ) and hasattr ( obj , 'GetXaxis' ) : axis = obj.GetXaxis() xmin = axis.GetXmin() xmax = axis.GetXmax() delta = xmax - xmin if trim_left and isinstance ( trim_left , float ) : xmin += trim_left * delta elif trim_left : xmin += 0.001 * delta if trim_right and isinstance ( trim_right , float ) : xmax -= trim_right * delta elif trim_right : xmax -= 0.001 * delta axis.SetLimits ( xmin , xmax ) ## draw object on the pad obj.draw () if c : c.cd(0)
[ "def SetupPad(): # -> TPad\n yplot = 0.65\n yratio = 0.33\n y3 = 0.99\n y2 = y3-yplot\n y1 = y2-yratio\n x1 = 0.01\n x2 = 0.99\n\n pad = ROOT.TPad(\"pad\", \"plot pad\", x1, y1, x2, y3)\n pad.SetTopMargin(0.05)\n pad.SetBottomMargin(0.12)\n pad.SetLeftMargin(0.14)\n pad.SetRightMargin(0.05)\n\n return pad", "def draw_frame(df, t, dpi=100, fps=20, display_num=False, display_time=False, show_players=True,\n highlight_color=None, highlight_player=None, shadow_player=None, text_color='white', flip=False, **anim_args):\n fig, ax = draw_pitch(dpi=dpi)\n\n dfFrame = get_frame(df, t, fps=fps)\n \n if show_players:\n for pid in dfFrame.index:\n if pid==0:\n #se for bola\n try:\n z = dfFrame.loc[pid]['z']\n except:\n z = 0\n size = 1.2+z\n lw = 0.9\n color='black'\n edge='white'\n zorder = 100\n else:\n #se for jogador\n size = 3\n lw = 2\n edge = dfFrame.loc[pid]['edgecolor']\n\n if pid == highlight_player:\n color = highlight_color\n else:\n color = dfFrame.loc[pid]['bgcolor']\n if dfFrame.loc[pid]['team']=='attack':\n zorder = 21\n else:\n zorder = 20\n\n ax.add_artist(Ellipse((dfFrame.loc[pid]['x'],\n dfFrame.loc[pid]['y']),\n size/X_SIZE*100, size/Y_SIZE*100,\n edgecolor=edge,\n linewidth=lw,\n facecolor=color,\n alpha=0.8,\n zorder=zorder))\n\n try:\n s = str(int(dfFrame.loc[pid]['player_num']))\n except ValueError:\n s = ''\n text = plt.text(dfFrame.loc[pid]['x'],dfFrame.loc[pid]['y'],s,\n horizontalalignment='center', verticalalignment='center',\n fontsize=8, color=text_color, zorder=22, alpha=0.8)\n\n text.set_path_effects([path_effects.Stroke(linewidth=1, foreground=text_color, alpha=0.8),\n path_effects.Normal()])\n \n return fig, ax, dfFrame", "def pads_adam_fill(style = 'right',layer = 1):\n pad_cover = Device('pad_cover')\n base = pg.straight(size = (140,240),layer=layer)\n post = pg.straight(size = (20,50),layer=layer)\n \n \n \n if style =='center':\n b1 = pad_cover.add_ref(base)\n \n p1 = pad_cover.add_ref(post)\n p1.move(origin=p1.center,destination=b1.center).movey(225)\n \n r1 = pr.route_basic(b1.ports[1],p1.ports[2],path_type='straight',width_type='straight',num_path_pts=50, width1 = 20,layer=layer)\n pad_cover.add_ref(r1)\n if style == 'right': \n b1 = pad_cover.add_ref(base)\n b1.movex(170)\n p1 = pad_cover.add_ref(post)\n p1.move(origin=p1.center,destination=b1.center).movex(90).movey(225)\n r1 = pr.route_basic(b1.ports[1],p1.ports[2],path_type='straight',width_type='straight',num_path_pts=50, width1 = 20,layer=layer)\n pad_cover.add_ref(r1)\n if style =='left': \n b1 = pad_cover.add_ref(base)\n b1.movex(-170)\n\n p1 = pad_cover.add_ref(post)\n p1.move(origin=p1.center,destination=b1.center).movex(-90).movey(225)\n\n r1 = pr.route_basic(b1.ports[1],p1.ports[2],path_type='straight',width_type='straight',num_path_pts=50, width1 = 20,layer=layer)\n pad_cover.add_ref(r1)\n\n# OUT = pg.outline(pad_cover,distance = 5,precision=0.0001,layer = layer)\n pad_cover.add_port(name=1,port=p1.ports[1])\n return pad_cover", "def __init__(self, parent=None, id=-1, title='Unconnected Pypad Client', \r\n position=(50, 50), size=(700, 600), \\\r\n titled='Pypad Drawing', positiond=(750, 50), sized=(500, 600)):\r\n self.t = PypadGuiText(parent, id, title, position, size) \r\n self.d = PypadGuiDrawing(parent, id, titled, positiond, sized)\r\n self.t.Show()\r\n self.d.Show()", "def draw_frames():\n win.fill(WHITE)\n pygame.draw.rect(win, BLACK, (RECTS_POS[0][0], RECTS_POS[0][1],\n RECT_SIDE, RECT_SIDE), 1)\n pygame.draw.rect(win, BLACK, (RECTS_POS[1][0], RECTS_POS[1][1],\n RECT_SIDE, RECT_SIDE), 
1)\n pygame.draw.rect(win, BLACK, (RECTS_POS[2][0], RECTS_POS[2][1],\n RECT_SIDE, RECT_SIDE), 1)", "def pad(self, width=100, height=100):\n self.width = width\n self.height = height\n\n self.seq = (\n pad(img, width=self.width, height=self.height) for img in self.seq\n )\n\n return self", "def drawDucks(duckSize):\n pass #TODO drawduck ", "def pad_basic(base_size=(200,200), port_size =10, taper_length=100, layer=1):\n P = Device('pad')\n \n base = pg.rectangle(size=base_size)\n taper = pg.taper(length=taper_length,width1=base_size[0],width2=port_size)\n taper.rotate(90)\n taper.move(destination=(base_size[0]/2,base_size[1]))\n \n P.add_ref([base,taper])\n P.flatten(single_layer=layer)\n P.add_port(name=1,midpoint=(base_size[0]/2,base_size[1]+taper_length),orientation=90,width=port_size)\n\n return P", "def draw_labels(self, screen):\n font = pygame.font.SysFont('Arial', self.font_size)\n\n for i, label in enumerate(self.source_labels):\n if self.source_state == i:\n bgcol = (0, 0, 255)\n else:\n bgcol = (0, 0, 0)\n text_surface = font.render(label, True, (255, 255, 255, 255), bgcol)\n textrect = text_surface.get_rect()\n textrect.centerx = self.source_button_rects[i].x + self.source_button_width/2\n textrect.centery = self.source_button_rects[i].y + self.source_button_height/2\n\n screen.blit(text_surface, textrect)\n\n for i, label in enumerate(self.sync_labels):\n if self.sync_state == i:\n bgcol = (0, 255, 0)\n else:\n bgcol = (0, 0, 0)\n text_surface = font.render(label, True, (255, 255, 255, 255), bgcol)\n textrect = text_surface.get_rect()\n textrect.centerx = self.sync_button_rects[i].x + self.sync_button_width/2\n textrect.centery = self.sync_button_rects[i].y + self.sync_button_height/2\n\n screen.blit(text_surface, textrect)", "def zeropad(self, padwidth, padheight):\n \n assert isinstance(padwidth, int) and isinstance(padheight, int)\n super().zeropad(padwidth, padheight) \n self._tracks = {k:t.offset(dx=padwidth, dy=padheight) for (k,t) in self.tracks().items()}\n return self", "def plot_collection(self):\n frame = self.cards[0]\n pad = np.zeros((frame.shape[0], 20, frame.shape[2]), dtype=np.uint8)\n for card in self.cards[1:]:\n frame = np.append(frame, pad, axis=1)\n frame = np.append(frame, card, axis=1)\n\n im = Image.fromarray(frame)\n im.save(f\"{self.output_dir}/FrameCollection.png\")", "def pad(batch, embeddings_dict, pad_token = 'EOS'): # TODO: Don't rely on embeddings dict for EOS tokens\n\n lengths = [len(x) for x in batch]\n max_length = max(lengths)\n padded_embeddings = np.stack([pad_sequence(seq, max_length, embeddings_dict, pad_token) for seq in batch])\n batch = padded_embeddings\n\n return batch", "def pad(self,\n length: int,\n pad_id: Optional[int] = 0,\n pad_type_id: Optional[int] = 0,\n pad_token: Optional[str] = \"[PAD]\",\n direction: Optional[str] = \"right\"):\n pass", "def batch_with_dynamic_pad(self,\n images_captions_masks,\n batch_size,\n queue_capacity,\n add_summaries=False):\n enqueue_list = []\n for image, caption, mask, cls_lbl in images_captions_masks:\n enqueue_list.append([image, caption, mask, cls_lbl])\n\n image_batch, caption_batch, mask_batch, cls_lbl_batch = tf.train.batch_join(\n enqueue_list,\n batch_size=batch_size,\n capacity=queue_capacity,\n dynamic_pad=True,\n name=\"batch_and_pad\")\n\n if add_summaries:\n lengths = tf.add(tf.reduce_sum(mask_batch, 1), 1)\n tf.summary.scalar(\"caption_length/batch_min\", tf.reduce_min(lengths))\n tf.summary.scalar(\"caption_length/batch_max\", tf.reduce_max(lengths))\n 
tf.summary.scalar(\"caption_length/batch_mean\", tf.reduce_mean(lengths))\n\n return image_batch, caption_batch, mask_batch,cls_lbl_batch", "def draw(self, players):\r\n\r\n\t\tfor line in self.board:\r\n\t\t\tprint(line)\r\n\r\n\t\tprint('Name : Space')\r\n\t\tprint('------------')\r\n\t\tfor player in players:\r\n\t\t\tif player.isPlaying():\r\n\t\t\t\tprint(player.getName() + ': ' + str(player.getSpace()))", "def pad(cls, sequences, padding, pad_len=None):\n max_len = max([len(s) for s in sequences])\n pad_len = pad_len or max_len\n assert pad_len >= max_len, 'pad_len {} must be greater or equal to the longest sequence {}'.format(pad_len, max_len)\n for i, s in enumerate(sequences):\n sequences[i] = [padding] * (pad_len - len(s)) + s\n return np.array(sequences)", "def pads_adam(trim = [True,True,True], layer = 1):\n threePad = Device('cell')\n \n base = pg.straight(size = (150,250),layer=layer)\n post = pg.straight(size = (25,25),layer=layer)\n \n b1 = threePad.add_ref(base)\n b2 = threePad.add_ref(base)\n b2.movex(170)\n b3 = threePad.add_ref(base)\n b3.movex(-170)\n \n p1 = threePad.add_ref(post)\n p1.move(origin=p1.center,destination=b1.center).movey(225)\n p2 = threePad.add_ref(post)\n p2.move(origin=p2.center,destination=p1.center).movex(90)\n p3 = threePad.add_ref(post)\n p3.move(origin=p3.center,destination=p1.center).movex(-90)\n \n r1 = pr.route_basic(b1.ports[1],p1.ports[2],path_type='straight',width_type='straight',num_path_pts=50, width1 = 25,layer=layer)\n threePad.add_ref(r1)\n r2 = pr.route_basic(b2.ports[1],p2.ports[2],path_type='straight',width_type='straight',num_path_pts=50, width1 = 25,layer=layer)\n threePad.add_ref(r2)\n r3 = pr.route_basic(b3.ports[1],p3.ports[2],path_type='straight',width_type='straight',num_path_pts=50, width1 = 25,layer=layer)\n threePad.add_ref(r3)\n\n OUT = pg.outline(threePad,distance = 5,precision=0.0001,layer = layer)\n\n for i in range(len(trim)):\n if trim[i] == True:\n trimpoly = pg.rectangle(size=(35,5),layer = layer)\n t1 = OUT.add_ref(trimpoly)\n t1.move(origin=t1.center,destination=(-15+90*i,365))\n OUT = pg.boolean(OUT,t1,'A-B',precision=1e-4,layer=layer)\n OUT.add_port(name = 1, midpoint=(-15,360),width=25,orientation=90) \n OUT.add_port(name = 2, midpoint=(75,360),width=25,orientation=90)\n OUT.add_port(name = 3, midpoint=(165,360),width=25,orientation=90)\n return OUT", "def concat_annotated_spacer(voc_clips_dir, voc_clips_list, spacer_wav, save_dir, save_name, label, margin):\n assert save_name.endswith('_annotations')\n \n clips = [voc_clips_dir+i for i in voc_clips_list if i.endswith('.wav') and 'background' and 'test' not in i]\n #print('processing', len(clips), 'clips')\n labels = []\n start_times = []\n stop_times = []\n clips_list = []\n start_time = 0 #first start time is 0\n durations = []\n total_duration = 0 #starting duration of the big wav is 0\n\n if spacer_wav != None:\n \n for clip in tqdm(clips):\n\n #read the filler wav\n fs, spacer = wavfile.read(spacer_wav)\n\n #get its duration\n spacer_dur = float(len(spacer)/fs)\n\n #read the voc wav\n fs, wav = wavfile.read(clip)\n\n #get its duration\n dur = float(len(wav)/fs)\n\n #update duration\n durations.append(dur + spacer_dur)\n total_duration = sum(durations)\n\n #reset start and stop times\n start_time = start_time + margin #update this below\n stop_time = total_duration - spacer_dur - margin #stop time for this clip in the big wav\n\n #add some random noise to the spacer\n noise = np.random.normal(0, 10, np.array(spacer).shape)\n spacer = spacer + noise\n 
spacer_noise = [int(i) for i in spacer]\n\n #update lists\n start_times.append(start_time)\n stop_times.append(stop_time)\n clips_list.extend(wav)\n clips_list.extend(spacer_noise)\n labels.append(label)\n\n #new start time is the current total duration\n start_time = total_duration\n\n #add the other label\n if label=='whistle':\n labels.append('cry')\n start_times.append('')\n stop_times.append(0)\n\n if label=='cry':\n labels.append('whistle')\n start_times.append('')\n stop_times.append(0)\n\n #compile the dataframe and write \n annotation = pd.DataFrame()\n annotation['name'] = labels\n annotation['start_seconds'] = start_times\n annotation['stop_seconds'] = stop_times\n annotation.to_csv(save_dir+save_name+'.csv', index=False)\n\n #save the audio\n wav_save_name = save_name.split('_annotations')[0]+'.wav'\n\n\n clips_list = np.array(clips_list)\n clips_list = clips_list.astype('int16')\n wavfile.write(save_dir+wav_save_name, fs, np.array(clips_list))\n print('done.')\n\n return annotation\n\n else:\n\n for clip in tqdm(clips):\n\n #read the voc wav\n fs, wav = wavfile.read(clip)\n\n #get its duration\n dur = float(len(wav)/fs)\n print('voc_dur:', dur)\n\n #update duration\n durations.append(dur)\n total_duration = sum(durations)\n\n #reset start and stop times\n start_time = start_time + margin #update this below\n stop_time = total_duration - margin #stop time for this clip in the big wav\n\n #update lists\n start_times.append(start_time)\n stop_times.append(stop_time)\n clips_list.extend(wav)\n labels.append(label)\n\n #new start time is the current total duration\n start_time = total_duration\n\n #add the other label\n if label=='whistle':\n labels.append('cry')\n start_times.append('')\n stop_times.append(0)\n\n if label=='cry':\n labels.append('whistle')\n start_times.append('')\n stop_times.append(0)\n\n #compile the dataframe and write \n annotation = pd.DataFrame()\n annotation['name'] = labels\n annotation['start_seconds'] = start_times\n annotation['stop_seconds'] = stop_times\n annotation.to_csv(save_dir+save_name+'.csv', index=False)\n\n #save the audio\n wav_save_name = save_name.split('_annotations')[0]+'.wav'\n\n print('test print')\n print('writing big wav')\n \n print('clips_list:',clips_list)\n clips_list = np.array(clips_list)\n clips_list = clips_list.astype('int16')\n print('int16 clips_list:',clips_list)\n wavfile.write(save_dir+wav_save_name, fs, np.array(clips_list))\n print('done.')\n\n return annotation", "def custom_pad(batch_observations):\n seqs = [x[0].sentence for x in batch_observations]\n lengths = torch.tensor([len(x) for x in seqs], device=\"cpu\")\n label_shape = batch_observations[0][1].shape\n maxlen = int(max(lengths))\n label_maxshape = [maxlen for x in label_shape]\n labels = [-torch.ones(*label_maxshape, device=\"cpu\") for x in seqs]\n for index, x in enumerate(batch_observations):\n length = x[1].shape[0]\n if len(label_shape) == 1:\n labels[index][:length] = x[1]\n elif len(label_shape) == 2:\n labels[index][:length, :length] = x[1]\n else:\n raise ValueError(\"Labels must be either 1D or 2D right now; got either 0D or >3D\")\n labels = torch.stack(labels)\n return seqs, labels, lengths, batch_observations" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calls the Pygame Rect superconstructor and adds an associated type value
def __init__(self, type, x, y, width, height): super(TypedRect, self).__init__(x, y, width, height) self.type = type
[ "def create_rect():\n pass", "def __init__(self, length, width, transform):\n self.length = length\n self.width = width\n self.area = length * width\n self.transform = transform\n bottom_left = (-length/2, -width/2)\n top_left = (-length/2, width/2)\n top_right = (length/2, width/2)\n bottom_right = (length/2, -width/2)\n ext = [bottom_left, top_left, top_right, bottom_right, bottom_left]\n\n polygon = Polygon(ext)\n super(Rectangle, self).__init__(polygon, transform, \"rectangle\")", "def __init__(\n self,\n x: COORDINATE_TYPE = 0,\n y: COORDINATE_TYPE = 0, # pylint: disable=C0103\n width: COORDINATE_TYPE = 0,\n height: COORDINATE_TYPE = 0,\n ):\n if any((isinstance(x, float), isinstance(y, float), isinstance(width, float), isinstance(height, float))):\n self.coreRect = CoreRectF(x, y, width, height)\n else:\n self.coreRect = CoreRectI(x, y, width, height)", "def __init__(self, color, x, y):\n\n # Call the parent class (Sprite) constructor\n super(Block,self).__init__()\n\n # Create the image of the block of appropriate size\n # The width and height are sent as a list for the first parameter.\n self.image = pygame.Surface([block_width, block_height])\n\n # Fill the image with the appropriate color\n self.image.fill(color)\n\n # Fetch the rectangle object that has the dimensions of the image\n self.rect = self.image.get_rect()\n\n # Move the top left of the rectangle to x,y.\n # This is where our block will appear..\n self.rect.x = x\n self.rect.y = y", "def __init__(self):\n this = _coin.new_SoTextureCoordinate2()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, left, top, bottom, right):\n self.left = left # Value of the left boudnary in the rectangle\n self.top = top # Value of the top boudnary in the rectangle\n self.bottom = bottom # Value of the bottom boudnary in the rectangle\n self.right = right # Value of the right boudnary in the rectangle", "def __init__(self, img):\n super(CollidableSprite, self).__init__(img)\n self.btype = None\n self.cshape = None", "def __init__(self, box_pos, color):\r\n\r\n self._box_pos = box_pos\r\n self._color = color\r\n self._played = False", "def __init__(self):\n this = _coin.new_SoTextureCoordinateObject()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(\n self, x: float, y: float, width: float, height: float, *, padding: float = 0\n ):\n self.x = x - padding\n self.y = y - padding\n self.width = width + padding * 2\n self.height = height + padding * 2\n if self.width < 0 or self.height < 0:\n raise ValueError(f\"Rect must have width and height >= 0: {self}\")", "def __init__(self):\n this = _coin.new_SoVRMLTextureCoordinate()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, position: list):\r\n self.offset = 64\r\n super().__init__([position[0] - self.offset/2, position[1] - self.offset/2])\r\n self.image = pg.image.load('images/ball.png')\r\n self.mask = pg.mask.from_surface(self.image)\r\n self.max_speed = 20\r\n self.mass = 1\r\n self.drag = .002", "def __init__(self):\n this = _coin.new_SoTextureCoordinateBinding()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoTextureCoordinateDefault()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, position: list):\r\n self.offset = 128\r\n super().__init__([position[0] - self.offset/2, position[1] - 
self.offset/2])\r\n self.image = pg.image.load(\"images/goliath.png\")\r\n self.mask = pg.mask.from_surface(self.image)\r\n self.acceleration = .06\r\n self.drag = .008\r\n self.mass = .25\r\n self.max_speed = 6.5", "def __init__(self, name=None, rect=None,\n hBarWidth=0, vBarWidth=0, tileSheet=\"\", **kwargs):\n \n self.rect = rect if rect else pygame.Rect()\n \n super().__init__(name=name, rect=self.rect, **kwargs)\n \n self.hBarWidth = hBarWidth\n self.vBarWidth = vBarWidth\n \n # Set the tilesheet\n self._tileSheet = None\n self.setTileSheet(tileSheet)", "def __init__(self, name, idle):\n \n ObjectType.ObjectTypes[name] = self\n self.name = name\n #Data\n #Animations\n self.animations = {}\n self.animations['idle'] = idle\n self.width = idle.width\n self.height = idle.height", "def __init__(self,my_settings,screen,ship):\r\n super().__init__()\r\n self.screen = screen\r\n \"\"\" Create a bullet rect at (0,0) and then set correct position \"\"\"\r\n self.rect = pygame.Rect(0, 0, my_settings.bullet_width, my_settings.bullet_height) # create bullet's rect attribute\r\n self.rect.centerx = ship.rect.centerx # move the bullet accordingly with the ship\r\n #self.rect.centery = ship.rect.centery # set bullet's center to be the same as the ship's rect.center\r\n self.rect.top = ship.rect.top # set the top of the bullet's rect to match the top of the ship's rect\r\n\r\n # store the bullet's position as a decimal value\r\n self.y = float(self.rect.y)\r\n\r\n self.color = my_settings.bullet_color\r\n self.speed_factor = my_settings.bullet_speed_factor", "def __init__(self):\n this = _coin.new_SoTextureCoordinate3()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies object-specific gravity to deltaY
def apply_grav(self): if self.deltaY == 0: self.deltaY = 1 else: self.deltaY += self.gravity
[ "def gravity(self): \n if self.change_y == 0:\n \n self.change_y = 1\n \n else:\n self.change_y +=self.grav\n \n \n if self.rect.y >= SCREEN_HEIGHT - self.rect.height and self.change_y >= 0:\n self.change_y = 0\n self.rect.y = SCREEN_HEIGHT - self.rect.height", "def calc_gravity(self):\n if self.yspeed == 0:\n self.yspeed = self.base_y_gravity\n else:\n self.yspeed += self.gravity_acceleration", "def apply_gravity(self):\n self.velocity += self.gravity", "def update_y(self) -> None:\n self.y_vel += self.gravity\n self.position.y += self.y_vel", "def gravity(self) -> None:\n #will only exert it if the player is in free fall and below terminal velocity\n if self.rect.y != self.WIN.get_height() - (2*self.radius) - 1:\n if self.vector.y < 9:\n #if above the screen it falls faster\n if self.rect.y > self.WIN.get_height():\n self.changeYVector(0.11)\n else:\n self.changeYVector(0.08)", "def gravity(vy, on_platform):\n if not on_platform:\n vy += G\n else:\n vy = 0\n return vy", "def gravity(s, obj):\n return vector(0.0, -(ft2WU(32.2))*obj.M, 0.0)", "def gravity(objects, directionX=float, magnitude=float, directionY=float, perVertex=bool, directionZ=float, attenuation=float, maxDistance=\"string\", position=\"string\", name=\"string\"):\n pass", "def _gravity(self):\n Rg = self.R * self.g\n\n # calculate gravity force\n self.u_temp[self.wet_horizontal_links] -= (\n Rg\n * self.Ch_link[self.wet_horizontal_links]\n / self.h_link[self.wet_horizontal_links]\n * self.S[self.wet_horizontal_links]\n * self.dt_local\n )\n self.v_temp[self.wet_vertical_links] -= (\n Rg\n * self.Ch_link[self.wet_vertical_links]\n / self.h_link[self.wet_vertical_links]\n * self.S[self.wet_vertical_links]\n * self.dt_local\n )\n self.update_boundary_conditions(\n u=self.u_temp,\n v=self.v_temp,\n u_node=self.u_node_temp,\n v_node=self.v_node_temp,\n )", "def updateGravities( self ):\n\n # return immediately if no gravity between bodies\n if self.gravconst == 0:\n return\n\n # start with the acceleration of gravity\n xaccels = [ self.gravaccel * cos( self.gravangle ) ] * len( self.bodies )\n yaccels = [ self.gravaccel * sin( self.gravangle ) ] * len( self.bodies )\n\n # compute the acceleration between each pair of bodies\n for i in range( len( self.bodies ) - 1 ):\n for j in range( i + 1, len( self.bodies ) ):\n xposi = self.bodies[i].getX()\n yposi = self.bodies[i].getY()\n xposj = self.bodies[j].getX()\n yposj = self.bodies[j].getY()\n radi = self.bodies[i].getRad()\n radj = self.bodies[j].getRad()\n distsquared = ( xposi - xposj ) ** 2 + ( yposi - yposj ) ** 2\n if distsquared != 0:\n accel = self.gravconst * radi**2 * radj**2 / (\n distsquared )\n xaccelchange = accel * ( xposj - xposi ) / sqrt(\n distsquared )\n yaccelchange = accel * ( yposj - yposi ) / sqrt(\n distsquared )\n xaccels[i] += xaccelchange\n yaccels[i] += yaccelchange\n xaccels[j] -= xaccelchange\n yaccels[j] -= yaccelchange\n\n # modify acceleration for each pair\n for i in range( len( self.bodies ) ):\n self.bodies[i].setAccel( xaccels[i], yaccels[i] )\n return", "def setGravity( self, direction, accel ):\n self.gravangle = direction\n self.gravaccel = accel\n for body in self.bodies:\n body.setGravity( direction, accel )\n return", "def update_position_dead(self):\n self.rect.y += self.y_velocity * self.dtime", "def gravity(self, star):\n dis = self.distance(star)\n fx = Constant.GRAVITY_CONST * self.mass * star.mass / dis ** 3 * (star.pos.x - self.pos.x)\n fy = Constant.GRAVITY_CONST * self.mass * star.mass / dis ** 3 * (star.pos.y - self.pos.y)\n 
return Vector(fx, fy)", "def physics_update(self) -> None:\n if not self.stopped:\n self.velocity += helpers.V(magnitude=self.gravity_power, angle=180)", "def force_of_gravity(self):\n return self.mass * self.atmospheric_conditions['gravity']", "def _update(self, dt):\n # Walking vvv\n\n speed = FLYING_SPEED if self.flying else WALKING_SPEED\n d = dt * speed # distance covered in that CPU tick\n dx, dy, dz = self.get_motion_vector()\n # New position in the space - prior to checking gravity method\n dx, dy, dz = dx * d, dy * d, dz * d\n\n # Gravity vvv\n\n if not self.flying:\n # Update vertical speed: if you're falling - speed up until terminal\n # velocity - because ... that's what happens when you fall\n # If you're jumping - slow down until you begin to actually fall\n self.dy -= dt * GRAVITY\n self.dy = max(self.dy, - TERMINAL_VELOCITY)\n dy += self.dy * dt\n\n # Object collisions\n x, y, z = self.position\n x, y, z = self.collide((x + dx, y + dy, z + dz), PLAYER_HEIGHT)\n self.position = (x, y, z)", "def apply_gravity(self, g):\n for element in self._elements:\n element.calculate_gravity(g)", "def physics_update(self, delta_time):\n\n\t\tself.position += delta_time*self.velocity*Vector.unit_from_angle(self.direction)\n\t\tself.velocity += delta_time*self.acceleration\n\n\t\t# Deal with floating-point instability\n\t\tif abs(self.velocity) < 0.9:\n\t\t\tself.velocity = 0\n\n\t\tif math.fabs(self.velocity) > MAX_VELOCITY:\n\t\t\tself.velocity *= MAX_VELOCITY/(math.fabs(self.velocity))", "def gravity(transforms: Transform, _grav: Gravity):\n for transform in iter(transforms):\n transform.x += Gravity.VECTOR.x\n transform.y += Gravity.VECTOR.y" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Moves all rect members on x axis by delta value
def moveX(self): for rect in self.rects: rect.x += self.deltaX #self.drawing_rect.x += self.deltaX
[ "def deltaX(self, xDelta):\n self.x += xDelta\n self.rect.x = round(self.x)", "def add_x(self, x):\n self._xy[0] += x\n self.rect.x = self._xy[0]", "def move_x(self, val: int) -> None:\n self.x_pos += val", "def move(self, delta_x=0, delta_y=0):\n self.x += Decimal(str(delta_x))\n self.y += Decimal(str(delta_y))", "def move_x(self, amount=40):\n self.x_coor += amount\n self.pos = [self.x_coor - self.node_size / 2, self.y_coor - self.node_size / 2]", "def setX(self, x):\n self.x = float(x)\n self.rect.x = round(x)", "def move_left(self):\n self.x -= 1", "def move(self, x_offset, y_offset):\n self.rect = pygame.Rect(\n (self.rect.x + x_offset, self.rect.y + y_offset), (self.rect.width, self.rect.height))", "def drag(self,x,y):\n (diffx, diffy)=self.diff\n self.x=x+diffx\n self.y=y+diffy", "def update(self):\n self.rect.topleft = (self.x * BOX_LENGTH, self.y * BOX_LENGTH)", "def _fix_rect(self):\n # Offset logic is to always work with copies, to avoid\n # flying effects from multiple calls to _fix_rect\n # See footwork in draw\n if hasattr(self.rect, 'collidepoint'):\n self.rect = self.rect.move(self.scene.OFFSET)\n else:\n self.rect = [x.move(self.scene.OFFSET) for x in self.rect]", "def move(self):\n\n\t\t# Ensure final move speed is at most moveSpeed\n\t\t\n\t\tself.rect = self.rect.move([self.moving[\"x\"] * self.moveSpeed, self.moving[\"y\"] * self.moveSpeed])", "def bounce_x(self):\n self.x_move *= -1\n self.move_speed *= .95", "def update(self):\n\n self.x += (self.settings.alien_x_speed * self.settings.alien_direction)\n self.rect.x = self.x", "def update(self):\n # Move pillar to the left.\n self.rect.x -= self.settings.ground_animation_speed", "def move_x(self, amount):\n logger.debug('Point {} moved by {} in the x direction'.format(self.to_repr(), amount))\n\n self.xcoord += amount", "def move_rects(rect_objs):\n\n for shape in rect_objs:\n rect = shape['rect']\n velocity = shape['velocity']\n move_rect(rect, velocity)", "def update_x_y(self):\n self.x = self.pos % 15\n self.y = self.pos // 15", "def update(self):\n #update the ship's x value not the rect\n if self.moving_right and self.rect.right< self.screen_rect.right:\n self.x += self.setting.ship_speed\n if self.moving_left and self.rect.left>0:\n self.x -= self.setting.ship_speed\n\n # update the rect object from self.x\n self.rect.x = self.x" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Moves all rect members on y axis by delta value
def moveY(self): for rect in self.rects: rect.y += self.deltaY #self.drawing_rect.y += self.deltaY
[ "def deltaY(self, yDelta):\n self.y += yDelta\n self.rect.y = round(self.y)", "def move_y(self, val: int) -> None:\n self.y_pos += val", "def add_y(self, y):\n self._xy[1] += y\n self.rect.y = self._xy[1]", "def move_y(self, amount=40):\n self.y_coor += amount\n self.pos = [self.x_coor - self.node_size / 2, self.y_coor - self.node_size / 2]", "def stopY(self):\r\n self.deltaY = 0", "def moveDown(self):\r\n if (self.index < len(self.positions) - 1):\r\n self.rect.y = self.positions[self.index + 1] #Opposite of above. Get the next highest array index and set it as the Y to make the arrow go down\r\n self.index += 1", "def updateY(self):\n index=0\n for r in self.roots:\n index=self.roots.index(r)\n # if this is the first root node in Canvas \n if index>0:\n r.y =self.roots[index-1].y+OFFSET*(self.roots[index-1].height)\n index +=1 \n self.do_updateY(r)", "def moveX(self):\r\n for rect in self.rects:\r\n rect.x += self.deltaX\r\n #self.drawing_rect.x += self.deltaX\r", "def update(self):\n self.y += (self.settings.alien_speed * self.settings.alien_direction)\n self.rect.y = self.y", "def move_y(self, amount):\n logger.debug('Point {} moved by {} in the y direction'.format(self.to_repr(), amount))\n\n self.ycoord += amount", "def update_y(self) -> None:\n self.y_vel += self.gravity\n self.position.y += self.y_vel", "def update(self):\n # Bullet position update\"\"\"\n self.y -= self.settings.bullet_speed\n # Updating the position of the bullet rectangle\"\"\"\n self.rect.y = self.y", "def flipy(self):\n for v in self.vlist:\n # flip the y-axis\n v.y = -v.y", "def move_rects(rect_objs):\n\n for shape in rect_objs:\n rect = shape['rect']\n velocity = shape['velocity']\n move_rect(rect, velocity)", "def ball_reverse(self):\n self.__dy = -self.__dy", "def move(self):\n\n\t\t# Ensure final move speed is at most moveSpeed\n\t\t\n\t\tself.rect = self.rect.move([self.moving[\"x\"] * self.moveSpeed, self.moving[\"y\"] * self.moveSpeed])", "def update_position_dead(self):\n self.rect.y += self.y_velocity * self.dtime", "def moveUp(self):\r\n if (self.index > 0):\r\n self.rect.y = self.positions[self.index - 1] #Decrease the array index to get the next lowest value - moving the arrow up\r\n self.index -= 1", "def move_waypoint_y(self, val: int) -> None:\n self.waypoint_y += val" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stops vertical movement of all rect members
def stopY(self): self.deltaY = 0
[ "def exit(self):\r\n self.rect.x = 1000\r\n self.rect.y = 750", "def robot_down(self):\t\r\n\t self.y = self.y + 1\r\n\t if self.y > 9:\r\n\t\t self.y = 9", "def moveY(self):\r\n for rect in self.rects:\r\n rect.y += self.deltaY\r\n #self.drawing_rect.y += self.deltaY\r", "def updateEraseRect(self):\n\t\treturn", "def draw_stop_screen(self):\n self.screen.fill(Color.BLACK)\n self.display_message(\"stop \" + self.state.last_enemy_move_direction)", "def moveDown(self):\r\n if (self.index < len(self.positions) - 1):\r\n self.rect.y = self.positions[self.index + 1] #Opposite of above. Get the next highest array index and set it as the Y to make the arrow go down\r\n self.index += 1", "def stopBackgroundMoving(self):\n self.moving = False", "def moveDown(self):\n if self.vel.stopped():\n self.vel.y += self.speed", "def stopScrolling(self):\n pass", "def move_down(self):\n for block in self.blocks:\n block.move_down()", "def stop(self):\n\n self.moving_state = self.STOP\n\n self.motor_left1.throttle = None\n self.motor_left2.throttle = None\n self.motor_right1.throttle = None\n self.motor_right2.throttle = None", "def updateEraseRect(self):\n\t\tx0, y0, x1, y1 = self.getMinMaxXY()\n\t\tx0 += self._xpos\n\t\ty0 += self._ypos\n\t\tx1 += self._xpos\n\t\ty1 += self._ypos\n\t\tself.eraseRect = (x0, y0, x1, y1)\n\t\tself.UpdateOriginalPoints()\n\t\tself.CalculateBoundingBox()", "def move_down(self) -> None:\n if self.velocity == (0, -VELOCITY_NORM):\n return\n self.velocity = (0, VELOCITY_NORM)", "def stop_highlights():\r\n\r\n for v in sg.VEHICLES:\r\n v.stop_highlight()", "def robot_up(self):\t\r\n\t self.y = self.y - 1\r\n\t if self.y < 0:\r\n\t\t self.y = 0", "def stop(animo):\n\n animo.isWalking = False\n animo.sprite.change_x = 0\n animo.sprite.change_y = 0\n key = 'idle_' + animo.face\n change_tex_set(animo, key)", "def off_screen(self, width):\r\n for bullet in self.p1_bullet:\r\n if bullet.is_off_screen(width):\r\n bullet.alive = False\r\n for bullet in self.p2_bullet:\r\n if bullet.is_off_screen(width):\r\n bullet.alive = False", "def mover_rectilineamente_a_los_ww(self):\n for i in range(0,len(self.white_walkers)):\n self.white_walkers[i].mover_rectilineamente()", "def end_turn(self):\n for unit in self.me.units:\n unit.reset_movement()\n self.hide_unit_range()\n self.me.collect_from_cities() # maybe this should be here?" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts game ID to name using the API.
def game_id_to_name(session, game_id): # Make a query. query = f"games" payload = {'id': game_id} # Make a request. response = request_query(session, query, payload) if not response: print(f"gameIDtoName error. No response from API. Game ID: {game_id}") return None # Parse the response. try: quote = response.json() game = quote['data'][0]['name'] return game except (KeyError, TypeError, ValueError): print("gameIDtoName error. Can't parse the response. " f"Game ID: {game_id}") return None
[ "def id_to_name(player_id):\n query = \"SELECT name FROM players WHERE id=%s\"\n parameter = (player_id,)\n\n with connect_to_db() as database:\n database['cursor'].execute(query, parameter)\n player_name = database['cursor'].fetchone()[0]\n\n return player_name", "def name_for_id(id):\n\n if id in allID:\n if \"names\" in allID[id] and len(allID[id][\"names\"]) > 0:\n return allID[id][\"names\"][0]\n return \"UNKNOWN \" + str(id)", "def name_from_id(id):\n if id in EXISTING:\n return EXISTING[id]\n\n h = int(id, 16)\n i = h % len(ADJS)\n adj = ADJS[i]\n\n h = int(''.join(list(reversed(str(h)[1:]))))\n nouns = [n for n in NOUNS if n[:3] != adj[:3]] # try to filter out redundancies\n i = h % len(nouns)\n noun = nouns[i]\n\n return 'The Planet of {} {}'.format(adj, noun)", "def getChampionNameFromID(championID):\n staticdata = requests.get('https://prod.api.pvp.net/api/lol/static-data/na/v1.2/champion/'+str(championID)+'?api_key=d9f79478-95ed-4def-8060-e8945b834809')\n staticdatajson = staticdata.json()\n championName = staticdatajson['name']\n return championName", "def getNameById(id):\n return countriesById[int(id)].name", "def id_to_actor_name(id_num):\n new_names = {value: key for key, value in namesdb.items()}\n return new_names[id_num]", "def _make_game_id(row):\n game_id = u'{} {}-{}'.format(\n row['date'].strftime(\"%Y-%m-%d\"),\n row['home_team'],\n row['away_team']\n )\n return game_id", "def get_name(mosaic_id):\n return f'{Mosaic.name_prefix}{Mosaic.get_id_str(mosaic_id)}'", "def get_game_name(item_soup):\n logger.debug('Getting name')\n game_name = None\n strip_pattern = '^(Save) [0-9]*\\%* (on)+ '\n try:\n # Get the URL\n game_url = item_soup.find('a', 'summersale_dailydeal')['href']\n # Get the page content and parse it.\n game_soup = BeautifulSoup(request.urlopen(game_url))\n # Set the name\n game_name = game_soup.find('title').string\n game_name = sub(strip_pattern, '', game_name).replace('on Steam', '')\n except (TypeError, AttributeError):\n logger.debug('item_soup must not have been a BeautifulSoup item, or game_soup could not find a div with '\n 'apphub_AppName.')\n logger.debug('Returning {}'.format(game_name))\n return game_name", "def getLongName(id):", "def name(player):\n return player['name']", "def conversion_name(self) -> str:\n return self.__data[\"name\"]", "def name_from_id(self, id):\n try:\n res = self.render('image', '/images/%s' % id).name\n if res:\n return res\n except ClientException:\n pass\n raise ImageException(\"No image found for id %s\" % id)", "def make_name(name):\n return \"int_%s_%s%s\" % (impl_instance.iontype, name, self.sample_resource_md5)", "def getShortName(id):", "def get_season_from_game_id(game_id):\n if game_id[4] == '9':\n return '20' + game_id[3] + game_id[4] + '-' + str(int(game_id[3]) + 1) + '0'\n else:\n return '20' + game_id[3] + game_id[4] + '-' + game_id[3] + str(int(game_id[4]) + 1)", "def getRatName(bot, ratid):\n if (str(ratid) is not '0') and str(ratid) in savedratnames.keys():\n return savedratnames.get(ratid)['name'], savedratnames.get(ratid)['platform']\n if str(ratid) == 'None':\n return 'unknown', 'unknown'\n try:\n result = callapi(bot=bot, method='GET', uri='/rats/' + str(ratid))\n except ratlib.api.http.APIError:\n print('got Api error during api call')\n return 'unknown', 'unknown'\n try:\n data = result['data'][0]['attributes']\n name = data['name']\n platform = data['platform']\n ret = name, platform\n except:\n print('Couldn\\'t parse Ratname from api response for ratid' + str(ratid))\n ret = 'unknown', 
'unknown'\n # print('returning '+str(ret)+' as name for '+ratid)\n return ret", "def game_details(id):\n game = Game.query.filter(\n Game.api_id == id).first()\n\n if not game:\n game = add_game_to_db(id)\n\n collection_api_ids = get_collection_api_ids(g.user)\n\n return render_template('game_detail.html', game=game, collection_api_ids=collection_api_ids)", "async def get_name_from_user(discord_id, *, return_player):\r\n user_doc = await get_user_document(discord_id)\r\n if not user_doc[\"osu_id\"]:\r\n return None\r\n else:\r\n if return_player:\r\n return user_doc[\"osu_id\"]\r\n else:\r\n return user_doc[\"team_name\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the enemy from the expedition
def retreat(self, enemy): self.expedition.remove(enemy)
[ "def enemy_destroyed(self):\n self.scoreboard['enemies_destroyed'] += 1", "def ignore_enemy(self, name: str) -> None:\n\n if name in self._enemies:\n self._enemies.remove(name)", "def add_enemy(self, e):\n self.enemies.append(e)", "def _dec_enemy_count(self):\n if (self.state == Editor.State.wave and\n self.wave_edit_mode == Editor.WaveEditMode.enemy_count and\n self.selected_wave is not None and\n self.selected_wave.enemy_count > 1):\n self.selected_wave.enemy_count -= 1", "def remove_hero(self, name):\n if self.name in self.heroes:\n z = index(self.name)\n self.heroes.pop(z)\n else:\n return 0", "def _remove_life(self):\n if self.__lives > 0:\n self.__screen.show_message(*GameRunner.HIT_WARN)\n self.__screen.remove_life()\n self.__lives -= 1", "def attack(self, enemy):\n\t\tkill = random.choice([True, False])\n\n\t\tif kill:\n\t\t\tenemy.die()\n\t\telse:\n\t\t\tself.die()", "def remove_agent(self, *, agent_name: str) -> None:", "def select_enemy(self, name: str) -> None:\n\n if name not in self._targets and name != self._name:\n self._enemies.append(name)", "def enemy_team(self, team_index):\n other_teams = self.teams[:]\n other_teams.remove(self.teams[team_index])\n if len(other_teams) != 1:\n raise UniverseException(\"Expecting one enemy team. Found %i.\" % len(other_teams))\n return other_teams[0]", "def remove_hero(self, name):\n hero_index = self.find_hero(name)\n if(hero_index == -999):\n return -999\n\n if(self.heroes[hero_index].is_alive):\n self.living_heroes -= 1\n self.heroes.pop(hero_index)", "def observe_death(self,enemy):\n #print 'death: resetting agent to ', self.initial_positions[enemy]\n self.observe_exact(enemy, self.initial_positions[enemy])", "def remove_entity_from_inventory(self, x, y):\n tile = self.tiles[x][y]\n entity = tile.inventory\n \n if entity is None:\n raise LogicException(\"Tried to remove inventory from (%d,%d) but there was nothing there.\" % (x, y))\n\n entity.x = -1\n entity.y = -1\n entity.owner = None\n\n tile.inventory = None\n self.entities.remove(entity)\n return entity", "def attack(self,enemy):\n enemy.takeDamage(self.damage)", "def boss_defeated(g):\n\n finalRoom.del_action((\"attack\",))\n\n print(\"This attack appears to have been the final one. You watch as the bird collapses, and begins to shrink. It shrinks back to\\nits normal size, now laying in the middle of the room.\")\n g.player.boss_defeated = True", "def kill(self, poof=None):\n\t\t#print \"Killing\", self.name\n\t\tif poof != None:\n\t\t\tself.spawn.append(PoofEntity(self.geom, poof))\n\t\tself.remove()", "def remove_player(self):\n if self.num_player > 0:\n self.num_player -= 1\n self.available_place += 1\n self.update_full_status()\n self.save()", "async def pboss_remove(self, ctx, *, term):\n dbcog = await self.get_dbcog()\n pdicog = self.bot.get_cog(\"PadInfo\")\n\n term = term.lower()\n m = await dbcog.find_monster(term, ctx.author.id)\n if m is None:\n await ctx.send(f\"No monster found for `{term}`. 
Make sure you didn't use quotes.\")\n return\n\n base = dbcog.database.graph.get_base_monster(m)\n\n if base.monster_id not in self.settings.boss():\n await ctx.send(\"Boss mechanics item doesn't exist.\")\n return\n if not await get_user_confirmation(ctx,\n \"Are you sure you want to globally remove the boss data for {}?\".format(\n base.name_en)):\n return\n\n self.settings.rmBoss(base.monster_id)\n await ctx.tick()", "def remove_piece(s, (x,y)):\n\t\ts.matrix[x][y].occupant = None", "def remove_item(game):\n player = game.player\n item = game.rooms[player.position['next location']].door['unlock']\n player.inventory.remove(item)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
We've just used the Category information from the goals app to create labels, so let's remove any of the preexisting labels from questions, since they are now irrelevant.
def remove_prior_survey_labels(apps, schema_editor): for q in apps.get_model("survey", "BinaryQuestion").objects.all(): q.labels.clear() for q in apps.get_model("survey", "LikertQuestion").objects.all(): q.labels.clear() for q in apps.get_model("survey", "MultipleChoiceQuestion").objects.all(): q.labels.clear() for q in apps.get_model("survey", "OpenEndedQuestion").objects.all(): q.labels.clear()
[ "def clear_labels(self):\n from casepro.statistics.models import DailyCount, datetime_to_date\n\n day = datetime_to_date(self.created_on, self.org)\n for label in self.labels.all():\n DailyCount.record_removal(day, DailyCount.TYPE_INCOMING, label)\n\n Labelling.objects.filter(message=self).delete()", "def noQuestionsInCategory(self):\n print(\"NO QUESTIONS REMAINING IN CATEGORY - Spin Again\")\n self.startNextTurn()", "def unlabel(self, *labels):\n from casepro.statistics.models import DailyCount, datetime_to_date\n\n existing_labellings = Labelling.objects.filter(message=self, label__in=labels).select_related(\"label\")\n\n day = datetime_to_date(self.created_on, self.org)\n for labelling in existing_labellings:\n DailyCount.record_removal(day, DailyCount.TYPE_INCOMING, labelling.label)\n\n Labelling.objects.filter(id__in=[l.id for l in existing_labellings]).delete()", "def telector_hide():\n\n global labels_ui\n if labels_ui is not None:\n labels_ui.destroy()\n labels_ui = None\n ctx.tags = []", "def handle_no_labels(self, namespace):\n pass", "def unset_labels(self, workspace_id: str, category_name, uris: Sequence[str]):\n dataset_name = utils.get_dataset_name(uris[0])\n logic.labels_in_memory[workspace_id][dataset_name] = logic.get_labels(workspace_id, dataset_name)\n all_uris = self.get_all_text_elements_uris(dataset_name)\n for uri in uris:\n if uri not in all_uris:\n raise Exception(f'Trying to unset labels for uri \"{uri}\" which does not exist')\n logic.labels_in_memory[workspace_id][dataset_name][uri].pop(category_name)\n # Save updated labels dict to disk\n logic.save_labels_data(dataset_name, workspace_id)", "def labels_unique():\n label_list = ['clear','haze', 'partly_cloudy','cloudy',\n 'primary', 'agriculture', 'water', 'habitation', \n 'road', 'cultivation', 'blooming', 'selective_logging',\n 'slash_burn', 'conventional_mine', 'bare_ground', \n 'artisinal_mine', 'blow_down']\n return label_list", "def _update_labels(self):\n if self.cleaned_up:\n return\n self.history_list.update_labels()\n self.address_list.update_labels()\n self.utxo_list.update_labels()\n self.update_completions()\n self.labels_updated_signal.emit()\n # clear flag\n self.labels_need_update.clear()", "def test_label_service_replace_labels(self):\n pass", "def clean_up(self):\n self.labels = {}\n for sentence in self.sentences:\n sentence.probs = []", "def remove_label_by_query(j, jql, label_to_remove):\n issues = j.search_issues(jql)\n for issue in issues:\n new_labels = [label for label in issue.fields.labels\n if label != label_to_remove]\n yield _label_issue(issue, new_labels)", "def text_labels(self):\n types = [\n \"First_Paragraph\",\n \"What_you_need_to_know\",\n \"MarketDefinition\"\n ]\n for t in types:\n # self.get_word_vector(t)\n self.shorten_word_vector(t)\n self.update_labelid_intext()\n return\n local_sql = MysqlConnecttion(\"local\")\n # booth_sql = MysqlConnecttion(\"booth\")\n query = r\"select distinct docid from Magic_Quadrants where removed = 0\"\n rows = local_sql.excute_with_result(query)\n mq_set = set()\n for row in rows:\n mq_set.add(row[0])\n doc_map = {}\n query = \"select DocID, DocNo from Documents\"\n rows = local_sql.excute_with_result(query)\n for row in rows:\n docid = row[0]\n docno = row[1]\n doc_map[docid] = docno\n for docid in mq_set:\n print docid\n docno = doc_map[docid]\n query = r\"update mq_text_label set DocNo = '%s' where DocID = '%s'\"%(docno, docid)\n local_sql.excute(query)\n # booth_sql.excute(query)", "def remove_label(self, label):\n for label_obj in 
self.labels:\n if label_obj.label.lower() != label.lower():\n continue\n self.labels.remove(label_obj)\n db_session.commit()", "def reset_labels(dataset):\r\n dataset['label'] = dataset['originalLabel']\r\n return dataset", "def emptyCat(self,cat,LDmodel):\n for h in LDmodel.catDict.LDictHierarchies():\n if h[0] == str(cat):\n self.dropWordSet(h[1],self.getWords(h[0]))\n self.catDict[str(int(cat))] = (self.getDesc(cat),set())", "def unused_labels(self):\n used_labels = set()\n\n def extract_labels(expr):\n for m in self.label_re.finditer(expr):\n used_labels.add(self.labels[m.group(1)])\n\n for action in (a for a in self.actions.values() if isinstance(a, Action)):\n if isinstance(action, Signal):\n for param in action.parameters:\n extract_labels(param)\n if isinstance(action, Block):\n for statement in action.statements:\n extract_labels(statement)\n if action.condition:\n for statement in action.condition:\n extract_labels(statement)\n if action.savepoints:\n for savepoint in action.savepoints:\n if savepoint.statements:\n for statement in savepoint.statements:\n extract_labels(statement)\n\n return sorted(set(self.labels.values()).difference(used_labels))", "def check_and_remove_labels(appdata, controller):\n labels = appdata.pop(\"labels\")\n assert labels.pop(\"name\") == controller.name\n assert labels.pop(\"org\") == controller.organization.slug \\\n if controller.organization else ''\n domains = [u\".\".join([controller.app_id, settings.HUB_DOMAIN])]\n domains.extend(controller.domain_urls.split())\n assert sorted(labels.pop(\"HAPROXY_0_VHOST\").split(\",\")) == sorted(domains)\n\n haproxy_group = labels.pop(\"HAPROXY_GROUP\")\n\n if controller.external_visibility:\n assert sorted(labels.pop(\"domain\").split()) == sorted(domains)\n traefik_domains = labels.pop(\"traefik.frontend.rule\")\n traefik_domains = traefik_domains.split(\":\", 2)[-1].split(\",\")\n traefik_domains = [d.strip() for d in traefik_domains]\n assert sorted(traefik_domains) == sorted(domains)\n assert haproxy_group == \"external\"\n else:\n assert haproxy_group == \"internal\"\n\n # We may have duplicate keys in here, but hopefully the database always\n # return the objects in the same order.\n lvs = {lv.name: lv.value for lv in controller.label_variables.all()}\n assert labels == lvs", "def hide_group_feedback(self):\n\t\tself.group_message_label['text'] = ''", "def FAQ_UNHAPPY_FEEDBACK(self):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the Azure network class.
def __init__(self, az_account: 'account.AZAccount') -> None:
    self.az_account = az_account
    self.network_client = network.NetworkManagementClient(
        self.az_account.credentials, self.az_account.subscription_id)
[ "def __init__(self):\n # We initialize the \"networks\" attribute with an empty dictionary.\n self.networks = {}", "def _configure_manager(self):\r\n self._manager = CloudNetworkManager(self, resource_class=CloudNetwork,\r\n response_key=\"network\", uri_base=\"os-networksv2\")", "def setUpClass(cls):\n super(NeutronCreateNetworkTest, cls).setUpClass()\n cls.current_os_release = openstack_utils.get_os_release()\n\n # set up clients\n cls.neutron_client = (\n openstack_utils.get_neutron_session_client(cls.keystone_session))\n cls.neutron_client.format = 'json'", "def __init__(self):\n self.docker: docker.DockerClient = docker.DockerClient(\n base_url=\"unix:/{}\".format(str(SOCKET_DOCKER)), version=\"auto\", timeout=900\n )\n self.network: DockerNetwork = DockerNetwork(self.docker)", "def __init__(self):\n rospy.init_node('route_network')\n self.config = None\n\n # advertise visualization marker topic\n self.pub = rospy.Publisher('route_network', RouteNetwork,\n latch=True, queue_size=10)\n self.graph = None\n rospy.wait_for_service('get_geographic_map')\n self.get_map = rospy.ServiceProxy('get_geographic_map',\n GetGeographicMap)\n\n # register dynamic reconfigure callback, which runs immediately\n self.reconf_server = ReconfigureServer(Config, self.reconfigure)", "def __init__(__self__,\n resource_name: str,\n args: CoreNetworkArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InternalNetworkArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def _init_network(self):\n self.dqn = Brain(self.backbone_cfg, self.head_cfg).to(self.device)\n self.dqn_target = Brain(self.backbone_cfg, self.head_cfg).to(self.device)\n self.loss_fn = build_loss(self.loss_type)\n\n self.dqn_target.load_state_dict(self.dqn.state_dict())\n\n # create optimizer\n self.dqn_optim = optim.Adam(\n self.dqn.parameters(),\n lr=self.optim_cfg.lr_dqn,\n weight_decay=self.optim_cfg.weight_decay,\n eps=self.optim_cfg.adam_eps,\n )\n\n # load the optimizer and model parameters\n if self.load_from is not None:\n self.load_params(self.load_from)", "def __init__(self, neutron=None):\n super(TransportZone, self).__init__()\n self.log = logger.setup_logging(self.__class__.__name__)\n self.schema_class = 'transport_zone_schema.TransportZoneSchema'\n\n if neutron is not None:\n self.set_connection(neutron.get_connection())\n\n self.set_create_endpoint('/transport-zones')\n self.set_state_endpoint('/transport-zones/%s/state')\n self.id = None", "def _initialize_network(self):\n self.model = self._get_model()\n if hasattr(self.model, 'fc'):\n self.optimizer = optim.Adam(self.model.fc.parameters(), lr=self.config['learning_rate'])\n else:\n self.optimizer = optim.Adam(self.model.classifier.parameters(), lr=self.config['learning_rate'])\n self.model.to(self.device)", "def __init__(self, cloud):\n\n self.cloud = cloud\n self._conn = None\n\n # Initialize compute controller.\n assert issubclass(self.ComputeController, BaseComputeController)\n self.compute = self.ComputeController(self)\n\n # Initialize DNS controller.\n if self.DnsController is not None:\n assert issubclass(self.DnsController, BaseDNSController)\n self.dns = self.DnsController(self)\n\n # Initialize network controller.\n if self.NetworkController is not None:\n assert issubclass(self.NetworkController, BaseNetworkController)\n self.network = self.NetworkController(self)", "def SetupCommon(self):\n self.network_name = self._GetResourceName()\n self.subnetwork_name = self._GetResourceName()\n\n 
self.Run('compute networks create {} --subnet-mode=custom'.format(\n self.network_name))\n self.Run('compute networks subnets create {0} --network {1} '\n '--region {2} --range {3}'.format(self.subnetwork_name,\n self.network_name, self.region,\n self.SUBNET_RANGE))", "def _setup_network(self):\n self.network = moose.LIF( 'network', self.N );\n moose.le( '/network' )\n self.network.vec.Em = self.el\n self.network.vec.thresh = self.vt\n self.network.vec.refractoryPeriod = self.refrT\n self.network.vec.Rm = self.Rm\n self.network.vec.vReset = self.vr\n self.network.vec.Cm = self.Cm\n if not noiseInj:\n self.network.vec.inject = self.Iinject\n else:\n ## inject a constant + noisy current\n ## values are set in self.simulate()\n self.noiseTables = moose.StimulusTable('noiseTables',self.N)\n moose.connect( self.noiseTables, 'output', \\\n self.network, 'setInject', 'OneToOne')", "def __init__(self, logical_services_node=None):\n super(ServicesNodeNatConfig, self).__init__()\n self.log = logger.setup_logging(self.__class__.__name__)\n self.schema_class = 'nat_config_schema.NatConfigSchema'\n\n if logical_services_node is not None:\n self.set_connection(logical_services_node.get_connection())\n\n self.set_create_endpoint(\"/lservices-nodes/\" + logical_services_node.id + \"/service-bindings/nat/config\")\n self.id = None", "def __init__(self):\n super(TNL3ServicePlugin, self).__init__()\n self._tn_info = None\n # self._driver = None\n self.task_manager = tasks.TaskManager()\n self.task_manager.start()\n self.tn_init()", "def _InitTransport(self):\n if self.transport is None:\n self.transport = \\\n self.transport_class(self._GetAddress(),\n timeouts=self.timeouts,\n allow_non_master=self.allow_non_master)", "def __init__(self, logical_services_node=None):\n super(LogicalServicesInterface, self).__init__()\n# self.log = logger.setup_logging(self.__class__.__name__)\n self.schema_class = 'logical_services_interface_schema.LogicalServicesInterfaceSchema'\n\n if logical_services_node is not None:\n self.set_connection(logical_services_node.get_connection())\n\n self.set_create_endpoint(\"/lservices-routers/\" + logical_services_node.id + \"/interfaces\")\n self.id = None", "def __init__(self):\n super().__init__('BeagleboneGreenWirelessConnection')\n\n self._commsThreadRun = True # Initialize the thread enable boolean\n self._sendQueue = deque() # Initialize the send queue\n self._recvQueue = deque() # Initialize the recieve queue\n\n # Configure the logger\n self._logger = logging.getLogger('BeagleboneGreenWirelessConnection')\n self._logger.setLevel(LOG_LEVEL_PRINT) # Only {LOG_LEVEL} level or above will be saved\n # fh = logging.FileHandler('../Logs/BeagleboneGreenWirelessConnection.log', 'w')\n # formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n # fh.setFormatter(formatter)\n # fh.setLevel(LOG_LEVEL_SAVE) # Only {LOG_LEVEL} level or above will be saved\n # self._logger.addHandler(fh)\n\n # Set ip and port\n self._port = 12345\n self._ip = '192.168.7.2'\n\n self._state = 'Initialized'\n\n # Start never ending thread\n communicationThread = threading.Thread(target=self._innerThread, name=\"CommunicationThread\")\n communicationThread.daemon = True # Set thread as daemonic\n communicationThread.start()", "def __init__(self):\n name = \"Ensure that ALB is configured with defensive or strictest desync mitigation mode\"\n id = \"CKV_AWS_328\"\n supported_resources = [\"aws_lb\", \"aws_alb\", \"aws_elb\"]\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, 
id=id, categories=categories, supported_resources=supported_resources)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates required elements for creating a network interface.
def _CreateNetworkInterfaceElements(
        self,
        name_prefix: str,
        region: Optional[str] = None) -> Tuple[Any, ...]:

    if not region:
        region = self.az_account.default_region

    # IP address
    public_ip_name = '{0:s}-public-ip'.format(name_prefix)
    # Virtual Network
    vnet_name = '{0:s}-vnet'.format(name_prefix)
    # Subnet
    subnet_name = '{0:s}-subnet'.format(name_prefix)
    # Network security group
    nsg_name = '{0:s}-nsg'.format(name_prefix)

    client_to_creation_data = {
        self.network_client.public_ip_addresses: {
            'resource_group_name': self.az_account.default_resource_group_name,
            'public_ip_address_name': public_ip_name,
            'parameters': {
                'location': region,
                'public_ip_allocation_method': 'Dynamic'
            }
        },
        self.network_client.virtual_networks: {
            'resource_group_name': self.az_account.default_resource_group_name,
            'virtual_network_name': vnet_name,
            'parameters': {
                'location': region,
                'address_space': {'address_prefixes': ['10.0.0.0/16']}
            }
        },
        self.network_client.subnets: {
            'resource_group_name': self.az_account.default_resource_group_name,
            'virtual_network_name': vnet_name,
            'subnet_name': subnet_name,
            'subnet_parameters': {'address_prefix': '10.0.0.0/24'}
        },
        self.network_client.network_security_groups: {
            'resource_group_name': self.az_account.default_resource_group_name,
            'network_security_group_name': nsg_name,
            'parameters': {
                'location': region,
                # Allow SSH traffic
                'security_rules': [{
                    'name': 'Allow-SSH',
                    'direction': 'Inbound',
                    'protocol': 'TCP',
                    'source_address_prefix': '*',
                    'destination_address_prefix': '*',
                    'source_port_range': '*',
                    'destination_port_range': 22,
                    'access': 'Allow',
                    'priority': 300
                }]
            }
        }
    }  # type: Dict[str, Any]

    result = []
    try:
        for client, data in client_to_creation_data.items():
            request = common.ExecuteRequest(
                client, 'begin_create_or_update', data)[0]
            request.wait()
            result.append(request.result())
    except azure_exceptions.AzureError as exception:
        raise errors.ResourceCreationError(
            'Could not create network interface elements: {0!s}'.format(
                exception), __name__) from exception
    return tuple(result)
[ "def _setup_interface(self):\n\n # Create and set the interface up.\n self._ip.link(\"add\", ifname=self.interface, kind=\"dummy\")\n dev = self._ip.link_lookup(ifname=self.interface)[0]\n self._ip.link(\"set\", index=dev, state=\"up\")\n\n # Set up default route for both IPv6 and IPv4\n self._ip.neigh(\"add\", dst='169.254.1.1', lladdr='21:21:21:21:21:21',\n state=ndmsg.states['permanent'], ifindex=dev)\n self._ip.neigh(\"add\", family=AF_INET6, dst='fe80::1', lladdr='21:21:21:21:21:21',\n state=ndmsg.states['permanent'], ifindex=dev)\n self._ip.addr(\"add\", index=dev, address=\"169.254.1.2\", mask=24)\n self._ip.route(\"add\", gateway=\"169.254.1.1\", oif=dev)\n self._ip.route(\"add\", family=AF_INET6, gateway='fe80::1', oif=dev)\n\n # Set the loopback up as well since some of the packets go through there.\n lo = self._ip.link_lookup(ifname=\"lo\")[0]\n self._ip.link(\"set\", index=lo, state=\"up\")\n\n # Return internal interface ID for later use\n return dev", "def createNIC( self, options ):\n\t\tud.debug(ud.ADMIN, ud.INFO, 'NIC create(%r)' % (options,))\n\t\tiface = uvmmn.Interface()\n\t\tif options[ 'nictype' ].startswith( 'network:' ):\n\t\t\ttyp, src = options[ 'nictype' ].split( ':', 1 )\n\t\t\tiface.type = iface.map_type( name = typ )\n\t\t\tiface.source = src\n\t\telse:\n\t\t\tiface.type = iface.map_type( name = options[ 'nictype' ] )\n\t\t\tiface.source = options[ 'source' ]\n\t\tif options[ 'driver' ] != 'auto':\n\t\t\tiface.model = options['driver']\n\t\tif options[ 'mac' ]:\n\t\t\tiface.mac_address = options[ 'mac' ]\n\t\tif options.get('target'):\n\t\t\tiface.target = options['target']\n\n\t\treturn iface", "def create(module):\n module.node.api('interfaces').create(module.attributes['name'])", "def _create_peering_interface(asys: UserAS, ixp: IXP):\n br = _get_peering_br(asys)\n ip = IXPMember.objects.filter(ixp=ixp, host=br.host).values_list('public_ip', flat=True)[0]\n port = _find_free_port(asys, ipaddress.ip_address(ip), 50000, 51000)\n return br.interfaces.create(public_ip=str(ip), public_port=port)", "def create_data_interface(vserver_name: str, interface_name: str, node_name: str, ip_address: str, ip_netmask: str) -> None:\n\n data = {\n 'name': interface_name,\n 'ip': {'address': ip_address, 'netmask': ip_netmask},\n 'enabled': True,\n 'scope': 'svm',\n 'svm': {'name': vserver_name},\n 'port': {'name': 'e0d', 'node': node_name},\n 'location': {\n 'auto_revert': True,\n 'broadcast_domain': {'name': 'Default'},\n }\n }\n\n ip_interface = IpInterface(**data)\n\n try:\n ip_interface.post()\n print(\"Ip Interface %s created successfully\" % ip_interface.ip.address)\n except NetAppRestError as err:\n print(\"Error: IP Interface was not created: %s\" % err)\n return", "def create_interface(self, node_id, interface_id, name=None, mac=None):\n # generate ip4 data\n ip4 = None\n ip4_mask = None\n if self.ip4:\n ip4 = str(self.ip4.addr(node_id))\n ip4_mask = self.ip4.prefixlen\n\n # generate ip6 data\n ip6 = None\n ip6_mask = None\n if self.ip6:\n ip6 = str(self.ip6.addr(node_id))\n ip6_mask = self.ip6.prefixlen\n\n # random mac\n if not mac:\n mac = MacAddress.random()\n\n return core_pb2.Interface(\n id=interface_id,\n name=name,\n ip4=ip4,\n ip4mask=ip4_mask,\n ip6=ip6,\n ip6mask=ip6_mask,\n mac=str(mac)\n )", "def create_network(self):\n print(\"Building mininet topology.\")\n\n self.topo = ExerciseTopo(self.hosts, self.switches.keys(), self.links,\n self.settings.logs_path, self.settings.cpu_port)\n\n switchClass = configureP4Switch(\n 
sw_path=self.settings.behavioral_model,\n json_path=self.settings.switch_json,\n log_console=True,\n pcap_dump=self.settings.pcaps_path,\n cpu_port=self.settings.cpu_port)\n\n self.net = Mininet(topo = self.topo,\n link = TCLink,\n host = P4Host,\n switch = switchClass,\n controller = None)", "def create_interface(self, device, data):\n raise NotImplementedError()", "def _create_interface(name, ip, route_dst=None):\n\n logging.debug(\"Creating %s interface.\", name)\n _ipr.link(\"add\", ifname=name, kind=\"dummy\")\n\n logging.debug(\"Assigning %s address to %s interface.\", ip, name)\n index = _ipr.link_lookup(ifname=name)[0]\n _ipr.link(\"set\", index=index, state=\"down\")\n _ipr.addr(\"add\", index=index, address=ip)\n _ipr.link(\"set\", index=index, state=\"up\")\n\n if route_dst is not None:\n # Adding new route\n _add_route(route_dst, name)", "def ex_create_network_interface(self, subnet, name=None,\r\n description=None,\r\n private_ip_address=None):\r\n raise NotImplementedError(self._not_implemented_msg)", "def create_host_only_network_interface(self):\n (host_interface, progress) = self._call(\"createHostOnlyNetworkInterface\")\n host_interface = IHostNetworkInterface(host_interface)\n progress = IProgress(progress)\n return (host_interface, progress)", "def create_network():\n with settings(warn_only=True):\n run(f'docker network create {network_name}')", "def private_network_setup(self):\n key_pair = self.create_keypair()\n security_group = self._create_security_group()\n security_groups = [{'name': security_group['name']}]\n inst1 = self._create_vm(key_pair=key_pair,\n security_groups=security_groups)\n host_name = inst1[\"OS-EXT-SRV-ATTR:hypervisor_hostname\"]\n host_zone = inst1['OS-EXT-AZ:availability_zone']\n av_zone = host_zone + ':' + host_name\n inst2 = self._create_vm(key_pair=key_pair,\n security_groups=security_groups,\n av_zone=av_zone)\n\n host_client, sw_names = self._create_vswitch(host_name, private_sw=True)\n\n ip1 = '22.22.22.2'\n net_mask = '24'\n inst1_nic_args = self._add_nic_to_vm(inst1, sw_names['privateSwitch'],\n host_client)\n linux_client1, inst1_new_nic_name = self._set_vm_ip(\n inst1, key_pair, inst1_nic_args['MAC'], ip1, net_mask)\n ip2 = '22.22.22.3'\n inst2_nic_args = self._add_nic_to_vm(inst2, sw_names['privateSwitch'],\n host_client)\n linux_client2, inst2_new_nic_name = self._set_vm_ip(\n inst2, key_pair, inst2_nic_args['MAC'], ip2, net_mask)\n private_setup = dict()\n private_setup['instances'] = [inst1, inst2]\n private_setup['linux_clients'] = [linux_client1, linux_client2]\n private_setup['new_nics'] = [inst1_new_nic_name, inst2_new_nic_name]\n private_setup['linux_ips'] = [ip1, ip2]\n private_setup['key_pair'] = key_pair\n\n return private_setup", "def _to_interface(self, element, name=None):\r\n\r\n interface_id = findtext(element=element,\r\n xpath='networkInterfaceId',\r\n namespace=NAMESPACE)\r\n\r\n state = findtext(element=element,\r\n xpath='status',\r\n namespace=NAMESPACE)\r\n\r\n # Get tags\r\n tags = self._get_resource_tags(element)\r\n\r\n name = name if name else tags.get('Name', interface_id)\r\n\r\n # Build security groups\r\n groups = self._get_security_groups(element)\r\n\r\n # Build private IPs\r\n priv_ips = []\r\n for item in findall(element=element,\r\n xpath='privateIpAddressesSet/item',\r\n namespace=NAMESPACE):\r\n\r\n priv_ips.append({'private_ip': findtext(element=item,\r\n xpath='privateIpAddress',\r\n namespace=NAMESPACE),\r\n 'private_dns': findtext(element=item,\r\n xpath='privateDnsName',\r\n 
namespace=NAMESPACE),\r\n 'primary': findtext(element=item,\r\n xpath='primary',\r\n namespace=NAMESPACE)})\r\n\r\n # Build our attachment dictionary which we will add into extra later\r\n attributes_map = \\\r\n RESOURCE_EXTRA_ATTRIBUTES_MAP['network_interface_attachment']\r\n attachment = self._get_extra_dict(element, attributes_map)\r\n\r\n # Build our extra dict\r\n attributes_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['network_interface']\r\n extra = self._get_extra_dict(element, attributes_map)\r\n\r\n # Include our previously built items as well\r\n extra['tags'] = tags\r\n extra['attachment'] = attachment\r\n extra['private_ips'] = priv_ips\r\n extra['groups'] = groups\r\n\r\n return EC2NetworkInterface(interface_id, name, state, extra=extra)", "def test_add_interface_router(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n\n subnet_setting = self.net_config.network_settings.subnet_settings[0]\n self.assertTrue(validate_subnet(\n self.neutron, self.network, subnet_setting.name,\n subnet_setting.cidr, True))\n\n self.router = neutron_utils.create_router(\n self.neutron, self.os_creds, self.net_config.router_settings)\n validate_router(\n self.neutron, self.keystone, self.net_config.router_settings.name,\n self.os_creds.project_name, True)\n\n self.interface_router = neutron_utils.add_interface_router(\n self.neutron, self.router, self.network.subnets[0])\n validate_interface_router(self.interface_router, self.router,\n self.network.subnets[0])", "def create_interface(self, name, peer_id=None):\n try:\n if self.ipdb_controller:\n name_iface_peer = name+'_'+peer_id\n self.ipdb_controller.create(ifname=name, kind='veth', peer=name_iface_peer).commit()\n return name, name_iface_peer\n except Exception:\n logging.error('Cannot create interface')\n return '',''", "def build_network(self):\n\n topo = self.MyTopo( hosts_matrix=self.hosts_matrix, \n switch_matrix=self.switch_matrix,\n switch_dps=self.switch_dps)\n self.net = Mininet(\n topo=topo, \n controller=RemoteController(\n name=\"faucet\",\n ip=\"127.0.0.1\",\n port=6653\n ))", "def test_create_host_with_interface(self):\n h = self.plugin.createAndAddHost(\"pepito\", \"linux\")\n i = self.plugin.createAndAddInterface(h, \"1.2.3.4\")\n self._plugin_controller.setLastCommandInformation(\"mock\")\n self._plugin_controller.onCommandFinished()\n self._model_controller.processAllPendingActions()\n \n self.assertTrue(i is not None, \"interface should have an ID\")\n host = self._model_controller.getHost(h)\n self.assertTrue(len(host.getAllInterfaces()) == 1, \"Host should have one interface\")\n self.assertTrue(host.getInterface(i) is not None, \"The interface should be the one we've just create\")", "def create_interface(module, switch, ip_ipv4, ip_ipv6, port, addr_type, CHANGED_FLAG, task, msg):\n output = ''\n cli = pn_cli(module)\n clicopy = cli\n cli += ' vrouter-show location %s format name no-show-headers ' % switch\n vrouter_name = run_command(module, cli, task, msg).split()[0]\n\n if addr_type == 'ipv4':\n ip = ip_ipv4\n elif addr_type == 'ipv6':\n ip = ip_ipv6\n elif addr_type == 'ipv4_ipv6':\n ip = ip_ipv4\n ip2 = ip_ipv6\n\n cli = clicopy\n cli += ' vrouter-interface-show l3-port %s ip %s ' % (port, ip)\n cli += ' format switch no-show-headers '\n 
existing_vrouter = run_command(module, cli, task, msg).split()\n existing_vrouter = list(set(existing_vrouter))\n\n point_to_point = False\n if vrouter_name not in existing_vrouter:\n # Add vrouter interface.\n cli = clicopy\n cli += ' vrouter-interface-add vrouter-name ' + vrouter_name\n cli += ' ip ' + ip\n if addr_type == 'ipv4_ipv6':\n cli += ' ip2 ' + ip2\n cli += ' l3-port ' + port\n if module.params['pn_jumbo_frames'] is True:\n cli += ' mtu 9216'\n if module.params['pn_if_nat_realm']:\n cli += ' if-nat-realm ' + module.params['pn_if_nat_realm']\n run_command(module, cli, task, msg)\n # Add BFD config to vrouter interface.\n config_args = ''\n if module.params['pn_subnet_ipv4'] == '31' or module.params['pn_subnet_ipv6'] == '127':\n point_to_point = True\n if module.params['pn_bfd']:\n config_args = ' bfd-min-rx %s bfd-multiplier %s' % (module.params['pn_bfd_min_rx'],\n module.params['pn_bfd_multiplier'])\n if config_args or point_to_point:\n cli = clicopy\n cli += ' vrouter-interface-show vrouter-name ' + vrouter_name\n cli += ' l3-port %s format nic no-show-headers ' % port\n nic = run_command(module, cli, task, msg).split()[1]\n\n cli = clicopy\n cli += ' vrouter-interface-config-add '\n cli += ' vrouter-name %s nic %s ' % (vrouter_name, nic)\n if config_args:\n cli += config_args\n if point_to_point:\n cli += ' ospf-network-type point-to-point'\n run_command(module, cli, task, msg)\n CHANGED_FLAG.append(True)\n\n output += ' %s: Added vrouter interface with ip %s' % (\n switch, ip\n )\n if addr_type == 'ipv4_ipv6':\n output += ' ip2 ' + ip2\n output += ' on %s \\n' % vrouter_name\n if module.params['pn_bfd']:\n output += ' %s: Added BFD configuration to %s \\n' % (switch,\n vrouter_name)\n if point_to_point:\n output += ' %s: Added OSPF network type as point-to-point to %s \\n' % (switch, vrouter_name)\n\n return CHANGED_FLAG, output" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Just enter a blank line at the 'testpmd> ' prompt.
def blank_enter(self):
    time.sleep(2)
    self.dut.send_expect(" ", "testpmd> ")
[ "def step_see_prompt(context):\n context.cli.expect('wharfee> ')", "def step_expect_prompt(context):\n context.cli.expect('wharfee> ')", "def test_prompting(self):\n pass", "def prompt(self):\n\t\t_globals._console.write(f'{self.prompt_str} ')", "def test_guitab_print_tab_blank(monkeypatch, capfd):\n\n user_input = iter(['-p', '-d'])\n monkeypatch.setattr('builtins.input', lambda _: next(user_input))\n main()\n out, err = capfd.readouterr()\n assert out == welcome_message + global_test_data.print_blank_tab", "def prompt(self):\n self._prompt_docstring()\n self.format()", "def _set_prompt(self):\n if not self.db:\n self.prompt = '>> '\n return\n else:\n self.prompt = \"{}> \".format(self.cwd.title)\n return", "def get_banner():\n return \"** Shell prompt **\\n\"", "def vqa_prompt(self, question, answer=None) -> str:", "def do_prompt(self, flag):\n\n if flag == 'on':\n self.prompt = '(%s:%d) ' % (self.__plugin.name, os.getpid())\n elif flag == 'off':\n self.prompt = ''", "def prompt(self, message):\n raise NotImplementedError()", "def do_prompt(self, flag):\n\n if flag == 'on':\n self.prompt = '(%s:%d) ' % (self.name, os.getpid())\n elif flag == 'off':\n self.prompt = ''", "def no_command(self):\n\n self.write(\"\"\"%s %s: no command specified'\nTry `iptables -h\\' or \\'iptables --help\\' for more information.\\n\"\"\" %\n (command_iptables.APP_NAME, command_iptables.APP_VERSION))\n self.exit()", "def test_nash_basic(game_str):\n with stdin(game_str), stderr() as err:\n assert run(\"nash\"), err.getvalue()", "def launch_app(self, pmd_param=\" \"):\n self.pmdout.start_testpmd(\"all\", param=pmd_param)", "def prompt(self):\n return self.eval_prompt(self.env.get('PS2', u'> ')\n if self.is_multiline else self.env.get('PS1', u'% '))", "def before_prompt():\n sys.stdout.write(BEFORE_PROMPT)\n # Flushing is important as the command timing feature is based on\n # BEFORE_OUTPUT and BEFORE_PROMPT\n sys.stdout.flush()", "def after_prompt():\n sys.stdout.write(AFTER_PROMPT)", "def promptFromScratch(self):\n\n # Draw prompt message\n self.stdscr.addstr(self.height - 1, 0, self.prompt)\n self.stdscr.addstr(self.height - 1, len(self.prompt) + 1,\n self.string[self.view:self.view + self.width -\n len(self.prompt) - 2])\n\n # Fill with spaces if nothing is here\n if self.position == len(self.string):\n spacepos = len(self.prompt) + 1 + self.position - self.view\n self.stdscr.addstr(self.height - 1, spacepos, ' ' *\n (self.width - 1 - spacepos))\n\n # Place cursor\n self.stdscr.move(self.height - 1, len(self.prompt) + 1 + self.position -\n self.view)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the detail info from the output of the pmd command 'show port info <port_id>'.
def get_detail_from_port_info(self, key_str, regx_str, port):
    out = self.dut.send_expect("show port info %d" % port, "testpmd> ")
    find_value = self.get_value_from_str(key_str, regx_str, out)
    return find_value
[ "def get_ports(module, switch, peer_switch, task, msg):\n cli = pn_cli(module)\n cli += ' switch %s port-show hostname %s' % (switch, peer_switch)\n cli += ' format port no-show-headers '\n return run_command(module, cli, task, msg).split()", "def getPortDescription(self):\n portnum = self.getPortNumber()\n return port_descriptions[portnum]", "def list_ports(self):\n\n for i in range(len(self.ports)):\n print(\"{} : {}\".format(i, self.ports[i]))", "def print_info(self):\n\n print \"\"\"src_port: %d\\t dst_port: %d\\t sequence_num: %d\\t ack_num: %d\n data_offset: %d\\t urg: %d\\t ack: %d\\t psh: %d\\t rst: %d\\t syn: %d\\t fin: %d\\t\n window_size: %d\\t checksum: %s\\t urgent_pointer: %s\\t opt_paddings: %s\"\"\" % (\n self.src_port, self.dst_port, self.sequence_num,\n self.ack_num, self.data_offset, self.flag_urg, \n self.flag_ack, self.flag_psh, self.flag_rst, \n self.flag_syn, self.flag_fin, self.window_size, \n self.checksum, self.urgent_pointer, self.opt_paddings)", "def get_ports(params):\n output = subprocess.Popen((\"/usr/bin/snmpwalk\", \"-On\", \"-v2c\", \"-c\",\n params['community'], params['target'], port_name_prefix),\n stdout=subprocess.PIPE).communicate()[0]\n ports = {}\n for line in output.split(\"\\n\"):\n m = re.match(\n r'[.0-9]+\\.(\\d+) = STRING: \"?(?:ethernet)?([0-9/]+)\"?', line)\n if m:\n if not params['pattern'] or re.match(params['pattern'], m.group(2)):\n ports[m.group(2)] = m.group(1)\n return ports", "def _get_port_profile_status(self):\n return self.__port_profile_status", "def _printer_details(self,printer):\n\n\t\tresult = {}\n\t\texpr = re.compile('\\s+([^\\s\\:]+)\\:\\s*(.*?)$')\n\t\t(stdout,stderr,status) = self._shell_command(['/usr/bin/lpstat','-l','-p',printer],{'LANG':'C'})\n\t\tif status == 0:\n\t\t\tfor line in stdout.split(\"\\n\"):\n\t\t\t\tmobj = expr.match(line)\n\t\t\t\tif mobj:\n\t\t\t\t\tresult[mobj.group(1).lower()] = mobj.group(2)\n\t\tresult['server'] = self._hostname\n\t\treturn result", "def get_port(self) -> int:\n return self.settings['prometheus_port']", "def show_ports(self):\n print(\"These are your detected MIDI devices:\", '\\n')\n for port in self.__ports:\n print(self.__ports.index(port), \" -> \", port)", "def get_card_info(self,device):\n cmd = \"vgc-monitor -d %s | grep \\\"Card Info\\\"\"%device\n o = self.run_command_chk_rc(cmd)\n out = o['output'][1]\n out_a = out.split(\":\")\n\n return out_a[1].strip()", "def _info():\n\n # This is what prints out in the default command page, it should be\n # as short as possible.\n emitter.publish(\"Proxy and VPN access to DC/OS cluster\")\n return 0", "def _get_last_received_port_profile_info(self):\n return self.__last_received_port_profile_info", "def mem_info(verbose = False) -> None:", "def get_info(cls, port):\n # Exception handling if no connection can be made to the device.\n try:\n device = ArduinoVISADevice(port)\n return device.get_hardware_info()\n except:\n return", "def get_info_link(self, hostname, port):\n # gets switch login info that sent syslog\n ip, username, password = self.get_syslog_host_tower_info(hostname)\n # log into AOS-CX switch\n login_url = \"https://\" + ip + \":443/rest/v1/\"\n sesh = session.login(login_url, username, password)\n try:\n response = lldp.get_lldp_neighbor_info(int_name=port,\n s=sesh, url=login_url,\n depth=3)\n if not response:\n self.logger.error(\"Failed REST called to \"\n \"AOS-CX: {0}\".format(ip))\n session.logout(s=sesh, url=login_url)\n exit(-1)\n ip_addr = None\n if response[\"interface\"][\"name\"] == port:\n ip_addr_tmp 
= response[\"neighbor_info\"][\"mgmt_ip_list\"]\n # In case both IPv4 and IPv6 addresses are found, IPv4 is used\n if ',' in str(ip_addr_tmp):\n ip_addr_split = ip_addr_tmp.split(',')\n for address in ip_addr_split:\n if ':' not in address:\n ip_addr = address\n # Protects against MAC address populating for mgmt address\n elif ':' not in str(ip_addr_tmp):\n ip_addr = ip_addr_tmp\n else:\n self.logger.error(\"\\nERROR: IPv4 address not populated on\"\n \"{0} - found {1} \".format(port,\n ip_addr_tmp))\n mac_addr = response[\"chassis_id\"]\n device_name = response[\"neighbor_info\"][\"chassis_name\"]\n session.logout(s=sesh, url=login_url)\n return [ip_addr, mac_addr, device_name]\n except Exception as error:\n self.logger.error(\"ERROR: %s\", error)\n session.logout(s=sesh, url=login_url)\n exit(-1)\n # registers error if port not found on core switch\n self.logger.error(\"ERROR: Failed to retrieve \"\n \"LLDP info port %s not found on %s\", port, ip)\n session.logout(s=sesh, url=login_url)\n exit(-1)", "def get_ports(self):\n raise NotImplementedError() #pragma: no cover", "def netflow_collector_port(self):\n return self.data[\"netflow_collector_port\"]", "def showDetails(self):\n for k,v in self._parser.getDetailsDict().items():\n print \"%11s : %s\" % (k, str(v)[:60])", "def print_serial_ports():\n print(\"## Available serial ports:\")\n for p in serial_ports():\n print(\"## \", p)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the specified port MAC.
def get_port_mac(self, port_id):
    return self.get_detail_from_port_info(
        "MAC address: ", "([0-9A-F]{2}:){5}[0-9A-F]{2}", port_id)
[ "def get_nic_by_mac(self, mac):\n results = list(self.baremetal.ports(address=mac, details=True))\n try:\n return results[0]\n except IndexError:\n return None", "def get_macbookmac():\n input = os.popen('ifconfig en0')\n return ''.join([x.split()[1] for x in input if 'ether' in x])", "def _get_mac(cont):\n try:\n return cont['NetworkSettings']['Networks']['bridge']['MacAddress']\n except KeyError:\n return None", "def get_mac(interface='eth0'):\n\n nics = psutil.net_if_addrs().get(interface) # None if interface not in nics.\n if nics:\n for interface in nics:\n if interface.family == 17:\n return interface.address\n else: # if interface was not found return empty adress\n return '00:00:00:00:00:00'", "def get_interface_mac(self, device, interface):\n out = device.adb.shell(\"ifconfig %s\" % interface)\n completed = out.decode('utf-8').strip()\n res = re.match(\".* HWaddr (\\S+).*\", completed, re.S)\n asserts.assert_true(res, 'Unable to obtain MAC address for interface %s'\n % interface)\n return res.group(1)", "def get_our_mac(self):\n (status, mac) = self.__device.get_our_mac()\n self.__device.decode_error_status(status, cmd='get_our_mac', print_on_error=True)\n return \"%s\" % mac", "def get_mac_str(valve_index, port_num):\n two_byte_port_num = \"%04x\" % port_num\n two_byte_port_num_formatted = two_byte_port_num[:2] + \":\" + two_byte_port_num[2:]\n return \"00:00:00:%02x:%s\" % (valve_index, two_byte_port_num_formatted)", "def get_mac_address(interface):\n\ttry:\n\t\tline = os.popen(\"ifconfig {} | grep ether\".format(interface)).read()\n\t\treturn line.split()[1]\n\texcept:\n\t\tprint \"error: could not get mac address from device \" + interface\n\t\texit(0)", "def wifi_mac(self) -> str:\n self._logger.info(\"Retrieving WiFi MAC address...\")\n return self._device_info().get(\"MAC\")", "def getHostMac(self, hostname):\n try:\n hostNum = int(self.getHostNumber(hostname))\n url = self.baseURL + \"host/\"+str(hostNum)\n req = requests.get(url, headers=self.header)\n macAddr = req.json()['primac']\n return macAddr\n except Exception:\n self.log.exception('%s', \"Failed to connect to the FOG server\")", "def get_port(self) -> str:\n return self.__serial.port", "def lookup_host_mac(self, mac):\n msg = pypureomapi.OmapiMessage.open(\"host\")\n msg.obj.append((\"hardware-address\", pypureomapi.pack_mac(mac)))\n response = self.query_server(msg)\n if response.opcode != pypureomapi.OMAPI_OP_UPDATE:\n raise pypureomapi.OmapiErrorNotFound()\n try:\n return pypureomapi.unpack_ip(dict(response.obj)[\"ip-address\"])\n except KeyError: # ip-address\n raise pypureomapi.OmapiErrorNotFound()", "def get_src_mac(self):\n (status, mac) = self.__device.get_src_mac()\n self.__device.decode_error_status(status, cmd='get_src_mac', print_on_error=True)\n return \"%s\" % mac", "def NetworkElement_getMacAddressIDL(self):\n pass", "def MacAddr(self):\n if self.force_auto_sync:\n self.get('MacAddr')\n return self._MacAddr", "def get_random_mac(base_mac):\n\n return ':'.join(\n \"{:02x}\".format(random.randint(0x00, 0xff))if p == '00' else p\n for p in base_mac\n )", "def _get_phy_port_name(self):\n return self.__phy_port_name", "def mac_get_display():\n host_name = socket.gethostname() \n host_ip = socket.gethostbyname(host_name)\n return host_ip, \"%s:0\" % host_ip", "def get_port():\n port = 0\n if sys.platform.startswith('darwin'):\n port = glob.glob('/dev/tty.usbmodem*')[0]\n elif sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(32)]\n for p in ports:\n try:\n s = 
serial.Serial(p)\n s.close()\n port = p\n except (OSError, serial.SerialException):\n pass\n return port", "def get_machine_by_mac(self, mac):\n nic = self.get_nic_by_mac(mac)\n if nic is None:\n return None\n else:\n return self.get_machine(nic['node_uuid'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the socket id which the specified port is connected to.
def get_port_connect_socket(self, port_id):
    return self.get_detail_from_port_info("Connect to socket: ", "\d+", port_id)
[ "def socket_id(self):\n return self._test_protocol.socket_id", "def socketPort(self):\n\n if not self.isBound():\n return None\n\n return self._port", "def select_unused_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('localhost', 0))\n addr, port = s.getsockname()\n s.close()\n return port", "def port_id(self):\n # type: () -> int\n return self._get_property('port_id')", "def port_id(self) -> str:\n return self._port_id", "def get_socket(self, addr):\n return self.connections[addr].sock", "def port_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port_id\")", "def l4_port(port, proto, both=True):\n try:\n name = socket.getservbyport(port, proto)\n if both:\n name = \"{} ({})\".format(name, port)\n except:\n name = str(port)\n return name", "def _find_free_port():\n import socket\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # Binding to port 0 will cause the OS to find an available port for us\n sock.bind((\"\", 0))\n port = sock.getsockname()[1]\n sock.close()\n # NOTE: there is still a chance the port could be taken by other processes.\n return port", "def get_random_lport():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind(('127.0.0.1', 0))\n addr, localport = sock.getsockname()\n sock.close()\n return localport", "def get_fluentd_syslog_src_port():\n for port in range(25229, 25424):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('', port))\n s.close()\n return port\n except Exception as e:\n pass\n return -1", "def port(self):\n\n return self.server_address[1]", "def docker_mapped_port(cid, port):\n output = subprocess.check_output('docker port %s %s' % (cid, port), shell=True)\n return int(output.split(':', 2)[1])", "def get_free_port():\n # TODO: Prone to errors if the OS assigns port to someone else before use.\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('', 0))\n free_port = s.getsockname()[1]\n s.close()\n\n return free_port", "def getPort(self):\n return int(self[SipViaHeader.PARAM_PORT]) if SipViaHeader.PARAM_PORT in self else None", "def bind_port(socket, ip, port):\n connection = 'tcp://%s' % ip\n if port <= 0:\n port = socket.bind_to_random_port(connection)\n else:\n connection += ':%i' % port\n socket.bind(connection)\n return port", "def get_port(self, service_id=protocol.SERVICE_UDP):\n return self._services[service_id]", "def get_port_from_httpserver():\n server_socket = (getattr(cherrypy.server, 'httpserver', None) and\n getattr(cherrypy.server.httpserver, 'socket', None))\n bind_addr = server_socket and server_socket.getsockname()\n return bind_addr[1] if (bind_addr and isinstance(bind_addr, tuple)) else 0", "def get_port(self) -> str:\n return self.__serial.port", "def _get_port():\n with TrafficTest._port_lock:\n TrafficTest._port += 1\n return TrafficTest._port" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the socket id which the specified port memory is allocated on.
def get_port_memory_socket(self, port_id):
    return self.get_detail_from_port_info(
        "memory allocation on the socket: ", "\d+", port_id)
[ "def _find_free_port():\n import socket\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # Binding to port 0 will cause the OS to find an available port for us\n sock.bind((\"\", 0))\n port = sock.getsockname()[1]\n sock.close()\n # NOTE: there is still a chance the port could be taken by other processes.\n return port", "def select_unused_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('localhost', 0))\n addr, port = s.getsockname()\n s.close()\n return port", "def get_free_port():\n # TODO: Prone to errors if the OS assigns port to someone else before use.\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('', 0))\n free_port = s.getsockname()[1]\n s.close()\n\n return free_port", "def port_id(self):\n # type: () -> int\n return self._get_property('port_id')", "def port_id(self) -> str:\n return self._port_id", "def docker_mapped_port(cid, port):\n output = subprocess.check_output('docker port %s %s' % (cid, port), shell=True)\n return int(output.split(':', 2)[1])", "def socketPort(self):\n\n if not self.isBound():\n return None\n\n return self._port", "def socket_id(self):\n return self._test_protocol.socket_id", "def _get_port():\n with TrafficTest._port_lock:\n TrafficTest._port += 1\n return TrafficTest._port", "def port_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port_id\")", "def get_port(self, service_id=protocol.SERVICE_UDP):\n return self._services[service_id]", "def find_free_port(self, interface='127.0.0.1', socket_family=AF_INET,\n socket_type=SOCK_STREAM):\n address = getaddrinfo(interface, 0)[0][4]\n probe = socket(socket_family, socket_type)\n try:\n probe.bind(address)\n return probe.getsockname()\n finally:\n probe.close()", "def get_socket(self, addr):\n return self.connections[addr].sock", "def port_num(name):\n for num in self.port_map:\n if self.port_map[num] == name:\n return num\n return -1", "def _get_self_port(self, dev_id, dev_port):\n self_pid = None\n for pmap in self.portmap:\n if pmap[0] == dev_id and pmap[1] == dev_port:\n self_pid = pmap[2]\n break\n if self_pid is None:\n raise Exception(\"Cannot find DeviceID:{0} PortID:{1} in port map.\".format(dev_id, dev_port))\n return self_pid", "def get_port_id(self, vm_id):\n port_id = None\n return port_id", "def get_fluentd_syslog_src_port():\n for port in range(25229, 25424):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('', port))\n s.close()\n return port\n except Exception as e:\n pass\n return -1", "def unique_id_from_own_addr(self):\n return identifier(*self.listen_addr)", "def port_index(self):\n return self.__port_index", "def get_free_tcp_address() -> str:\n tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n tcp.bind((\"\", 0))\n host, port = tcp.getsockname()\n tcp.close()\n return \"tcp://127.0.0.1:{}\".format(port)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the current link status of the specified port.
def get_port_link_status(self, port_id):
    return self.get_detail_from_port_info("Link status: ", "\d+", port_id)
[ "def get_port_status(cluster, lswitch_id, port_id):\n try:\n r = do_request(HTTP_GET,\n \"/ws.v1/lswitch/%s/lport/%s/status\" %\n (lswitch_id, port_id), cluster=cluster)\n except exception.NotFound as e:\n LOG.error(_(\"Port not found, Error: %s\"), str(e))\n raise exception.PortNotFoundOnNetwork(\n port_id=port_id, net_id=lswitch_id)\n if r['link_status_up'] is True:\n return constants.PORT_STATUS_ACTIVE\n else:\n return constants.PORT_STATUS_DOWN", "def getPortStatus(self, timeout = 100):\n\t\treturn self.__devhandle.controlMsg(requestType = 0xa1,\n\t\t\t\t\t\t\t\t\t\t request = 1,\n\t\t\t\t\t\t\t\t\t\t value = 0,\n\t\t\t\t\t\t\t\t\t\t index = self.__intf,\n\t\t\t\t\t\t\t\t\t\t buffer = 1,\n\t\t\t\t\t\t\t\t\t\t timeout = timeout)[0]", "def _get_port_profile_status(self):\n return self.__port_profile_status", "def status(self, port):\n pstatus = ABSENT\n if port.origin in self.db:\n portname = port.attr['pkgname'].rsplit('-', 1)[0]\n for pkgname in self.db[port.origin]:\n if pkgname.rsplit('-', 1)[0] == portname:\n pstatus = max(pstatus,\n version(pkgname, port.attr['pkgname']))\n return pstatus", "def get_link_status(self, link_id: ObjectId) -> Any:\n link = self.get_unsafe_link_document(link_id)\n return link['status']", "def check_port(device, port_device):\r\n url = base_url + '/devices/' + device + '/ports'\r\n print(url)\r\n res = requests.get(url, auth=('onos', 'rocks'))\r\n print(res.status_code)\r\n if (res.status_code != 200):\r\n pass\r\n ports = res.json()['ports']\r\n print(ports)\r\n for port in ports:\r\n if port['port'] != port_device:\r\n continue\r\n if port['isEnabled'] == True:\r\n continue\r\n if (port['port'] == port_device) and (port['isEnabled'] == False):\r\n print(\"Link failure at switch {0}: port {1}\".format(\r\n device, port_device))\r\n return False\r\n return True", "def check_telnet_status():\n port = int(\n str(subprocess.Popen(\"nc -z 127.0.0.1 22; echo $?\", stdout=subprocess.PIPE, shell=True).communicate()[0]).split(\n \"\\\\n\")[0].split(\"'\")[1])\n if port == 1:\n # Port is closed\n return RAG.RED\n elif port == 0:\n # Port is closed\n return RAG.GREEN", "def get_status():\n \n return db.get_db().getRoot().getS(ns.l2tpDeviceStatus, rdf.Type(ns.L2tpDeviceStatus))", "def _get_port():\n with TrafficTest._port_lock:\n TrafficTest._port += 1\n return TrafficTest._port", "def status(self):\n response = requests.get(\"http://%s:%d/v1/status\" % (self.propsd_server, self.propsd_port))\n return json.loads(response.text)", "def get_status(self) -> LoopInfo:\n\n info = LoopInfo()\n fcntl.ioctl(self.fd, self.LOOP_GET_STATUS64, info)\n return info", "def counter_get_state(self, port):\n return self.comm('counter_get_state {0}'.format(port)) == '1'", "def get_connection_status():\n command = ['ping', '-c', '2', '-I', '3g-wan', '-W', '1', '8.8.8.8']\n return run_command(command)", "def check_if_port_available_factory(port):\n def check_if_port_available():\n \"\"\"\n Check if a port is in use\n :return bool not_in_use: True if not in use, False if in use\n \"\"\"\n check_port_command = \"netstat -tuna | grep -E \\\"{:d}\\s\\\"\".format(port)\n return not check_nonzero_exit(check_port_command)\n return check_if_port_available", "def check_port_status(self, port):\n # check existing ports dbqp has created\n dbqp_ports = self.check_dbqp_ports()\n if port not in dbqp_ports and not self.is_port_used(port):\n return 1\n else:\n return 0", "def set_port_state(self, port_no, link_up, dp_id=None):\n if dp_id is None:\n dp_id = self.DP_ID\n valve = 
self.valves_manager.valves[dp_id]\n self.apply_ofmsgs(\n valve.port_status_handler(\n port_no,\n ofp.OFPPR_MODIFY,\n 0 if link_up else ofp.OFPPS_LINK_DOWN,\n [],\n self.mock_time(0),\n ).get(valve, [])\n )\n self.port_expected_status(port_no, 1 if link_up else 0)", "def wallStationStatus(self, link, station):\n\t\tstationHex = hex(0x100 * link + station)[2:]\n\t\tresp = StatusResponse(self.port.command(\"803 \" + stationHex))\n\t\t\n\t\tstatus = []\n\t\tfor val in resp.results[1:]:\n\t\t\tstatus.append( string.atoi(val, 16) )\n\t\treturn status", "def get_status(self) -> NodeManagerStatus:", "def on_port_status(self, evt):\n msg = evt.msg\n dpath = msg.datapath\n port = msg.desc\n reason = msg.reason\n\n _LOG.debug(\"dp_id:%s port:%s reason: %d\", dpath.id, port, reason)\n\n if fibcdbm.dps().get_mode(dpath.id) is None:\n return\n\n def _enter():\n ofp = dpath.ofproto\n return reason != ofp.OFPPR_DELETE\n\n self.send_dp_port_config(dpath, port, _enter())", "def _portInUse(port):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sck:\n inUse = sck.connect_ex(('localhost', port)) == 0\n logging.debug(f' >>> Port {port} is in use: {inUse} <<<')\n return inUse" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the current link speed of the specified port.
def get_port_link_speed(self, port_id):
    return self.get_detail_from_port_info("Link speed: ", "\d+", port_id)
[ "def _get_speed(self):\n reply = self.query(command = b'/1?37\\r', port = self.port)\n number = reply['value']\n debug('get_speed(): reply = {}, and number = {}'.format(reply,number))\n return reply", "def get_speed(self):\n return float(self.send('speed?'))", "def get_speed(self):\n reply = self._speed\n return reply", "def _get_channel_speed(self):\n return self.__channel_speed", "async def get_laser_scan_speed(self):\n\n return unpack('I', await self._execute_command('#GetLaserScanSpeed').content)[0]", "def get_speed(self):\n if self.speed and self.period:\n return self.speed / 1024\n else:\n return 0", "def get_fan_speed(gpu_number):\n command = 'ethos-smi -g {0} | grep \"* Fan Speed\" | cut -f 5 -d \" \"'.format(gpu_number)\n fan_speed = subprocess.check_output(command, shell=True).decode('utf-8')\n return int(fan_speed[:-2])", "def speed(self):\n\n if \"speed\" not in self.ds:\n var = xroms.speed(self.ds.u, self.ds.v, self.grid, hboundary=\"extend\")\n self.ds[\"speed\"] = var\n return self.ds.speed", "def getSpeedTarget(self):\n return self.__speed_target", "def get_speed(self):\n velocity = self.get_linear_velocity()\n speed = np.linalg.norm(velocity)\n return speed", "def _set_port_speed(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'SPEED_2500MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_100MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_1GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_40GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_10MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_25GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_5GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_UNKNOWN': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_2500MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_10GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_100GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_UNKNOWN': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_50GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_25GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_1GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_10MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_5GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_100GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', 
'@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_10GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_50GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_40GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_100MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}},), is_leaf=True, yang_name=\"port-speed\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ethernet', defining_module='openconfig-if-ethernet', yang_type='identityref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"port_speed must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-if-ethernet:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'SPEED_2500MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_100MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_1GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_40GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_10MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_25GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_5GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_UNKNOWN': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_2500MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_10GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_100GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_UNKNOWN': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_50GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_25GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_1GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_10MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_5GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_100GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_10GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_50GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 
'openconfig-if-ethernet'}, 'SPEED_40GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_100MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}},), is_leaf=True, yang_name=\"port-speed\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ethernet', defining_module='openconfig-if-ethernet', yang_type='identityref', is_config=True)\"\"\",\n })\n\n self.__port_speed = t\n if hasattr(self, '_set'):\n self._set()", "def _set_port_speed(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'SPEED_2500MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_100MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_1GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_40GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_10MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_25GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_5GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_UNKNOWN': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_2500MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_10GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_100GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_UNKNOWN': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_50GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_25GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_1GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_10MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_5GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_100GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_10GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_50GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_40GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_100MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}},), is_leaf=True, yang_name=\"port-speed\", 
parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ethernet', defining_module='openconfig-if-ethernet', yang_type='identityref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"port_speed must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-if-ethernet:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'SPEED_2500MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_100MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_1GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_40GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_10MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_25GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_5GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_UNKNOWN': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_2500MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_10GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_100GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_UNKNOWN': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_50GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_25GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_1GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_10MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_5GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_100GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_10GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_50GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'SPEED_40GB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}, 'oc-eth:SPEED_100MB': {'@namespace': 'http://openconfig.net/yang/interfaces/ethernet', '@module': 'openconfig-if-ethernet'}},), is_leaf=True, yang_name=\"port-speed\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ethernet', defining_module='openconfig-if-ethernet', yang_type='identityref', is_config=False)\"\"\",\n })\n\n self.__port_speed = 
t\n if hasattr(self, '_set'):\n self._set()", "def readactspindlespeed(self):\n\t\tst=self._req_rdsingle(1,1,0x25)\n\t\treturn self._decode8(st['data']) if st['len']==8 else None", "def getSpeedOnStep(self, timeStep):\n if self.hasSpeedOnStep(timeStep):\n return self._speed[timeStep]\n else: # default case zero value\n return 0", "def travel_speed(self):\n return self._travel_speed", "def getWheelSpeed(diameter,rps):\n circ=math.pi*diameter\n return rps*circ", "def get_throughput_at_port(self, port):\n\n return self.current_port_throughput[(self.switch_id, port)]", "def speedMultiplier(self) -> float:\n return self._getMultiplier('speed')", "def motor_speed(self, mot):\n microbit.i2c.write(0x10, b'\\x00')\n # 0x10 -> adresse / 4 -> lecture de 4 bytes\n speed_x = microbit.i2c.read(0x10, 4)\n return_speed = -1\n\n # 0 -> G / 1 -> D\n if(mot == self.MG):\n if(round(speed_x[1]) < 20 and round(speed_x[1]) != 0):\n return_speed = round(speed_x[1]) + 255\n else:\n return_speed = round(speed_x[1])\n\n elif(mot == self.MD):\n if(round(speed_x[3]) < 20 and round(speed_x[3]) != 0):\n return_speed = round(speed_x[3]) + 255\n else:\n return_speed = round(speed_x[3])\n\n return return_speed", "def speed2weight(speed):\n sub_str = speed.split('x')\n nlinks = int(sub_str[0])\n s1 = sub_str[1]\n if s1 == 'SDR':\n sp = 2\n color = 'red'\n elif s1 == 'DDR':\n sp = 4\n color = 'red'\n elif s1 == 'QDR':\n sp = 8\n color = 'blue'\n elif s1 == 'FDR10':\n sp = 8 # yep, equivalent to QDR, at best\n color = 'red'\n elif s1 == 'FDR':\n sp = 13.64\n color = 'green'\n elif s1 == 'EDR':\n sp = 24.24\n color = 'green'\n elif s1 == 'HDR':\n sp = 50 # roughly??\n color = 'green'\n elif s1 == 'NDR':\n sp = 100 # roughly?\n color = 'green'\n elif s1 == 'XDR':\n sp = 250 # the future?\n color = 'green'\n else:\n raise NotImplementedError('Support for Infiniband speed {} not implemented'.format(s1))\n return (nlinks * sp, color)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the specified port link mode, duplex or simplex.
def get_port_link_duplex(self, port_id):
    return self.get_detail_from_port_info("Link duplex: ", "\S+", port_id)
[ "def get_bond_mode(self, bond_port):\n return self.get_info_from_bond_config(\"Bonding mode: \", \"\\d*\", bond_port)", "def get_port(self) -> str:\n return self.__serial.port", "def get_port():\n port = 0\n if sys.platform.startswith('darwin'):\n port = glob.glob('/dev/tty.usbmodem*')[0]\n elif sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(32)]\n for p in ports:\n try:\n s = serial.Serial(p)\n s.close()\n port = p\n except (OSError, serial.SerialException):\n pass\n return port", "def getPort(self):\n return int(self[SipViaHeader.PARAM_PORT]) if SipViaHeader.PARAM_PORT in self else None", "def _get_port_control(self):\n return self.__port_control", "def action_port_protocol(self, port_line):\r\n\t\treturn appp(self.row, port_line)", "def r_port_obj(self, port):\r\n for switch_obj in self.r_switch_objects():\r\n port_obj = switch_obj.r_port_obj(port)\r\n if port_obj is not None:\r\n return port_obj\r\n return None", "def _get_mode(self, interface):\n url = self._construct_url(interface, suffix='mode')\n response = self._make_request('GET', url)\n root = etree.fromstring(response.text)\n mode = root.find(self._construct_tag('vlan-mode')).text\n return mode", "def port(self):\n return self._val.port or DEFAULT_PORTS.get(self._val.scheme)", "def PortRead():\r\n global gTelnetConn\r\n if gTelnetConn == None:\r\n OpenTelnet()\r\n \r\n data = gTelnetConn.read()\r\n return data;", "def _get_flexport(self):\n return self.__flexport", "def __pget_wifi_port(self):\n try:\n return self.__cp.getint(SEC, KEY_WIFI_PORT)\n except (ValueError, AttributeError), e:\n log.warning(\"config '%s' malformed (%s)\" % (KEY_WIFI_PORT, e))\n return 34271", "def _get_hardware_port(self):\n return self.__hardware_port", "def get_port_resistor(self, port):\n assert self.PortA <= port <= self.PortC\n read = self._device.readRaw(PI3HDMI336_TOTAL_BYTES)\n if port == self.PortA:\n return int(read[PI3HDMI336_OFFSET_BYTE1] & PI3HDMI336_BYTE1_RT_PORT_A != 0)\n elif port == self.PortB:\n return int(read[PI3HDMI336_OFFSET_BYTE1] & PI3HDMI336_BYTE1_RT_PORT_B != 0)\n else:\n return int(read[PI3HDMI336_OFFSET_BYTE1] & PI3HDMI336_BYTE1_RT_PORT_C != 0)", "def l4_port(port, proto, both=True):\n try:\n name = socket.getservbyport(port, proto)\n if both:\n name = \"{} ({})\".format(name, port)\n except:\n name = str(port)\n return name", "def get_com_port():\n ports = list(serial.tools.list_ports.comports())\n\n #Is list ports empty?\n if not ports:\n logging.critical(\"No Serial Ports found! Exiting now\")\n exit()\n\n #If there is only one port available, automatically use that one\n if len(ports) == 1:\n return ports[0].device\n\n #Display all available ports if there are more than one available\n print(\"Available Ports: \")\n for port in ports:\n print(port)\n return input(\"Enter Xbee Serialport: \")", "def getPortToThe(self, direction):\n # For some odd reason, getPortToThe('whatever').getPortNumber() doesn't\n # work. 
So getPortNumberToThe('whatever') was created to work around this.\n return self.port_to_the[direction]", "def host_mode(self):\n ret = self._get_attr(\"hostMode\")\n return PortMode(ret)", "def get_matching_multiplex_port(self,name):\n\n # short circuit: if the attribute name already exists return none\n # if name in self._portnames: return None\n # if not len([p for p in self._portnames if name.startswith(p) and name != p]): return None\n\n matching_multiplex_ports = [self.__getattribute__(p) for p in self._portnames \n if name.startswith(p) \n and name != p \n and hasattr(self, p) \n and self.__getattribute__(p).is_multiplex\n ]\n\n for port in matching_multiplex_ports:\n return port\n\n return None", "def get_port(args, default_filename=\"conf/uart_path.txt\"):\n if args.COM != None:\n port = \"COM\" + str(args.COM)\n elif args.ttyUSB != None:\n port = \"/dev/ttyUSB\" + str(args.ttyUSB)\n elif args.ttyS != None:\n port = \"/dev/ttyS\" + str(args.ttyS)\n else:\n port = read_dev_path(default_filename)\n\n return port" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the promiscuous mode of a port.
def get_port_promiscuous_mode(self, port_id):
    return self.get_detail_from_port_info("Promiscuous mode: ", "\S+", port_id)
[ "def get_switching_mode(self):\n self.board_socket.send(bytes.fromhex(\"10 00 01 00\"))\n temp = self.board_socket.recv(1024)\n return(temp[3])", "def get_bond_mode(self, bond_port):\n return self.get_info_from_bond_config(\"Bonding mode: \", \"\\d*\", bond_port)", "def promisc_mode_policy(self):\n ret = self._get_attr(\"promiscModePolicy\")\n return NetworkAdapterPromiscModePolicy(ret)", "def _get_mode(self, interface):\n url = self._construct_url(interface, suffix='mode')\n response = self._make_request('GET', url)\n root = etree.fromstring(response.text)\n mode = root.find(self._construct_tag('vlan-mode')).text\n return mode", "async def get_network_ip_mode(self):\n\n return await self._execute_command('#GetNetworkIpMode').content.decode()", "def allow_promiscuous(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_promiscuous\")", "def __get_mode_modem(self, comm):\n try:\n resp=comm.sendatcommand('E0V1')\n return True\n except:\n return False", "def get_inquiry_mode(self):\n\n # save current filter\n old_filter = self._sock.getsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, 14)\n\n # Setup socket filter to receive only events related to the\n # read_inquiry_mode command\n flt = bluez.hci_filter_new()\n opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,\n bluez.OCF_READ_INQUIRY_MODE)\n bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)\n bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE)\n bluez.hci_filter_set_opcode(flt, opcode)\n self._sock.setsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, flt)\n\n # first read the current inquiry mode.\n bluez.hci_send_cmd(self._sock, bluez.OGF_HOST_CTL,\n bluez.OCF_READ_INQUIRY_MODE)\n\n pkt = self._sock.recv(255)\n\n status, mode = struct.unpack(\"xxxxxxBB\", pkt)\n if status != 0:\n mode = -1\n\n # restore old filter\n self._sock.setsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, old_filter)\n self._inquiry_mode = mode\n return mode", "def get_mode(self):\r\n command = \":scan:mode?\\n\"\r\n self._log_write(command, mode=\"write\")\r\n self.ser.write(command)\r\n answer = self.ser.read(3)\r\n self._log_write(answer, mode=\"read\")\r\n rlvalue = int(answer[:-2])\r\n self.Stat = self.Stat._replace(mode=rlvalue)\r\n return rlvalue", "def get_pin_mode(self, pin):\n return self.pin_config[pin]", "def getBoardMode(self):\n scpiQuery = ':BMOD:SLOT%d:PGRP:MODE? 
PGRP1' % (self._slotNo, )\n result = self._processQuery(scpiQuery, 'getBoardMode():', self._ontRemote.timeout)\n # remove '_MODE'\n offset = result.find(self._postfix)\n return result[:offset]", "def igmp_mode(self):\n return self.data.get('igmp_mode')", "def host_mode(self):\n ret = self._get_attr(\"hostMode\")\n return PortMode(ret)", "def _get_manufacturing_mode(self):\n try:\n if 'manufacturing_mode' in self.facts:\n return self.facts['manufacturing_mode']\n response = self.config(command_list=[\"show chassis\"]).response()\n fpc_search = re.search('fpc', response)\n manuf_search = re.search('boot -h -m manufacturing', response)\n self.facts['manufacturing_mode'] = bool(response and(fpc_search and manuf_search))\n return self.facts['manufacturing_mode']\n except Exception as exp:\n self.log(level='WARN', message=exp)\n self.log(level='WARN', message=\"Unable to set manufacturing mode attribute\")\n return None", "def get_mode(self):\n return self.mode", "def _get_port_control(self):\n return self.__port_control", "def GetBgpRoutingMode(network):\n return network.get('routingConfig', {}).get('routingMode')", "def transmit_mode(self, mode: Optional[TransmitMode] = None):\n if mode is None:\n return self._remote_mode\n else:\n self._remote_mode = mode\n data = bytearray(bytes([mode]))\n data.append(0x00)\n return self.__do_call(FunctionBytes.TRANSMIT, data)", "def get_byte_mode(self, ):\n return 4 - (self.read_mdr1()[0] & 0x03)", "def stream_mode(self) -> 'StreamModeDetailsStreamMode':\n return pulumi.get(self, \"stream_mode\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the allmulticast mode of a port.
def get_port_allmulticast_mode(self, port_id):
    return self.get_detail_from_port_info("Allmulticast mode: ", "\S+", port_id)
[ "def igmp_mode(self):\n return self.data.get('igmp_mode')", "def _get_port_group(self):\n return self.__port_group", "def read_all_channels(self):\n data = SPI.command(CMD_ANA_GETALL, returned=2)\n return [\n data[0] << 8 | data[1], data[2] << 8 | data[3], data[4] << 8 |\n data[5], data[6] << 8 | data[7]\n ]", "def GetAllMappedPorts(self):\n return self._port_mappings", "def get_ports(self):\n raise NotImplementedError() #pragma: no cover", "def test_ipv6network_is_multicast(self):\n n = 10**4\n args = ['1:2:3:4:5:6::/96', 'ff00::/8']\n for arg in args:\n net = ip.IPv6Network(arg)\n time1, result1 = timefn(n, lambda: net.is_multicast)\n enet = eip.IPv6Network(arg)\n time2, result2 = timefn(n, lambda: enet.is_multicast)\n results = (time1, result1), (time2, result2)\n self.report_6n.report(fn_name(), n, results, arg)", "def getBoardMode(self):\n scpiQuery = ':BMOD:SLOT%d:PGRP:MODE? PGRP1' % (self._slotNo, )\n result = self._processQuery(scpiQuery, 'getBoardMode():', self._ontRemote.timeout)\n # remove '_MODE'\n offset = result.find(self._postfix)\n return result[:offset]", "def get_bond_mode(self, bond_port):\n return self.get_info_from_bond_config(\"Bonding mode: \", \"\\d*\", bond_port)", "def ports(self):\n return self.__ports[:]", "def get_outports( self ):\n return self._outports", "def get_ports( self, preserve_hierarchy=False ):\n if not preserve_hierarchy:\n return self._inports + self._outports\n else:\n return self._hports", "def FilterIpV4MulticastVpn(self):\n return self._get_attribute('filterIpV4MulticastVpn')", "def FilterIpV6MulticastVpn(self):\n return self._get_attribute('filterIpV6MulticastVpn')", "def requires_port(self):\n return self in {self.__class__.UDP, self.__class__.TCP}", "def get_inports( self ):\n return self._inports", "def get_all_channels(self):\r\n return self.all()", "def fan_modes(self) -> list[str] | None:\n if self.device_data.fan_modes:\n return self.device_data.fan_modes\n return None", "def get_switching_mode(self):\n self.board_socket.send(bytes.fromhex(\"10 00 01 00\"))\n temp = self.board_socket.recv(1024)\n return(temp[3])", "def FilterIpv4MulticastBgpMplsVpn(self):\n return self._get_attribute('filterIpv4MulticastBgpMplsVpn')", "def link_wcmmodes(self) -> ConfigNodePropertyArray:\n return self._link_wcmmodes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get info by executing the command "show bonding config".
def get_info_from_bond_config(self, key_str, regx_str, bond_port):
    out = self.dut.send_expect("show bonding config %d" % bond_port, "testpmd> ")
    find_value = self.get_value_from_str(key_str, regx_str, out)
    return find_value
[ "def view_conf() -> None:\n print(Config.get_conf())", "def get_config(switchname, username, password):\n\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=switchname, username=username, password=password)\n command = 'cfgshow\\n'\n stdin, stdout, stderr = client.exec_command(command)\n config = stdout.read()\n\n return config", "def cmd_info(self):\n self.send(Command.from_attr(Command.INFO))\n self.bootinfo = BootInfo(self.recv_data())\n return self.bootinfo", "def GetBridgeInfoFromConf():\n bridges = {}\n with open('/usr/local/bluedon/www/cache/waf_bridge.conf', 'r') as f:\n for line in f.readlines():\n bridgeInfo = line.strip().split() # br0 vEth0,vEth1 num\n if len(bridgeInfo) == 3:\n bridges[bridgeInfo[0]] = [bridgeInfo[1]]\n return bridges", "def show_config(ctx):\n\n click.echo(json.dumps(ctx.obj.config, indent=4, separators=(',', ': '), cls=JSONEncoder))", "def show_bios_configuration(ctx, profile, configuration):\n\n bios_recipe = BIOSRecipe(ctx.obj['client'])\n config_data = bios_recipe.get_selected_configuration(configuration, profile=profile)\n print(json.dumps(config_data, indent=4, sort_keys=True))", "def config_list():\n click.echo(Config())", "def help_config(self):\n print(help_msg.cmds['config'])", "def getConfigInfo(self):\n return [(key, self.config[key][1]) for key in list(self.config.keys())]", "def get_bdev_info(self):\n targets = self.server_managers[-1].get_config_value('targets')\n bdev_tiers = 0\n bdev_info = []\n for engine in self.server_managers[-1].manager.job.yaml.engine_params:\n for index, tier in enumerate(engine.storage.storage_tiers):\n if tier.storage_class.value == 'nvme':\n bdev_tiers += 1\n for item, device in enumerate(tier.bdev_list.value):\n bdev_info.append(\n {'bdev': device,\n 'roles': ','.join(tier.bdev_roles.value or []),\n 'tier': index,\n 'tgt_ids': list(range(item, targets, len(tier.bdev_list.value)))})\n\n self.log.info('Detected NVMe devices in config')\n for bdev in bdev_info:\n self.log.info(' %s', dict_to_str(bdev, items_joiner=':'))\n return bdev_info", "def get_config():\n profiles = {}\n curr = None\n\n cmd = [\"netsh\", \"advfirewall\", \"show\", \"allprofiles\"]\n ret = __salt__[\"cmd.run_all\"](cmd, python_shell=False, ignore_retcode=True)\n if ret[\"retcode\"] != 0:\n raise CommandExecutionError(ret[\"stdout\"])\n\n # There may be some problems with this depending on how `netsh` is localized\n # It's looking for lines that contain `Profile Settings` or start with\n # `State` which may be different in different localizations\n for line in ret[\"stdout\"].splitlines():\n if not curr:\n tmp = re.search(\"(.*) Profile Settings:\", line)\n if tmp:\n curr = tmp.group(1)\n elif line.startswith(\"State\"):\n profiles[curr] = line.split()[1] == \"ON\"\n curr = None\n\n return profiles", "def _device_info(self) -> dict:\n response = self._send(\"getStatus\")\n return self._json_decode(response)", "def get_sdr_info():\n\n status, ret_values = \\\n grk.run_key_u(\"Run IPMI Standard Command sdr info\")\n result = vf.key_value_outbuf_to_dict(ret_values, process_indent=1)\n\n return result", "def get_config(self) -> None:\n body = Helpers.req_body(self.manager, 'devicedetail')\n body['method'] = 'configurations'\n body['uuid'] = self.uuid\n\n r, _ = Helpers.call_api(\n '/131airpurifier/v1/device/configurations',\n 'post',\n headers=Helpers.req_headers(self.manager),\n json_object=body,\n )\n\n if r is not None and 
Helpers.code_check(r):\n self.config = Helpers.build_config_dict(r)\n else:\n logger.debug('Unable to get config info for %s',\n self.device_name)", "def load_cli_configuration():\n parser = ArgumentParser()\n parser.add_argument(\"device\", type=str,\n help=\"bluetooth address of the Onewheel to connect to\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\n help=\"display debug level logging\")\n args = parser.parse_args()\n logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)\n return args.device", "def get_card_info(self,device):\n cmd = \"vgc-monitor -d %s | grep \\\"Card Info\\\"\"%device\n o = self.run_command_chk_rc(cmd)\n out = o['output'][1]\n out_a = out.split(\":\")\n\n return out_a[1].strip()", "def getConfigInfo(self):\n\n cacheUrl = getattr(self.data.application.configuration, 'configCacheUrl', None)\n cacheDb = getattr(self.data.application.configuration, 'cacheName', None)\n configId = getattr(self.data.application.configuration, 'configId', None)\n\n return cacheUrl, cacheDb, configId", "def getconfig(ctx):\n \"\"\"and block drives that are currently connected to the node.\"\"\"\n \"\"\"Note: This method is available only through the per-node API endpoint 5.0 or later.\"\"\"\n\n \n\n cli_utils.establish_connection(ctx)\n \n\n \n\n ctx.logger.info(\"\"\": \"\"\"+\"\"\";\"\"\"+\"\")\n try:\n _GetDriveConfigResult = ctx.element.get_drive_config()\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n if ctx.json:\n print(simplejson.dumps(simplejson.loads(_GetDriveConfigResult), indent=4))\n return\n else:\n cli_utils.print_result(_GetDriveConfigResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)", "def _fetch_ipconfig_infomation():\n \n # Launch up a shell, get the feedback\n process = portable_popen.Popen([\"ipconfig\", \"/all\"])\n\n # Get the output\n outputdata = process.stdout.readlines()\n \n # Close the pipe\n process.stdout.close()\n \n # Stores the info\n info_dict = {}\n \n # Store the current container\n current_container = None\n \n # Process each line\n for line in outputdata:\n # Strip unwanted characters\n line = line.strip(\"\\r\\n\")\n \n # Check if this line is blank, skip it\n if line.strip() == \"\":\n continue\n \n # This is a top-level line if it does not start with a space\n if not line.startswith(\" \"):\n # Do some cleanup\n line = line.strip(\" :\")\n \n # Check if this exists in the top return dictionary, if not add it\n if line not in info_dict:\n info_dict[line] = {}\n \n # Set the current container\n current_container = line\n \n # Otherwise, this line just contains some information\n else:\n # Check if we are in a container\n if not current_container:\n continue\n \n # Cleanup\n line = line.strip()\n line = line.replace(\". \", \"\")\n \n # Explode on the colon\n (key, value) = line.split(\":\",1)\n \n # More cleanup\n key = key.strip()\n value = value.strip()\n \n # Store this\n info_dict[current_container][key] = value\n \n # Return everything\n return info_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the mode of the chosen bonding device.
def get_bond_mode(self, bond_port):
    return self.get_info_from_bond_config("Bonding mode: ", "\d*", bond_port)
[ "def get_mode(self):\n return self.mode", "def get_mode(self,):\n return self.current_mode", "def get_bonding_type(bondname):\n mode = -1\n try:\n with open('/sys/class/net/%s/bonding/mode' % bondname, 'r') as f:\n lines = f.readlines()\n if lines:\n comps = lines[0].strip().split()\n if comps and len(comps) == 2:\n mode_str = comps[1].strip()\n mode = int(mode_str)\n if mode == -1:\n with open('/etc/modprobe.d/bonding.conf', 'r') as f:\n for line in f:\n if bondname not in line:\n continue\n fields = line.strip().split()\n if fields and fields[0].lower() == 'options':\n r = re.match('[\\s\\S]*mode=([0-9])', line.strip())\n if r:\n mode_str = r.groups()[0]\n if mode_str:\n mode = int(mode_str)\n except Exception, e:\n # print e\n return mode, 'Error retrieving bonding type : %s' % str(e)\n else:\n return mode, None", "def _get_mode(self, interface):\n url = self._construct_url(interface, suffix='mode')\n response = self._make_request('GET', url)\n root = etree.fromstring(response.text)\n mode = root.find(self._construct_tag('vlan-mode')).text\n return mode", "def _get_manufacturing_mode(self):\n try:\n if 'manufacturing_mode' in self.facts:\n return self.facts['manufacturing_mode']\n response = self.config(command_list=[\"show chassis\"]).response()\n fpc_search = re.search('fpc', response)\n manuf_search = re.search('boot -h -m manufacturing', response)\n self.facts['manufacturing_mode'] = bool(response and(fpc_search and manuf_search))\n return self.facts['manufacturing_mode']\n except Exception as exp:\n self.log(level='WARN', message=exp)\n self.log(level='WARN', message=\"Unable to set manufacturing mode attribute\")\n return None", "def get_boot_mode():\n kernel = ctypes.windll.kernel32\n firmware_type = ctypes.c_uint()\n\n # Get value from kernel32 API\n try:\n kernel.GetFirmwareType(ctypes.byref(firmware_type))\n except:\n # Just set to zero\n firmware_type = ctypes.c_uint(0)\n\n # Set return value\n type_str = 'Unknown'\n if firmware_type.value == 1:\n type_str = 'Legacy'\n elif firmware_type.value == 2:\n type_str = 'UEFI'\n\n return type_str", "def __get_mode_modem(self, comm):\n try:\n resp=comm.sendatcommand('E0V1')\n return True\n except:\n return False", "def getRSelMode(self,targetDevice):\n if (targetDevice in self.adc_based_acquisition):\n return \"e5x\"\n elif (targetDevice in [\"SAML22\"]):\n return \"l22\"\n elif (targetDevice in [\"PIC32CZCA80\", \"PIC32CZCA90\"]):\n return \"pic32cz\"\n else:\n return \"std\"", "def get_switching_mode(self):\n self.board_socket.send(bytes.fromhex(\"10 00 01 00\"))\n temp = self.board_socket.recv(1024)\n return(temp[3])", "def mode(self) -> GameMode:\n return self._game.mode", "def get_inquiry_mode(self):\n\n # save current filter\n old_filter = self._sock.getsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, 14)\n\n # Setup socket filter to receive only events related to the\n # read_inquiry_mode command\n flt = bluez.hci_filter_new()\n opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,\n bluez.OCF_READ_INQUIRY_MODE)\n bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)\n bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE)\n bluez.hci_filter_set_opcode(flt, opcode)\n self._sock.setsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, flt)\n\n # first read the current inquiry mode.\n bluez.hci_send_cmd(self._sock, bluez.OGF_HOST_CTL,\n bluez.OCF_READ_INQUIRY_MODE)\n\n pkt = self._sock.recv(255)\n\n status, mode = struct.unpack(\"xxxxxxBB\", pkt)\n if status != 0:\n mode = -1\n\n # restore old filter\n self._sock.setsockopt(bluez.SOL_HCI, 
bluez.HCI_FILTER, old_filter)\n self._inquiry_mode = mode\n return mode", "def get_mode_type(self, mode: str) -> str:\n for letter, modes in self.chanmodes.items():\n if mode in modes:\n return letter\n raise ModeTypeUnknown(mode)", "def robo_mode(self) -> str:\n return pulumi.get(self, \"robo_mode\")", "def mode(self):\n # type: () -> SrtMode\n return self._mode", "def getBoardMode(self):\n scpiQuery = ':BMOD:SLOT%d:PGRP:MODE? PGRP1' % (self._slotNo, )\n result = self._processQuery(scpiQuery, 'getBoardMode():', self._ontRemote.timeout)\n # remove '_MODE'\n offset = result.find(self._postfix)\n return result[:offset]", "def get_mode(self):\r\n command = \":scan:mode?\\n\"\r\n self._log_write(command, mode=\"write\")\r\n self.ser.write(command)\r\n answer = self.ser.read(3)\r\n self._log_write(answer, mode=\"read\")\r\n rlvalue = int(answer[:-2])\r\n self.Stat = self.Stat._replace(mode=rlvalue)\r\n return rlvalue", "def _get_appearance_mode(self) -> str:\n if self.__appearance_mode == 0:\n return \"light\"\n else:\n return \"dark\"", "def get_mode(idx):\n return MySQLServer.SERVER_MODE[idx]", "def read_operation_mode():\n\n debug(\"Reading operation mode...\")\n self.modbusclient.write_multiple_registers(0, str2duohex(\"v00101\"))\n operation_state = duohex2str(self.modbusclient.read_holdingregisters(0, 8))[7:]\n info(\"Operation mode was succesfully read!\")\n return int(operation_state)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the balance transmit policy of a bonding device.
def get_bond_balance_policy(self, bond_port):
    return self.get_info_from_bond_config("Balance Xmit Policy: ", "\S+", bond_port)
[ "def eth_adapter_policy(self):\n return self._eth_adapter_policy", "def wallet(self):\n\t\tdat = self.conn.call('GET', '/api/wallet/').json()\n\t\tbalance = float(dat['data']['total']['balance'])\n\t\tsendable = float(dat['data']['total']['sendable'])\n\t\treturn (balance, sendable)", "def set_balance_policy_for_bonding_device(self, bond_port, policy):\n self.dut.send_expect(\"set bonding balance_xmit_policy %d %s\" % (bond_port, policy), \"testpmd> \")\n new_policy = self.get_bond_balance_policy(bond_port)\n policy = \"BALANCE_XMIT_POLICY_LAYER\" + policy.lstrip('l')\n self.verify(new_policy == policy, \"Set bonding balance policy failed\")", "def get_transmit_power(self):\n (status, power) = self.__device.get_transmit_power()\n self.__device.decode_error_status(status, cmd='get_transmit_power', print_on_error=True)\n return \"%d dBm\" % (power)", "def request_balance(self):\n req = \"x\" + json.dumps({\"identifier\": self.pubkey})\n replies = self.broadcast_request(req)\n return int(SPVClient._process_replies(replies))", "def get_balance(self):\n balance_per_dev = self._build_balance_per_dev()\n return max(abs(b) for b in balance_per_dev.values())", "def bandwidth_share_level(self) -> Optional[str]:\n return pulumi.get(self, \"bandwidth_share_level\")", "def __get_policy__(self, agent):\n msg = comm.RequestPolicyMessage(agent.agent_id)\n reply_msg = agent.communicate(msg)\n return reply_msg.policy", "def policy(self) -> pulumi.Output['outputs.BlobInventoryPolicySchemaResponse']:\n return pulumi.get(self, \"policy\")", "def eth_network_policy(self):\n return self._eth_network_policy", "def getBalanceAdjustment(self) -> \"float\":\n return _coin.SoCamera_getBalanceAdjustment(self)", "def get_connection_throttle(self, loadbalancer):\r\n return loadbalancer.get_connection_throttle()", "def get_balance(self):\n if self.available:\n return self.total_amount\n else:\n raise ValueError('This bank account is closed')", "def eth_qos_policy(self):\n return self._eth_qos_policy", "def get_buy_amount(self):\r\n return self.balance / 3", "def get_asset_balance(self):\n return self.client.get_asset_balance(asset)", "def Tx(self):\n return self.P[0, 3]", "def getClientBalance(self, client, bot_config):\n currency = str(bot_config['currency'])\n pair = currency[len(currency)-4:len(currency)]\n if(pair == 'USDT'):\n balance = client.get_asset_balance(asset='USDT')\n else:\n balance = client.get_asset_balance(asset='BTC')\n return balance['free']", "def get_account_balance(self):\n return self.mtc.get_account_balance()['AvailableBalance']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the active slaves of the chosen bonding device.
def get_bond_active_slaves(self, bond_port):
    try:
        return self.get_info_from_bond_config("Active Slaves \(\d\): \[", "\d*( \d*)*", bond_port)
    except Exception as e:
        # Fall back to the misspelled "Acitve Slaves" label emitted by some testpmd versions.
        return self.get_info_from_bond_config("Acitve Slaves: \[", "\d*( \d*)*", bond_port)
[ "def get_slaves(self, name):\n slaves = self.execute('SENTINEL', 'slaves', name)\n result = []\n for slave in slaves:\n result.append(dict_from_list(slave))\n return result", "def get_slave_list (bus=\"wisbone\"):\n\tslave_list = []\n\treturn slave_list", "def get_slaves_manager(bus, slave_book):\n return [\n bus.get(con_name, path) for con_name, path\n in slave_book.GetSlavesLocation()\n ]", "def GetActiveMaster():\n hostname = socket.getfqdn().split('.', 1)[0].lower()\n for master in chromium_utils.ListMasters():\n path = os.path.join(master, 'slaves.cfg')\n if os.path.exists(path):\n for slave in chromium_utils.RunSlavesCfg(path):\n if slave.get('hostname', None) == hostname:\n return slave['master']", "def getSlaves(self, name):\n try:\n return self.environments[name]['slaves']\n except KeyError:\n raise AssertionError('No such environment %s' % name)", "def GetSlaves(self, slave_filter=None):\n if slave_filter is None:\n return self._slaves[:]\n else:\n return [s for s in self._slaves if slave_filter(s)]", "def getReplicatedAgents(self):\n return self.session.request('replicationcomms/slave/agents')", "def get_master_slaves(server, options=None):\n if options is None:\n options = {}\n options[\"columns\"] = True\n options[\"raw\"] = False\n return server.exec_stmt(\"SHOW SLAVE HOSTS\", options)", "def slave_group_ids(self):\n return self.fetch_slave_group_ids()", "def cli(ctx, list_slaves, slave_id):\n if list_slaves is True:\n print(\" HOSTNAME ID\")\n for slave in ctx.slave_data[\"slaves\"]:\n print(SLAVE_STRING.format(agent_id=slave[\"id\"], hostname=slave[\"hostname\"]))\n return\n\n if slave_id is None:\n print(json.dumps(ctx.slave_data))\n return\n else:\n for slave in ctx.slave_data[\"slaves\"]:\n if slave[\"id\"] == slave_id:\n print(json.dumps(slave))\n break\n else:\n continue\n return\n\n return", "def getRemoteMasterCoresForDemand(self):\n return self.session.request('replicationcomms/slave/cores/masters')", "def rpc_list_bots(self, sender, *args):\n \n if (len(args) != 0):\n raise rpc.RPCFault(604, 'list_bots: no arguments')\n ls = [ act.jid for act in self.factory.actors.values() ]\n return ls", "def multiroom_info(self) -> dict:\n self._logger.info(\"Retrieving multiroom master and slaves of this device, if any...\")\n self._logger.debug(\"Retrieving master information...\")\n try:\n master_info = {'status': 'slave', 'master': {'ip': self._device_info()['master_ip']}}\n except KeyError:\n master_info = {'status': 'master'}\n self._logger.debug(\"Retrieving slave information...\")\n response = self._send(\"multiroom:getSlaveList\")\n slave_info = self._json_decode(response)\n master_info.update(slave_info)\n return master_info", "def _nextSlave(self, builder, slaves):\n request = builder.current_builder_request\n target_name = request.properties.getProperty('target-slave')\n\n if target_name:\n # See if we have the requested slave.\n for slave_builder in slaves:\n if slave_builder.slave.slavename == target_name:\n return slave_builder\n\n for slave_builder in slaves:\n if slave_builder.slave.canStartBuild():\n return slave_builder\n\n return random.choice(slaves)", "def test_report_active_slave(self, bond):\n mode = self.hosts_nets_nic_dict.get(0).get(bond).get(\"mode\")\n testflow.step(\n \"Check that the active slave name bond %s mode %s that reported \"\n \"via engine match to the active slave name on the host\", bond, mode\n )\n assert helper.compare_active_slave_from_host_to_engine(\n bond=bond\n ), (\n \"Active slave name bond %s mode %s that reported via engine 
\"\n \"isn't match to the active slave name on the host\" % (bond, mode)\n )", "def GetSlavesForMaster(self, master_config, options=None,\n important_only=True, active_only=True):\n slave_map = self.GetSlaveConfigMapForMaster(\n master_config, options=options, important_only=important_only,\n active_only=active_only)\n return slave_map.values()", "def get_slave_status(server, options=None):\n if options is None:\n options = {}\n options[\"columns\"] = True\n options[\"raw\"] = False\n return server.exec_stmt(\"SHOW SLAVE STATUS\", options)", "def test_get_slave_status():\n _test_call(mysql.get_slave_status, \"SHOW SLAVE STATUS\")", "def get_active_opflex_hosts(self):\n cmd1 = \"neutron agent-list | grep OpFlex | grep ':-)'\"\n cmd2 = \" | awk -F'|' '{print $4}'\"\n cmd = self.KEY + cmd1 + cmd2\n hosts = subprocess.check_output(['bash','-c', cmd])\n all_hosts = [host.strip() for host in hosts.split(\"\\n\") if host]\n return all_hosts" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Launch the testpmd app with the given command-line parameters.
def launch_app(self, pmd_param=" "):
    self.pmdout.start_testpmd("all", param=pmd_param)
[ "def run_tests_from_commandline():\n import maya.standalone\n maya.standalone.initialize()\n run_tests()", "def test_platform_args(self):\n self.app = self.make_app(argv = ['production', 'run', 'J.Doe_00_04', '--debug', '--force', '--amplicon', '--restart', '--sample', SAMPLES[1], '--drmaa'], extensions=['scilifelab.pm.ext.ext_distributed'])\n handler.register(ProductionController)\n self._run_app()\n os.chdir(filedir)", "def test_one_main(self) -> None:\n args = [\"pony_gp.py\", \"--config=configs.ini\"]\n sys.argv = args\n pony_gp.main()\n self.assertTrue(True)", "def start(self):\n self._logger.info(\"Starting TestPMD...\")\n dpdk.init()\n self._testpmd.start()\n self._logger.info(\"TestPMD...Started.\")\n\n self._testpmd.send('set fwd {}'.format(self._fwdmode), 1)\n\n if settings.getValue('VSWITCH_JUMBO_FRAMES_ENABLED'):\n self._testpmd.send('port stop all', 1) # ports must be stopped to set mtu\n self._testpmd.send('port config all max-pkt-len {}'.format(\n settings.getValue('VSWITCH_JUMBO_FRAMES_SIZE')), 1)\n self._testpmd.send('port start all', 1)\n\n for port in range(self._nports):\n self._testpmd.send('csum set {} {} {}'.format(\n self._csum_layer, self._csum_calc, port), 1)\n self._testpmd.send('csum parse_tunnel {} {}'.format(\n self._csum_tunnel, port), 1)\n\n self._testpmd.send('start', 1)", "def main():\n # Read command line arguments\n args = get_input_arguments()\n # Unpack dictionary into keyword arguments\n # Unused arguments should be ignored silently.\n ppn.run(**args)", "def run(args):\n _set_development_path()\n from mabot import run\n run(args)", "def start_for_guest(self):\n self._logger.info(\"Starting TestPMD for one guest...\")\n dpdk.init()\n self._testpmd.start()\n self._logger.info(\"TestPMD...Started.\")\n\n if settings.getValue('VSWITCH_JUMBO_FRAMES_ENABLED'):\n self._testpmd.send('port stop all', 1) # ports must be stopped to set mtu\n self._testpmd.send('port config all max-pkt-len {}'.format(\n settings.getValue('VSWITCH_JUMBO_FRAMES_SIZE')), 1)\n # conflicting info if scatter needs to be enabled or not\n self._testpmd.send('port config all scatter on', 1)\n self._testpmd.send('port start all', 1)\n self._testpmd.wait(timeout=60) # port startup can take a few seconds\n\n self._testpmd.send('set portlist 0,2,1,3', 1)\n self._testpmd.send('set fwd {}'.format(self._fwdmode), 1)\n\n self._testpmd.send('start', 1)", "def main():\n # set up the program to take in arguments from the command line", "def testdoc_cli(arguments):\n TestDoc().execute_cli(arguments)", "def launcher_command(self):\n return 'aprun'", "def report_viewer_app():\n\n cmd = 'cd BenchmarkDB && python app.py'\n\n run(cmd)", "def run() -> None:\n if len(sys.argv) < 2:\n _print_and_quit(\n \"Did not understand command.\\nUsage: ac <app> <command> [paramaters]\"\n )\n app = sys.argv[1]\n run_command(app, sys.argv[2:])", "def main():\n config.version = __version__\n noArgs = len(sys.argv)\n if noArgs == 1:\n guiLaunch()\n else:\n cliLaunch()", "def main():\n check_result = check_process_from_server(cmdline_part=CMDLINE_PART)\n if check_result is False:\n logger.info(\"Launching KPI-App ...\")\n try:\n subprocess.call([r\"C:\\Projects\\kpi_app\\app_launch.bat\"])\n except Exception as e:\n logging.error(e)\n finally:\n logger.info(\"Exiting checker ...\")\n time.sleep(2)\n sys.exit()\n else:\n logger.info(\"Exiting checker ...\")\n time.sleep(2)\n sys.exit()", "def __launch_management_tool() -> None:\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"manager.settings\")\n 
execute_from_command_line(sys.argv)", "def launch(self):\n self.processdev.start()\n pid = self.processdev.pid\n p = psutil.Process(self.processdev.pid)\n p.nice(psutil.HIGH_PRIORITY_CLASS)\n print(str(pid) + \"est le pid\")", "def main():\n\n args = parseArgs()\n\n vm = VMBuilder(args)\n\n if vm.args.command == 'list_disk_pools':\n print(vm.getDiskPools())\n elif vm.args.command == 'list_pool_volumes':\n print(vm.getDiskPoolVolumes())\n elif vm.args.command == 'create_vm':\n logging.debug(\"about to run vm.getbuild.createvm\")\n vm.verifyMinimumCreateVMArgs()\n vm.getBuild().createVM()\n else:\n logging.critical(\"The command you entered is not recognized.\")", "def testTargetParams(self):\n expected_output = (\n 'python starboard/tools/example/app_launcher_client.py'\n ' --platform MY_PLATFORM --config MY_CONFIG'\n ' --target_params=\"--url=http://my.server.test\"')\n argv = ['--target_params', '\"--url=http://my.server.test\"']\n cmd_str = run._ResolveTrampoline(argv=argv)\n self.assertEqual(expected_output, cmd_str)", "def test_forwarder_cli(start_forwarder):\n sys.argv = sys.argv[:1]\n sys.argv += [\"forwarder\"]\n main()\n sys.argv = sys.argv[:1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a bonding device with the specified parameters.
def create_bonded_device(self, mode=0, socket=0, verify_detail=False):
    out = self.dut.send_expect("create bonded device %d %d" % (mode, socket), "testpmd> ")
    self.verify("Created new bonded device" in out,
                "Create bonded device on mode [%d] socket [%d] failed" % (mode, socket))
    bond_port = self.get_value_from_str("Created new bonded device net_bond_testpmd_[\d] on \(port ", "\d+", out)
    bond_port = int(bond_port)
    if verify_detail:
        out = self.dut.send_expect("show bonding config %d" % bond_port, "testpmd> ")
        self.verify("Bonding mode: %d" % mode in out,
                    "Bonding mode display error when create bonded device")
        self.verify("Slaves: []" in out, "Slaves display error when create bonded device")
        self.verify("Active Slaves: []" in out, "Active Slaves display error when create bonded device")
        self.verify("Primary: []" not in out, "Primary display error when create bonded device")
        out = self.dut.send_expect("show port info %d" % bond_port, "testpmd> ")
        self.verify("Connect to socket: %d" % socket in out, "Bonding port connect socket error")
        self.verify("Link status: down" in out, "Bonding port default link status error")
        self.verify("Link speed: 0 Mbps" in out, "Bonding port default link speed error")
    return bond_port
[ "def create(self, obj: Device):\n raise BanyanError('devices cannot be created via the API')", "def create_device(device):\n return FoobotDevice(auth_header=self.auth_header,\n user_id=device['userId'],\n uuid=device['uuid'],\n name=device['name'],\n mac=device['mac'], base_url=self.BASE_URL)", "def create_dhcp_ovs_bridge():\n http_controller = config_dic['http_threads'][threading.current_thread().name]\n dhcp_controller = http_controller.ovim.get_dhcp_controller()\n\n dhcp_controller.create_ovs_bridge()", "def new_device(mac, ip, name):\n return Device(mac, ip, name)", "def _create_device(device, host, username, password):\n module_name = 'acts.controllers.pdu_lib.' + device\n module = importlib.import_module(module_name)\n return module.PduDevice(host, username, password)", "def make_discoverable(self, duration=30):\n self.bt_device = dbus.Interface(self.bus.get_object(\"org.bluez\", \"/org/bluez/hci0\"),\n \"org.freedesktop.DBus.Properties\")\n # Check if the device is already in discoverable mode and if not then set a short discoverable period\n self.discoverable_status = self.bt_device.Get(\"org.bluez.Adapter1\", \"Discoverable\")\n if self.discoverable_status == 0:\n \"\"\"\n Agents manager the bt pairing process. Registering the NoInputNoOutput agent means now authentication from \n the RPi is required to pair with it.\n \"\"\"\n print(\"Placing the RPi into discoverable mode and turn pairing on\")\n print(f\"Discoverable for {duration} seconds only\")\n\n\n # Setup discoverability\n self.bt_device.Set(\"org.bluez.Adapter1\", \"DiscoverableTimeout\", dbus.UInt32(duration))\n self.bt_device.Set(\"org.bluez.Adapter1\", \"Discoverable\", True)\n self.bt_device.Set(\"org.bluez.Adapter1\", \"PairableTimeout\", dbus.UInt32(duration))\n self.bt_device.Set(\"org.bluez.Adapter1\", \"Pairable\", True)", "def create_bond_on_vm(vm_name, vm_resource, vnics, mode=1, proto=\"auto\"):\n bond = \"bond1\"\n remove_nm_controlled_cmd = (\n \"sed -i /NM_CONTROLLED/d /etc/sysconfig/network-scripts/ifcfg-{\"\n \"interface}\"\n )\n active_interface = vm_resource.network.get_info().get(\"interface\")\n assert not vm_resource.run_command(\n command=shlex.split(remove_nm_controlled_cmd.format(\n interface=active_interface)\n )\n )[0]\n assert not vm_resource.run_command(\n command=shlex.split(\"nmcli connection reload\")\n )[0]\n\n secondary_interface = \"System\\ {active_interface}\".format(\n active_interface=active_interface\n )\n primary_interface = hl_networks.get_vm_interface_by_vnic(\n vm=vm_name, vm_resource=vm_resource, vnic=vnics[0]\n )\n\n # Create connection in NM for the new interface\n nmcli_add_con = [\n \"nmcli connection add type ethernet con-name {primary_interface_1} \"\n \"ifname {primary_interface_2}\".format(\n primary_interface_1=primary_interface,\n primary_interface_2=primary_interface\n ),\n \"nmcli connection modify id {primary_interface} ipv4.method disabled\"\n \" ipv6.method ignore\".format(primary_interface=primary_interface),\n ]\n assert not all(\n [\n vm_resource.run_command(\n command=shlex.split(cmd))[0] for cmd in\n nmcli_add_con\n ]\n )\n\n # Create BOND\n create_bond_cmds = [\n \"nmcli connection add type bond con-name {bond} ifname \"\n \"bond1 mode {mode} {primary}\".format(\n bond=bond, mode=mode, primary=\"primary {primary_interface}\".format(\n primary_interface=primary_interface\n ) if mode == 1 else \"\"\n ),\n \"nmcli connection modify id {bond} ipv4.method {proto} \"\n \"ipv6.method ignore\".format(bond=bond, proto=proto)\n ]\n assert not all(\n [\n 
vm_resource.run_command(\n command=shlex.split(cmd))[0] for cmd in\n create_bond_cmds\n ]\n )\n\n # Add the slaves to the BOND\n for inter in primary_interface, secondary_interface:\n nmcli_add_slave = (\n \"nmcli connection modify id {inter} connection.slave-type \"\n \"bond connection.master {bond} connection.autoconnect \"\n \"yes\".format(bond=bond, inter=inter)\n )\n assert not vm_resource.run_command(\n command=shlex.split(nmcli_add_slave)\n )[0]\n\n # Deactivate all connection and activate again to get the new configuration\n nmcli_up_cmd = (\n \"nmcli connection down {primary_interface_1};\"\n \"nmcli connection down {secondary_interface_1};\"\n \"nmcli connection down {bond_1};\"\n \"nmcli connection up {bond_2};\"\n \"nmcli connection up {primary_interface_2};\"\n \"nmcli connection up {secondary_interface_2}\"\n ).format(\n primary_interface_1=primary_interface,\n secondary_interface_1=secondary_interface,\n bond_1=bond,\n bond_2=bond,\n primary_interface_2=primary_interface,\n secondary_interface_2=secondary_interface\n )\n try:\n vm_resource.run_command(\n command=shlex.split(\n nmcli_up_cmd\n ), tcp_timeout=10, io_timeout=10\n )\n except socket.timeout:\n pass", "def device_create(info):\r\n\r\n device_id = info[\"id\"]\r\n device_status = get_device_status(device_id)\r\n\r\n if device_status == \"new\":\r\n LOGGER.info(f\"Device create message received from {device_id}\")\r\n session = get_session()\r\n # check if a message has been recieved already\r\n saved_message = (\r\n session.query(Message)\r\n .filter(\r\n (Message.source == device_id) & (Message.classification == \"create\")\r\n )\r\n .first()\r\n )\r\n # if not, create a new message\r\n if not saved_message:\r\n saved_message = Message(device_id, \"server\", \"create\")\r\n session.add(saved_message)\r\n saved_message.payload = pickle.dumps(info)\r\n saved_message.set_datetime(valid_to=timedelta(minutes=30))\r\n session.commit()\r\n session.close()\r\n else:\r\n LOGGER.error(\r\n f\"create message received from device {device_id} which is not connected\"\r\n )", "def vpp_create_bond_interface(\n node, mode, load_balance=None, mac=None, gso=False):\n cmd = u\"bond_create2\"\n args = dict(\n id=int(Constants.BITWISE_NON_ZERO),\n use_custom_mac=bool(mac is not None),\n mac_address=L2Util.mac_to_bin(mac) if mac else None,\n mode=getattr(\n LinkBondMode,\n f\"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}\"\n ).value,\n lb=0 if load_balance is None else getattr(\n LinkBondLoadBalanceAlgo,\n f\"BOND_API_LB_ALGO_{load_balance.upper()}\"\n ).value,\n numa_only=False,\n enable_gso=gso\n )\n err_msg = f\"Failed to create bond interface on host {node[u'host']}\"\n with PapiSocketExecutor(node) as papi_exec:\n sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)\n\n InterfaceUtil.add_eth_interface(\n node, sw_if_index=sw_if_index, ifc_pfx=u\"eth_bond\"\n )\n if_key = Topology.get_interface_by_sw_index(node, sw_if_index)\n\n return if_key", "def setup_bd(client, conn_mgr):\n sess_hdl = conn_mgr.client_init()\n dev_tgt = DevTarget_t(0, hex_to_i16(0xffff))\n ifindices = [1, 2, 65]\n\n for ifindex in ifindices:\n action_spec = dc_set_bd_properties_action_spec_t(\n action_bd=0,\n action_vrf=0,\n action_rmac_group=0,\n action_bd_label=0,\n action_ipv4_unicast_enabled=True,\n action_ipv6_unicast_enabled=False,\n action_ipv4_multicast_enabled=False,\n action_ipv6_multicast_enabled=False,\n action_igmp_snooping_enabled=0,\n action_mld_snooping_enabled=0,\n action_ipv4_urpf_mode=0,\n action_ipv6_urpf_mode=0,\n 
action_stp_group=0,\n action_mrpf_group=0,\n action_ipv4_mcast_key_type=0,\n action_ipv4_mcast_key=0,\n action_ipv6_mcast_key_type=0,\n action_ipv6_mcast_key=0,\n action_stats_idx=0,\n action_learning_enabled=0)\n \n mbr_hdl = client.bd_action_profile_add_member_with_set_bd_properties(\n sess_hdl, dev_tgt,\n action_spec)\n match_spec = dc_port_vlan_mapping_match_spec_t(\n ingress_metadata_ifindex=ifindex,\n vlan_tag__0__valid=0,\n vlan_tag__0__vid=0,\n vlan_tag__1__valid=0,\n vlan_tag__1__vid=0)\n client.port_vlan_mapping_add_entry(\n sess_hdl, dev_tgt,\n match_spec, mbr_hdl)", "def create_device_action(self, netsim, device):\n self.log.info('Creating new netsim network with device ', device)\n response = None\n while True:\n # Create the network\n create_response = netsim.create_device(device)\n response = create_response\n if create_response.error:\n break\n # Init netsim device configuration\n init_response = netsim.init_config(device)\n if init_response.error:\n response = init_response\n break\n # Load init configuration to cdb\n load_response = netsim.load_config()\n if load_response.error:\n response = load_response\n break\n # all operations finished\n break\n\n return response", "def create_single_drive(data_dir, device):\n cmds = []\n cmds.append('sudo mkfs.ext4 -F {}'.format(device))\n cmds.append('sudo mkdir -p {}'.format(data_dir))\n cmds.append('sudo mount {} {}'.format(device, data_dir))\n cmds.append('sudo chmod a+w {}'.format(data_dir))\n\n utils.run_commands(cmds)\n logging.info('Created and mounted device {} at {}'.format(device, data_dir))", "def create_testbed_device_instance(self, dev_name_info, hint):\n testbed_dev = None\n if hint == \"AP\":\n testbed_dev = TestBedAP(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"AP\"\n if hint == \"STA\":\n testbed_dev = TestBedSTA(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"STA\"\n if hint == \"DUT\":\n testbed_dev = DUT(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"DUT\"\n if hint == \"SNIFFER\":\n testbed_dev = Sniffer(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"SNIFFER\"\n if hint == \"PCENDPOINT\":\n testbed_dev = PCEndpoint(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"PCENDPOINT\"\n if hint == \"APCONFIG\":\n testbed_dev = APConfig(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"APCONFIG\"\n if hint == \"RADIUSSERVER\":\n testbed_dev = RadiusServer(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"RADIUSSERVER\"\n if hint == \"OSUSERVER\":\n testbed_dev = OSUServer(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"OSUSERVER\"\n if hint == \"ATTENUATOR\":\n testbed_dev = Attenuator(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"ATTENUATOR\"\n if hint == \"POWERSWITCH\":\n testbed_dev = PowerSwitch(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"POWERSWITCH\"\n if hint == \"WFAEMT\":\n testbed_dev = WFAEMT(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"WFAEMT\"\n return testbed_dev", "def get_create_instance_with_block_device_mapping_param(name, adminPass, systen_volume_id, os_type, \\\n data_volume_id, imageRef, flavorRef, uuid, port=None, security_group=None, \\\n user_data=None, availability_zone=None, fixed_ip=None, metadata={}, personality=[], 
\\\n delete_on_termination=True, version=openapi_version.V2):\n from commons import utils\n try:\n # 1. build use_data, which will contain password \n if os_type == 0:\n # windows\n port = port if port is not None and port.isdigit() else \"3389\"\n passwd = \\\n \"\"\"rem cmd\\r\\nnet user administrator %(adminPass)s\\r\\nREG ADD HKLM\\\\SYSTEM\\\\CurrentControlSet\\\\Control\\\\Terminal\\\" \\\"Server\\\\Wds\\\\rdpwd\\\\Tds\\\\tcp /v PortNumber /t REG_DWORD /d %(PortNumber)s /f \\r\\nREG ADD HKLM\\\\SYSTEM\\\\CurrentControlSet\\\\Control\\\\Terminal\\\" \\\"Server\\\\WinStations\\\\RDP-Tcp /v PortNumber /t REG_DWORD /d %(PortNumber)s /f \\n\"\"\" % {\"adminPass\": adminPass,\"PortNumber\":port}\n user_data_pass = utils.base64Encode(s=passwd)\n elif os_type == 1:\n # ubuntu/centos \n passwd = \\\n \"\"\"#!/bin/bash\\n#modified vm's passwd\\npasswd root <<EOF\\n%(adminPass)s\\n%(readminPass)s\\nEOF\"\"\" % {\"adminPass\": adminPass, \"readminPass\": adminPass}\n user_data_pass = utils.base64Encode(s=passwd)\n # 2. built return paramter \n if version == openapi_version.V3:\n pass\n else:\n if os_type == 1:\n # linux \n body = {\n \"server\" : {\n \"name\" : name,\n \"imageRef\" : imageRef,\n \"flavorRef\" : flavorRef,\n \"metadata\" : {},\n \"personality\" : personality,\n \"networks\" : [\n {\n \"uuid\" : uuid\n }\n ],\n \"block_device_mapping_v2\": []\n }\n }\n if os_type == 0:\n # windows\n body = {\n \"server\" : {\n \"name\" : name,\n \"imageRef\" : imageRef,\n \"flavorRef\" : flavorRef,\n \"metadata\" : {\"admin_pass\":adminPass},\n \"personality\" : personality,\n \"networks\" : [\n {\n \"uuid\" : uuid\n }\n ],\n \"block_device_mapping_v2\": []\n }\n }\n #if port:\n # body[\"server\"][\"networks\"][0][\"port\"] = port\n if systen_volume_id:\n # exist system volume\n body[\"server\"][\"block_device_mapping_v2\"].append({\n \"device_name\": \"/dev/vda\",\n \"source_type\": \"volume\",\n \"destination_type\": \"volume\",\n \"delete_on_termination\": delete_on_termination,\n \"guest_format\": None,\n \"uuid\": systen_volume_id,\n \"boot_index\": \"0\"\n })\n if data_volume_id:\n body[\"server\"][\"block_device_mapping_v2\"].append({\n \"device_name\": \"/dev/sda\",\n \"source_type\": \"volume\",\n \"destination_type\": \"volume\",\n \"delete_on_termination\": delete_on_termination,\n \"guest_format\": None,\n \"uuid\": data_volume_id,\n \"boot_index\": \"1\"\n })\n if security_group:\n body[\"server\"][\"security_group\"] = security_group\n if (user_data_pass or adminPass):\n body[\"server\"][\"user_data\"] = user_data_pass\n body[\"server\"][\"config_drive\"] = \"true\"\n if availability_zone:\n body[\"server\"][\"availability_zone\"] = availability_zone\n if fixed_ip: \n body[\"server\"][\"fixed_ip\"] = fixed_ip\n if delete_on_termination:\n body[\"server\"][\"delete_on_termination\"] = delete_on_termination\n return body\n except Exception, e:\n raise e", "def _make_ble_connection(self):\n if self.device == None:\n adapter = pygatt.backends.GATTToolBackend()\n nuki_ble_connection_ready = False\n\n while nuki_ble_connection_ready == False:\n print(\"Starting BLE adapter...\")\n adapter.start()\n print(\"Init Nuki BLE connection...\")\n try :\n self.device = adapter.connect(self.mac_address)\n nuki_ble_connection_ready = True\n except:\n print(\"Unable to connect, retrying...\")\n\n print(\"Nuki BLE connection established\")", "def defineNetwork(networkName, conn_libvirt, conn_ssh=None, primary=True):\n # create a persistent virtual network\n\n #create the bridge using brctl command\n 
cmd_1 = \"sudo brctl addbr {}\".format(networkName)\n cmd_2 = \"sudo ip link set {} up\".format(networkName)\n cmd_list = [cmd_1, cmd_2]\n if primary == True:\n print('local:')\n for cmd in cmd_list:\n os.system(cmd)\n else:\n ssh_remote(conn_ssh, cmd_list)\n\n JINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n template_values = {\n 'networkName': networkName,\n 'bridgeName': networkName,\n }\n template = JINJA_ENVIRONMENT.get_template(\"bridge.xml\")\n finalXML = template.render(template_values)\n filename = '/tmp/%s' %networkName\n with open(filename, 'w') as f:\n f.write(finalXML)\n f.close()\n\n f = open(filename)\n xmlconfig = f.read()\n #if primary==True:\n network = conn_libvirt.networkDefineXML(xmlconfig)\n \n if network == None:\n print('Failed to create a virtual network', file=sys.stderr)\n return\n network.setAutostart(True)\n network.create()\n print('The new persistent virtual network is active')", "def make_device_data(device_id, params_and_values):\n params = [param_tuple[0] for param_tuple in params_and_values]\n params_bitmask = encode_params(device_id, params)\n values = [param_tuple[1] for param_tuple in params_and_values]\n\n typeString = '<H' + format_string(device_id, params)\n\t\n temp_payload = struct.pack(typeString, params_bitmask, *values)\n payload = bytearray(temp_payload)\n\n message = HibikeMessage(messageTypes[\"DeviceData\"], payload)\n return message", "def create_network_function_device_config(self, context, request_data):\n\n try:\n nfp_context = module_context.init()\n log_info = request_data.get('info')\n logging_context = log_info['context'].get('logging_context', {})\n nfp_context['log_context'] = logging_context\n LOG.info(\"Received RPC CREATE NETWORK FUNCTION DEVICE CONFIG \"\n \"for %(service_type)s, NFI: %(nfi)s, \"\n \"NF_ID: %(nf_id)s\",\n {'service_type': request_data['info']['service_type'],\n 'nfi': request_data['info']['context']['nfi_id'],\n 'nf_id': request_data['info']['context']['nf_id']})\n\n self._invoke_service_agent('create', request_data, True)\n except Exception as err:\n msg = (\"Failed to create network device configuration. %s\" %\n str(err).capitalize())\n LOG.error(msg)", "def send_create(assoc, class_uid, attr_list=None):\n return assoc.send_n_create(attr_list, class_uid, \"1.2.3.4\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start all the ports that testpmd can see.
def start_all_ports(self):
    self.start_port("all")
[ "def start(self):\n self._logger.info(\"Starting TestPMD...\")\n dpdk.init()\n self._testpmd.start()\n self._logger.info(\"TestPMD...Started.\")\n\n self._testpmd.send('set fwd {}'.format(self._fwdmode), 1)\n\n if settings.getValue('VSWITCH_JUMBO_FRAMES_ENABLED'):\n self._testpmd.send('port stop all', 1) # ports must be stopped to set mtu\n self._testpmd.send('port config all max-pkt-len {}'.format(\n settings.getValue('VSWITCH_JUMBO_FRAMES_SIZE')), 1)\n self._testpmd.send('port start all', 1)\n\n for port in range(self._nports):\n self._testpmd.send('csum set {} {} {}'.format(\n self._csum_layer, self._csum_calc, port), 1)\n self._testpmd.send('csum parse_tunnel {} {}'.format(\n self._csum_tunnel, port), 1)\n\n self._testpmd.send('start', 1)", "def start_for_guest(self):\n self._logger.info(\"Starting TestPMD for one guest...\")\n dpdk.init()\n self._testpmd.start()\n self._logger.info(\"TestPMD...Started.\")\n\n if settings.getValue('VSWITCH_JUMBO_FRAMES_ENABLED'):\n self._testpmd.send('port stop all', 1) # ports must be stopped to set mtu\n self._testpmd.send('port config all max-pkt-len {}'.format(\n settings.getValue('VSWITCH_JUMBO_FRAMES_SIZE')), 1)\n # conflicting info if scatter needs to be enabled or not\n self._testpmd.send('port config all scatter on', 1)\n self._testpmd.send('port start all', 1)\n self._testpmd.wait(timeout=60) # port startup can take a few seconds\n\n self._testpmd.send('set portlist 0,2,1,3', 1)\n self._testpmd.send('set fwd {}'.format(self._fwdmode), 1)\n\n self._testpmd.send('start', 1)", "def start_port(self, port):\n self.__send_expect(\"port start %s\" % str(port), \"testpmd> \")\n time.sleep(3)", "def enable_ports(self):\n pass", "def prepare(self):\n self.logger.info('Prepare ports for test')\n for _, ser in self.serials.items():\n ser.prepare()\n self.logger.info('Ports are ready to test')", "def run_scan(self):\n open_ports = []\n closed_ports = []\n for port in range(1, 3000):\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock = ssl.wrap_socket(s)\n sock.connect((target, port))\n sock.shutdown(socket.SHUT_RDWR)\n sock.close()\n ports.append(port)\n except Exception, e:\n closed_ports.append(port)\n return open_ports, closed_ports", "def test_ports_scanning():\n host = ipaddress.ip_address(u\"92.222.10.88\")\n scanner = Scanner(host, mock=True)\n\n assert scanner.is_local() is False\n assert scanner.run_ping_test() is True\n\n scanner.perform_scan()\n ports = scanner.extract_ports(\"tcp\")\n\n assert len(ports) >= 3\n for port in ports:\n assert port.__class__ == PortReport\n\n expected_ports = [22, 80, 443]\n port_numbers = [port.port_number for port in ports]\n for expected_port in expected_ports:\n assert expected_port in port_numbers", "def start(self):\n for shell in self._shells.values():\n shell.connect()", "def allPortsScanner(self, ip_addr):\r\n for port in range(1, 65536):\r\n print(f\"{info} {port} scanning: \", end=\"\")\r\n self.portScanner(ip_addr, port)\r\n print()", "def _set_default_ports(self):\n\n self.def_tcp_ports = (21, 22, 23, 25, 53, 80, 110, 113, 139, 143, 443, 445,\n 993, 995, 3306, 5432, 8000, 8080)\n self.def_udp_port = (53, 68, 69, 123, 161, 500, 514, 1194)\n\n return", "def _StartPortForwarderServices(self, services_dir, addr, waterfall_addr):\n\n log_name = 'waterfall_port_forwarder.log.txt'\n log_path = os.path.join(services_dir, log_name)\n test_output_dir = os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR')\n if test_output_dir:\n log_path = os.path.join(test_output_dir, log_name)\n\n args = [\n 
self._ports_bin,\n '-addr',\n addr,\n '-waterfall_addr',\n waterfall_addr,\n ]\n\n return subprocess.Popen(\n args,\n stdin=open('/dev/null'), # cannot use the _DEV_NULL var,\n # b/c we close all fds across forks.\n stderr=subprocess.STDOUT,\n stdout=open(log_path, 'a+b'),\n cwd=self._sockets_dir,\n close_fds=True)", "def port_testing(self):\n\n try:\n try:\n remoteServerIP = socket.gethostbyname(self.hostname)\n except socket.gaierror:\n remoteServerIP = socket.gethostbyname(self.url.split(\"/\")[0].split(\":\")[0])\n\n for port in PORTS_TO_SCAN:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(0.3)\n result = sock.connect_ex((remoteServerIP, port[0]))\n sock.close()\n\n if result == 0 and port[1] is False:\n self.portWeight = 1\n return\n elif result != 0 and port[1] is True:\n self.portWeight = 1\n return\n self.portWeight = 0\n return\n\n except Exception as e:\n logger.error(e)\n return -2", "def declare_services(self):\n # sort them in their order of preferred ports\n # Must be executed sequentially to allow for port reservations.\n for process in sorted(self.list_all_processes(),\n key=lambda proc: proc.preferred_ports\n if proc.preferred_ports else [65536]):\n ip = process.ip_addr\n for exposed_service_name in process.exposed_services:\n port = process.get_port(process.exposed_services[exposed_service_name])\n self.exposed_services[exposed_service_name] = (ip, port)\n\n for binded_service_name in process.binded_services:\n port = process.get_port(process.binded_services[binded_service_name])\n self.binded_services[binded_service_name] = (ip, port)\n self.validate_connect()", "def start_all_peers(self):\n for t in self.peers.keys():\n for p in self.peers[t]:\n p.start_all_runners()", "def start_services(self):", "def _setup_ports(self, ports=None):\n for node in self.nodes:\n node.ports.clear()\n\n if not ports:\n ports = [\n ('all', (0, 3)),\n ('grain', (1, 5)),\n ('ore', (10, 15)),\n ('all', (26, 32)),\n ('wool', (42, 46)),\n ('all', (49, 52)),\n ('all', (47, 51)),\n ('brick', (33, 38)),\n ('lumber', (11, 16))\n ]\n\n for i, (resource, nodes) in enumerate(ports):\n self.ports[i].resource = resource\n self.ports[i].nodes = nodes\n\n for node in nodes:\n self.nodes[node].ports.append(self.ports[i])", "def activate_all_ports(self, packets=10):\n for valve in self.valves_manager.valves.values():\n valve.dp.dyn_running = True\n for port in valve.dp.ports.values():\n port.dyn_phys_up = True\n for port in valve.dp.stack_ports():\n self.up_stack_port(port, dp_id=valve.dp.dp_id)\n self._update_port_map(port, True)\n self.trigger_all_ports(packets=packets)", "def set_ports(r):\n ipc_port = str(r.netsim.config.IPC_PORT)\n netconf_ssh_port = str(r.netsim.config.NETCONF_SSH_PORT)\n netconf_tcp_port = str(r.netsim.config.NETCONF_SSH_PORT)\n snmp_port = str(r.netsim.config.SNMP_PORT)\n cli_ssh_port = str(r.netsim.config.CLI_SSH_PORT)\n\n os.environ[\"IPC_PORT\"] = ipc_port\n os.environ[\"NETCONF_SSH_PORT\"] = netconf_ssh_port\n os.environ[\"NETCONF_TCP_PORT\"] = netconf_tcp_port\n os.environ[\"SNMP_PORT\"] = snmp_port\n os.environ[\"CLI_SSH_PORT\"] = cli_ssh_port\n\n netsim_dir = r.netsim.config.netsim_dir\n os.environ[\"NETSIM_DIR\"] = netsim_dir", "def get_ports(ip_address):\n ip_address = sys.argv[1]\n\n for ports in range(1, 65535):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n if sock.connect_ex((ip_address, ports)) == 0:\n print(f\"Port {ports} is open!\")\n sock.close()\n else:\n print(f\"Port {ports} is closed!\")\n sock.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start a port that testpmd can see.
def start_port(self, port):
    self.__send_expect("port start %s" % str(port), "testpmd> ")
    time.sleep(3)
[ "def start_server(port: int):\n run(port)", "def open_port(self, port, protocol=\"TCP\"):\n cmd = ['open-port']\n cmd.append('{}/{}'.format(port, protocol))\n self._environment.command_runner(cmd)", "def start_for_guest(self):\n self._logger.info(\"Starting TestPMD for one guest...\")\n dpdk.init()\n self._testpmd.start()\n self._logger.info(\"TestPMD...Started.\")\n\n if settings.getValue('VSWITCH_JUMBO_FRAMES_ENABLED'):\n self._testpmd.send('port stop all', 1) # ports must be stopped to set mtu\n self._testpmd.send('port config all max-pkt-len {}'.format(\n settings.getValue('VSWITCH_JUMBO_FRAMES_SIZE')), 1)\n # conflicting info if scatter needs to be enabled or not\n self._testpmd.send('port config all scatter on', 1)\n self._testpmd.send('port start all', 1)\n self._testpmd.wait(timeout=60) # port startup can take a few seconds\n\n self._testpmd.send('set portlist 0,2,1,3', 1)\n self._testpmd.send('set fwd {}'.format(self._fwdmode), 1)\n\n self._testpmd.send('start', 1)", "def start(self):\n self._logger.info(\"Starting TestPMD...\")\n dpdk.init()\n self._testpmd.start()\n self._logger.info(\"TestPMD...Started.\")\n\n self._testpmd.send('set fwd {}'.format(self._fwdmode), 1)\n\n if settings.getValue('VSWITCH_JUMBO_FRAMES_ENABLED'):\n self._testpmd.send('port stop all', 1) # ports must be stopped to set mtu\n self._testpmd.send('port config all max-pkt-len {}'.format(\n settings.getValue('VSWITCH_JUMBO_FRAMES_SIZE')), 1)\n self._testpmd.send('port start all', 1)\n\n for port in range(self._nports):\n self._testpmd.send('csum set {} {} {}'.format(\n self._csum_layer, self._csum_calc, port), 1)\n self._testpmd.send('csum parse_tunnel {} {}'.format(\n self._csum_tunnel, port), 1)\n\n self._testpmd.send('start', 1)", "def start_all_ports(self):\n self.start_port(\"all\")", "def test_create_port(self):\n port = create_ofport({'device': 'a'})\n port_dict = {'some-port-attributes-go-here': 42,\n 'firewall_group': 1}\n self.map.create_port(port, port_dict)\n self._check_port('a', 1)\n self._check_fwg(1, ['a'])", "def test_bad_port():\n pytest.xfail(\"Bad port.\")\n connect_to_dremio_flight_server_endpoint(\"localhost\",\n \"12345\", \"dremio\", \"dremio123\", False, False, False)", "def _make_port(self, port):\n return Port(port)", "def runserver(args):\n TestServer().run(args.port)", "def start(self):\n self.logger.info(\"Starting NLaunch Console \" +\n \"(port='{port}', pwd_path='{pwd_path}')\".format(\n port=self.port, pwd_path=self.pwd_path))\n reactor.listenTCP(self.port, NLaunchFactory(self.pwd_path))\n reactor.run()", "def enable_ports(self):\n pass", "def open_firewall_port(ceph_node, port, protocol):\n ceph_node.open_firewall_port(port, protocol)", "def test_port():\n parser = create_parser()\n parsed_arguments = parser.parse_args([\"--port\", \"11111\"])\n assert parsed_arguments.port == 11111, \"Wrong port\"", "def test_create_forwarded_port(self):\n mock_adb = mock.Mock()\n mock_adb.get_version_number = lambda: 37\n mock_adb.tcp_forward = lambda hinted_port, device_port: hinted_port\n mock_session = mock.Mock()\n mock_session.adb = mock_adb\n mock_session.log = mock.Mock()\n\n self.assertEqual(8080,\n Sl4aSession._create_forwarded_port(\n mock_session, 9999, 8080))", "def start_Rserve(port):\n # First check that 'R' is in PATH:\n if not shutil.which('R'):\n pytest.exit(\"Cannot start R interpreter, R executable not in PATH\", returncode=1)\n\n rProc = subprocess.Popen(\n ['R', 'CMD', 'Rserve', '--no-save', '--RS-conf',\n os.path.join(HERE_PATH, 'rserve-test.conf'),\n '--RS-port', 
str(port)],\n stdout=open('/dev/null'), stderr=subprocess.PIPE)\n # wait a moment until Rserve starts listening on EXTRA_RPORT\n time.sleep(0.6)\n if rProc.poll():\n # process has already terminated, so provide its stderr to the user:\n raise RuntimeError('Rserve has terminated prematurely with the '\n 'following message: %s' % rProc.stderr.read())\n\n # store original socket timeout and set timeout to new value during startup\n # of Rserve:\n defaultTimeout = socket.getdefaulttimeout()\n socket.setdefaulttimeout(1)\n\n rserv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n cnt = 0\n # give it a maximum of 10 tries with some sleep in between to wait for\n # Rserve to come into action!\n while cnt < 10:\n try:\n # open a socket connection to Rserve\n rserv.connect(('', port))\n except socket.error:\n time.sleep(0.3)\n cnt += 1\n else:\n # got a connection! Jump out of the loop\n break\n else:\n # after trying 10 times we still got no connection to Rserv - something\n # must be wrong.\n raise RuntimeError('Could not connect to Rserv over the network')\n\n # set back original default timeout value:\n socket.setdefaulttimeout(defaultTimeout)\n\n # make a simple test that Rserve really answers correctly by looking at the\n # first few bytes:\n hdr = rserv.recv(1024)\n rserv.close()\n if not hdr.startswith(b'Rsrv01'):\n rProc.terminate()\n raise RuntimeError(\n 'received wrong header information from socket (was: \"%s\")'\n % str(hdr[:10])\n )\n return rProc", "def start_httpd(addr): # pragma: no cover\n host, port = addr.split(':')\n logging.info('Starting HTTPD on {}:{}'.format(host, port))\n prometheus_client.start_http_server(int(port), host)", "def _StartPortForwarderServices(self, services_dir, addr, waterfall_addr):\n\n log_name = 'waterfall_port_forwarder.log.txt'\n log_path = os.path.join(services_dir, log_name)\n test_output_dir = os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR')\n if test_output_dir:\n log_path = os.path.join(test_output_dir, log_name)\n\n args = [\n self._ports_bin,\n '-addr',\n addr,\n '-waterfall_addr',\n waterfall_addr,\n ]\n\n return subprocess.Popen(\n args,\n stdin=open('/dev/null'), # cannot use the _DEV_NULL var,\n # b/c we close all fds across forks.\n stderr=subprocess.STDOUT,\n stdout=open(log_path, 'a+b'),\n cwd=self._sockets_dir,\n close_fds=True)", "def test_default_port():\n parser = create_parser()\n parsed_arguments = parser.parse_args([])\n assert parsed_arguments.port == 50000, \"Wrong port\"", "def set_port(self, port_name):\r\n global port\r\n port = port_name\r\n print(\"port set to: \" + port)\r\n self.port_menu()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the ports to the bonding device as slaves.
def add_slave_to_bonding_device(self, bond_port, invert_verify=False, *slave_port):
    if len(slave_port) <= 0:
        utils.RED("No port exist when add slave to bonded device")
    for slave_id in slave_port:
        self.__send_expect("add bonding slave %d %d" % (slave_id, bond_port), "testpmd> ")
        slaves = self.get_info_from_bond_config("Slaves \(\d\): \[", "\d*( \d*)*", bond_port)
        if not invert_verify:
            self.verify(str(slave_id) in slaves,
                        "Add port as bonding slave failed")
        else:
            self.verify(str(slave_id) not in slaves,
                        "Add port as bonding slave successfully,should fail")
[ "def add_slaves(no_of_slaves=''):\n _, master_ip = get_master_dns_ip()\n if master_ip and no_of_slaves:\n # Test and see if we can find existing slaves\n create_slaves(int(no_of_slaves))\n host_list = [slave.public_dns_name for slave in SLAVE_INSTANCES.itervalues()]\n execute(run_slave_tasks, hosts=host_list)\n else:\n print 'Setup a Master first'", "def add(self, slave):\n\n\t\tself.slaves[slave.pid] = slave", "def deploy_slaves():\n # Time for our slaves\n _, master_ip = get_master_dns_ip()\n if master_ip:\n # Test and see if we can find existing slaves\n slave_list = get_slave_dns_list()\n if NO_OF_SLAVES - len(slave_list) > 0:\n print 'Found {0} existing slaves creating {1} new slaves'.format(len(slave_list),\n NO_OF_SLAVES - len(slave_list))\n create_slaves(NO_OF_SLAVES - len(slave_list))\n host_list = [slave.public_dns_name for slave in SLAVE_INSTANCES.itervalues()] + slave_list\n else:\n print 'No more slaves needed'\n host_list = slave_list\n\n execute(run_slave_tasks, hosts=host_list)\n else:\n print 'Setup a Master first'", "def _setup_ports(self, ports=None):\n for node in self.nodes:\n node.ports.clear()\n\n if not ports:\n ports = [\n ('all', (0, 3)),\n ('grain', (1, 5)),\n ('ore', (10, 15)),\n ('all', (26, 32)),\n ('wool', (42, 46)),\n ('all', (49, 52)),\n ('all', (47, 51)),\n ('brick', (33, 38)),\n ('lumber', (11, 16))\n ]\n\n for i, (resource, nodes) in enumerate(ports):\n self.ports[i].resource = resource\n self.ports[i].nodes = nodes\n\n for node in nodes:\n self.nodes[node].ports.append(self.ports[i])", "def create_ports(self, data):\n return self._bulk_create(_port.Port, data)", "def test_broadcast_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def ports(self):\n return chain(self.component.analog_ports, self.component.event_ports)", "def deploy_node_multiple_interfaces(self):\n self.env.revert_snapshot(\"ready_with_3_slaves\")\n\n interfaces_dict = {\n 'eth1': ['public'],\n 'eth2': ['storage'],\n 'eth3': ['fixed'],\n 'eth4': ['management'],\n }\n\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=DEPLOYMENT_MODE\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['controller'],\n 'slave-02': ['compute'],\n 'slave-03': ['cinder']\n }\n )\n nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)\n for node in nailgun_nodes:\n self.fuel_web.update_node_networks(node['id'], interfaces_dict)\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n for node in ['slave-01', 'slave-02', 'slave-03']:\n 
self.env.verify_network_configuration(node)\n\n self.fuel_web.verify_network(cluster_id)\n\n self.env.make_snapshot(\"deploy_node_multiple_interfaces\")", "def distribute_subdir_slaves(master, builders, hostnames, slaves):\n # Assuming lists are used to ensure determinism.\n assert type(builders) == list\n assert type(hostnames) == list\n\n # Assuming there are more or equal builders than hostnames.\n assert len(builders) >= len(hostnames)\n\n subdir_index = 0\n hostname_index = 0\n for builder in builders:\n if hostname_index >= len(hostnames):\n # All hostnames were used, rotate and advance the subdir index.\n hostname_index = 0\n subdir_index += 1\n slaves.append({\n 'master': master,\n 'builder': builder,\n 'hostname': hostnames[hostname_index],\n 'os': 'linux',\n 'version': 'trusty',\n 'bits': '64',\n 'subdir': str(subdir_index),\n })\n hostname_index += 1", "def remove_all_slaves(self, bond_port):\n all_slaves = self.get_bond_slaves(bond_port)\n all_slaves = all_slaves.split()\n if len(all_slaves) == 0:\n pass\n else:\n self.remove_slave_from_bonding_device(bond_port, False, *all_slaves)", "def create_and_attach_ports(self):\n self.validate()\n\n for nic_type, nic in self._validated:\n if nic_type != 'port':\n # The 'binding:host_id' must be set to ensure IP allocation\n # is not deferred.\n # See: https://storyboard.openstack.org/#!/story/2009715\n port = self._connection.network.create_port(\n binding_host_id=self._node.id, **nic)\n self.created_ports.append(port.id)\n LOG.info('Created port %(port)s for node %(node)s with '\n '%(nic)s', {'port': _utils.log_res(port),\n 'node': _utils.log_res(self._node),\n 'nic': nic})\n else:\n # The 'binding:host_id' must be set to ensure IP allocation\n # is not deferred.\n # See: https://storyboard.openstack.org/#!/story/2009715\n self._connection.network.update_port(\n nic, binding_host_id=self._node.id)\n port = nic\n\n self._connection.baremetal.attach_vif_to_node(self._node,\n port.id)\n LOG.info('Attached port %(port)s to node %(node)s',\n {'port': _utils.log_res(port),\n 'node': _utils.log_res(self._node)})\n self.attached_ports.append(port.id)", "def add_listening_ports(self, protocol, ports):\n if protocol.upper() == \"UDP\":\n for p in ports:\n if p not in self.listening_udp_ports:\n self.listening_udp_ports += ports\n elif protocol.upper() == \"TCP\":\n for p in ports:\n if p not in self.listening_tcp_ports:\n self.listening_tcp_ports += ports\n else:\n print((\"Invalid Protocol: \" + protocol))", "def update_ports(self):\n\n if self.to_i != None:\n self.from_e.ports[self.from_i - 1].networks = self.to_e.ports[self.to_i - 1].networks\n else:\n self.from_e.ports[self.from_i - 1].networks = [self.to_e]", "def get_slaves(self, name):\n slaves = self.execute('SENTINEL', 'slaves', name)\n result = []\n for slave in slaves:\n result.append(dict_from_list(slave))\n return result", "def get_slave_list (bus=\"wisbone\"):\n\tslave_list = []\n\treturn slave_list", "def start_all_ports(self):\n self.start_port(\"all\")", "def enable_ports(self):\n pass", "def configure_ovs():\n if config('plugin') in [OVS, OVS_ODL]:\n if not service_running('openvswitch-switch'):\n full_restart()\n # Get existing set of bridges and ports\n current_bridges_and_ports = get_bridges_and_ports_map()\n log(\"configure OVS: Current bridges and ports map: {}\"\n .format(\", \".join(\"{}: {}\".format(b, \",\".join(v))\n for b, v in current_bridges_and_ports.items())))\n\n add_bridge(INT_BRIDGE, brdata=generate_external_ids())\n add_bridge(EXT_BRIDGE, 
brdata=generate_external_ids())\n\n ext_port_ctx = ExternalPortContext()()\n portmaps = DataPortContext()()\n bridgemaps = parse_bridge_mappings(config('bridge-mappings'))\n\n # if we have portmaps, then we ignore its value and log an\n # error/warning to the unit's log.\n if config('data-port') and config('ext-port'):\n log(\"Both ext-port and data-port are set. ext-port is deprecated\"\n \" and is not used when data-port is set\", level=ERROR)\n\n # only use ext-port if data-port is not set\n if not portmaps and ext_port_ctx and ext_port_ctx['ext_port']:\n _port = ext_port_ctx['ext_port']\n add_bridge_port(EXT_BRIDGE, _port,\n ifdata=generate_external_ids(EXT_BRIDGE),\n portdata=generate_external_ids(EXT_BRIDGE))\n log(\"DEPRECATION: using ext-port to set the port {} on the \"\n \"EXT_BRIDGE ({}) is deprecated. Please use data-port instead.\"\n .format(_port, EXT_BRIDGE),\n level=WARNING)\n\n for br in bridgemaps.values():\n add_bridge(br, brdata=generate_external_ids())\n if not portmaps:\n continue\n\n for port, _br in portmaps.items():\n if _br == br:\n if not is_linuxbridge_interface(port):\n add_bridge_port(br, port, promisc=True,\n ifdata=generate_external_ids(br),\n portdata=generate_external_ids(br))\n else:\n # NOTE(lourot): this will raise on focal+ and/or if the\n # system has no `ifup`. See lp:1877594\n add_ovsbridge_linuxbridge(\n br, port, ifdata=generate_external_ids(br),\n portdata=generate_external_ids(br))\n\n target = config('ipfix-target')\n bridges = [INT_BRIDGE, EXT_BRIDGE]\n bridges.extend(bridgemaps.values())\n\n if target:\n for bridge in bridges:\n disable_ipfix(bridge)\n enable_ipfix(bridge, target)\n else:\n # NOTE: removing ipfix setting from a bridge is idempotent and\n # will pass regardless of the existence of the setting\n for bridge in bridges:\n disable_ipfix(bridge)\n\n new_bridges_and_ports = get_bridges_and_ports_map()\n log(\"configure OVS: Final bridges and ports map: {}\"\n .format(\", \".join(\"{}: {}\".format(b, \",\".join(v))\n for b, v in new_bridges_and_ports.items())),\n level=DEBUG)\n\n # Ensure this runs so that mtu is applied to data-port interfaces if\n # provided.\n service_restart('os-charm-phy-nic-mtu')", "def run_on_all_slaves_on_all_hosts(cmd):\n return run_on_all_slave_hosts(_get_remote_slaves_cmd(cmd))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the specified slave port from the bonding device.
def remove_slave_from_bonding_device(self, bond_port, invert_verify=False, *slave_port):
    if len(slave_port) <= 0:
        utils.RED("No port exist when remove slave from bonded device")
    for slave_id in slave_port:
        self.dut.send_expect("remove bonding slave %d %d" % (int(slave_id), bond_port), "testpmd> ")
        out = self.get_info_from_bond_config("Slaves: \[", "\d*( \d*)*", bond_port)
        if not invert_verify:
            self.verify(str(slave_id) not in out,
                        "Remove slave to fail from bonding device")
        else:
            self.verify(str(slave_id) in out,
                        "Remove slave successfully from bonding device,should be failed")
[ "def delete_port(port):\n return IMPL.delete_port(port)", "def multiroom_remove(self, slave_ip: str) -> str:\n self._logger.info(\"Removing slave '\"+str(slave_ip)+\"' from multiroom group\")\n return self._send(\"multiroom:SlaveKickout:\"+str(slave_ip)).content.decode(\"utf-8\")", "def delete_port(self, port_name):\n command = ovs_vsctl.VSCtlCommand(\n 'del-port', (self.br_name, port_name), '--if-exists')\n self.run_command([command])", "def remove_all_slaves(self, bond_port):\n all_slaves = self.get_bond_slaves(bond_port)\n all_slaves = all_slaves.split()\n if len(all_slaves) == 0:\n pass\n else:\n self.remove_slave_from_bonding_device(bond_port, False, *all_slaves)", "def remove_slave_group_id(self, slave_group_id, persister=None):\n persister.exec_stmt(Group.DELETE_MASTER_SLAVE_GROUP_MAPPING,\n {\"params\": (slave_group_id, )})", "def delete_port(self, port_name):\n\n try:\n port_num = self.get_port_number(port_name)\n\n mask = np.arange(len(self.ports)) != port_num\n s = self.s[mask]\n self.s = s[:,mask]\n self.z0 = self.z0[mask]\n\n ports = list(self.ports)\n ports.remove(port_name)\n self.ports = tuple(ports)\n\n except:\n print(\"The \\\"{}\\\" port does not exist.\".format(port_name))", "def test_delete_logical_router_port(self):\n lrport = self._mocked_lrport()\n\n uuid = test_constants_v3.FAKE_ROUTER_PORT['id']\n lrport.delete(uuid)\n test_client.assert_json_call(\n 'delete', lrport,\n 'https://1.2.3.4/api/v1/logical-router-ports/%s' % uuid)", "def remove_out_port(self, id):\n\n del self.out_ports[id]", "def remove_in_port(self, id):\n\n del self.in_ports[id]", "def del_port(self, user, port):\n try:\n self.c.execute(sql['del_port'], (user, port))\n self.c.execute(sql['del_stocks'], (user, port))\n self.db.commit()\n except sqlite3.Error as e:\n self.db.rollback()\n flash(\"Can't delete port because \"+str(e))", "def free_port(self, port):\n \n self.logging.debug(\"Freeing port %d\" %(port))\n try:\n os.remove(self.get_file_name(port))\n except OSError:\n pass", "def test_Bridge_orport_del(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.assertEqual(self.bridge.orPort, 36489)\n\n del(self.bridge.orPort)\n self.assertIsNone(self.bridge.orPort)\n self.assertIsNone(self.bridge._orPort)", "def del_dynamic_port(self, addon_slug: str) -> None:\n if addon_slug not in self.ports:\n return\n\n del self.ports[addon_slug]\n self.save_data()", "def disconnect(self, port=CONFIG.SWITCH.ACRONAME_PORT, verbose=True, *args, **kwargs):\n if verbose:\n self.logger.info('Disconnecting USB{} port...'.format(port))\n out = self.switch.setPortDisable(port)\n if verbose:\n self.logger.done()\n self.switchlogger.info('%s disable port [%d]: %s' % (ACRONAME_TAG, port, out))", "def handle_link_down(self, port):\n self._routing_table.remove_down_ports(port)\n self._ports.remove(port)\n self._send_routes()", "def remove_connector(self, reservation_id: ReservationId, port1: str, port2: str):\n logger.info(f\"Removing connector between {port1} and {port2}\")\n self._api.DisconnectRoutesInReservation(reservation_id, [port1, port2])\n self._api.RemoveConnectorsFromReservation(reservation_id, [port1, port2])", "def add_slave_to_bonding_device(self, bond_port, invert_verify=False, *slave_port):\n if len(slave_port) <= 0:\n utils.RED(\"No port exist when add slave to bonded device\")\n for slave_id in slave_port:\n self.__send_expect(\"add bonding slave %d %d\" % (slave_id, bond_port), \"testpmd> \")\n\n slaves = self.get_info_from_bond_config(\"Slaves \\(\\d\\): \\[\", \"\\d*( \\d*)*\", 
bond_port)\n if not invert_verify:\n self.verify(str(slave_id) in slaves,\n \"Add port as bonding slave failed\")\n else:\n self.verify(str(slave_id) not in slaves,\n \"Add port as bonding slave successfully,should fail\")", "def removeStaticPort(self, port):\n if port.dynamic():\n raise RuntimeError(\"The given port should be static but is dynamic.\")\n self._environment.removePort(port)", "def detach_port(self, instance_obj, network_obj):\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove all slaves of the specified bonded device.
def remove_all_slaves(self, bond_port):
    all_slaves = self.get_bond_slaves(bond_port)
    all_slaves = all_slaves.split()
    if len(all_slaves) == 0:
        pass
    else:
        self.remove_slave_from_bonding_device(bond_port, False, *all_slaves)
[ "def remove_slave_group_ids(self, persister=None):\n persister.exec_stmt(Group.DELETE_SLAVE_GROUPS,\n {\"params\": (self.__group_id, )})", "def delete(self):\n for port in self.ports:\n port.delete()\n self.ports = []\n self.subnet.close()", "def multiroom_remove(self, slave_ip: str) -> str:\n self._logger.info(\"Removing slave '\"+str(slave_ip)+\"' from multiroom group\")\n return self._send(\"multiroom:SlaveKickout:\"+str(slave_ip)).content.decode(\"utf-8\")", "def remove_from_group(self):\n self.simulator.devices['bulbs'].remove(self)", "def unregister(self):\n dobroad = self._broadcasting and self.data is not None and \\\n self.data.hub is not None\n self.do_broadcast(False)\n if dobroad:\n msg = SubsetDeleteMessage(self)\n self.data.hub.broadcast(msg)", "def remove_slave_from_bonding_device(self, bond_port, invert_verify=False, *slave_port):\n if len(slave_port) <= 0:\n utils.RED(\"No port exist when remove slave from bonded device\")\n for slave_id in slave_port:\n self.dut.send_expect(\"remove bonding slave %d %d\" % (int(slave_id), bond_port), \"testpmd> \")\n out = self.get_info_from_bond_config(\"Slaves: \\[\", \"\\d*( \\d*)*\", bond_port)\n if not invert_verify:\n self.verify(str(slave_id) not in out,\n \"Remove slave to fail from bonding device\")\n else:\n self.verify(str(slave_id) in out,\n \"Remove slave successfully from bonding device,should be failed\")", "def delete(self):\n\t\t[ n.delete() for n in self.nodes ]", "def remove_all_targets(self):\n cur = self.conn.cursor()\n cur.execute(\"DELETE FROM targets\")\n cur.close()", "def shutdown_lvm(device):\n device = block.sys_block_path(device)\n # lvm devices have a dm directory that containes a file 'name' containing\n # '{volume group}-{logical volume}'. The volume can be freed using lvremove\n name_file = os.path.join(device, 'dm', 'name')\n (vg_name, lv_name) = lvm.split_lvm_name(util.load_file(name_file))\n # use two --force flags here in case the volume group that this lv is\n # attached two has been damaged\n LOG.debug('running lvremove on %s/%s', vg_name, lv_name)\n util.subp(['lvremove', '--force', '--force',\n '{}/{}'.format(vg_name, lv_name)], rcs=[0, 5])\n # if that was the last lvol in the volgroup, get rid of volgroup\n if len(lvm.get_lvols_in_volgroup(vg_name)) == 0:\n util.subp(['vgremove', '--force', '--force', vg_name], rcs=[0, 5])\n # refresh lvmetad\n lvm.lvm_scan()", "def unregister_all_servers(self):\n for s in self.servers.services:\n if s.node_uuid == self.node_uuid:\n self.unregister_server(s)", "def do_host_device_label_remove(cc, args):\n host = ihost_utils._find_ihost(cc, args.hostnameorid)\n device = pci_device.find_device(cc, host, args.nameorpciaddr)\n for i in args.attributes[0]:\n device_labels = cc.device_label.list()\n found = False\n for lbl in device_labels:\n if (lbl.pcidevice_uuid == device.uuid and lbl.label_key == i):\n cc.device_label.remove(lbl.uuid)\n print('Deleted device label (%s, %s) for host %s device %s' %\n (lbl.label_key, lbl.label_value, host.hostname, device.name))\n found = True\n if not found:\n print('Host device label not found: host %s, device %s, label key %s ' %\n (host.hostname, device.name, i))", "def remove_empty_devices(self):\n entity_registry = er.async_get(self.hass)\n device_registry = dr.async_get(self.hass)\n device_list = dr.async_entries_for_config_entry(\n device_registry, self.config_entry.entry_id\n )\n\n for device_entry in device_list:\n entities = er.async_entries_for_device(\n entity_registry, device_entry.id, include_disabled_entities=True\n 
)\n\n if not entities:\n _LOGGER.debug(\"Removing orphaned device: %s\", device_entry.name)\n device_registry.async_update_device(\n device_entry.id, remove_config_entry_id=self.config_entry.entry_id\n )", "def delete(self, *devices):\n for d in devices:\n d.delete()", "def removeMachine(self, macAddress):\r\n for i in range(len(self.nodes)):\r\n if macAddress in self.nodes[i]:\r\n n = self.nodes[i]\r\n \r\n dbn = self.session.query(WorkerNode).filter_by(mac_address=macAddress).first()\r\n print dbn, 'removed'\r\n \r\n self.session.delete( dbn )\r\n self.session.commit()\r\n \r\n self.nodes.remove(n) \r\n return", "def deallocate(self, servers):\n for server in servers:\n log.debug(\"Deallocating server: {}\".format(server))\n self._allocated.remove(server)", "def reset_slave(server, clean=False):\n param = \"ALL\" if clean else \"\"\n server.exec_stmt(\"RESET SLAVE %s\" % (param, ))", "def destroy_all():\n log.info(\"Destroying the %s cluster\" % cluster_name)\n for n in seeds+nodes+stash:\n n.destroy()\n remove(save_file)", "def rm_host(self, host, is_master):\n self.hosts.pop(host)", "def test_remove_hub_from_cluster(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the primary slave for the bonding device.
def set_primary_for_bonding_device(self, bond_port, slave_port, invert_verify=False):
    self.dut.send_expect("set bonding primary %d %d" % (slave_port, bond_port), "testpmd> ")
    out = self.get_info_from_bond_config("Primary: \[", "\d*", bond_port)
    if not invert_verify:
        self.verify(str(slave_port) in out,
                    "Set bonding primary port failed")
    else:
        self.verify(str(slave_port) not in out,
                    "Set bonding primary port successfully,should not success")
[ "def setMaster(self, led, master):\n self.logger.info(\"setting master for led %d to %d\"%(led,master))\n self._connManager.send(self.protocolArne.setMaster(master, led=led))\n #self._connManager.send(COMMANDS['setMaster'] + struct.pack('B', led) + struct.pack('B', master))", "def set_master(self, master):\n self.master_host = master", "def _set_slave_num(self, device_num):\n self.slave_num = self.args.world_size\n self.slave_proc_num = 1\n system_device_num = self._get_slave_device_num()\n if device_num > 0 and device_num <= system_device_num:\n self.slave_device_num_per_proc = device_num\n self.slave_proc_num = int(system_device_num // device_num)\n else:\n self.slave_device_num_per_proc = system_device_num\n self.slave_proc_num = 1\n self.world_size = self.slave_num * self.slave_proc_num\n if General.cluster.standalone_boot:\n self.world_size = General.cluster.num_workers\n General.cluster.num_workers = self.world_size\n General.cluster.num_nodes = self.slave_num\n General.cluster.num_workers_per_node = self.slave_proc_num\n return", "def test_slave_master_up_cluster_id(self):\n self._cluster.master = None\n self._slave_1.is_slave = False\n self._slave_1.cluster_id = 1\n self._cluster.update_cluster()\n assert_equals(self._slave_1.cluster_id, 0)", "def SetMasterInstance(self, master_instance):\n self.master = master_instance.key\n self.put()", "def __set_minion_master(self):\n master_id = self.master_remote.hostname\n for rem in self.remotes.iterkeys():\n # remove old master public key if present. Minion will refuse to\n # start if master name changed but old key is present\n delete_file(rem, '/etc/salt/pki/minion/minion_master.pub',\n sudo=True, check=False)\n\n # set master id\n sed_cmd = ('echo master: {} > '\n '/etc/salt/minion.d/master.conf').format(master_id)\n rem.run(args=[\n 'sudo',\n 'sh',\n '-c',\n sed_cmd,\n ])", "def _configure_as_slave(group, server):\n try:\n if group.master:\n master = _server.MySQLServer.fetch(group.master)\n master.connect()\n _utils.switch_master(server, master)\n except _errors.DatabaseError as error:\n msg = \"Error trying to configure server ({0}) as slave: {1}.\".format(\n server.uuid, error)\n _LOGGER.debug(msg)\n raise _errors.ServerError(msg)", "def SetMasterIpAddress(self, master_ip):\n master = self.master.get()\n master.external_ip = master_ip\n master.put()", "def test(device, slave_address):\n result = device.set_slave_address(slave_address + 10)\n assert result is None\n\n result = device.get_slave_address()\n assert type(result) is int\n assert result == slave_address + 10\n\n # restore original address\n device.set_slave_address(slave_address)\n assert device.get_slave_address() == slave_address", "def configure_slave(\n self,\n ssh_client: paramiko.client.SSHClient,\n cluster: FlintrockCluster):\n raise NotImplementedError", "def setAllMaster(self, master):\n self.logger.info(\"setting master for all leds to %d\"%(master))\n self._connManager.send(self.protocolArne.setMaster(master))", "def determine_new_master(self):\n self.master_host = determine_host_address()", "def set_i2c_address(self, address):\n I2C_SLAVE = 0x703\n\tfcntl.ioctl(self.i2c_reader, I2C_SLAVE, address)\n\tfcntl.ioctl(self.i2c_writer, I2C_SLAVE, address)\n self.i2c_address = address", "def set_mastervol(db):\r\n # Turn off mute\r\n mastervol().SetMute(0, None)\r\n # Set PC volume from min vol -75 to max vol 0\r\n mastervol().SetMasterVolumeLevel(db, None)", "def _change_to_candidate(group_id, master_uuid, update_only=False):\n forbidden_status = 
(_server.MySQLServer.FAULTY, )\n master = _server.MySQLServer.fetch(_uuid.UUID(master_uuid))\n master.mode = _server.MySQLServer.READ_WRITE\n master.status = _server.MySQLServer.PRIMARY\n\n if not update_only:\n # Prepare the server to be the master\n master.connect()\n _utils.reset_slave(master)\n _utils.set_read_only(master, False)\n\n group = _server.Group.fetch(group_id)\n _set_group_master_replication(group, master.uuid, update_only)\n\n if not update_only:\n # Make slaves point to the master.\n for server in group.servers():\n if server.uuid != _uuid.UUID(master_uuid) and \\\n server.status not in forbidden_status:\n try:\n server.connect()\n _utils.switch_master(server, master)\n except _errors.DatabaseError as error:\n _LOGGER.debug(\n \"Error configuring slave (%s): %s.\", server.uuid, error\n )\n\n # At the end, we notify that a server was promoted.\n _events.trigger(\"SERVER_PROMOTED\", set([group_id]),\n group_id, master_uuid\n )", "def multiroom_master(self, ssid: str, channel: int, auth: str, encryption: str, psk: str) -> str:\n self._logger.info(\"Requesting multiroom sync as slave to master at ssid '\"+str(ssid)+\"'...\")\n return self._send(\"ConnectMasterAp:ssid=\" + str(self._hex(ssid)) + \":ch=\" + str(channel) + \":auth=\" + auth +\n \":encry=\" + encryption + \":pwd=\" + self._hex(psk) + \":chext=0\").content.decode(\"utf-8\")", "def add(self, slave):\n\n\t\tself.slaves[slave.pid] = slave", "def bootMaster(self):\n self.libvirt.bootMaster()\n time.sleep(100)", "def configure_master(\n self,\n ssh_client: paramiko.client.SSHClient,\n cluster: FlintrockCluster):\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the mode for the bonding device.
def set_mode_for_bonding_device(self, bond_port, mode):
    self.dut.send_expect("set bonding mode %d %d" % (mode, bond_port), "testpmd> ")
    mode_value = self.get_bond_mode(bond_port)
    self.verify(str(mode) in mode_value, "Set bonding mode failed")
[ "def _set_mode(self, mode):\n self._parent._write(self, DacBase._COMMAND_SET_SLOT_MODE.format(mode))\n self._mode = mode", "def set_mode(self, mode):\n self._mode = mode", "def set_mode(self, mode):\n self._write_byte(BNO055_OPR_MODE_ADDR, mode & 0xFF)\n # Delay for 30 milliseconds (datsheet recommends 19ms, but a little more\n # can't hurt and the kernel is going to spend some unknown amount of time\n # too).\n time.sleep(0.03)", "def set_mode(self, val):\n # self.property_set(register_name, val)\n self.property_set(\"mode\", Sample(0, value=val, unit=\"dF\"))\n \n try:\n self.serial_send(\"A=1,Z=1,M=\" + str(self.modes[val.title()]) + \"\\x0D\")\n except:\n print \"error setting thermostat\"", "def set_mode(mode):\n master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')\n master.wait_heartbeat()\n\n mode_id = master.mode_mapping()[mode]\n master.mav.set_mode_send(\n master.target_system,\n mavutil.mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED,\n mode_id)\n print(\"Mode \" + mode + \" successfully set.\")\n return True", "def mode(ctx, mode, touch_eject, autoeject_timeout, chalresp_timeout, force):\n dev = ctx.obj['dev']\n if autoeject_timeout:\n touch_eject = True\n autoeject = autoeject_timeout if touch_eject else None\n\n if mode is not None:\n if mode.transports != TRANSPORT.CCID:\n autoeject = None\n if touch_eject:\n ctx.fail('--touch-eject can only be used when setting'\n ' CCID-only mode')\n\n if not force:\n if mode == dev.mode:\n click.echo('Mode is already {}, nothing to do...'.format(mode))\n ctx.exit()\n elif not dev.has_mode(mode):\n click.echo('Mode {} is not supported on this YubiKey!'\n .format(mode))\n ctx.fail('Use --force to attempt to set it anyway.')\n force or click.confirm('Set mode of YubiKey to {}?'.format(mode),\n abort=True, err=True)\n\n try:\n dev.set_mode(mode, chalresp_timeout, autoeject)\n if not dev.can_write_config:\n click.echo(\n 'Mode set! You must remove and re-insert your YubiKey '\n 'for this change to take effect.')\n except ModeSwitchError as e:\n logger.debug('Failed to switch mode', exc_info=e)\n click.echo('Failed to switch mode on the YubiKey. 
Make sure your '\n 'YubiKey does not have an access code set.')\n\n else:\n click.echo('Current connection mode is: {}'.format(dev.mode))\n supported = ', '.join(t.name for t in TRANSPORT\n .split(dev.config.usb_supported))\n click.echo('Supported USB interfaces are: {}'.format(supported))", "def set_disco(self, mode=''):\n self.on()\n if mode.upper() in self.DISCO_CODES:\n command = self.DISCO_CODE + self.DISCO_CODES[mode.upper()]\n self.send_command(command, byte2=b\"\", byte3=b\"\")\n else:\n self.send_command(self.DISCO_MODE)", "def set_mode(self, mode = \"CHP\"):\n return self.echo(\":INIT:\" + mode)", "def setMode(self,mode = 0, master_channel = 0):\n if mode == 1:\n cmd = \"MO \"+ str(self.channel) +\" 1 \"+ str(master_channel)\n else:\n cmd = \"MO \"+ str(self.channel) +str(mode)", "def _mode_set(self, thermostat_mode: ThermostatMode):\n if thermostat_mode in [ThermostatMode.FAN_ALWAYS_ON, ThermostatMode.FAN_AUTO]:\n self._groups[GRP_FAN_MODE].set_value(thermostat_mode)\n else:\n self._groups[GRP_SYS_MODE].set_value(thermostat_mode)", "def set_mode(self, mode: OutputMode | None) -> None:\n if mode is None:\n lib.wlr_output_set_mode(self._ptr, ffi.NULL)\n else:\n lib.wlr_output_set_mode(self._ptr, mode._ptr)", "def detected_mode_set(self, event):\n self.mode.set(2)\n self.change_mode()", "def set_operating_mode(self, channel, mode):\n self.logger.debug('Set operating mode: {}'.format(mode))\n self.query(\"L{}I{}\".format(channel, self.MODES[mode]))", "def setConnectionMode(mode):\n # type: (int) -> None\n print(mode)", "def set_edid_mode(self, mode: str):\n if mode not in ['port1', 'remix', 'default']:\n _LOGGER.error(\"Bad EDID mode\")\n return\n result = self._avior.set_edid_mode(mode)\n if \"OK\" in result:\n pass\n else:\n _LOGGER.error(\"Set EDID mode error: {}\".format(result))", "def send_set_channel_mode_to(self, channel_name, mode):\n\t\tcommand = \"MODE #%s %s\" % (channel_name, mode)\n\t\tself.send_command_to_server(command)", "def set_display_mode(self, mode):\n self._mode = mode", "def set_password_mode(self, mode):\n if mode:\n self.send_raw(bytes([IAC, WILL, ECHO]))\n else:\n self.send_raw(bytes([IAC, WONT, ECHO]))", "def _enable_and_set_mode(self, interface, mode):\n # Enable switching\n url = self._construct_url(interface)\n payload = '<switchport></switchport>'\n self._make_request('POST', url, data=payload,\n acceptable_error_codes=(409,))\n\n # Set the interface mode\n if mode in ['access', 'trunk']:\n url = self._construct_url(interface, suffix='mode')\n payload = '<mode><vlan-mode>%s</vlan-mode></mode>' % mode\n self._make_request('PUT', url, data=payload)\n else:\n raise AssertionError('Invalid mode')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the MAC for the bonding device.
def set_mac_for_bonding_device(self, bond_port, mac):
    self.dut.send_expect("set bonding mac_addr %s %s" % (bond_port, mac), "testpmd> ")
    new_mac = self.get_port_mac(bond_port)
    self.verify(new_mac == mac, "Set bonding mac failed")
[ "def mac(self, mac):\n self._query_helper(\"system\", \"set_mac_addr\", {\"mac\": mac})", "def mac(self, mac: str) -> None:\n self._query_helper(\"system\", \"set_mac_addr\", {\"mac\": mac})", "def vpp_set_interface_mac(node, interface, mac):\n cmd = u\"sw_interface_set_mac_address\"\n args = dict(\n sw_if_index=InterfaceUtil.get_interface_index(node, interface),\n mac_address=L2Util.mac_to_bin(mac)\n )\n err_msg = f\"Failed to set MAC address of interface {interface}\" \\\n f\"on host {node[u'host']}\"\n with PapiSocketExecutor(node) as papi_exec:\n papi_exec.add(cmd, **args).get_reply(err_msg)", "def setmac(if_vlan, if_wan, mac):\n setifflags(if_vlan, getifflags(if_vlan) & 0xffff ^ IFF_UP)\n setifflags(if_wan, getifflags(if_wan) & 0xffff ^ IFF_UP)\n setifhwaddr(if_wan, mac)\n setifhwaddr(if_vlan, mac)\n setifflags(if_wan, getifflags(if_wan) | IFF_UP)\n setifflags(if_vlan, getifflags(if_vlan) | IFF_UP)", "def set_mac_address(self, mac_address=None):\n self._mac_address = mac_address if mac_address else \\\n generate_mac_address()", "def _set_mac(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"mac\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"mac must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"mac\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__mac = t\n if hasattr(self, '_set'):\n self._set()", "def put_device_mac(new_device_mac): # noqa: E501\n if connexion.request.is_json:\n new_device_mac = DeviceMAC.from_dict(connexion.request.get_json()) # noqa: E501\n return 'do some magic!'", "def set_linux_interface_mac(\n node, interface, mac, namespace=None, vf_id=None):\n mac_str = f\"vf {vf_id} mac {mac}\" if vf_id is not None \\\n else f\"address {mac}\"\n ns_str = f\"ip netns exec {namespace}\" if namespace else u\"\"\n\n cmd = f\"{ns_str} ip link set {interface} {mac_str}\"\n exec_cmd_no_error(node, cmd, sudo=True)", "def mac_id(self, mac_id):\n\n self._mac_id = mac_id", "def host_mac(self, host_mac):\n\n self._host_mac = host_mac", "def _set_mac(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=mac.mac, is_container='container', presence=False, yang_name=\"mac\", rest_name=\"mac\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure MAC parameters', u'callpoint': u'MacaclAccessgroupIntVlanCP', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MAC_ACL_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"mac must be of a type 
compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=mac.mac, is_container='container', presence=False, yang_name=\"mac\", rest_name=\"mac\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure MAC parameters', u'callpoint': u'MacaclAccessgroupIntVlanCP', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MAC_ACL_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__mac = t\n if hasattr(self, '_set'):\n self._set()", "def change_macaddr(interface: str, new_macaddr: str) -> None:\n subprocess.call(['ifconfig', interface, 'down'])\n subprocess.call(['ifconfig', interface, 'hw', 'ether', new_macaddr])\n subprocess.call(['ifconfig', interface, 'up'])", "def setifhwaddr(ifname, mac):\n s_ioctl(SIOCSIFHWADDR, ifreq_ifr_hwaddr(ifname, mac))", "def set_rx_mac(self, index, mac):\n macaddress = map(myint, mac.split(':'))\n (status, null) = self.__device.setRxMAC(index, macaddress)\n self.__device.decode_error_status(status, cmd='setRxMAC()', print_on_error=True)", "def set_mac_addresses(self, value: MacAddresses) -> None:\n if value is None:\n raise ValueError('Invalid MAC addresses')\n self._mac_addresses = value", "def change_mac(interface, new_mac):\n print(f\"[+] Changing MAC address for {interface} to {new_mac}\")\n\n try:\n subprocess.call([\"ip\", \"link\", \"set\", interface, \"down\"])\n subprocess.call([\"ip\", \"link\", \"set\", interface, \"address\", new_mac])\n subprocess.call([\"ip\", \"link\", \"set\", interface, \"up\"])\n except Exception as e:\n print(e)\n return -1", "def ue_mac(self, ue_mac):\n\n self._ue_mac = ue_mac", "def set_mac_address_alt(self, mac_address=None):\n self._mac_address_alt = mac_address if mac_address else \\\n generate_mac_address()", "def vclMacBasedVlan_setPort(self,mac='00:00:00:00:00:01',member='\\x80'):\n index = '6.' + mac2index(mac)\n pMember = netsnmp.Varbind(self.tnVclMacBasedPortMember[0],index,member,getType(type=self.tnVclMacBasedPortMember[1]))\n vars = netsnmp.VarList(pMember)\n res = self.sess.set(vars)\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the balance transmit policy for the bonding device.
def set_balance_policy_for_bonding_device(self, bond_port, policy):
    self.dut.send_expect("set bonding balance_xmit_policy %d %s" % (bond_port, policy), "testpmd> ")
    new_policy = self.get_bond_balance_policy(bond_port)
    policy = "BALANCE_XMIT_POLICY_LAYER" + policy.lstrip('l')
    self.verify(new_policy == policy, "Set bonding balance policy failed")
[ "def set_balance(self, zone: int, balance: int):\n raise NotImplemented()", "def SetTrafficShaper(self, name, per_policy, priority, guaranteed_bandwidth, maximum_bandwidth, diffserv='disable',\n diffservcode='000000'):\n payload = {'json':\n {\n 'name': name,\n 'per-policy': per_policy,\n 'priority': priority,\n 'guaranteed-bandwidth': int(guaranteed_bandwidth),\n 'maximum_bandwidth': int(maximum_bandwidth),\n 'diffserv': diffserv,\n 'diffservcode': diffservcode\n }\n }\n return self.ApiSet('cmdb/firewall.shaper/traffic-shaper/' + name + '/', payload)", "def set_balance(self, value):\n self.balance = value # updates player balance after each game", "def set_limits_software_approach_policy(self, policy):\n policy_ = self.convert_to_enum(policy, MOT_LimitsSoftwareApproachPolicy)\n self.sdk.SCC_SetLimitsSoftwareApproachPolicy(self._serial, policy_)", "def setThrottle(self, throttle):\n \n self._throttle = float(throttle) \n absThrottle = abs(self._throttle)\n \n #Fordwards or backwards movement\n #TODO: 20181114 DPM: This is not required to do if the throttle sign was not changed\n if self._throttle >= 0.0:\n SysfsWriter.writeOnce(\"0\", \"/sys/class/gpio/gpio{0}/value\".format(self._gpioId))\n else:\n SysfsWriter.writeOnce(\"1\", \"/sys/class/gpio/gpio{0}/value\".format(self._gpioId))\n \n\n #Throttle\n if absThrottle > 0.0 and absThrottle <= Motor.MAX_THROTTLE: \n \n self._duty = int((self._rangeDuty * absThrottle) + self._minDuty)\n \n elif absThrottle == 0.0:\n self._setNeutralThrottle()\n \n else: # absThrottle > Motor.MAX_THROTTLE\n self._duty = int((self._rangeDuty * Motor.MAX_THROTTLE) + self._minDuty)\n self._throttle = Motor.MAX_THROTTLE if self._throttle > 0.0 else -Motor.MAX_THROTTLE\n\n self._sysfsWriter.write(str(self._duty))", "def pay_bet(self):\n self.wallet -= self.bet\n self.bet = 0", "def _set_transaction_safety_enforcement_policy(policy):\n assert policy in ('ts-enforce-none', 'ts-enforce-all',\n 'ts-enforce-all-except-user-lock'), policy\n _REQUEST_STATE.ts_enforcement_policy = policy", "def setThrottle(self, throttle):\n \n self._throttle = float(throttle) \n absThrottle = abs(self._throttle)\n \n if absThrottle > Motor.MAX_THROTTLE: \n self._throttle = Motor.MAX_THROTTLE if self._throttle >= 0.0 else -Motor.MAX_THROTTLE\n\n self._log(\"throttle: {0}\".format(self._throttle))", "def set_tenant_cdn_policy(self, cdn_type, policy, policy_value):\n payload = {\n \"cdnType\": cdn_type,\n \"policy\": policy,\n \"policyValue\": policy_value\n }\n qry = ServiceOperationQuery(self, \"SetTenantCdnPolicy\", None, payload)\n self.context.add_query(qry)\n return self", "def set_tx_power(self, tx_power):\r\n valid_tx_power_values = [-40, -20, -16, -12, -8, -4, 0, 3, 4]\r\n if tx_power not in valid_tx_power_values:\r\n raise ValueError(\"Invalid transmit power value {}. 
Must be one of: {}\".format(tx_power, valid_tx_power_values))\r\n self.ble_driver.ble_gap_tx_power_set(tx_power)", "def setProtectionDelay(self, delay):\n if delay >= 0.001 and delay < 0.6:\n self.instr.write(\"VOLT:PROT:DELAY %f\" % float(delay))\n \n else:\n ValueError(\"Value not in range\")", "def set_bandwidth_limit(self, value='BWFULL'):\n #CMD$=“BWL C1,ON”\n print debug_msg.TBD_MSG", "def set_policy(self, policy):\n self._policy = 'custom'\n self._P = policy", "def _set_balance(self):\n #Write your Function here to play with 'balance' parameter......\n pass", "def __set_baudrate(self, baud_rate):\n response = subprocess.call([\"sudo\", \"ip\", \"link\", \"set\", canSend.can_interface, \"down\"])\n if response != 0:\n print(\"Error: Cannot deactivate '{0}' interface\".format(canSend.can_interface))\n print(response)\n response = subprocess.call(\n [\"sudo\", \"ip\", \"link\", \"set\", canSend.can_interface, \"type\", \"can\", \"bitrate\", str(baud_rate)])\n if response != 0:\n print(\"Error: Cannot set {0} baudrate for interface '{1}'\".format(baud_rate, canSend.can_interface))\n print(response)\n response = subprocess.call([\"sudo\", \"ip\", \"link\", \"set\", canSend.can_interface, \"up\"])\n if response != 0:\n print(\"Error: Cannot eactivate '{0}' interface\".format(canSend.can_interface))\n print(response)\n self.__print_actual_baudrate()", "def set_nominal_capacity(self, capacity_ah=30):\n\n self.master_payloads['BattCap_Write'][4] = capacity_ah", "def setBet(self, bet):\n if self.balance == 0:\n #if balance is 0 can't place a bet\n print(\"You have no money\")\n return False\n if bet > self.balance:\n #Cannot bet more then you have\n print(\"Your bet is more then your balance\")\n return False\n if bet < 0:\n #Cannot bet negatively\n #Betting negatively comes with a 5 dollar penalty\n self.balance += bet\n print(\"Because you entered a negative amount you have been penalized 5 dollars\")\n return False\n self.bet = bet\n self.__deduct() #deduct bet from balance\n print(\"You have bet {}, balance is now {}\".format(self.bet, self.balance))\n return True", "def _set_transmit_power(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(9), is_leaf=True, yang_name=\"transmit-power\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"transmit_power must be of a type compatible with int8\"\"\",\n 'defined-type': \"int8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(9), is_leaf=True, yang_name=\"transmit-power\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)\"\"\",\n })\n\n self.__transmit_power = t\n if hasattr(self, '_set'):\n self._set()", "def set_encryption_policy(self, enc_policy=1):\n pe_settings = 
self._session.get_pe_settings()\n pe_settings.in_enc_policy = pe_settings.out_enc_policy = libtorrent.enc_policy(enc_policy)\n self._session.set_pe_settings(pe_settings)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send packets to the slaves and calculate the slaves' RX packets and the unbound port's TX packets.
def send_default_packet_to_slave(self, unbound_port, bond_port, pkt_count=100, **slaves):
    pkt_orig = {}
    pkt_now = {}
    temp_count = 0
    summary = 0

    # send to slave ports
    pkt_orig = self.get_all_stats(unbound_port, 'tx', bond_port, **slaves)
    for slave in slaves['active']:
        temp_count = self.send_packet(self.dut_ports[slave], False, FRAME_SIZE_64, pkt_count)
        summary += temp_count
    for slave in slaves['inactive']:
        self.send_packet(self.dut_ports[slave], False, FRAME_SIZE_64, pkt_count, True)
    pkt_now = self.get_all_stats(unbound_port, 'tx', bond_port, **slaves)

    for key in pkt_now:
        for num in [0, 1, 2]:
            pkt_now[key][num] -= pkt_orig[key][num]

    return pkt_now, summary
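A minimal usage sketch (hypothetical, not part of the suite): the per-port stats lists returned here carry the packet counter at index 0 and the byte counter at index 2, which is how the verify_* helpers elsewhere in this record consume them. The name check_active_backup_rx below is illustrative only; the logic mirrors verify_active_backup_rx from the negatives.

def check_active_backup_rx(self, unbound_port, bond_port, **slaves):
    # Illustrative only; assumes the same test-case context as the suite.
    pkt_count = 100
    pkt_now, summary = self.send_default_packet_to_slave(
        unbound_port, bond_port, pkt_count=pkt_count, **slaves)
    # pkt_now[port][0] is the packet-count delta for that port.
    self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),
                "bond port RX delta mismatch")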
[ "def test_broadcast_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def makePackets(self, runners, page, reps, timingOrder, sync=249):\n # Dictionary of devices to be run.\n runnerInfo = dict((runner.dev.devName, runner) for runner in runners)\n\n # Upload sequence data (pipelined).\n loadPkts = []\n for board in self.boardOrder:\n if board in runnerInfo:\n runner = runnerInfo[board]\n isMaster = len(loadPkts) == 0\n p = runner.loadPacket(page, isMaster)\n if p is not None:\n loadPkts.append(p)\n\n # Setup board state (not pipelined).\n # Build a list of (setupPacket, setupState).\n setupPkts = []\n for board in self.boardOrder:\n if board in runnerInfo:\n runner = runnerInfo[board]\n p = runner.setupPacket()\n if p is not None:\n setupPkts.append(p)\n # Run all boards (master last).\n # Set the first board which is both in the boardOrder and also in the\n # list of runners for this sequence as the master. Any subsequent boards\n # for which we have a runner are set to slave mode, while subsequent\n # unused boards are set to idle mode. 
For example:\n # All boards: 000000\n # runners: --XX-X\n # mode: msis (i: idle, m: master, s: slave) -DTS\n boards = [] # List of (<device object>, <register bytes to write>).\n for board, delay in zip(self.boardOrder, self.boardDelays):\n if board in runnerInfo:\n runner = runnerInfo[board]\n slave = len(boards) > 0\n regs = runner.runPacket(page, slave, delay, sync)\n boards.append((runner.dev, regs))\n elif len(boards):\n # This board is after the master, but will not itself run, so\n # we put it in idle mode.\n dev = self.fpgaServer.devices[board] # Look up device wrapper.\n if isinstance(dev, dac.DAC):\n regs = dev.regIdle(delay)\n boards.append((dev, regs))\n elif isinstance(dev, adc.ADC):\n # ADC boards always pass through signals, so no need for\n # Idle mode.\n pass\n boards = boards[1:] + boards[:1] # move master to the end.\n runPkts = self.makeRunPackets(boards)\n # Collect and read (or discard) timing results.\n seqTime = max(runner.seqTime for runner in runners)\n collectPkts = [runner.collectPacket(seqTime, self.ctx)\n for runner in runners]\n readPkts = [runner.readPacket(timingOrder) for runner in runners]\n\n return loadPkts, setupPkts, runPkts, collectPkts, readPkts", "def test_xor_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def makeRunPackets(self, data):\n\n wait = self.directEthernetServer.packet(context=self.ctx)\n run = self.directEthernetServer.packet(context=self.ctx)\n both = self.directEthernetServer.packet(context=self.ctx)\n if LOGGING_PACKET:\n wait = LoggingPacket(wait, name='run=wait')\n run = LoggingPacket(run, name='run=run')\n both = LoggingPacket(both, name='run=both')\n # Wait for triggers and discard them. The actual number of triggers to\n # wait for will be decide later. 
The 0 is a placeholder here.\n wait.wait_for_trigger(0, key='nTriggers')\n both.wait_for_trigger(0, key='nTriggers')\n # Run all boards.\n for dev, regs in data:\n bytes = regs.tostring()\n # We must switch to each board's destination MAC each time we write\n # data because our packets for the direct ethernet server is in the\n # main context of the board group, and therefore does not have a\n # specific destination MAC.\n run.destination_mac(dev.MAC).write(bytes)\n both.destination_mac(dev.MAC).write(bytes)\n return wait, run, both", "def test_broadcast_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def master_send_continue():\n for ii in get_slaves():\n mpi_comm.send(None, dest=ii, tag=tags.CONTINUE)\n logger.info('slave nodes - continue')", "def slave_transmission(slave_ip, time_of_flight, data):\n global max_delay\n if max_delay > time_of_flight:\n time.sleep(max_delay - time_of_flight)\n data_sock.sendto(data, (slave_ip, SEND_DATA_PORT))", "def transmit_packets(self, neighbors):\n assert isinstance(neighbors, list) or isinstance(neighbors, tuple), \\\n 'Node #%d: an invalid type of the given list of neighbors!' \\\n % self.__node_id\n neighbors_set = set(neighbors)\n for packet in self.__transmitted_packets:\n if packet[-1] is None:\n packet[-1] = \\\n self.__get_transmission_neighbors(packet[2], packet[0],\n neighbors)\n if packet[0] <= self.__time.simulation_time \\\n and packet[1] >= self.__time.simulation_time:\n assert packet[-1] is not None, \\\n 'Node #%d: the list of neighbors for packet %d is empty!' 
\\\n % (self.__node_id, packet[2])\n if packet[-1]:\n packet[-1] = list(set(packet[-1]) & neighbors_set)\n # print packet", "def test_round_robin_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_ROUND_ROBIN, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n self.verify_round_robin_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_round_robin_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def verify_broadcast_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Slave TX packet not correct in mode 3\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Slave TX packet not correct in mode 3\")\n self.verify(pkt_now[unbound_port][0] == pkt_count, \"Unbonded port RX packet not correct in mode 3\")\n self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),\n \"Bonded device TX packet not correct in mode 3\")", "def send_packet():", "def _two_level_comm(self, alive_nodes):\n # heads wait for all ordinary nodes, then transmit to BS\n for node in self.get_ordinary_nodes():\n node.transmit()\n for node in self.get_heads():\n node.transmit()", "def verify_round_robin_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 300\n pkt_now = {}\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n if slaves['active'].__len__() == 0:\n self.verify(pkt_now[bond_port][0] == 0, \"Bonding port should not have TX pkt in mode 0 when all slaves down\")\n else:\n self.verify(pkt_now[bond_port][0] == pkt_count, \"Bonding port has error TX pkt count in mode 0\")\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count / slaves['active'].__len__(), \"Active slave has error TX pkt count in mode 0\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Inactive slave has error TX pkt count in mode 0\")", "async def test_multi_send_and_recv(self, components): # type: ignore\n\n tasks = []\n\n for component in components:\n tasks.append(asyncio.ensure_future(\n component.send_and_receive(self._giraffe.get_ip(), self._echo_port)))\n\n await asyncio.gather(*tasks)", "def _write_data_to_arduino(self, retries=5):\n while self.running:\n\n # Check heartbeat.\n if self.last_ping+1 <= time.time():\n self.last_ping = time.time()\n self.last_ping_dt = datetime.now()\n# self.print('Queuing ping.')\n self.outgoing_queue.put(Packet(c.ID_PING))\n\n # Sending pending commands.\n if 
not self.outgoing_queue.empty():\n packet = self.outgoing_queue.get()\n\n ack_success = False\n for attempt in xrange(retries):\n self.print('Sending: %s, attempt %i, (%i packets remaining)' % (packet, attempt, self.outgoing_queue.qsize()))\n\n sent_time = time.time()\n self._write_packet(packet)\n t0 = time.time() - sent_time\n# self.print('Sent secs:', t0, ' self.write_time:', self.write_time)\n\n if not self.running:\n ack_success = True\n break\n elif packet.id in c.ACK_IDS:\n # Wait for acknowledgement.\n if self._wait_for_ack(packet.id, sent_time):\n ack_success = True\n break\n else:\n self.print('Timed out waiting for ack of packet %s, on attempt %i.' % (packet, attempt))\n self.ack_failure_count += 1\n else:\n # Don't wait for acknowledgement.\n break\n\n if packet.id in c.ACK_IDS:\n with self.ack_queue_lock:\n self.ack_queue[packet] = ack_success\n\n self.print('Write thread exited.')", "def runSendReceiveTest(self, pkt2send, src_port , pkt2recv, destination_ports):\n\n masked2recv = Mask(pkt2recv)\n masked2recv.set_do_not_care_scapy(scapy.Ether, \"dst\")\n masked2recv.set_do_not_care_scapy(scapy.Ether, \"src\")\n\n send_packet(self, src_port, pkt2send)\n (index, rcv_pkt, received) = self.verify_packet_any_port(masked2recv, destination_ports)\n\n self.tests_total += 1\n\n return received", "def deploy_slaves():\n # Time for our slaves\n _, master_ip = get_master_dns_ip()\n if master_ip:\n # Test and see if we can find existing slaves\n slave_list = get_slave_dns_list()\n if NO_OF_SLAVES - len(slave_list) > 0:\n print 'Found {0} existing slaves creating {1} new slaves'.format(len(slave_list),\n NO_OF_SLAVES - len(slave_list))\n create_slaves(NO_OF_SLAVES - len(slave_list))\n host_list = [slave.public_dns_name for slave in SLAVE_INSTANCES.itervalues()] + slave_list\n else:\n print 'No more slaves needed'\n host_list = slave_list\n\n execute(run_slave_tasks, hosts=host_list)\n else:\n print 'Setup a Master first'", "def sendCommandResponses(self):\n while self.commandResponses:\n commandResponse, con = self.commandResponses.popleft()\n try:\n logging.debug(\"Sending response: \" + commandResponse)\n con.sendall(commandResponse)\n except socket.error as e:\n logging.warning(str(e))", "def send(self, tick):\n # TODO: Create an empty list of packets that the host will send\n pkts = []\n \n \n\n # First, process retransmissions\n for i, unacked_pkt in enumerate(self.unacked):\n unacked_pkt = self.unacked[i]\n if tick >= unacked_pkt.timeout_tick:\n if self.verbose:\n print(\n \"@ \"\n + str(tick)\n + \" timeout for unacked_pkt \"\n + str(unacked_pkt.seq_num)\n + \" timeout duration was \"\n + str(unacked_pkt.timeout_duration)\n )\n # TODO: Retransmit any packet that has timed out\n # by doing the following in order\n #CHECKKK\n # (1) Creating a new packet\n #retx_pkt = Packet(tick , self.max_seq +1)\n retx_pkt = Packet(tick , unacked_pkt.seq_num)\n # (2) Incrementing num_retx (for debugging purposes)\n retx_pkt.num_retx +=1\n \n # (3) Append the packet to the list of packets created earlier\n pkts.append(retx_pkt)\n # (4) Backing off the timer\n self.timeout_calculator.exp_backoff()\n # (5) Updating timeout_tick and timeout_duration appropriately after backing off the timer\n #pls check wassup\n \n retx_pkt.timeout_duration = tick - unacked_pkt.timeout_tick #not sure at all\n retx_pkt.timeout_tick= tick + retx_pkt.timeout_duration\n\n if self.verbose:\n print(\n \"retx packet @ \"\n + str(tick)\n + \" with sequence number \"\n + str(retx_pkt.seq_num)\n )\n if self.verbose:\n print(\n \"@ 
\"\n + str(tick)\n + \" exp backoff for packet \"\n + str(unacked_pkt.seq_num)\n )\n self.unacked[i] = unacked_pkt\n\n assert len(self.unacked) <= self.window\n\n # Now fill up the window with new packets\n while len(self.unacked) < self.window:\n # TODO: Create new packets, set their retransmission timeout, and add them to the list\n #BIG CHECK\n pkt = Packet(tick , self.max_seq +1)\n pkt.timeout_tick = self.timeout_calculator.timeout + tick\n #pkt.timeout_duration = tick - pkt.timeout_tick #not sure at all\n pkts.append(pkt)\n\n #what to set their retransmission timeout as?\n # TODO: Remember to update self.max_seq and add the just sent packet to self.unacked\n self.max_seq = pkt.seq_num\n self.unacked.append(pkt)\n if self.verbose:\n print(\n \"sent packet @ \"\n + str(tick)\n + \" with sequence number \"\n + str(pkt.seq_num)\n )\n # window must be filled up at this point\n assert len(self.unacked) == self.window\n\n # TODO: return the list of packets that need to be transmitted on to\n # the network\n return pkts" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send packets to the unbound port and calculate the unbound port's RX packets and the slaves' TX packets.
def send_default_packet_to_unbound_port(self, unbound_port, bond_port, pkt_count=300, **slaves):
    pkt_orig = {}
    pkt_now = {}
    summary = 0

    # send to unbonded device
    pkt_orig = self.get_all_stats(unbound_port, 'rx', bond_port, **slaves)
    summary = self.send_packet(unbound_port, False, FRAME_SIZE_64, pkt_count)
    pkt_now = self.get_all_stats(unbound_port, 'rx', bond_port, **slaves)

    for key in pkt_now:
        for num in [0, 1, 2]:
            pkt_now[key][num] -= pkt_orig[key][num]

    return pkt_now, summary
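For contrast, a hedged sketch of the TX direction (the name check_round_robin_tx is made up; the checks mirror verify_round_robin_tx in the negatives): packets injected into the unbound port should reappear on the bonded device and be split evenly across the active slaves.

def check_round_robin_tx(self, unbound_port, bond_port, **slaves):
    # Illustrative only; assumes at least one active slave.
    pkt_count = 300
    pkt_now, summary = self.send_default_packet_to_unbound_port(
        unbound_port, bond_port, pkt_count=pkt_count, **slaves)
    self.verify(pkt_now[bond_port][0] == pkt_count,
                "bonded device TX delta mismatch")
    for slave in slaves['active']:
        self.verify(pkt_now[slave][0] == pkt_count // len(slaves['active']),
                    "active slave TX share mismatch")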
[ "def send_default_packet_to_slave(self, unbound_port, bond_port, pkt_count=100, **slaves):\n pkt_orig = {}\n pkt_now = {}\n temp_count = 0\n summary = 0\n\n # send to slave ports\n pkt_orig = self.get_all_stats(unbound_port, 'tx', bond_port, **slaves)\n for slave in slaves['active']:\n temp_count = self.send_packet(self.dut_ports[slave], False, FRAME_SIZE_64, pkt_count)\n summary += temp_count\n for slave in slaves['inactive']:\n self.send_packet(self.dut_ports[slave], False, FRAME_SIZE_64, pkt_count, True)\n pkt_now = self.get_all_stats(unbound_port, 'tx', bond_port, **slaves)\n\n for key in pkt_now:\n for num in [0, 1, 2]:\n pkt_now[key][num] -= pkt_orig[key][num]\n\n return pkt_now, summary", "def verify_broadcast_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Slave TX packet not correct in mode 3\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Slave TX packet not correct in mode 3\")\n self.verify(pkt_now[unbound_port][0] == pkt_count, \"Unbonded port RX packet not correct in mode 3\")\n self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),\n \"Bonded device TX packet not correct in mode 3\")", "def runSendReceiveTest(self, pkt2send, src_port , pkt2recv, destination_ports):\n\n masked2recv = Mask(pkt2recv)\n masked2recv.set_do_not_care_scapy(scapy.Ether, \"dst\")\n masked2recv.set_do_not_care_scapy(scapy.Ether, \"src\")\n\n send_packet(self, src_port, pkt2send)\n (index, rcv_pkt, received) = self.verify_packet_any_port(masked2recv, destination_ports)\n\n self.tests_total += 1\n\n return received", "def verify_round_robin_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 300\n pkt_now = {}\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n if slaves['active'].__len__() == 0:\n self.verify(pkt_now[bond_port][0] == 0, \"Bonding port should not have TX pkt in mode 0 when all slaves down\")\n else:\n self.verify(pkt_now[bond_port][0] == pkt_count, \"Bonding port has error TX pkt count in mode 0\")\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count / slaves['active'].__len__(), \"Active slave has error TX pkt count in mode 0\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Inactive slave has error TX pkt count in mode 0\")", "def send(self, tick):\n # Create an empty list of packets that the host will send\n packets = []\n\n # First, process retransmissions\n for i in range(0, len(self.unacked)):\n unacked_pkt = self.unacked[i]\n if tick >= self.unacked[i].timeout_tick:\n # Retransmit any packet that has timed out\n # by doing the following in order\n # (1) creating a new packet,\n packet = Packet(tick, unacked_pkt.seq_num)\n # (2) setting its retx attribute to True (just for debugging)\n packet.retx = True\n # (3) Append the packet to the list of packets created earlier\n packets.append(packet)\n # (4) Backing off the timer\n self.timeout_calculator.exp_backoff()\n # (5) Updating timeout_tick and timeout_duration appropriately after backing off the timer\n unacked_pkt.timeout_duration = self.timeout_calculator.timeout\n unacked_pkt.timeout_tick = tick + unacked_pkt.timeout_duration\n # (6) Updating num_retx\n unacked_pkt.num_retx += 1\n self.unacked[i] = unacked_pkt\n\n 
assert(len(self.unacked) <= self.window)\n\n # Now fill up the window with new packets\n while (len(self.unacked) < self.window):\n # Create new packets, set their retransmission timeout, and add them to the list\n packet = Packet(tick, self.max_seq+1)\n unacked_packet = UnackedPacket(packet.seq_num)\n unacked_packet.timeout_duration = self.timeout_calculator.timeout\n unacked_packet.timeout_tick = self.timeout_calculator.timeout + tick\n packets.append(packet)\n # Remember to update self.max_seq and add then just sent packet to self.unacked\n self.max_seq += 1\n self.unacked.append(unacked_packet)\n print(\"send packet @\", tick, \"with sequence number \", self.max_seq)\n # window must be filled up at this point\n assert(len(self.unacked) == self.window)\n\n # return the list of packets that need to be transmitted on to the network\n return packets", "def test_broadcast_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def send(self, tick):\n # TODO: Create an empty list of packets that the host will send\n pkts = []\n \n \n\n # First, process retransmissions\n for i, unacked_pkt in enumerate(self.unacked):\n unacked_pkt = self.unacked[i]\n if tick >= unacked_pkt.timeout_tick:\n if self.verbose:\n print(\n \"@ \"\n + str(tick)\n + \" timeout for unacked_pkt \"\n + str(unacked_pkt.seq_num)\n + \" timeout duration was \"\n + str(unacked_pkt.timeout_duration)\n )\n # TODO: Retransmit any packet that has timed out\n # by doing the following in order\n #CHECKKK\n # (1) Creating a new packet\n #retx_pkt = Packet(tick , self.max_seq +1)\n retx_pkt = Packet(tick , unacked_pkt.seq_num)\n # (2) Incrementing num_retx (for debugging purposes)\n retx_pkt.num_retx +=1\n \n # (3) Append the packet to the list of packets created earlier\n pkts.append(retx_pkt)\n # (4) Backing off the timer\n self.timeout_calculator.exp_backoff()\n # (5) Updating timeout_tick and timeout_duration appropriately after backing off the timer\n #pls check wassup\n \n retx_pkt.timeout_duration = tick - unacked_pkt.timeout_tick #not sure at all\n retx_pkt.timeout_tick= tick + retx_pkt.timeout_duration\n\n if self.verbose:\n print(\n \"retx packet @ \"\n + str(tick)\n + \" with sequence number \"\n + str(retx_pkt.seq_num)\n )\n if self.verbose:\n print(\n \"@ \"\n + str(tick)\n + \" exp backoff for packet \"\n + str(unacked_pkt.seq_num)\n )\n self.unacked[i] = unacked_pkt\n\n assert len(self.unacked) <= self.window\n\n # Now fill up the window with new packets\n while len(self.unacked) < self.window:\n # TODO: Create new packets, 
set their retransmission timeout, and add them to the list\n #BIG CHECK\n pkt = Packet(tick , self.max_seq +1)\n pkt.timeout_tick = self.timeout_calculator.timeout + tick\n #pkt.timeout_duration = tick - pkt.timeout_tick #not sure at all\n pkts.append(pkt)\n\n #what to set their retransmission timeout as?\n # TODO: Remember to update self.max_seq and add the just sent packet to self.unacked\n self.max_seq = pkt.seq_num\n self.unacked.append(pkt)\n if self.verbose:\n print(\n \"sent packet @ \"\n + str(tick)\n + \" with sequence number \"\n + str(pkt.seq_num)\n )\n # window must be filled up at this point\n assert len(self.unacked) == self.window\n\n # TODO: return the list of packets that need to be transmitted on to\n # the network\n return pkts", "def port_scan(self, message):\n for port in AVAILABLE_PORTS:\n # ...Except our own port\n if port != self.self_node.address[1]:\n self.udp_socket.sendto(message, (\"127.0.0.1\", port))", "def runUDP(self, sock):\n # just send entire message without check for completeness\n for block in self.blocks:\n self.packetsSent += 1\n if (self.noise < random.random()):\n # send message to receiver at IP, PORT\n sock.sendto(pickle.dumps(block), (self.recv_ip, self.recv_port))\n sock.sendto(pickle.dumps(None), (self.recv_ip, self.recv_port))", "def send_raw_packet(packet, port):", "def test_broadcast_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def makeRunPackets(self, data):\n\n wait = self.directEthernetServer.packet(context=self.ctx)\n run = self.directEthernetServer.packet(context=self.ctx)\n both = self.directEthernetServer.packet(context=self.ctx)\n if LOGGING_PACKET:\n wait = LoggingPacket(wait, name='run=wait')\n run = LoggingPacket(run, name='run=run')\n both = LoggingPacket(both, name='run=both')\n # Wait for triggers and discard them. The actual number of triggers to\n # wait for will be decide later. 
The 0 is a placeholder here.\n wait.wait_for_trigger(0, key='nTriggers')\n both.wait_for_trigger(0, key='nTriggers')\n # Run all boards.\n for dev, regs in data:\n bytes = regs.tostring()\n # We must switch to each board's destination MAC each time we write\n # data because our packets for the direct ethernet server is in the\n # main context of the board group, and therefore does not have a\n # specific destination MAC.\n run.destination_mac(dev.MAC).write(bytes)\n both.destination_mac(dev.MAC).write(bytes)\n return wait, run, both", "def verify_active_backup_rx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n slave_num = slaves['active'].__len__()\n if slave_num != 0:\n active_flag = 1\n else:\n active_flag = 0\n\n pkt_now, summary = self.send_default_packet_to_slave(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n self.verify(pkt_now[bond_port][0] == pkt_count * slave_num, \"Not correct RX pkt on bond port in mode 1\")\n self.verify(pkt_now[unbound_port][0] == pkt_count * active_flag, \"Not correct TX pkt on unbound port in mode 1\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Not correct RX pkt on inactive port in mode 1\")\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Not correct RX pkt on active port in mode 1\")", "def send_packet():", "def blockTxRxMediaPorts(txip,rxip):\n\n txport1 = txport2 = rxport1 = rxport2 = ''\n\n txports = commands.getoutput(\"sudo netstat -ap --numeric-ports | grep udp| grep %s|grep mgen | awk '{print $4}'| head -2\" %txip)\n\n output = txports.split('\\n')\n if output:\n\n output1 = output[0].split(':')\n if output1:\n txport1 = output1[1]\n \n output2 = output[1].split(':')\n if output2: \n txport2 = output2[1]\n\n rxports = commands.getoutput(\"sudo netstat -ap --numeric-ports | grep udp| grep %s|grep mgen | awk '{print $4}'| head -2\" %rxip)\n\n output = rxports.split('\\n')\n if output:\n output1 = output[0].split(':')\n if output1:\n rxport1 = output1[1]\n output2 = output[1].split(':')\n if output2: \n rxport2 = output2[1]\n \n if (txport1 == '' or txport2 == '' or rxport1 == '' or rxport2 == ''):\n \n raise AssertionError('txport or rxport not found from mgen')\n \n os.system('sudo iptables -A OUTPUT -p udp -s %s --sport %s -j DROP'%(txip,txport1))\n os.system('sudo iptables -A OUTPUT -p udp -s %s --sport %s -j DROP'%(txip,txport2))\n os.system('sudo iptables -A OUTPUT -p udp -s %s --sport %s -j DROP'%(rxip,rxport1))\n os.system('sudo iptables -A OUTPUT -p udp -s %s --sport %s -j DROP'%(rxip,rxport2))\n \n ports = [txport1,txport2,rxport1,rxport2]\n return ports", "def udpSender(self, badThingsQueue, stateQueue, pipe):\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n while True: \n try:\n nextCall = time.time()\n msg = self.sendBuffer.get()\n if msg != 0 and msg is not None and self.dawn_ip is not None: \n s.sendto(msg, (self.dawn_ip, UDPSendClass.SEND_PORT))\n nextCall += 1.0/self.socketHZ\n time.sleep(max(nextCall - time.time(), 0))\n except Exception as e:\n badThingsQueue.put(BadThing(sys.exc_info(), \n \"UDP sender thread has crashed with error: \" + str(e), \n event = BAD_EVENTS.UDP_SEND_ERROR, \n printStackTrace = True))", "def releaseTxRxMediaPorts(txip,rxip,ports):\n os.system('sudo iptables -D OUTPUT -p udp -s %s --sport %s -j DROP'%(txip,ports[0]))\n os.system('sudo iptables -D OUTPUT -p udp -s %s --sport %s -j DROP'%(txip,ports[1]))\n os.system('sudo iptables -D OUTPUT -p udp -s %s --sport %s -j 
DROP'%(rxip,ports[2]))\n os.system('sudo iptables -D OUTPUT -p udp -s %s --sport %s -j DROP'%(rxip,ports[3]))", "def send_packet (self, buffer_id, raw_data, out_port, in_port):\n\t\tmsg = of.ofp_packet_out()\n\t\tmsg.in_port = in_port\n\t\tif buffer_id != -1 and buffer_id is not None:\n\t\t\t# We got a buffer ID from the switch; use that\n\t\t\tmsg.buffer_id = buffer_id\n\t\telse:\n\t\t\t# No buffer ID from switch -- we got the raw data\n\t\t\tif raw_data is None:\n\t\t\t\t# No raw_data specified -- nothing to send!\n\t\t\t\treturn\n\t\t\tmsg.data = raw_data\n\n\t\t# Add an action to send to the specified port\n\t\taction = of.ofp_action_output(port = out_port)\n\t\tmsg.actions.append(action)\n\n\t\t# Send message to switch\n\t\tself.connection.send(msg)", "def sendNrbdData(s,date,time):\n global timeBinary\n global specBinary\n global nrbdBinary\n global resends\n data = nrbdBinary\n # figure out how to divide the data into packets if needed\n dataDivis = int(math.floor(len(data)/maxNrbdPacket))\n \n #For each packet\n for i in range(0,maxNrbdPacket):\n nrbdHeader = \"{%s,%s,210220000.000,nrbd,3.0,lab1,tst-00,600000,664,>h,%d,%d,1,1}\" % (date, time, narrowBandFFTSize, 2*narrowBandBandwidth)\n packetData = nrbdHeader\n packetData += data[i*dataDivis:(i+1)*dataDivis]\n packetData += DataStopKey\n\n #Assemble the headers\n serverHeader = \"{%s,%s,%d,%d,3.0,lab1,tst-00,%d,nrbd}\" % (date, time, 0, maxSpecPacket, len(packetData))\n\n response = \"\"\n dataResponse = \"\"\n \n # Send the server header and ensure it is recived\n # If more than 3 resends quit the connection\n while resends <= 3:\n # Send the server header within the confines of the server specs\n s.send(HeaderSend)\n s.send(serverHeader)\n sleep(2)\n\n nextData = False\n print(\"Sent: %s\" % HeaderSend+serverHeader)\n response += s.recv(bufferSize)\n print(\"Recieved: %s\" % response)\n \n if HeaderOK in response:\n s.send(packetData)\n sleep(2)\n while resends <=3:\n # Now moving on to the data\n print(\"Sent Data\")\n dataResponse += s.recv(bufferSize)\n print(\"Recieved: %s\" % dataResponse)\n if DataOK in dataResponse:\n nextData = True\n dataResponse = \"\"\n response = \"\"\n print \"Next Data Packet\"\n break\n elif dataResponse == DataNOK:\n retries += 1\n dataResponse = \"\"\n continue\n \n elif response == HeaderNOK:\n resends += 1\n response = \"\"\n dataResponse = \"\"\n continue\n\n if nextData:\n break\n if resends > 3:\n s.send(DataEnd)\n return 1\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform basic operations on bonded devices and slaves, such as adding or removing slaves, setting the primary slave, or setting the mode.
def verify_bound_basic_opt(self, mode_set):
    bond_port_0 = self.create_bonded_device(mode_set, SOCKET_0, True)
    self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[1])

    mode_value = self.get_bond_mode(bond_port_0)
    self.verify('%d' % mode_set in mode_value,
                "Setting bonding mode error")

    bond_port_1 = self.create_bonded_device(mode_set, SOCKET_0)
    self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[0])
    self.add_slave_to_bonding_device(bond_port_1, True, self.dut_ports[0])

    OTHER_MODE = mode_set + 1 if not mode_set else mode_set - 1
    self.set_mode_for_bonding_device(bond_port_0, OTHER_MODE)
    self.set_mode_for_bonding_device(bond_port_0, mode_set)

    self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[2])
    time.sleep(5)
    self.set_primary_for_bonding_device(bond_port_0, self.dut_ports[2])
    self.remove_slave_from_bonding_device(bond_port_0, False, self.dut_ports[2])
    primary_now = self.get_bond_primary(bond_port_0)
    self.verify(int(primary_now) == self.dut_ports[1],
                "Reset primary slave failed after removing primary slave")

    for bond_port in [bond_port_0, bond_port_1]:
        self.remove_all_slaves(bond_port)
    self.dut.send_expect("quit", "# ")
    self.launch_app()
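A hypothetical way to drive this check once per bonding mode (the MODE_* constants are referenced throughout this suite; the test_* names below are invented for illustration):

def test_basic_behav_round_robin(self):
    self.verify_bound_basic_opt(MODE_ROUND_ROBIN)

def test_basic_behav_active_backup(self):
    self.verify_bound_basic_opt(MODE_ACTIVE_BACKUP)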
[ "def setup_bd(client, conn_mgr):\n sess_hdl = conn_mgr.client_init()\n dev_tgt = DevTarget_t(0, hex_to_i16(0xffff))\n ifindices = [1, 2, 65]\n\n for ifindex in ifindices:\n action_spec = dc_set_bd_properties_action_spec_t(\n action_bd=0,\n action_vrf=0,\n action_rmac_group=0,\n action_bd_label=0,\n action_ipv4_unicast_enabled=True,\n action_ipv6_unicast_enabled=False,\n action_ipv4_multicast_enabled=False,\n action_ipv6_multicast_enabled=False,\n action_igmp_snooping_enabled=0,\n action_mld_snooping_enabled=0,\n action_ipv4_urpf_mode=0,\n action_ipv6_urpf_mode=0,\n action_stp_group=0,\n action_mrpf_group=0,\n action_ipv4_mcast_key_type=0,\n action_ipv4_mcast_key=0,\n action_ipv6_mcast_key_type=0,\n action_ipv6_mcast_key=0,\n action_stats_idx=0,\n action_learning_enabled=0)\n \n mbr_hdl = client.bd_action_profile_add_member_with_set_bd_properties(\n sess_hdl, dev_tgt,\n action_spec)\n match_spec = dc_port_vlan_mapping_match_spec_t(\n ingress_metadata_ifindex=ifindex,\n vlan_tag__0__valid=0,\n vlan_tag__0__vid=0,\n vlan_tag__1__valid=0,\n vlan_tag__1__vid=0)\n client.port_vlan_mapping_add_entry(\n sess_hdl, dev_tgt,\n match_spec, mbr_hdl)", "def configure_ovs():\n if config('plugin') in [OVS, OVS_ODL]:\n if not service_running('openvswitch-switch'):\n full_restart()\n # Get existing set of bridges and ports\n current_bridges_and_ports = get_bridges_and_ports_map()\n log(\"configure OVS: Current bridges and ports map: {}\"\n .format(\", \".join(\"{}: {}\".format(b, \",\".join(v))\n for b, v in current_bridges_and_ports.items())))\n\n add_bridge(INT_BRIDGE, brdata=generate_external_ids())\n add_bridge(EXT_BRIDGE, brdata=generate_external_ids())\n\n ext_port_ctx = ExternalPortContext()()\n portmaps = DataPortContext()()\n bridgemaps = parse_bridge_mappings(config('bridge-mappings'))\n\n # if we have portmaps, then we ignore its value and log an\n # error/warning to the unit's log.\n if config('data-port') and config('ext-port'):\n log(\"Both ext-port and data-port are set. ext-port is deprecated\"\n \" and is not used when data-port is set\", level=ERROR)\n\n # only use ext-port if data-port is not set\n if not portmaps and ext_port_ctx and ext_port_ctx['ext_port']:\n _port = ext_port_ctx['ext_port']\n add_bridge_port(EXT_BRIDGE, _port,\n ifdata=generate_external_ids(EXT_BRIDGE),\n portdata=generate_external_ids(EXT_BRIDGE))\n log(\"DEPRECATION: using ext-port to set the port {} on the \"\n \"EXT_BRIDGE ({}) is deprecated. Please use data-port instead.\"\n .format(_port, EXT_BRIDGE),\n level=WARNING)\n\n for br in bridgemaps.values():\n add_bridge(br, brdata=generate_external_ids())\n if not portmaps:\n continue\n\n for port, _br in portmaps.items():\n if _br == br:\n if not is_linuxbridge_interface(port):\n add_bridge_port(br, port, promisc=True,\n ifdata=generate_external_ids(br),\n portdata=generate_external_ids(br))\n else:\n # NOTE(lourot): this will raise on focal+ and/or if the\n # system has no `ifup`. 
See lp:1877594\n add_ovsbridge_linuxbridge(\n br, port, ifdata=generate_external_ids(br),\n portdata=generate_external_ids(br))\n\n target = config('ipfix-target')\n bridges = [INT_BRIDGE, EXT_BRIDGE]\n bridges.extend(bridgemaps.values())\n\n if target:\n for bridge in bridges:\n disable_ipfix(bridge)\n enable_ipfix(bridge, target)\n else:\n # NOTE: removing ipfix setting from a bridge is idempotent and\n # will pass regardless of the existence of the setting\n for bridge in bridges:\n disable_ipfix(bridge)\n\n new_bridges_and_ports = get_bridges_and_ports_map()\n log(\"configure OVS: Final bridges and ports map: {}\"\n .format(\", \".join(\"{}: {}\".format(b, \",\".join(v))\n for b, v in new_bridges_and_ports.items())),\n level=DEBUG)\n\n # Ensure this runs so that mtu is applied to data-port interfaces if\n # provided.\n service_restart('os-charm-phy-nic-mtu')", "def update_broadlink_remotes():\n log.info(\"Updating avaiable broadlink devices\")\n r = task.executor(requests.post, url = \"http://localhost:10981/discover\", data = {})\n devices = json.loads(r.text)\n data = get_registered_devices(os.path.join(BROADLINK_CONFIG_FOLDER, STORAGE_FILE)) \n updated_data = update_list(devices, data) \n update_input_select(updated_data, INPUT_SELECT_YAML_FILE, INPUT_SELECT_REMOTE) \n write_json_data(os.path.join(BROADLINK_CONFIG_FOLDER, STORAGE_FILE), updated_data)\n\n input_select.reload() #This is called here instead of the script to make sure this service terminates before reloading", "def toggle_all():\n master_status = toggle_device('Master')\n other_devices = [x for x in DEVICES if x != 'Master']\n for d in other_devices:\n if device_status(d) != master_status:\n toggle_device(d)", "def main():\n get_oc_bgp = \"\"\"\n<bgp xmlns=\"http://openconfig.net/yang/bgp\">\n <global>\n <state/>\n </global>\n</bgp>\n\"\"\" \n\n for device in DEVICES:\n with manager.connect(host=device, port=PORT, username=USER,\n password=PASS, hostkey_verify=False,\n device_params={'name': 'nexus'},\n look_for_keys=False, allow_agent=False) as m:\n \n # Add the loopback interface \n netconf_response = m.get(('subtree', get_oc_bgp))\n # Parse the XML response\n xml_data = netconf_response.data_ele\n asn = xml_data.find(\".//{http://openconfig.net/yang/bgp}as\").text\n\n router_id = xml_data.find(\".//{http://openconfig.net/yang/bgp}router-id\").text\n\n print(\"ASN number:{}, Router ID: {} for {} {}\".format(asn, router_id, DEVICE_NAMES[device], device))", "def multinic_bootstrap_booting(self):\n self.env.revert_snapshot(\"ready\")\n\n slave = self.env.nodes().slaves[0]\n mac_addresses = [interface.mac_address for interface in\n slave.interfaces.filter(network__name='internal')]\n try:\n for mac in mac_addresses:\n Ebtables.block_mac(mac)\n for mac in mac_addresses:\n Ebtables.restore_mac(mac)\n slave.destroy(verbose=False)\n self.env.nodes().admins[0].revert(\"ready\")\n nailgun_slave = self.env.bootstrap_nodes([slave])[0]\n assert_equal(mac.upper(), nailgun_slave['mac'].upper())\n Ebtables.block_mac(mac)\n finally:\n for mac in mac_addresses:\n Ebtables.restore_mac(mac)", "def run(self):\n turn_off_lights()\n\n nxt_cmd = \"/usr/sbin/rfkill unblock bluetooth\"\n rtncode, stdout, stderr = run_command(nxt_cmd)\n if rtncode != 0:\n log_message(\"'%s' command failed. rtncode=%d\" % (nxt_cmd, rtncode))\n time.sleep(1)\n\n nxt_cmd = \"/usr/bin/hciconfig hci0 up\"\n run_command(nxt_cmd)\n if rtncode != 0:\n log_message(\"'%s' command failed. 
rtncode=%d\" % (nxt_cmd, rtncode))\n time.sleep(3)\n\n nxt_cmd = \"/usr/bin/hciconfig hci0 piscan\"\n run_command(nxt_cmd)\n if rtncode != 0:\n log_message(\"'%s' command failed. rtncode=%d\" % (nxt_cmd, rtncode))\n time.sleep(2)\n\n nxt_cmd = \"/usr/bin/hciconfig hci0 sspmode 0\"\n run_command(nxt_cmd)\n if rtncode != 0:\n log_message(\"'%s' command failed. rtncode=%d\" % (nxt_cmd, rtncode))\n\n sk8flair_app_start()\n log_message(\"Started 'Sk8Flair' application and detached.\")\n\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n\n bus = dbus.SystemBus()\n\n if bus != None:\n agent = Sk8FlairAgent(bus, SK8FLAIR_AGENT_PATH)\n profile = Sk8FlairProfile(bus, SK8FLAIR_PROFILE_PATH)\n\n bluez_obj = bus.get_object(BLUEZ_BUS_NAME, BLUEZ_BUS_PATH)\n\n if bluez_obj != None:\n mainloop = GObject.MainLoop()\n\n try:\n profile_manager = dbus.Interface(bluez_obj, BLUEZ_INTERFACE_PROFILEMANAGER1)\n\n profile_manager.RegisterProfile(SK8FLAIR_PROFILE_PATH, self._profile_uuid, self._profile_args)\n log_message(\"RFCOMM serial profile registered.\")\n\n try:\n agent_manager = dbus.Interface(bluez_obj, BLUEZ_INTERFACE_AGENTMANAGER1)\n\n resp = agent_manager.RegisterAgent(SK8FLAIR_AGENT_PATH, SK8FLAIR_PAIRING_CAPABILITIES)\n log_message(\"Pairing agent registered. resp=%r\" % resp)\n\n resp = agent_manager.RequestDefaultAgent(SK8FLAIR_AGENT_PATH)\n log_message(\"Pairing agent set as default. resp=%r\" % resp)\n\n # This is where our thread enters our GObject dispatching loop\n log_message(\"Starting 'Skate Flair Service' DBUS main loop.\")\n mainloop.run()\n\n log_message(\"Main loop exited normally.\")\n\n except:\n err_msg = traceback.format_exc()\n log_message(err_msg)\n\n finally:\n agent_manager.UnregisterAgent(SK8FLAIR_AGENT_PATH)\n log_message(\"Pairing agent unregistered.\")\n\n finally:\n profile_manager.UnregisterProfile(SK8FLAIR_PROFILE_PATH)\n log_message(\"RFCOMM serial profile unregistered.\")\n\n else:\n log_message(\"Unable to open the BlueZ bus.\")\n\n else:\n log_message(\"Unable to open DBUS system bus.\")\n\n return", "def update_broadlink_on_toggle(mac_adress, input_bool):\n\n #Before running this service check the state of the input_boolean. If the state is off then do not run the service\n state = state.get(input_bool) \n if state == \"off\":\n log.debug(\"The device is off - The service will not try to update\") \n return \n\n ##Get recorded information in the json file\n json_data = read_json_data(os.path.join(BROADLINK_CONFIG_FOLDER, mac_adress.replace(':', '') + \".json\"))\n ip_address = json_data[\"ip\"]\n try: \n device = blk.hello(ip_address, timeout = 1)# Is this timeout enough? Since its in the local network it should be fine\n except blk.exceptions.NetworkTimeoutError: \n message = f\"Could not reach the IP address {ip_address}. Running discovery ...\" \n notify.persistent_notification(message = message, title = \"Broadlink\")\n broadlink_raceland.update_broadlink_remotes() #Update broadlink devices if there was a network error \n \n else: \n discovered_device_mac = format_mac(device.mac) #Note: pyscript does not support iterators\n if discovered_device_mac != mac_adress: #On the off chance the IP adress update makes one device have the IP address of another device (broadlink)\n message = f\"Ip address was updated {ip_address}. 
Running discovery ...\"\n notify.persistent_notification(message = message, title = \"Broadlink\")\n broadlink_raceland.update_broadlink_remotes() #Update broadlink devices if there was a network error ", "def bootloader():\n raise NotImplementedError(\"Contribute on github.com/alej0varas/pybolator\")", "def verify_bound_mac_opt(self, mode_set):\n mac_address_0_orig = self.get_port_mac(self.dut_ports[0])\n mac_address_1_orig = self.get_port_mac(self.dut_ports[1])\n mac_address_2_orig = self.get_port_mac(self.dut_ports[2])\n mac_address_3_orig = self.get_port_mac(self.dut_ports[3])\n\n bond_port = self.create_bonded_device(mode_set, SOCKET_1)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[1])\n\n mac_address_bond_orig = self.get_port_mac(bond_port)\n self.verify(mac_address_1_orig == mac_address_bond_orig,\n \"Bonded device MAC address not same with first slave MAC\")\n\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now == mac_address_2_now,\n \"NOT all slaves MAC address same with bonding device in mode %d\" % mode_set)\n else:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now != mac_address_2_now,\n \"All slaves should not be the same in mode %d\"\n % mode_set)\n\n new_mac = \"00:11:22:00:33:44\"\n self.set_mac_for_bonding_device(bond_port, new_mac)\n self.start_port(bond_port)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_now == mac_address_2_now == mac_address_bond_now == new_mac,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set == MODE_LACP:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now != new_mac and\n mac_address_2_now != new_mac and\n mac_address_1_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set in [MODE_ACTIVE_BACKUP, MODE_TLB_BALANCE]:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == new_mac and\n mac_address_bond_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n\n self.set_primary_for_bonding_device(bond_port, self.dut_ports[2], False)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac,\n \"Slave MAC changed when set primary slave\")\n\n mac_address_1_orig = mac_address_1_now\n self.remove_slave_from_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n self.verify(mac_address_2_now == mac_address_2_orig,\n \"MAC not back to original after removing the port\")\n\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == mac_address_1_orig,\n \"Bonding device or slave MAC changed after removing the primary slave\")\n\n self.remove_all_slaves(bond_port)\n self.dut.send_expect(\"quit\", \"# \")\n 
self.launch_app()", "def set_uplink(self, is_master=None, enable=None):\r\n if enable is not None and not enable:\r\n self.send('UUL0')\r\n\r\n if is_master is not None:\r\n self.send('UMS%d' % (0 if is_master else 1))\r\n\r\n if enable is not None and enable:\r\n self.send('UUL1')", "def ping_devices():", "def verify_bound_promisc_opt(self, mode_set):\n unbound_port = self.dut_ports[3]\n bond_port = self.create_bonded_device(mode_set, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port,\n False,\n self.dut_ports[0],\n self.dut_ports[1],\n self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (unbound_port, bond_port), \"testpmd> \")\n self.start_port(bond_port)\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n port_disabled_num = 0\n testpmd_all_ports = self.dut_ports\n testpmd_all_ports.append(bond_port)\n for port_id in testpmd_all_ports:\n value = self.get_detail_from_port_info(\"Promiscuous mode: \", \"enabled\", port_id)\n if not value:\n port_disabled_num += 1\n self.verify(port_disabled_num == 0,\n \"Not all slaves of bonded device turn promiscuous mode on by default.\")\n\n ether_ip = {}\n ether = {}\n ether['dest_mac'] = \"00:11:22:33:44:55\"\n ether_ip['ether'] = ether\n\n send_param = {}\n pkt_count = 1\n send_param['pkt_count'] = pkt_count\n pkt_info = [ether_ip, send_param]\n\n slaves = {}\n slaves['active'] = [self.dut_ports[0]]\n slaves['inactive'] = []\n\n pkt_now, summary = self.send_customized_packet_to_slave(unbound_port, bond_port, *pkt_info, **slaves)\n if mode_set == MODE_LACP:\n do_transmit = False\n pkt_size = 0\n if pkt_now[unbound_port][0]:\n do_transmit = True\n pkt_size = pkt_now[unbound_port][2] / pkt_now[unbound_port][0]\n self.verify(do_transmit and pkt_size != LACP_MESSAGE_SIZE,\n \"Data not received by slave or bonding device when promiscuous enabled\")\n else:\n self.verify(pkt_now[self.dut_ports[0]][0] == pkt_now[bond_port][0] and\n pkt_now[bond_port][0] == pkt_count,\n \"Data not received by slave or bonding device when promiscuous enabled\")\n\n self.dut.send_expect(\"set promisc %s off\" % bond_port, \"testpmd> \")\n port_disabled_num = 0\n testpmd_all_ports = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2], bond_port]\n for port_id in testpmd_all_ports:\n value = self.get_detail_from_port_info('Promiscuous mode: ', 'disabled', port_id)\n if value == 'disabled':\n port_disabled_num += 1\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(port_disabled_num == 4,\n \"Not all slaves of bonded device turn promiscuous mode off in mode %d.\" % mode_set)\n elif mode_set == MODE_LACP:\n self.verify(port_disabled_num == 1,\n \"Not only turn bound device promiscuous mode off in mode %d\" % mode_set)\n else:\n self.verify(port_disabled_num == 2,\n \"Not only the primary slave turn promiscous mode off in mode %d, \" % mode_set +\n \" when bonded device promiscous disabled.\")\n\n if mode_set != MODE_LACP:\n send_param['verify'] = True\n pkt_now, summary = self.send_customized_packet_to_slave(unbound_port, bond_port, *pkt_info, **slaves)\n if mode_set == MODE_LACP:\n do_transmit = False\n pkt_size = 0\n if pkt_now[unbound_port][0]:\n do_transmit = True\n pkt_size = pkt_now[unbound_port][2] / pkt_now[unbound_port][0]\n self.verify(not do_transmit or\n pkt_size == LACP_MESSAGE_SIZE,\n \"Data received by slave or bonding device when promiscuous disabled\")\n else:\n self.verify(pkt_now[self.dut_ports[0]][0] == 0 and\n pkt_now[bond_port][0] == 0,\n \"Data received by slave or bonding device when promiscuous 
disabled\")\n\n pkt_now, summary = self.send_default_packet_to_slave(self.dut_ports[3], bond_port, pkt_count, **slaves)\n if mode_set == MODE_LACP:\n do_transmit = False\n pkt_size = 0\n if pkt_now[unbound_port][0]:\n do_transmit = True\n pkt_size = pkt_now[unbound_port][2] / pkt_now[unbound_port][0]\n self.verify(not do_transmit or\n pkt_size != LACP_MESSAGE_SIZE,\n \"RX or TX packet number not correct when promiscuous disabled\")\n else:\n self.verify(pkt_now[self.dut_ports[0]][0] == pkt_now[bond_port][0] and\n pkt_now[self.dut_ports[3]][0] == pkt_now[bond_port][0] and\n pkt_now[bond_port][0] == pkt_count,\n \"RX or TX packet number not correct when promiscuous disabled\")\n\n self.remove_all_slaves(bond_port)\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def main():\n eapi_conn = pyeapi.connect_to(\"pynet-sw2\")\n\n # Argument parsing\n parser = argparse.ArgumentParser(\n description=\"Idempotent addition/removal of VLAN to Arista switch\"\n )\n parser.add_argument(\"vlan_id\", help=\"VLAN number to create or remove\", action=\"store\", type=int)\n parser.add_argument(\n \"--name\",\n help=\"Specify VLAN name\",\n action=\"store\",\n dest=\"vlan_name\",\n type=str\n )\n parser.add_argument(\"--remove\", help=\"Remove the given VLAN ID\", action=\"store_true\")\n\n cli_args = parser.parse_args()\n vlan_id = cli_args.vlan_id\n remove = cli_args.remove\n vlan_name = six.text_type(cli_args.vlan_name)\n\n # Check if VLAN already exists\n check_vlan = check_vlan_exists(eapi_conn, vlan_id)\n\n # check if action is remove or add\n if remove:\n if check_vlan:\n print(\"VLAN exists, removing it\")\n command_str = 'no vlan {}'.format(vlan_id)\n eapi_conn.config([command_str])\n else:\n print(\"VLAN does not exist, no action required\")\n else:\n if check_vlan:\n if vlan_name is not None and check_vlan != vlan_name:\n print(\"VLAN already exists, setting VLAN name\")\n configure_vlan(eapi_conn, vlan_id, vlan_name)\n else:\n print(\"VLAN already exists, no action required\")\n else:\n print(\"Adding VLAN including vlan_name (if present)\")\n configure_vlan(eapi_conn, vlan_id, vlan_name)", "def InitSlave(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def __init__(self, mb_info):\n self.microblaze = Arduino(mb_info, ARDUINO_RANGEFINDER_PROGRAM)", "def main():\n global s\n global toggled\n\n print (\"Please, put the Wiimote on discoverable mode (press 1+2)\")\n wiimote = cwiid.Wiimote()\n print (\"Wiimote detected\")\n\n s = _connect_port(PORT)\n if not s:\n sys.exit(1)\n\n print (\"Socket connected\")\n\n wiimote.led = cwiid.LED1_ON\n wiimote.enable(cwiid.FLAG_MESG_IFC)\n wm_cal = wiimote.get_acc_cal(cwiid.EXT_NONE)\n esc = 0\n\n tabOfExistentButtons.sort()\n tabOfExistentButtons.reverse()\n\n while not esc :\n wiimote.rpt_mode = cwiid.RPT_BTN\n time.sleep(0.05)\n wiimote.enable(cwiid.FLAG_NONBLOCK)\n msg = wiimote.get_mesg()\n wiimote.disable(cwiid.FLAG_NONBLOCK)\n\n if msg != None :\n if msg[0][0] == cwiid.MESG_BTN :\n button = msg[0][1]\n t = detect_button(button)\n for i in t:\n buttonPress(i)\n buttonPressAllTab(t)\n\n if button == cwiid.BTN_1 + cwiid.BTN_2 :\n esc = 1\n else :\n buttonPressAllTab(None)\n\n \"\"\"\n # This seems to be the part where we treat the accelerometers\n # Don't want to use it for the moment\n wiimote.rpt_mode = cwiid.RPT_ACC\n msg1 = wiimote.get_mesg()\n if msg1 != None :\n if msg1[0][0] == cwiid.MESG_ACC :\n acceleration(msg1[0][1],wm_cal)\n \"\"\"\n\n s.close()\n wiimote.led = 0\n wiimote.close()\n\n print 
(\"Wiimote connection and socket connection closed succefully\")\n print (\"Bye bye!\")", "def setup_platform(hass, config, add_devices, discovery_info=None):\r\n from . import pytuya\r\n\r\n devices = config.get(CONF_SWITCHES)\r\n\r\n switches = []\r\n pytuyadevice = pytuya.OutletDevice(config.get(CONF_DEVICE_ID), config.get(CONF_HOST), config.get(CONF_LOCAL_KEY))\r\n pytuyadevice.set_version(float(config.get(CONF_PROTOCOL_VERSION)))\r\n\r\n if len(devices) > 0:\r\n for object_id, device_config in devices.items():\r\n outlet_device = TuyaCache(pytuyadevice)\r\n switches.append(\r\n TuyaDevice(\r\n outlet_device,\r\n device_config.get(CONF_NAME),\r\n device_config.get(CONF_FRIENDLY_NAME, object_id),\r\n device_config.get(CONF_ICON),\r\n device_config.get(CONF_ID),\r\n device_config.get(CONF_CURRENT),\r\n device_config.get(CONF_CURRENT_CONSUMPTION),\r\n device_config.get(CONF_VOLTAGE)\r\n )\r\n )\r\n print('Setup localtuya subswitch [{}] with device ID [{}] '.format(device_config.get(CONF_FRIENDLY_NAME, object_id), device_config.get(CONF_ID)))\r\n _LOGGER.info(\"Setup localtuya subswitch %s with device ID %s \", device_config.get(CONF_FRIENDLY_NAME, object_id), config.get(CONF_ID) )\r\n else:\r\n outlet_device = TuyaCache(pytuyadevice)\r\n switches.append(\r\n TuyaDevice(\r\n outlet_device,\r\n config.get(CONF_NAME),\r\n config.get(CONF_FRIENDLY_NAME),\r\n config.get(CONF_ICON),\r\n config.get(CONF_ID),\r\n config.get(CONF_CURRENT),\r\n config.get(CONF_CURRENT_CONSUMPTION),\r\n config.get(CONF_VOLTAGE)\r\n )\r\n )\r\n print('Setup localtuya switch [{}] with device ID [{}] '.format(config.get(CONF_FRIENDLY_NAME), config.get(CONF_ID)))\r\n _LOGGER.info(\"Setup localtuya switch %s with device ID %s \", config.get(CONF_FRIENDLY_NAME), config.get(CONF_ID) )\r\n\r\n add_devices(switches)", "def toggle(module, curr_switch, toggle_ports, toggle_speed, port_speed, splitter_ports, quad_ports, task, msg):\n output = ''\n cli = pn_cli(module)\n clicopy = cli\n count = 0\n\n for speed in toggle_speed:\n if int(port_speed.strip('g'))/int(speed.strip('g')) >= 4:\n is_splittable = True\n else:\n is_splittable = False\n\n while (count <= 10):\n cli = clicopy\n cli += 'switch %s lldp-show format local-port ' % curr_switch\n cli += 'parsable-delim ,'\n out = run_command(module, cli, task, msg)\n if out:\n local_ports = out.split()\n break\n else:\n time.sleep(3)\n count += 1\n\n if not local_ports:\n module.fail_json(\n unreachable=False,\n failed=True,\n exception='',\n summary='Unable to discover topology',\n task='Fabric creation',\n msg='Fabric creation failed',\n changed=False\n )\n\n _undiscovered_ports = sorted(list(set(toggle_ports) - set(local_ports)),\n key=lambda x: int(x))\n non_splittable_ports = []\n undiscovered_ports = []\n\n for _port in _undiscovered_ports:\n if splitter_ports.get(_port, 0) == 1:\n undiscovered_ports.append(\"%s-%s\" % (_port, int(_port)+3))\n elif splitter_ports.get(_port, 0) == 0:\n undiscovered_ports.append(_port)\n else:\n # Skip intermediate splitter ports\n continue\n if not is_splittable:\n non_splittable_ports.append(_port)\n undiscovered_ports = \",\".join(undiscovered_ports)\n\n if not undiscovered_ports:\n continue\n\n cli = clicopy\n cli += 'switch %s port-config-modify port %s ' % (curr_switch, undiscovered_ports)\n cli += 'disable'\n run_command(module, cli, task, msg)\n\n if non_splittable_ports:\n non_splittable_ports = \",\".join(non_splittable_ports)\n cli = clicopy\n cli += 'switch %s port-config-modify ' % curr_switch\n cli += 'port %s ' % 
non_splittable_ports\n cli += 'speed %s enable' % speed\n run_command(module, cli, task, msg)\n else:\n cli = clicopy\n cli += 'switch %s port-config-modify ' % curr_switch\n cli += 'port %s ' % undiscovered_ports\n cli += 'speed %s enable' % speed\n run_command(module, cli, task, msg)\n\n time.sleep(10)\n\n # Revert undiscovered ports back to their original speed\n cli = clicopy\n cli += 'switch %s lldp-show format local-port ' % curr_switch\n cli += 'parsable-delim ,'\n local_ports = run_command(module, cli, task, msg)\n _undiscovered_ports = sorted(list(set(toggle_ports) - set(local_ports)),\n key=lambda x: int(x))\n disable_ports = []\n undiscovered_ports = []\n for _port in _undiscovered_ports:\n if _port in quad_ports:\n disable_ports.append(str(_port))\n # dont add to undiscovered ports\n elif splitter_ports.get(_port, 0) == 1:\n splitter_ports_range = set(map(str, (range(int(_port), int(_port)+4))))\n if not splitter_ports_range.intersection(set(local_ports)):\n disable_ports.append(\"%s-%s\" % (_port, int(_port)+3))\n undiscovered_ports.append(_port)\n elif splitter_ports.get(_port, 0) == 0:\n disable_ports.append(str(_port))\n undiscovered_ports.append(_port)\n else:\n # Skip intermediate splitter ports\n pass\n\n disable_ports = \",\".join(disable_ports)\n if disable_ports:\n cli = clicopy\n cli += 'switch %s port-config-modify port %s disable' % (curr_switch, disable_ports)\n run_command(module, cli, task, msg)\n\n undiscovered_ports = \",\".join(undiscovered_ports)\n if not undiscovered_ports:\n return 'Toggle completed successfully '\n\n cli = clicopy\n cli += 'switch %s port-config-modify ' % curr_switch\n cli += 'port %s ' % undiscovered_ports\n cli += 'speed %s enable' % port_speed\n run_command(module, cli, task, msg)\n output += 'Toggle completed successfully '\n\n return output" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a bonded device, add one slave, and verify that the bonded device's MAC behavior varies with the mode.
def verify_bound_mac_opt(self, mode_set): mac_address_0_orig = self.get_port_mac(self.dut_ports[0]) mac_address_1_orig = self.get_port_mac(self.dut_ports[1]) mac_address_2_orig = self.get_port_mac(self.dut_ports[2]) mac_address_3_orig = self.get_port_mac(self.dut_ports[3]) bond_port = self.create_bonded_device(mode_set, SOCKET_1) self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[1]) mac_address_bond_orig = self.get_port_mac(bond_port) self.verify(mac_address_1_orig == mac_address_bond_orig, "Bonded device MAC address not same with first slave MAC") self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[2]) mac_address_2_now = self.get_port_mac(self.dut_ports[2]) mac_address_bond_now = self.get_port_mac(bond_port) if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]: self.verify(mac_address_1_orig == mac_address_bond_now and mac_address_bond_now == mac_address_2_now, "NOT all slaves MAC address same with bonding device in mode %d" % mode_set) else: self.verify(mac_address_1_orig == mac_address_bond_now and mac_address_bond_now != mac_address_2_now, "All slaves should not be the same in mode %d" % mode_set) new_mac = "00:11:22:00:33:44" self.set_mac_for_bonding_device(bond_port, new_mac) self.start_port(bond_port) mac_address_1_now = self.get_port_mac(self.dut_ports[1]) mac_address_2_now = self.get_port_mac(self.dut_ports[2]) mac_address_bond_now = self.get_port_mac(bond_port) if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]: self.verify(mac_address_1_now == mac_address_2_now == mac_address_bond_now == new_mac, "Set mac failed for bonding device in mode %d" % mode_set) elif mode_set == MODE_LACP: self.verify(mac_address_bond_now == new_mac and mac_address_1_now != new_mac and mac_address_2_now != new_mac and mac_address_1_now != mac_address_2_now, "Set mac failed for bonding device in mode %d" % mode_set) elif mode_set in [MODE_ACTIVE_BACKUP, MODE_TLB_BALANCE]: self.verify(mac_address_bond_now == new_mac and mac_address_1_now == new_mac and mac_address_bond_now != mac_address_2_now, "Set mac failed for bonding device in mode %d" % mode_set) self.set_primary_for_bonding_device(bond_port, self.dut_ports[2], False) mac_address_1_now = self.get_port_mac(self.dut_ports[1]) mac_address_2_now = self.get_port_mac(self.dut_ports[2]) mac_address_bond_now = self.get_port_mac(bond_port) self.verify(mac_address_bond_now == new_mac, "Slave MAC changed when set primary slave") mac_address_1_orig = mac_address_1_now self.remove_slave_from_bonding_device(bond_port, False, self.dut_ports[2]) mac_address_2_now = self.get_port_mac(self.dut_ports[2]) self.verify(mac_address_2_now == mac_address_2_orig, "MAC not back to original after removing the port") mac_address_1_now = self.get_port_mac(self.dut_ports[1]) mac_address_bond_now = self.get_port_mac(bond_port) self.verify(mac_address_bond_now == new_mac and mac_address_1_now == mac_address_1_orig, "Bonding device or slave MAC changed after removing the primary slave") self.remove_all_slaves(bond_port) self.dut.send_expect("quit", "# ") self.launch_app()
[ "def create_bonded_device(self, mode=0, socket=0, verify_detail=False):\n out = self.dut.send_expect(\"create bonded device %d %d\" % (mode, socket), \"testpmd> \")\n self.verify(\"Created new bonded device\" in out,\n \"Create bonded device on mode [%d] socket [%d] failed\" % (mode, socket))\n bond_port = self.get_value_from_str(\"Created new bonded device net_bond_testpmd_[\\d] on \\(port \",\n \"\\d+\",\n out)\n bond_port = int(bond_port)\n\n if verify_detail:\n out = self.dut.send_expect(\"show bonding config %d\" % bond_port, \"testpmd> \")\n self.verify(\"Bonding mode: %d\" % mode in out,\n \"Bonding mode display error when create bonded device\")\n self.verify(\"Slaves: []\" in out,\n \"Slaves display error when create bonded device\")\n self.verify(\"Active Slaves: []\" in out,\n \"Active Slaves display error when create bonded device\")\n self.verify(\"Primary: []\" not in out,\n \"Primary display error when create bonded device\")\n\n out = self.dut.send_expect(\"show port info %d\" % bond_port, \"testpmd> \")\n self.verify(\"Connect to socket: %d\" % socket in out,\n \"Bonding port connect socket error\")\n self.verify(\"Link status: down\" in out,\n \"Bonding port default link status error\")\n self.verify(\"Link speed: 0 Mbps\" in out,\n \"Bonding port default link speed error\")\n\n return bond_port", "def setup_bd(client, conn_mgr):\n sess_hdl = conn_mgr.client_init()\n dev_tgt = DevTarget_t(0, hex_to_i16(0xffff))\n ifindices = [1, 2, 65]\n\n for ifindex in ifindices:\n action_spec = dc_set_bd_properties_action_spec_t(\n action_bd=0,\n action_vrf=0,\n action_rmac_group=0,\n action_bd_label=0,\n action_ipv4_unicast_enabled=True,\n action_ipv6_unicast_enabled=False,\n action_ipv4_multicast_enabled=False,\n action_ipv6_multicast_enabled=False,\n action_igmp_snooping_enabled=0,\n action_mld_snooping_enabled=0,\n action_ipv4_urpf_mode=0,\n action_ipv6_urpf_mode=0,\n action_stp_group=0,\n action_mrpf_group=0,\n action_ipv4_mcast_key_type=0,\n action_ipv4_mcast_key=0,\n action_ipv6_mcast_key_type=0,\n action_ipv6_mcast_key=0,\n action_stats_idx=0,\n action_learning_enabled=0)\n \n mbr_hdl = client.bd_action_profile_add_member_with_set_bd_properties(\n sess_hdl, dev_tgt,\n action_spec)\n match_spec = dc_port_vlan_mapping_match_spec_t(\n ingress_metadata_ifindex=ifindex,\n vlan_tag__0__valid=0,\n vlan_tag__0__vid=0,\n vlan_tag__1__valid=0,\n vlan_tag__1__vid=0)\n client.port_vlan_mapping_add_entry(\n sess_hdl, dev_tgt,\n match_spec, mbr_hdl)", "def _make_ble_connection(self):\n if self.device == None:\n adapter = pygatt.backends.GATTToolBackend()\n nuki_ble_connection_ready = False\n\n while nuki_ble_connection_ready == False:\n print(\"Starting BLE adapter...\")\n adapter.start()\n print(\"Init Nuki BLE connection...\")\n try :\n self.device = adapter.connect(self.mac_address)\n nuki_ble_connection_ready = True\n except:\n print(\"Unable to connect, retrying...\")\n\n print(\"Nuki BLE connection established\")", "def create_bond_on_vm(vm_name, vm_resource, vnics, mode=1, proto=\"auto\"):\n bond = \"bond1\"\n remove_nm_controlled_cmd = (\n \"sed -i /NM_CONTROLLED/d /etc/sysconfig/network-scripts/ifcfg-{\"\n \"interface}\"\n )\n active_interface = vm_resource.network.get_info().get(\"interface\")\n assert not vm_resource.run_command(\n command=shlex.split(remove_nm_controlled_cmd.format(\n interface=active_interface)\n )\n )[0]\n assert not vm_resource.run_command(\n command=shlex.split(\"nmcli connection reload\")\n )[0]\n\n secondary_interface = \"System\\ 
{active_interface}\".format(\n active_interface=active_interface\n )\n primary_interface = hl_networks.get_vm_interface_by_vnic(\n vm=vm_name, vm_resource=vm_resource, vnic=vnics[0]\n )\n\n # Create connection in NM for the new interface\n nmcli_add_con = [\n \"nmcli connection add type ethernet con-name {primary_interface_1} \"\n \"ifname {primary_interface_2}\".format(\n primary_interface_1=primary_interface,\n primary_interface_2=primary_interface\n ),\n \"nmcli connection modify id {primary_interface} ipv4.method disabled\"\n \" ipv6.method ignore\".format(primary_interface=primary_interface),\n ]\n assert not all(\n [\n vm_resource.run_command(\n command=shlex.split(cmd))[0] for cmd in\n nmcli_add_con\n ]\n )\n\n # Create BOND\n create_bond_cmds = [\n \"nmcli connection add type bond con-name {bond} ifname \"\n \"bond1 mode {mode} {primary}\".format(\n bond=bond, mode=mode, primary=\"primary {primary_interface}\".format(\n primary_interface=primary_interface\n ) if mode == 1 else \"\"\n ),\n \"nmcli connection modify id {bond} ipv4.method {proto} \"\n \"ipv6.method ignore\".format(bond=bond, proto=proto)\n ]\n assert not all(\n [\n vm_resource.run_command(\n command=shlex.split(cmd))[0] for cmd in\n create_bond_cmds\n ]\n )\n\n # Add the slaves to the BOND\n for inter in primary_interface, secondary_interface:\n nmcli_add_slave = (\n \"nmcli connection modify id {inter} connection.slave-type \"\n \"bond connection.master {bond} connection.autoconnect \"\n \"yes\".format(bond=bond, inter=inter)\n )\n assert not vm_resource.run_command(\n command=shlex.split(nmcli_add_slave)\n )[0]\n\n # Deactivate all connection and activate again to get the new configuration\n nmcli_up_cmd = (\n \"nmcli connection down {primary_interface_1};\"\n \"nmcli connection down {secondary_interface_1};\"\n \"nmcli connection down {bond_1};\"\n \"nmcli connection up {bond_2};\"\n \"nmcli connection up {primary_interface_2};\"\n \"nmcli connection up {secondary_interface_2}\"\n ).format(\n primary_interface_1=primary_interface,\n secondary_interface_1=secondary_interface,\n bond_1=bond,\n bond_2=bond,\n primary_interface_2=primary_interface,\n secondary_interface_2=secondary_interface\n )\n try:\n vm_resource.run_command(\n command=shlex.split(\n nmcli_up_cmd\n ), tcp_timeout=10, io_timeout=10\n )\n except socket.timeout:\n pass", "def verify_bound_basic_opt(self, mode_set):\n bond_port_0 = self.create_bonded_device(mode_set, SOCKET_0, True)\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[1])\n\n mode_value = self.get_bond_mode(bond_port_0)\n self.verify('%d' % mode_set in mode_value,\n \"Setting bonding mode error\")\n\n bond_port_1 = self.create_bonded_device(mode_set, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[0])\n self.add_slave_to_bonding_device(bond_port_1, True, self.dut_ports[0])\n\n OTHER_MODE = mode_set + 1 if not mode_set else mode_set - 1\n self.set_mode_for_bonding_device(bond_port_0, OTHER_MODE)\n self.set_mode_for_bonding_device(bond_port_0, mode_set)\n\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[2])\n time.sleep(5)\n self.set_primary_for_bonding_device(bond_port_0, self.dut_ports[2])\n\n self.remove_slave_from_bonding_device(bond_port_0, False, self.dut_ports[2])\n primary_now = self.get_bond_primary(bond_port_0)\n self.verify(int(primary_now) == self.dut_ports[1],\n \"Reset primary slave failed after removing primary slave\")\n\n for bond_port in [bond_port_0, bond_port_1]:\n 
self.remove_all_slaves(bond_port)\n\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def make_discoverable(self, duration=30):\n self.bt_device = dbus.Interface(self.bus.get_object(\"org.bluez\", \"/org/bluez/hci0\"),\n \"org.freedesktop.DBus.Properties\")\n # Check if the device is already in discoverable mode and if not then set a short discoverable period\n self.discoverable_status = self.bt_device.Get(\"org.bluez.Adapter1\", \"Discoverable\")\n if self.discoverable_status == 0:\n \"\"\"\n Agents manage the bt pairing process. Registering the NoInputNoOutput agent means no authentication from \n the RPi is required to pair with it.\n \"\"\"\n print(\"Placing the RPi into discoverable mode and turn pairing on\")\n print(f\"Discoverable for {duration} seconds only\")\n\n\n # Setup discoverability\n self.bt_device.Set(\"org.bluez.Adapter1\", \"DiscoverableTimeout\", dbus.UInt32(duration))\n self.bt_device.Set(\"org.bluez.Adapter1\", \"Discoverable\", True)\n self.bt_device.Set(\"org.bluez.Adapter1\", \"PairableTimeout\", dbus.UInt32(duration))\n self.bt_device.Set(\"org.bluez.Adapter1\", \"Pairable\", True)", "def add_device_service(call: ServiceCall) -> None:\n gateway: XiaomiGateway = call.data[ATTR_GW_MAC]\n gateway.write_to_hub(gateway.sid, join_permission=\"yes\")\n persistent_notification.async_create(\n hass,\n (\n \"Join permission enabled for 30 seconds! \"\n \"Please press the pairing button of the new device once.\"\n ),\n title=\"Xiaomi Aqara Gateway\",\n )", "def multiroom_add(self, slave_ip: str) -> str:\n self._logger.info(\"Slaving '\"+str(slave_ip)+\"' to this device...\")\n info = self._device_info()\n secure = info.get('securemode')\n args = [info.get('ssid'), info.get('WifiChannel'), info.get('auth') if secure else \"OPEN\",\n info.get('encry') if secure else \"\", info.get('psk') if secure else \"\"]\n self._logger.debug(\"Opening client connection to slave device '\"+str(slave_ip)+\"'...\")\n slave = linkplayctl.Client(slave_ip)\n return slave.multiroom_master(*args)", "def create_dhcp_ovs_bridge():\n http_controller = config_dic['http_threads'][threading.current_thread().name]\n dhcp_controller = http_controller.ovim.get_dhcp_controller()\n\n dhcp_controller.create_ovs_bridge()", "def add_master_mac(self, device_index, mac):\n device_index = int(device_index, 0)\n macaddress = map(myint, mac.split(':'))\n (status, null) = self.__device.add_master_mac(device_index, macaddress)\n self.__device.decode_error_status(status, cmd='addMasterMAC()', print_on_error=True)", "def new_device(mac, ip, name):\n return Device(mac, ip, name)", "def vpp_create_bond_interface(\n node, mode, load_balance=None, mac=None, gso=False):\n cmd = u\"bond_create2\"\n args = dict(\n id=int(Constants.BITWISE_NON_ZERO),\n use_custom_mac=bool(mac is not None),\n mac_address=L2Util.mac_to_bin(mac) if mac else None,\n mode=getattr(\n LinkBondMode,\n f\"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}\"\n ).value,\n lb=0 if load_balance is None else getattr(\n LinkBondLoadBalanceAlgo,\n f\"BOND_API_LB_ALGO_{load_balance.upper()}\"\n ).value,\n numa_only=False,\n enable_gso=gso\n )\n err_msg = f\"Failed to create bond interface on host {node[u'host']}\"\n with PapiSocketExecutor(node) as papi_exec:\n sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)\n\n InterfaceUtil.add_eth_interface(\n node, sw_if_index=sw_if_index, ifc_pfx=u\"eth_bond\"\n )\n if_key = Topology.get_interface_by_sw_index(node, sw_if_index)\n\n return if_key", "def create_device(device):\n return 
FoobotDevice(auth_header=self.auth_header,\n user_id=device['userId'],\n uuid=device['uuid'],\n name=device['name'],\n mac=device['mac'], base_url=self.BASE_URL)", "def create_new_mac(self):\n candidate = self.create_mac()\n while candidate in self.mac_adresses:\n candidate = self.create_mac()\n\n self.mac_adresses.append(candidate)\n return candidate", "def clonedev(self):\n try:\n try:\n self.device_hidrep\n except:\n self.showMessage(\"Claim the interfaces before trying to clone the device. We need some info\",color='red')\n return \"Cloning Failed\"\n try:\n self.devcfg.bmAttributes\n except:\n self.showMessage(\"Claim the interfaces before trying to clone the device. We need some info\",color='red')\n return \"Cloning Failed\"\n try:\n self.devcfg.bMaxPower\n except:\n self.showMessage(\"Claim the interfaces before trying to clone the device. We need some info\",color='red')\n return \"Cloning Failed\"\n cloner = open(\"clones/%s\" % self.SelectedDevice, 'w')\n print(\"setting up: %s\" % self.manufacturer)\n print(\"Creating backup of device\\n\")\n self.devJson = json.dumps({\"idVen\":'0x{:04X}'.format(self.device.idVendor),\\\n \"idProd\":'0x{:04X}'.format(self.device.idProduct),\\\n \"manufacturer\":self.manufacturer,\\\n \"bcdDev\":'0x{:04X}'.format(self.device.bcdDevice),\\\n \"bcdUSB\":'0x{:04X}'.format(self.device.bcdUSB),\\\n \"serial\":self.device.serial_number,\\\n \"bDevClass\":'0x{:02X}'.format(self.device.bDeviceClass),\\\n \"bDevSubClass\":'0x{:02X}'.format(self.device.bDeviceSubClass),\\\n \"protocol\":'0x{:02X}'.format(self.device.bDeviceProtocol),\\\n \"MaxPacketSize\":'0x{:02X}'.format(self.device.bMaxPacketSize0),\\\n \"hidreport\":','.join([i.decode('utf-8') for i in self.device_hidrep]),\\\n \"bmAttributes\":'0x{:02X}'.format(self.devcfg.bmAttributes),\\\n \"MaxPower\":'0x{:02X}'.format(self.devcfg.bMaxPower),\n \"product\":self.device.product})\n cloner.write(self.devJson)\n cloner.write('\\n++++++\\n')\n cloner.write(str(self.device)+\"\\n\\n\")\n print(\"- Done: Device settings copied to file.\\n\")\n cloner.close()\n except Exception as e:\n self.showMessage(\"Cannot clone the device!\\n\", color='red',blink='y')", "def test_setup_reserved_2(self):\n with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:\n # Create DeviceManager.\n self.conf.register_opt(\n cfg.BoolOpt('enable_isolated_metadata', default=False))\n self.conf.register_opt(\n cfg.BoolOpt('force_metadata', default=False))\n plugin = mock.Mock()\n device = mock.Mock()\n mock_IPDevice.return_value = device\n device.route.get_gateway.return_value = None\n mgr = dhcp.DeviceManager(self.conf, plugin)\n self.mock_load_interface_driver.assert_called_with(\n self.conf, get_networks_callback=plugin.get_networks)\n\n # Setup with a reserved DHCP port.\n network = FakeDualNetworkReserved2()\n network.project_id = 'Project A'\n reserved_port_1 = network.ports[-2]\n reserved_port_2 = network.ports[-1]\n\n def mock_update(port_id, dict):\n if port_id == reserved_port_1.id:\n return None\n\n port = reserved_port_2\n port.network_id = dict['port']['network_id']\n port.device_id = dict['port']['device_id']\n return port\n\n plugin.update_dhcp_port.side_effect = mock_update\n mgr.driver.get_device_name.return_value = 'ns-XXX'\n mgr.driver.use_gateway_ips = False\n self.mock_ip_lib.ensure_device_is_ready.return_value = True\n mgr.setup(network)\n plugin.update_dhcp_port.assert_called_with(reserved_port_2.id,\n mock.ANY)\n\n mgr.driver.init_l3.assert_called_with(\n 'ns-XXX', ['192.168.0.6/24', 
'fdca:3ba5:a17a:4ba3::2/64'],\n namespace='qdhcp-ns')", "def nb_configurator(filename, nb, new_nxos_config):\n try:\n nb.dcim.devices.create(\n name=new_nxos_config[\"hostname\"],\n device_type=1 if new_nxos_config[\"type\"] == \"N9K-C93180YC-FX\" else 1,\n device_role=1 if \"spine\" in new_nxos_config[\"hostname\"] else 2,\n site=1,\n )\n except pynetbox.core.query.RequestError: # if the device already exists, move on\n pass\n\n for k, v in new_nxos_config[\"interfaces\"].items():\n try:\n interface = nb.dcim.interfaces.get(name=k, device=new_nxos_config[\"hostname\"])\n interface.description = v[\"description\"]\n\n if v[\"vlan_id\"] and not nb.ipam.vlans.get(vid=v[\"vlan_id\"]):\n nb.ipam.vlans.create(vid=v[\"vlan_id\"], name=v[\"vlan_name\"], site=1)\n\n if v[\"vlan_id\"]:\n interface.mode = v[\"mode\"]\n nb_vlan = nb.ipam.vlans.get(vid=v[\"vlan_id\"])\n interface.untagged_vlan = nb_vlan.id\n\n if v[\"ipv4\"] and not nb.ipam.ip_addresses.get(address=v[\"ipv4\"]):\n nb.ipam.ip_addresses.create(address=v[\"ipv4\"], status=1, interface=interface.id)\n\n if k == \"mgmt0\" and v[\"ipv4\"]:\n device = nb.dcim.devices.get(name=new_nxos_config[\"hostname\"])\n ip = nb.ipam.ip_addresses.get(q=v[\"ipv4\"])\n device.primary_ip4 = ip.id\n device.save()\n\n interface.save()\n\n except pynetbox.core.query.RequestError as e:\n print(e.error)\n\n # delete following from dict, we want to handle this from netbox\n del new_nxos_config[\"interfaces\"]\n del new_nxos_config[\"type\"]\n del new_nxos_config[\"hostname\"]\n\n # rewrite the file with deleted variables\n with open(filename, \"w\") as file:\n yaml.dump(new_nxos_config, file)", "def create_testbed_device_instance(self, dev_name_info, hint):\n testbed_dev = None\n if hint == \"AP\":\n testbed_dev = TestBedAP(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"AP\"\n if hint == \"STA\":\n testbed_dev = TestBedSTA(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"STA\"\n if hint == \"DUT\":\n testbed_dev = DUT(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"DUT\"\n if hint == \"SNIFFER\":\n testbed_dev = Sniffer(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"SNIFFER\"\n if hint == \"PCENDPOINT\":\n testbed_dev = PCEndpoint(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"PCENDPOINT\"\n if hint == \"APCONFIG\":\n testbed_dev = APConfig(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"APCONFIG\"\n if hint == \"RADIUSSERVER\":\n testbed_dev = RadiusServer(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"RADIUSSERVER\"\n if hint == \"OSUSERVER\":\n testbed_dev = OSUServer(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"OSUSERVER\"\n if hint == \"ATTENUATOR\":\n testbed_dev = Attenuator(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"ATTENUATOR\"\n if hint == \"POWERSWITCH\":\n testbed_dev = PowerSwitch(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"POWERSWITCH\"\n if hint == \"WFAEMT\":\n testbed_dev = WFAEMT(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"WFAEMT\"\n return testbed_dev", "def test_a_register_device_for_loan_license(self):\n self.status.register(self.status.DEVICEID1, self.status.DEVICENAME1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set promiscuous mode on the bonded device and verify that the bonded device and all its slaves behave differently depending on the bonding mode.
def verify_bound_promisc_opt(self, mode_set): unbound_port = self.dut_ports[3] bond_port = self.create_bonded_device(mode_set, SOCKET_0) self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]) self.dut.send_expect("set portlist %d,%d" % (unbound_port, bond_port), "testpmd> ") self.start_port(bond_port) self.dut.send_expect("start", "testpmd> ") port_disabled_num = 0 testpmd_all_ports = self.dut_ports testpmd_all_ports.append(bond_port) for port_id in testpmd_all_ports: value = self.get_detail_from_port_info("Promiscuous mode: ", "enabled", port_id) if not value: port_disabled_num += 1 self.verify(port_disabled_num == 0, "Not all slaves of bonded device turn promiscuous mode on by default.") ether_ip = {} ether = {} ether['dest_mac'] = "00:11:22:33:44:55" ether_ip['ether'] = ether send_param = {} pkt_count = 1 send_param['pkt_count'] = pkt_count pkt_info = [ether_ip, send_param] slaves = {} slaves['active'] = [self.dut_ports[0]] slaves['inactive'] = [] pkt_now, summary = self.send_customized_packet_to_slave(unbound_port, bond_port, *pkt_info, **slaves) if mode_set == MODE_LACP: do_transmit = False pkt_size = 0 if pkt_now[unbound_port][0]: do_transmit = True pkt_size = pkt_now[unbound_port][2] / pkt_now[unbound_port][0] self.verify(do_transmit and pkt_size != LACP_MESSAGE_SIZE, "Data not received by slave or bonding device when promiscuous enabled") else: self.verify(pkt_now[self.dut_ports[0]][0] == pkt_now[bond_port][0] and pkt_now[bond_port][0] == pkt_count, "Data not received by slave or bonding device when promiscuous enabled") self.dut.send_expect("set promisc %s off" % bond_port, "testpmd> ") port_disabled_num = 0 testpmd_all_ports = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2], bond_port] for port_id in testpmd_all_ports: value = self.get_detail_from_port_info('Promiscuous mode: ', 'disabled', port_id) if value == 'disabled': port_disabled_num += 1 if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]: self.verify(port_disabled_num == 4, "Not all slaves of bonded device turn promiscuous mode off in mode %d." 
% mode_set) elif mode_set == MODE_LACP: self.verify(port_disabled_num == 1, "Not only turn bound device promiscuous mode off in mode %d" % mode_set) else: self.verify(port_disabled_num == 2, "Not only the primary slave turn promiscuous mode off in mode %d, " % mode_set + " when bonded device promiscuous disabled.") if mode_set != MODE_LACP: send_param['verify'] = True pkt_now, summary = self.send_customized_packet_to_slave(unbound_port, bond_port, *pkt_info, **slaves) if mode_set == MODE_LACP: do_transmit = False pkt_size = 0 if pkt_now[unbound_port][0]: do_transmit = True pkt_size = pkt_now[unbound_port][2] / pkt_now[unbound_port][0] self.verify(not do_transmit or pkt_size == LACP_MESSAGE_SIZE, "Data received by slave or bonding device when promiscuous disabled") else: self.verify(pkt_now[self.dut_ports[0]][0] == 0 and pkt_now[bond_port][0] == 0, "Data received by slave or bonding device when promiscuous disabled") pkt_now, summary = self.send_default_packet_to_slave(self.dut_ports[3], bond_port, pkt_count, **slaves) if mode_set == MODE_LACP: do_transmit = False pkt_size = 0 if pkt_now[unbound_port][0]: do_transmit = True pkt_size = pkt_now[unbound_port][2] / pkt_now[unbound_port][0] self.verify(not do_transmit or pkt_size != LACP_MESSAGE_SIZE, "RX or TX packet number not correct when promiscuous disabled") else: self.verify(pkt_now[self.dut_ports[0]][0] == pkt_now[bond_port][0] and pkt_now[self.dut_ports[3]][0] == pkt_now[bond_port][0] and pkt_now[bond_port][0] == pkt_count, "RX or TX packet number not correct when promiscuous disabled") self.remove_all_slaves(bond_port) self.dut.send_expect("quit", "# ") self.launch_app()
[ "def test_promiscuous_pass(self):\n if _debug: TestVLAN._debug(\"test_promiscuous_pass\")\n\n # three element network\n tnet = TNetwork(3)\n tnode1, tnode2, tnode3 = tnet.state_machines\n\n # reach into the network and enable promiscuous mode\n tnet.vlan.nodes[2].promiscuous = True\n\n # make a PDU from node 1 to node 2\n pdu = PDU(b'data', source=1, destination=2)\n\n # node 1 sends the pdu to node 2, node 3 also gets a copy\n tnode1.start_state.send(pdu).success()\n tnode2.start_state.receive(PDU, pduSource=1).success()\n tnode3.start_state.receive(PDU, pduDestination=2).success()\n\n # run the group\n tnet.run()", "def allow_promiscuous(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_promiscuous\")", "def set_mode_for_bonding_device(self, bond_port, mode):\n self.dut.send_expect(\"set bonding mode %d %d\" % (mode, bond_port), \"testpmd> \")\n mode_value = self.get_bond_mode(bond_port)\n self.verify(str(mode) in mode_value, \"Set bonding mode failed\")", "def verify_bound_basic_opt(self, mode_set):\n bond_port_0 = self.create_bonded_device(mode_set, SOCKET_0, True)\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[1])\n\n mode_value = self.get_bond_mode(bond_port_0)\n self.verify('%d' % mode_set in mode_value,\n \"Setting bonding mode error\")\n\n bond_port_1 = self.create_bonded_device(mode_set, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[0])\n self.add_slave_to_bonding_device(bond_port_1, True, self.dut_ports[0])\n\n OTHER_MODE = mode_set + 1 if not mode_set else mode_set - 1\n self.set_mode_for_bonding_device(bond_port_0, OTHER_MODE)\n self.set_mode_for_bonding_device(bond_port_0, mode_set)\n\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[2])\n time.sleep(5)\n self.set_primary_for_bonding_device(bond_port_0, self.dut_ports[2])\n\n self.remove_slave_from_bonding_device(bond_port_0, False, self.dut_ports[2])\n primary_now = self.get_bond_primary(bond_port_0)\n self.verify(int(primary_now) == self.dut_ports[1],\n \"Reset primary slave failed after removing primary slave\")\n\n for bond_port in [bond_port_0, bond_port_1]:\n self.remove_all_slaves(bond_port)\n\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def set_mode(mode):\n master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')\n master.wait_heartbeat()\n\n mode_id = master.mode_mapping()[mode]\n master.mav.set_mode_send(\n master.target_system,\n mavutil.mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED,\n mode_id)\n print(\"Mode \" + mode + \" successfully set.\")\n return True", "def verify_bound_mac_opt(self, mode_set):\n mac_address_0_orig = self.get_port_mac(self.dut_ports[0])\n mac_address_1_orig = self.get_port_mac(self.dut_ports[1])\n mac_address_2_orig = self.get_port_mac(self.dut_ports[2])\n mac_address_3_orig = self.get_port_mac(self.dut_ports[3])\n\n bond_port = self.create_bonded_device(mode_set, SOCKET_1)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[1])\n\n mac_address_bond_orig = self.get_port_mac(bond_port)\n self.verify(mac_address_1_orig == mac_address_bond_orig,\n \"Bonded device MAC address not same with first slave MAC\")\n\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now == 
mac_address_2_now,\n \"NOT all slaves MAC address same with bonding device in mode %d\" % mode_set)\n else:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now != mac_address_2_now,\n \"All slaves should not be the same in mode %d\"\n % mode_set)\n\n new_mac = \"00:11:22:00:33:44\"\n self.set_mac_for_bonding_device(bond_port, new_mac)\n self.start_port(bond_port)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_now == mac_address_2_now == mac_address_bond_now == new_mac,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set == MODE_LACP:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now != new_mac and\n mac_address_2_now != new_mac and\n mac_address_1_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set in [MODE_ACTIVE_BACKUP, MODE_TLB_BALANCE]:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == new_mac and\n mac_address_bond_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n\n self.set_primary_for_bonding_device(bond_port, self.dut_ports[2], False)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac,\n \"Slave MAC changed when set primary slave\")\n\n mac_address_1_orig = mac_address_1_now\n self.remove_slave_from_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n self.verify(mac_address_2_now == mac_address_2_orig,\n \"MAC not back to original after removing the port\")\n\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == mac_address_1_orig,\n \"Bonding device or slave MAC changed after removing the primary slave\")\n\n self.remove_all_slaves(bond_port)\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def set_wifi_wmm(self, mode):\n if mode in (\"on\", \"1\", 1):\n self._logger.info(\"Set wifi wmm to on\")\n mode = 1\n elif mode in (\"off\", \"0\", 0):\n self._logger.info(\"Set wifi wmm to off\")\n mode = 0\n else:\n raise Exception(-5,\n \"Parameter mode is not valid !\")\n for radio in self.WIFI_RADIOS:\n self._send_cmd(\"interface dot11radio \" + str(radio))\n if mode == 0:\n self._send_cmd(\"no dot11 qos mode wmm\")\n else:\n self._send_cmd(\"dot11 qos mode wmm\")\n self._send_cmd(\"exit\")", "def _enable_and_set_mode(self, interface, mode):\n # Enable switching\n url = self._construct_url(interface)\n payload = '<switchport></switchport>'\n self._make_request('POST', url, data=payload,\n acceptable_error_codes=(409,))\n\n # Set the interface mode\n if mode in ['access', 'trunk']:\n url = self._construct_url(interface, suffix='mode')\n payload = '<mode><vlan-mode>%s</vlan-mode></mode>' % mode\n self._make_request('PUT', url, data=payload)\n else:\n raise AssertionError('Invalid mode')", "def set_ovs_protocol(self):\n\t\tfor sw in setting.switches:\n\t\t\tcmd = \"sudo ovs-vsctl set bridge %s protocols=OpenFlow13\" % sw\n\t\t\tos.system(cmd)", "def set_wifi_voip(self, voip):\n if voip in (\"on\", \"1\", 1):\n 
self._logger.info(\"Set wifi voip to on\")\n voip = 1\n elif voip in (\"off\", \"0\", 0):\n self._logger.info(\"Set wifi voip to off\")\n voip = 0\n else:\n raise Exception(-5, \"Parameter voip is not valid !\")\n \n if voip != 0:\n self._send_cmd(\"class-map match-all _class_voip2\")\n self._send_cmd( \"match ip dscp default\")\n self._send_cmd( \"exit\")\n self._send_cmd(\"class-map match-all _class_voip0\")\n self._send_cmd( \"match ip dscp cs6\")\n self._send_cmd( \"exit\")\n self._send_cmd(\"class-map match-all _class_voip1\")\n self._send_cmd( \"match ip dscp cs7\")\n self._send_cmd( \"exit\")\n\n self._send_cmd(\"policy-map voip\")\n self._send_cmd( \"class _class_voip0\")\n self._send_cmd( \"set cos 6\")\n self._send_cmd( \"exit\")\n self._send_cmd( \"class _class_voip1\")\n self._send_cmd( \"set cos 7\")\n self._send_cmd( \"exit\")\n self._send_cmd( \"class _class_voip2\")\n self._send_cmd( \"set cos 6\")\n self._send_cmd( \"exit\")\n self._send_cmd( \"exit\")\n for radio in self.WIFI_RADIOS:\n self._send_cmd(\"interface dot11radio \" + str(radio))\n self._send_cmd(\" service-policy input voip\")\n self._send_cmd(\" service-policy output voip\")\n self._send_cmd(\" exit\")\n else:\n self._send_cmd(\"no policy-map voip\")\n self._send_cmd(\"no class-map match-all _class_voip2\")\n self._send_cmd(\"no class-map match-all _class_voip0\")\n self._send_cmd(\"no class-map match-all _class_voip1\")", "def _mode_set(self, thermostat_mode: ThermostatMode):\n if thermostat_mode in [ThermostatMode.FAN_ALWAYS_ON, ThermostatMode.FAN_AUTO]:\n self._groups[GRP_FAN_MODE].set_value(thermostat_mode)\n else:\n self._groups[GRP_SYS_MODE].set_value(thermostat_mode)", "def set_inquiry_mode(self, mode):\n old_filter = self._sock.getsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, 14)\n\n # Setup socket filter to receive only events related to the\n # write_inquiry_mode command\n flt = bluez.hci_filter_new()\n opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,\n bluez.OCF_WRITE_INQUIRY_MODE)\n bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)\n bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE);\n bluez.hci_filter_set_opcode(flt, opcode)\n self._sock.setsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, flt)\n\n # send the command!\n bluez.hci_send_cmd(self._sock, bluez.OGF_HOST_CTL,\n bluez.OCF_WRITE_INQUIRY_MODE, struct.pack(\"B\", mode))\n\n pkt = self._sock.recv(255)\n\n status = struct.unpack(\"xxxxxxB\", pkt)[0]\n\n # restore old filter\n self._sock.setsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, old_filter)\n if status != 0: return -1\n return 0", "def test_102_tune_nat_instance(self):\n instance = self.ctx.nat_instance\n address = instance.ip_address\n ssh = remote_client.RemoteClient(address,\n self.ssh_user,\n pkey=self.keypair.material)\n\n ssh.exec_command(\"sudo iptables -t nat -A POSTROUTING -s %s \"\n \"-o eth0 -j MASQUERADE\" % str(self.vpc_cidr))\n ssh.exec_command(\"sudo sysctl -w net.ipv4.ip_forward=1\")\n ssh.exec_command(\"echo $'auto eth1\\niface eth1 inet dhcp\\n' \"\n \"| sudo tee -a /etc/network/interfaces.d/eth1.cfg\")\n ssh.exec_command(\"sudo ifup eth1\")", "def test_mode_toggle(self, caplog, api_mock):\n self.mock_api.return_value = ({'code': 0}, 200)\n fan = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj)\n f = fan.auto_mode()\n assert f\n assert fan.mode == 'auto'\n f = fan.manual_mode()\n assert fan.mode == 'manual'\n assert f\n f = fan.sleep_mode()\n assert fan.mode == 'sleep'\n assert f", "def setConnectionMode(mode):\n # type: (int) -> None\n print(mode)", "def set_mode(self, mode):\n 
self._write_byte(BNO055_OPR_MODE_ADDR, mode & 0xFF)\n # Delay for 30 milliseconds (datasheet recommends 19ms, but a little more\n # can't hurt and the kernel is going to spend some unknown amount of time\n # too).\n time.sleep(0.03)", "def device_test(self):\n # Create a MPI packet object\n mpi_packet = MPI()\n mpi_packet.descriptor = MPI.MPI_BASE_CMD_DESCRIPTOR\n \n # Set to idle payload\n field_len = 0x02\n field_desc = 0x05\n mpi_packet.payload = [field_len, field_desc]\n\n # Payload length \n mpi_packet.payload_len = len(mpi_packet.payload)\n \n # Build imu ping command in bytes\n command = mpi_packet.build()\n \n # Send byte packet to microstrain imu \n self.ser.write(command)\n \n # Read output from the imu after sleeping for 2 ms\n sleep(0.002)\n reply = self.ser.read(16)\n \n if reply[7] == \"\\x00\":\n print \" Device Built-In Test (BIT) successful!\"\n print \" BIT Error Flags : \"\n print \" Byte 1 : \", '0x' + reply[10].encode('hex') \n print \" Byte 2 : \", '0x' + reply[11].encode('hex')\n print \" Byte 3 : \", '0x' + reply[12].encode('hex')\n print \" Byte 4 : \", '0x' + reply[13].encode('hex')\n else:\n print \" Command unsuccessful\"\n err = '0x' + reply[7].encode('hex')\n print \" Error Code : \", err\n print \" Error Message : \", MPI.MPI_ACK_NACK_ERROR[err]\n \n return", "def test_send_appctl_mode(self):\n msg = Mode(mode=\"a good mode\")\n pub = rospy.Publisher(\"/appctl/mode\", Mode, queue_size=3)\n rospy.init_node(ROS_NODE_NAME, anonymous=True)\n self.checker(pub, msg, \"a good mode\")", "def set_mode(self, val):\n # self.property_set(register_name, val)\n self.property_set(\"mode\", Sample(0, value=val, unit=\"dF\"))\n \n try:\n self.serial_send(\"A=1,Z=1,M=\" + str(self.modes[val.title()]) + \"\\x0D\")\n except:\n print \"error setting thermostat\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that the transmitted packets are all correct in round robin mode.
def verify_round_robin_tx(self, unbound_port, bond_port, **slaves): pkt_count = 300 pkt_now = {} pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves) if slaves['active'].__len__() == 0: self.verify(pkt_now[bond_port][0] == 0, "Bonding port should not have TX pkt in mode 0 when all slaves down") else: self.verify(pkt_now[bond_port][0] == pkt_count, "Bonding port has error TX pkt count in mode 0") for slave in slaves['active']: self.verify(pkt_now[slave][0] == pkt_count / slaves['active'].__len__(), "Active slave has error TX pkt count in mode 0") for slave in slaves['inactive']: self.verify(pkt_now[slave][0] == 0, "Inactive slave has error TX pkt count in mode 0")
[ "def mustRetransmit(self):\n if self.syn or self.fin or self.dlen:\n return True\n return False", "def check_for_packets_to_send(self):\n socket_id, network_packet_json_str = self.next_scheduled_network_packet(time.time())\n while socket_id:\n #_debug_print(\"Sending scheduled network packet: \" + str(socket_id) + \" - \" + network_packet_json_str)\n self._socket.send_multipart([socket_id, network_packet_json_str.encode('utf-8'), str(time.time()).encode('utf-8')])\n sent_time = time.time()\n _debug_print(\"NetworkPacket to \" + str(socket_id) + \"sent at: \" + str(sent_time))\n # Get next scheduled network Packet\n socket_id, network_packet_json_str = self.next_scheduled_network_packet(time.time())", "def test_rd_send_router_solicitation(self):\n\n count = 2\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n self.vapi.ip6nd_send_router_solicitation(self.pg1.sw_if_index, mrc=count)\n rx_list = self.pg1.get_capture(count, timeout=3)\n self.assertEqual(len(rx_list), count)\n for packet in rx_list:\n self.assertEqual(packet.haslayer(IPv6), 1)\n self.assertEqual(packet[IPv6].haslayer(ICMPv6ND_RS), 1)\n dst = ip6_normalize(packet[IPv6].dst)\n dst2 = ip6_normalize(\"ff02::2\")\n self.assert_equal(dst, dst2)\n src = ip6_normalize(packet[IPv6].src)\n src2 = ip6_normalize(self.pg1.local_ip6_ll)\n self.assert_equal(src, src2)\n self.assertTrue(bool(packet[ICMPv6ND_RS].haslayer(ICMPv6NDOptSrcLLAddr)))\n self.assert_equal(packet[ICMPv6NDOptSrcLLAddr].lladdr, self.pg1.local_mac)", "def test_promiscuous_pass(self):\n if _debug: TestVLAN._debug(\"test_promiscuous_pass\")\n\n # three element network\n tnet = TNetwork(3)\n tnode1, tnode2, tnode3 = tnet.state_machines\n\n # reach into the network and enable promiscuous mode\n tnet.vlan.nodes[2].promiscuous = True\n\n # make a PDU from node 1 to node 2\n pdu = PDU(b'data', source=1, destination=2)\n\n # node 1 sends the pdu to node 2, node 3 also gets a copy\n tnode1.start_state.send(pdu).success()\n tnode2.start_state.receive(PDU, pduSource=1).success()\n tnode3.start_state.receive(PDU, pduDestination=2).success()\n\n # run the group\n tnet.run()", "def _receive_check(self, length):\n data = self._receive(length)\n # Test checksum\n checksum = data[-1]\n s = sum(data[:-1]) % 256\n if s != checksum:\n raise ButtshockError(\"Checksum mismatch! 
0x%.02x != 0x%.02x\" % (s, checksum))\n return data[:-1]", "def checksum(self):\n\n if (sum(self.packet) % 2 == 0) and (self.control_bit == 1) or\\\n (sum(self.packet) % 2 == 1) and (self.control_bit == 0):\n return True\n else:\n return False", "def verify_packet(self, packet, context):\n pass", "def test_pass_through():\n (board_id, _) = complete_bamr_board_transfer_to_sut()\n (recv_board_id, _) = complete_board_transfer_from_sut()\n\n assert board_id == recv_board_id, \"Board ID mismatch\"", "def check_for_received_packet(self):\n return self._wrapper.check_for_received_packet()", "def verify_broadcast_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Slave TX packet not correct in mode 3\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Slave TX packet not correct in mode 3\")\n self.verify(pkt_now[unbound_port][0] == pkt_count, \"Unbonded port RX packet not correct in mode 3\")\n self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),\n \"Bonded device TX packet not correct in mode 3\")", "def valid(packet):\n if is_udp(packet) and is_without_data(packet):\n return True\n return False", "def check_collisions(self):\n if len(self.incoming) == 1: return # Single packet in Q\n\n pkt = self.incoming[-1]\n for p in self.incoming[:-1]:\n # TODO Can we break out fast if we determine a collision?\n # Maybe if we keep track of the strongest packet?\n if self.collision_type == 'capture' or self.collision_type == 'capture_nonorth':\n if self.frequency_collision(pkt, p) and self.timing_collision(pkt, p):\n if self.collision_type == 'capture':\n collided = self.power_collision_1(pkt, p)\n else:\n collided = self.power_collision_2(pkt, p)\n for c in collided:\n c.collided = True\n else:\n if self.frequency_collision(pkt, p) and self.sf_collision(pkt, p):\n pkt.collided = True\n p.collided = True", "def writable(self):\n return len(self.packet) > 0", "def _try_sending(self):\n if self._stop_on == 't':\n # Time based test will be stopped by the test class\n self._send_block()\n self._sending_handle = self._loop.call_soon(self._try_sending)\n\n elif self._stop_on == 'b':\n # Check the remaining block count\n if self._test._blocks_remaining:\n self._test._blocks_remaining -= 1\n self._send_block()\n self._sending_handle = self._loop.call_soon(self._try_sending)\n\n else:\n # No more blocks to send. 
Inform test and do not reschedule sending\n self._sending_handle = None\n self.done = True\n self._test.sendable_data_depleted()\n\n elif self._stop_on == 's':\n # Check the remaining bytes count\n\n if self._test._bytes_remaining > self._block_size:\n # Send the whole block, reduce size as normal\n self._test._bytes_remaining -= self._block_size\n self._send_block()\n self._sending_handle = self._loop.call_soon(self._try_sending)\n\n elif self._test._bytes_remaining > 0:\n # Sent the whole block, reduce to 0\n self._test._bytes_remaining = 0\n self._send_block()\n self._sending_handle = None\n self.done = True\n self._test.sendable_data_depleted()\n\n else:\n self._sending_handle = None\n self.done = True\n self._test.sendable_data_depleted()", "def is_packet_ready(state):\n return (state.id == StateId.READY) and (len(state.packet) > 0);", "def wait_for_packet(self):\n try:\n self.pexpectobj.expect_exact(\"bytes received from\")\n except pexpect.exceptions.TIMEOUT:\n self.__dump_text()\n assert False\n try:\n self.pexpectobj.expect_exact(\"\\r\\r\\n>\")\n except pexpect.exceptions.TIMEOUT:\n self.__dump_text()\n assert False\n return self.pexpectobj.before", "def verify_bound_mac_opt(self, mode_set):\n mac_address_0_orig = self.get_port_mac(self.dut_ports[0])\n mac_address_1_orig = self.get_port_mac(self.dut_ports[1])\n mac_address_2_orig = self.get_port_mac(self.dut_ports[2])\n mac_address_3_orig = self.get_port_mac(self.dut_ports[3])\n\n bond_port = self.create_bonded_device(mode_set, SOCKET_1)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[1])\n\n mac_address_bond_orig = self.get_port_mac(bond_port)\n self.verify(mac_address_1_orig == mac_address_bond_orig,\n \"Bonded device MAC address not same with first slave MAC\")\n\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now == mac_address_2_now,\n \"NOT all slaves MAC address same with bonding device in mode %d\" % mode_set)\n else:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now != mac_address_2_now,\n \"All slaves should not be the same in mode %d\"\n % mode_set)\n\n new_mac = \"00:11:22:00:33:44\"\n self.set_mac_for_bonding_device(bond_port, new_mac)\n self.start_port(bond_port)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_now == mac_address_2_now == mac_address_bond_now == new_mac,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set == MODE_LACP:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now != new_mac and\n mac_address_2_now != new_mac and\n mac_address_1_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set in [MODE_ACTIVE_BACKUP, MODE_TLB_BALANCE]:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == new_mac and\n mac_address_bond_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n\n self.set_primary_for_bonding_device(bond_port, self.dut_ports[2], False)\n mac_address_1_now = 
self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac,\n \"Slave MAC changed when set primary slave\")\n\n mac_address_1_orig = mac_address_1_now\n self.remove_slave_from_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n self.verify(mac_address_2_now == mac_address_2_orig,\n \"MAC not back to original after removing the port\")\n\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == mac_address_1_orig,\n \"Bonding device or slave MAC changed after removing the primary slave\")\n\n self.remove_all_slaves(bond_port)\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def runSendReceiveTest(self, pkt2send, src_port , pkt2recv, destination_ports):\n\n masked2recv = Mask(pkt2recv)\n masked2recv.set_do_not_care_scapy(scapy.Ether, \"dst\")\n masked2recv.set_do_not_care_scapy(scapy.Ether, \"src\")\n\n send_packet(self, src_port, pkt2send)\n (index, rcv_pkt, received) = self.verify_packet_any_port(masked2recv, destination_ports)\n\n self.tests_total += 1\n\n return received", "def packet_concludes_transfer(self, packet):\n\n pipe = self._pipe_identifier_for_packet(packet)\n\n # FIXME: short packet detection, here\n\n # If this is a setup token packet, it always ends the transfer.\n # (Setup packets always exist by themselves.)\n if packet.token is USBPacketID.SETUP:\n return True\n\n # If this is a control endpoint packet, apply special rules.\n try:\n first_packet = self.packets_captured[pipe][0]\n\n\n # Any direction switch on a control endpoint means we're ending a transfer.\n direction_switch = (packet.direction != first_packet.direction)\n if (packet.endpoint_number == 0) and direction_switch:\n return True\n except KeyError:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that packets are received and transmitted correctly in round robin mode when any one slave link of the bonding device is brought down.
def test_round_robin_one_slave_down(self): bond_port = self.create_bonded_device(MODE_ROUND_ROBIN, SOCKET_0) self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]) self.dut.send_expect("set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> ") self.start_all_ports() self.dut.send_expect("start", "testpmd> ") self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down") stat = self.tester.get_port_status(self.tester.get_local_port(self.dut_ports[0])) self.dut.send_expect("show bonding config %d" % bond_port, "testpmd> ") self.dut.send_expect("show port info all", "testpmd> ") try: slaves = {} slaves['active'] = [self.dut_ports[1], self.dut_ports[2]] slaves['inactive'] = [self.dut_ports[0]] self.verify_round_robin_rx(self.dut_ports[3], bond_port, **slaves) self.verify_round_robin_tx(self.dut_ports[3], bond_port, **slaves) finally: self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up")
[ "def test_round_robin_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_ROUND_ROBIN, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n self.verify_round_robin_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_round_robin_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def test_broadcast_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def test_broadcast_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def verify_round_robin_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 300\n pkt_now = {}\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n if slaves['active'].__len__() == 0:\n self.verify(pkt_now[bond_port][0] == 0, \"Bonding port should not have TX pkt in mode 0 when all slaves down\")\n else:\n self.verify(pkt_now[bond_port][0] == pkt_count, \"Bonding port has error TX pkt count in mode 0\")\n for slave in slaves['active']:\n 
self.verify(pkt_now[slave][0] == pkt_count / slaves['active'].__len__(), \"Active slave has error TX pkt count in mode 0\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Inactive slave has error TX pkt count in mode 0\")", "def verify_bound_mac_opt(self, mode_set):\n mac_address_0_orig = self.get_port_mac(self.dut_ports[0])\n mac_address_1_orig = self.get_port_mac(self.dut_ports[1])\n mac_address_2_orig = self.get_port_mac(self.dut_ports[2])\n mac_address_3_orig = self.get_port_mac(self.dut_ports[3])\n\n bond_port = self.create_bonded_device(mode_set, SOCKET_1)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[1])\n\n mac_address_bond_orig = self.get_port_mac(bond_port)\n self.verify(mac_address_1_orig == mac_address_bond_orig,\n \"Bonded device MAC address not same with first slave MAC\")\n\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now == mac_address_2_now,\n \"NOT all slaves MAC address same with bonding device in mode %d\" % mode_set)\n else:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now != mac_address_2_now,\n \"All slaves should not be the same in mode %d\"\n % mode_set)\n\n new_mac = \"00:11:22:00:33:44\"\n self.set_mac_for_bonding_device(bond_port, new_mac)\n self.start_port(bond_port)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_now == mac_address_2_now == mac_address_bond_now == new_mac,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set == MODE_LACP:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now != new_mac and\n mac_address_2_now != new_mac and\n mac_address_1_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set in [MODE_ACTIVE_BACKUP, MODE_TLB_BALANCE]:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == new_mac and\n mac_address_bond_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n\n self.set_primary_for_bonding_device(bond_port, self.dut_ports[2], False)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac,\n \"Slave MAC changed when set primary slave\")\n\n mac_address_1_orig = mac_address_1_now\n self.remove_slave_from_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n self.verify(mac_address_2_now == mac_address_2_orig,\n \"MAC not back to original after removing the port\")\n\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == mac_address_1_orig,\n \"Bonding device or slave MAC changed after removing the primary slave\")\n\n self.remove_all_slaves(bond_port)\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def test_xor_tx_all_slaves_down(self):\n 
bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def verify_bound_basic_opt(self, mode_set):\n bond_port_0 = self.create_bonded_device(mode_set, SOCKET_0, True)\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[1])\n\n mode_value = self.get_bond_mode(bond_port_0)\n self.verify('%d' % mode_set in mode_value,\n \"Setting bonding mode error\")\n\n bond_port_1 = self.create_bonded_device(mode_set, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[0])\n self.add_slave_to_bonding_device(bond_port_1, True, self.dut_ports[0])\n\n OTHER_MODE = mode_set + 1 if not mode_set else mode_set - 1\n self.set_mode_for_bonding_device(bond_port_0, OTHER_MODE)\n self.set_mode_for_bonding_device(bond_port_0, mode_set)\n\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[2])\n time.sleep(5)\n self.set_primary_for_bonding_device(bond_port_0, self.dut_ports[2])\n\n self.remove_slave_from_bonding_device(bond_port_0, False, self.dut_ports[2])\n primary_now = self.get_bond_primary(bond_port_0)\n self.verify(int(primary_now) == self.dut_ports[1],\n \"Reset primary slave failed after removing primary slave\")\n\n for bond_port in [bond_port_0, bond_port_1]:\n self.remove_all_slaves(bond_port)\n\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def test_xor_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[2], self.dut_ports[1])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def test_peer_link_status_change(self, duthost1, duthost2, ptfadapter, ptfhost, collect,\n get_routes, mclag_intf_num, pre_setup_peerlink):\n dst_route1 = ipaddress.IPv4Interface(get_routes[duthost1.hostname][2])\n dst_route2 = ipaddress.IPv4Interface(get_routes[duthost2.hostname][2])\n active_mclag_interfaces = sorted(collect[duthost1.hostname]['ptf_map'].values())[:mclag_intf_num]\n standby_mclag_interfaces = 
sorted(collect[duthost2.hostname]['ptf_map'].values())[:mclag_intf_num]\n    indx = 0\n\n    # Check MCLAG status is OK\n    check_keepalive_link(duthost1, duthost2, 'OK')\n    # Check mclag interfaces on standby have same MAC as active device\n    for lag in collect[duthost1.hostname]['mclag_interfaces']:\n        dut1_sys_id = duthost1.shell(\n            \"teamdctl {} state item get team_device.ifinfo.dev_addr\".format(lag))['stdout']\n        dut2_sys_id = duthost2.shell(\n            \"teamdctl {} state item get team_device.ifinfo.dev_addr\".format(lag))['stdout']\n        pytest_assert(dut1_sys_id == dut2_sys_id,\n                      \"Mclag standby device {} system ID should be same as active device, but is {}\"\n                      .format(lag, dut2_sys_id))\n\n    # To be able to predict through which DUT traffic will traverse,\n    # use PortChannel member as source port, not PortChannel\n    for mclag_intf1, mclag_intf2 in zip(active_mclag_interfaces, standby_mclag_interfaces):\n        indx += 1\n        dst_ip1 = str(str(dst_route1.ip + indx))\n        dst_ip2 = str(str(dst_route2.ip + indx))\n        generate_and_verify_traffic(duthost1, duthost2, ptfadapter, ptfhost, mclag_intf1,\n                                    dst_ip1, duthost1.facts[\"router_mac\"], get_routes, collect)\n        generate_and_verify_traffic(duthost1, duthost2, ptfadapter, ptfhost, mclag_intf1,\n                                    dst_ip2, duthost1.facts[\"router_mac\"], get_routes, collect, pkt_action='DROP')\n        generate_and_verify_traffic(duthost1, duthost2, ptfadapter, ptfhost, mclag_intf2,\n                                    dst_ip2, duthost1.facts[\"router_mac\"], get_routes, collect)\n        generate_and_verify_traffic(duthost1, duthost2, ptfadapter, ptfhost, mclag_intf2,\n                                    dst_ip1, duthost1.facts[\"router_mac\"], get_routes, collect, pkt_action='DROP')", "def testDownlink(self, time_interval):\r\n        print \"Testing downlink...\"\r\n        first_idx = np.zeros(1)\r\n        self.zeroPPS()\r\n        Npackets = np.ceil(time_interval * self.data_rate)\r\n        print \"Npackets = \", Npackets\r\n        count = 0\r\n        while count < Npackets:\r\n            try:\r\n                packet, data, header, saddr = self.parsePacketData()\r\n            except TypeError:\r\n                continue\r\n            print \"Done!\"\r\n            if not packet:\r\n                print \"No packet\"\r\n                continue\r\n            else:\r\n                packet_count = (np.fromstring(packet[-4:],dtype = '>I'))\r\n                print packet_count\r\n                count += 1\r\n                print \"Count is \", count\r\n                if (packet_count - first_idx < 1):\r\n                    return -1\r\n        return 0", "def test_pass_through():\n    (board_id, _) = complete_bamr_board_transfer_to_sut()\n    (recv_board_id, _) = complete_board_transfer_from_sut()\n\n    assert board_id == recv_board_id, \"Board ID mismatch\"", "def testValidDownlink(self):\n\n        # Adding one downlink packet results in the parser returning data from\n        # the last collected downlink frame.
There is no such frame, so the\n # parser returns back an empty JSON string.\n self.console.write((self.getFilepath(\"downlink1\") + \"\\n\").encode())\n response = json.loads(self.console.readline().rstrip())\n self.assertIsNone(response)\n\n # Sending downlink packet 1 again tells the parser that a downlink frame\n # has been completed, so it dumps the data contained in the previously\n # sent downlink frame.\n self.console.write((self.getFilepath(\"downlink1\") + \"\\n\").encode())\n response = json.loads(self.console.readline().rstrip())\n expectedResponse = json.load(open(self.getFilepath(\"expected_output.json\")))\n self.assertDictEqual(response, expectedResponse)", "def runTest(self):\n try:\n print(\"Lag l3 load balancing test based on protocol\")\n \n self.recv_dev_port_idxs = self.get_dev_port_indexes(self.servers[11][1].l3_lag_obj.member_port_indexs)\n max_itrs = 99\n rcv_count = [0, 0]\n for i in range(0, max_itrs):\n if i % 2 == 0:\n pkt = simple_tcp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[1][1].mac,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=63)\n else:\n print(\"icmp\")\n pkt = simple_icmp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[1][1].mac,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_icmp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=63)\n self.dataplane.flush()\n send_packet(self, self.dut.port_obj_list[1].dev_port_index, pkt)\n rcv_idx, _ = verify_packet_any_port(\n self, exp_pkt, self.recv_dev_port_idxs)\n print('des_src={}, rcv_port={}'.format(\n self.servers[1][1].ipv4, rcv_idx))\n rcv_count[rcv_idx] += 1\n\n print(rcv_count)\n for i in range(0, 2):\n self.assertTrue(\n (rcv_count[i] >= ((max_itrs/2) * 0.8)), \"Not all paths are equally balanced\")\n finally:\n pass", "def verify_broadcast_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Slave TX packet not correct in mode 3\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Slave TX packet not correct in mode 3\")\n self.verify(pkt_now[unbound_port][0] == pkt_count, \"Unbonded port RX packet not correct in mode 3\")\n self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),\n \"Bonded device TX packet not correct in mode 3\")", "def runTest(self):\n try:\n print(\"Lag remove lag member test\")\n \n self.recv_dev_port_idxs = self.get_dev_port_indexes(self.servers[11][1].l3_lag_obj.member_port_indexs)\n pkts_num = 10\n begin_port = 2000\n for i in range(0, pkts_num):\n src_port = begin_port + i\n pkt = simple_tcp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[1][1].mac,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n tcp_sport=src_port,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n tcp_sport=src_port,\n ip_id=105,\n 
ip_ttl=63)\n self.dataplane.flush()\n send_packet(self, self.dut.port_obj_list[1].dev_port_index, pkt)\n verify_packet_any_port(\n self, exp_pkt, self.recv_dev_port_idxs)\n\n self.lag_configer.remove_lag_member_by_port_idx(\n lag_obj=self.servers[11][1].l3_lag_obj, port_idx=18)\n\n for i in range(0, pkts_num):\n src_port = begin_port + i\n pkt = simple_tcp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[1][1].mac,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n tcp_sport=src_port,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n tcp_sport=src_port,\n ip_id=105,\n ip_ttl=63)\n self.dataplane.flush()\n send_packet(self, self.dut.port_obj_list[1].dev_port_index, pkt)\n verify_no_packet(self, exp_pkt, self.get_dev_port_index(18))\n self.lag_configer.create_lag_member(lag_obj=self.servers[11][1].l3_lag_obj,\n lag_port_idxs=range(18, 19))\n finally:\n pass", "def test_active_backup_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def should_demote(self, *args, **kwargs) -> bool:\n return True", "def verify_bound_promisc_opt(self, mode_set):\n unbound_port = self.dut_ports[3]\n bond_port = self.create_bonded_device(mode_set, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port,\n False,\n self.dut_ports[0],\n self.dut_ports[1],\n self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (unbound_port, bond_port), \"testpmd> \")\n self.start_port(bond_port)\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n port_disabled_num = 0\n testpmd_all_ports = self.dut_ports\n testpmd_all_ports.append(bond_port)\n for port_id in testpmd_all_ports:\n value = self.get_detail_from_port_info(\"Promiscuous mode: \", \"enabled\", port_id)\n if not value:\n port_disabled_num += 1\n self.verify(port_disabled_num == 0,\n \"Not all slaves of bonded device turn promiscuous mode on by default.\")\n\n ether_ip = {}\n ether = {}\n ether['dest_mac'] = \"00:11:22:33:44:55\"\n ether_ip['ether'] = ether\n\n send_param = {}\n pkt_count = 1\n send_param['pkt_count'] = pkt_count\n pkt_info = [ether_ip, send_param]\n\n slaves = {}\n slaves['active'] = [self.dut_ports[0]]\n slaves['inactive'] = []\n\n pkt_now, summary = self.send_customized_packet_to_slave(unbound_port, bond_port, *pkt_info, **slaves)\n if mode_set == MODE_LACP:\n do_transmit = False\n pkt_size = 0\n if 
pkt_now[unbound_port][0]:\n                do_transmit = True\n                pkt_size = pkt_now[unbound_port][2] / pkt_now[unbound_port][0]\n            self.verify(do_transmit and pkt_size != LACP_MESSAGE_SIZE,\n                        \"Data not received by slave or bonding device when promiscuous enabled\")\n        else:\n            self.verify(pkt_now[self.dut_ports[0]][0] == pkt_now[bond_port][0] and\n                        pkt_now[bond_port][0] == pkt_count,\n                        \"Data not received by slave or bonding device when promiscuous enabled\")\n\n        self.dut.send_expect(\"set promisc %s off\" % bond_port, \"testpmd> \")\n        port_disabled_num = 0\n        testpmd_all_ports = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2], bond_port]\n        for port_id in testpmd_all_ports:\n            value = self.get_detail_from_port_info('Promiscuous mode: ', 'disabled', port_id)\n            if value == 'disabled':\n                port_disabled_num += 1\n        if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n            self.verify(port_disabled_num == 4,\n                        \"Not all slaves of bonded device turn promiscuous mode off in mode %d.\" % mode_set)\n        elif mode_set == MODE_LACP:\n            self.verify(port_disabled_num == 1,\n                        \"Not only turn bound device promiscuous mode off in mode %d\" % mode_set)\n        else:\n            self.verify(port_disabled_num == 2,\n                        \"Not only the primary slave turns promiscuous mode off in mode %d, \" % mode_set +\n                        \" when bonded device promiscuous disabled.\")\n\n        if mode_set != MODE_LACP:\n            send_param['verify'] = True\n        pkt_now, summary = self.send_customized_packet_to_slave(unbound_port, bond_port, *pkt_info, **slaves)\n        if mode_set == MODE_LACP:\n            do_transmit = False\n            pkt_size = 0\n            if pkt_now[unbound_port][0]:\n                do_transmit = True\n                pkt_size = pkt_now[unbound_port][2] / pkt_now[unbound_port][0]\n            self.verify(not do_transmit or\n                        pkt_size == LACP_MESSAGE_SIZE,\n                        \"Data received by slave or bonding device when promiscuous disabled\")\n        else:\n            self.verify(pkt_now[self.dut_ports[0]][0] == 0 and\n                        pkt_now[bond_port][0] == 0,\n                        \"Data received by slave or bonding device when promiscuous disabled\")\n\n        pkt_now, summary = self.send_default_packet_to_slave(self.dut_ports[3], bond_port, pkt_count, **slaves)\n        if mode_set == MODE_LACP:\n            do_transmit = False\n            pkt_size = 0\n            if pkt_now[unbound_port][0]:\n                do_transmit = True\n                pkt_size = pkt_now[unbound_port][2] / pkt_now[unbound_port][0]\n            self.verify(not do_transmit or\n                        pkt_size != LACP_MESSAGE_SIZE,\n                        \"RX or TX packet number not correct when promiscuous disabled\")\n        else:\n            self.verify(pkt_now[self.dut_ports[0]][0] == pkt_now[bond_port][0] and\n                        pkt_now[self.dut_ports[3]][0] == pkt_now[bond_port][0] and\n                        pkt_now[bond_port][0] == pkt_count,\n                        \"RX or TX packet number not correct when promiscuous disabled\")\n\n        self.remove_all_slaves(bond_port)\n        self.dut.send_expect(\"quit\", \"# \")\n        self.launch_app()", "def test_repairing(self):\n        # Pair the devices.\n        self.log.info(\"Pairing the devices ...\")\n        if not bt_test_utils.pair_pri_to_sec(\n                self.car, self.ph, attempts=1, auto_confirm=False):\n            self.log.error(\"Failed to pair devices.\")\n            return False\n\n        # Timed wait for the profile priorities to propagate.\n        time.sleep(BOND_TO_SDP_WAIT)\n\n        # Set the priority to OFF for ALL car profiles.\n        self.car.log.info(\"Set priorities off ...\")\n        car_bt_utils.set_car_profile_priorities_off(self.car, self.ph)\n\n        # Now unpair the devices.\n        self.log.info(\"Resetting the devices ...\")\n        for ad in self.android_devices:\n            bt_test_utils.clear_bonded_devices(ad)\n        # Give the stack time to unbond.\n        time.sleep(UNBOND_TIMEOUT)\n\n        # Pair them again!\n        self.log.info(\"Pairing them again ...\")\n        if not 
bt_test_utils.pair_pri_to_sec(\n                self.car, self.ph, attempts=1, auto_confirm=False):\n            self.log.error(\"Failed to pair devices.\")\n            return False\n\n        # Timed wait for the profile priorities to propagate.\n        time.sleep(BOND_TO_SDP_WAIT)\n\n        # Check the default priorities.\n        ph_hfp_p = self.car.droid.bluetoothHfpClientGetPriority(\n            self.ph.droid.bluetoothGetLocalAddress())\n        if ph_hfp_p != BtEnum.BluetoothPriorityLevel.PRIORITY_ON.value:\n            self.hf.log.error(\"HFP priority found: {}, expected: {}.\".format(\n                ph_hfp_p, BtEnum.BluetoothPriorityLevel.PRIORITY_ON.value))\n            return False\n\n        ph_a2dp_p = self.car.droid.bluetoothA2dpSinkGetPriority(\n            self.ph.droid.bluetoothGetLocalAddress())\n        if ph_a2dp_p != BtEnum.BluetoothPriorityLevel.PRIORITY_ON.value:\n            self.ph.log.error(\"A2DP priority found: {}, expected {}.\".format(\n                ph_a2dp_p, BtEnum.BluetoothPriorityLevel.PRIORITY_ON.value))\n            return False\n\n        return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that the packets are received and transmitted correctly in the round robin mode when bringing all slaves of the bonding device link down.
def test_round_robin_all_slaves_down(self): bond_port = self.create_bonded_device(MODE_ROUND_ROBIN, SOCKET_0) self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]) self.dut.send_expect("set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> ") self.start_all_ports() self.dut.send_expect("start", "testpmd> ") self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down") self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "down") self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "down") try: slaves = {} slaves['active'] = [] slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]] self.verify_round_robin_rx(self.dut_ports[3], bond_port, **slaves) self.verify_round_robin_tx(self.dut_ports[3], bond_port, **slaves) finally: self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up") self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "up") self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "up")
[ "def test_round_robin_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_ROUND_ROBIN, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n stat = self.tester.get_port_status(self.tester.get_local_port(self.dut_ports[0]))\n self.dut.send_expect(\"show bonding config %d\" % bond_port, \"testpmd> \")\n self.dut.send_expect(\"show port info all\", \"testpmd> \")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n self.verify_round_robin_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_round_robin_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def test_broadcast_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def test_xor_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def verify_round_robin_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 300\n pkt_now = {}\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n if slaves['active'].__len__() == 0:\n self.verify(pkt_now[bond_port][0] == 0, \"Bonding port 
should not have TX pkt in mode 0 when all slaves down\")\n else:\n self.verify(pkt_now[bond_port][0] == pkt_count, \"Bonding port has error TX pkt count in mode 0\")\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count / slaves['active'].__len__(), \"Active slave has error TX pkt count in mode 0\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Inactive slave has error TX pkt count in mode 0\")", "def test_broadcast_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def verify_bound_mac_opt(self, mode_set):\n mac_address_0_orig = self.get_port_mac(self.dut_ports[0])\n mac_address_1_orig = self.get_port_mac(self.dut_ports[1])\n mac_address_2_orig = self.get_port_mac(self.dut_ports[2])\n mac_address_3_orig = self.get_port_mac(self.dut_ports[3])\n\n bond_port = self.create_bonded_device(mode_set, SOCKET_1)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[1])\n\n mac_address_bond_orig = self.get_port_mac(bond_port)\n self.verify(mac_address_1_orig == mac_address_bond_orig,\n \"Bonded device MAC address not same with first slave MAC\")\n\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now == mac_address_2_now,\n \"NOT all slaves MAC address same with bonding device in mode %d\" % mode_set)\n else:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now != mac_address_2_now,\n \"All slaves should not be the same in mode %d\"\n % mode_set)\n\n new_mac = \"00:11:22:00:33:44\"\n self.set_mac_for_bonding_device(bond_port, new_mac)\n self.start_port(bond_port)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_now == mac_address_2_now == mac_address_bond_now == new_mac,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set == MODE_LACP:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now != new_mac and\n mac_address_2_now != new_mac and\n mac_address_1_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set in [MODE_ACTIVE_BACKUP, MODE_TLB_BALANCE]:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == new_mac and\n mac_address_bond_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n\n self.set_primary_for_bonding_device(bond_port, self.dut_ports[2], False)\n mac_address_1_now = 
self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac,\n \"Slave MAC changed when set primary slave\")\n\n mac_address_1_orig = mac_address_1_now\n self.remove_slave_from_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n self.verify(mac_address_2_now == mac_address_2_orig,\n \"MAC not back to original after removing the port\")\n\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == mac_address_1_orig,\n \"Bonding device or slave MAC changed after removing the primary slave\")\n\n self.remove_all_slaves(bond_port)\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def test_active_backup_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def runTest(self):\n try:\n print(\"Lag l3 load balancing test based on protocol\")\n \n self.recv_dev_port_idxs = self.get_dev_port_indexes(self.servers[11][1].l3_lag_obj.member_port_indexs)\n max_itrs = 99\n rcv_count = [0, 0]\n for i in range(0, max_itrs):\n if i % 2 == 0:\n pkt = simple_tcp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[1][1].mac,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=63)\n else:\n print(\"icmp\")\n pkt = simple_icmp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[1][1].mac,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_icmp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=63)\n self.dataplane.flush()\n send_packet(self, self.dut.port_obj_list[1].dev_port_index, pkt)\n rcv_idx, _ = verify_packet_any_port(\n self, exp_pkt, self.recv_dev_port_idxs)\n print('des_src={}, rcv_port={}'.format(\n self.servers[1][1].ipv4, rcv_idx))\n rcv_count[rcv_idx] += 1\n\n print(rcv_count)\n for i in range(0, 2):\n self.assertTrue(\n (rcv_count[i] >= ((max_itrs/2) * 0.8)), \"Not all paths are equally 
balanced\")\n finally:\n pass", "def verify_bound_basic_opt(self, mode_set):\n bond_port_0 = self.create_bonded_device(mode_set, SOCKET_0, True)\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[1])\n\n mode_value = self.get_bond_mode(bond_port_0)\n self.verify('%d' % mode_set in mode_value,\n \"Setting bonding mode error\")\n\n bond_port_1 = self.create_bonded_device(mode_set, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[0])\n self.add_slave_to_bonding_device(bond_port_1, True, self.dut_ports[0])\n\n OTHER_MODE = mode_set + 1 if not mode_set else mode_set - 1\n self.set_mode_for_bonding_device(bond_port_0, OTHER_MODE)\n self.set_mode_for_bonding_device(bond_port_0, mode_set)\n\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[2])\n time.sleep(5)\n self.set_primary_for_bonding_device(bond_port_0, self.dut_ports[2])\n\n self.remove_slave_from_bonding_device(bond_port_0, False, self.dut_ports[2])\n primary_now = self.get_bond_primary(bond_port_0)\n self.verify(int(primary_now) == self.dut_ports[1],\n \"Reset primary slave failed after removing primary slave\")\n\n for bond_port in [bond_port_0, bond_port_1]:\n self.remove_all_slaves(bond_port)\n\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def runTest(self):\n try:\n print(\"Lag remove lag member test\")\n \n self.recv_dev_port_idxs = self.get_dev_port_indexes(self.servers[11][1].l3_lag_obj.member_port_indexs)\n pkts_num = 10\n begin_port = 2000\n for i in range(0, pkts_num):\n src_port = begin_port + i\n pkt = simple_tcp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[1][1].mac,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n tcp_sport=src_port,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n tcp_sport=src_port,\n ip_id=105,\n ip_ttl=63)\n self.dataplane.flush()\n send_packet(self, self.dut.port_obj_list[1].dev_port_index, pkt)\n verify_packet_any_port(\n self, exp_pkt, self.recv_dev_port_idxs)\n\n self.lag_configer.remove_lag_member_by_port_idx(\n lag_obj=self.servers[11][1].l3_lag_obj, port_idx=18)\n\n for i in range(0, pkts_num):\n src_port = begin_port + i\n pkt = simple_tcp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[1][1].mac,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n tcp_sport=src_port,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n tcp_sport=src_port,\n ip_id=105,\n ip_ttl=63)\n self.dataplane.flush()\n send_packet(self, self.dut.port_obj_list[1].dev_port_index, pkt)\n verify_no_packet(self, exp_pkt, self.get_dev_port_index(18))\n self.lag_configer.create_lag_member(lag_obj=self.servers[11][1].l3_lag_obj,\n lag_port_idxs=range(18, 19))\n finally:\n pass", "def test_xor_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[2], self.dut_ports[1])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], 
self.dut_ports[2]]\n            slaves['inactive'] = [self.dut_ports[0]]\n\n            self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n        finally:\n            self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def test_peer_link_status_change(self, duthost1, duthost2, ptfadapter, ptfhost, collect,\n                                 get_routes, mclag_intf_num, pre_setup_peerlink):\n    dst_route1 = ipaddress.IPv4Interface(get_routes[duthost1.hostname][2])\n    dst_route2 = ipaddress.IPv4Interface(get_routes[duthost2.hostname][2])\n    active_mclag_interfaces = sorted(collect[duthost1.hostname]['ptf_map'].values())[:mclag_intf_num]\n    standby_mclag_interfaces = sorted(collect[duthost2.hostname]['ptf_map'].values())[:mclag_intf_num]\n    indx = 0\n\n    # Check MCLAG status is OK\n    check_keepalive_link(duthost1, duthost2, 'OK')\n    # Check mclag interfaces on standby have same MAC as active device\n    for lag in collect[duthost1.hostname]['mclag_interfaces']:\n        dut1_sys_id = duthost1.shell(\n            \"teamdctl {} state item get team_device.ifinfo.dev_addr\".format(lag))['stdout']\n        dut2_sys_id = duthost2.shell(\n            \"teamdctl {} state item get team_device.ifinfo.dev_addr\".format(lag))['stdout']\n        pytest_assert(dut1_sys_id == dut2_sys_id,\n                      \"Mclag standby device {} system ID should be same as active device, but is {}\"\n                      .format(lag, dut2_sys_id))\n\n    # To be able to predict through which DUT traffic will traverse,\n    # use PortChannel member as source port, not PortChannel\n    for mclag_intf1, mclag_intf2 in zip(active_mclag_interfaces, standby_mclag_interfaces):\n        indx += 1\n        dst_ip1 = str(str(dst_route1.ip + indx))\n        dst_ip2 = str(str(dst_route2.ip + indx))\n        generate_and_verify_traffic(duthost1, duthost2, ptfadapter, ptfhost, mclag_intf1,\n                                    dst_ip1, duthost1.facts[\"router_mac\"], get_routes, collect)\n        generate_and_verify_traffic(duthost1, duthost2, ptfadapter, ptfhost, mclag_intf1,\n                                    dst_ip2, duthost1.facts[\"router_mac\"], get_routes, collect, pkt_action='DROP')\n        generate_and_verify_traffic(duthost1, duthost2, ptfadapter, ptfhost, mclag_intf2,\n                                    dst_ip2, duthost1.facts[\"router_mac\"], get_routes, collect)\n        generate_and_verify_traffic(duthost1, duthost2, ptfadapter, ptfhost, mclag_intf2,\n                                    dst_ip1, duthost1.facts[\"router_mac\"], get_routes, collect, pkt_action='DROP')", "def testUnsubscribeAll(self):\r\n        nVis = NAOVision(IP, PORT)\r\n        nVis._subscribeToVideoProxy(0)\r\n        nVis._unsubscribeAll()\r\n\r\n        #Testing for bottom client\r\n        boolB0 = \"_clientBottom_0\" in self._videoProxy.getSubscribers()\r\n\r\n        #Testing for top client\r\n        boolT0 = \"_clientTop_0\" in self._videoProxy.getSubscribers()\r\n\r\n        #Making sure that none of the two modules exist\r\n        boolAll = boolB0 and boolT0\r\n\r\n        #boolAll should return false if both modules\r\n        #don't exist in the subscribers list\r\n        self.assertEqual(boolAll, False)", "def runTest(self):\n        try:\n            print(\"Lag disable egress lag member test\")\n            \n            pkts_num = 10\n            begin_port = 2000\n            exp_drop = []\n            self.recv_dev_port_idxs = self.get_dev_port_indexes(self.servers[11][1].l3_lag_obj.member_port_indexs)\n            for i in range(0, pkts_num):\n                src_port = begin_port + i\n                pkt = simple_tcp_packet(eth_dst=ROUTER_MAC,\n                                        eth_src=self.servers[1][1].mac,\n                                        ip_dst=self.servers[11][1].ipv4,\n                                        ip_src=self.servers[1][1].ipv4,\n                                        tcp_sport=src_port,\n                                        ip_id=105,\n                                        ip_ttl=64)\n                exp_pkt = simple_tcp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n                                            eth_src=ROUTER_MAC,\n                                            ip_dst=self.servers[11][1].ipv4,\n                                            ip_src=self.servers[1][1].ipv4,\n                                            tcp_sport=src_port,\n                                            ip_id=105,\n                                            ip_ttl=63)\n                
self.dataplane.flush()\n                send_packet(self, self.dut.port_obj_list[1].dev_port_index, pkt)\n                rcv_idx, _ = verify_packet_any_port(\n                    self, exp_pkt, self.recv_dev_port_idxs)\n                if rcv_idx == 18:\n                    exp_drop.append(src_port)\n\n            # disable egress of lag member: port18\n            print(\"disable port18 egress\")\n            status = sai_thrift_set_lag_member_attribute(self.client,\n                                                         self.servers[11][1].l3_lag_obj.lag_members[1],\n                                                         egress_disable=True)\n            self.assertEqual(status, SAI_STATUS_SUCCESS)\n\n            for i in range(0, pkts_num):\n                src_port = begin_port + i\n                pkt = simple_tcp_packet(eth_dst=ROUTER_MAC,\n                                        eth_src=self.servers[1][1].mac,\n                                        ip_dst=self.servers[11][1].ipv4,\n                                        ip_src=self.servers[1][1].ipv4,\n                                        tcp_sport=src_port,\n                                        ip_id=105,\n                                        ip_ttl=64)\n                exp_pkt = simple_tcp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n                                            eth_src=ROUTER_MAC,\n                                            ip_dst=self.servers[11][1].ipv4,\n                                            ip_src=self.servers[1][1].ipv4,\n                                            tcp_sport=src_port,\n                                            ip_id=105,\n                                            ip_ttl=63)\n                self.dataplane.flush()\n                send_packet(self, self.dut.port_obj_list[1].dev_port_index, pkt)\n                if src_port in exp_drop:\n                    verify_no_packet(self, exp_pkt, self.get_dev_port_index(18))\n                    verify_packet(self, exp_pkt, self.get_dev_port_index(17))\n        finally:\n            pass", "def verify_broadcast_tx(self, unbound_port, bond_port, **slaves):\n        pkt_count = 100\n        pkt_now = {}\n\n        pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n        for slave in slaves['active']:\n            self.verify(pkt_now[slave][0] == pkt_count, \"Slave TX packet not correct in mode 3\")\n        for slave in slaves['inactive']:\n            self.verify(pkt_now[slave][0] == 0, \"Slave TX packet not correct in mode 3\")\n        self.verify(pkt_now[unbound_port][0] == pkt_count, \"Unbonded port RX packet not correct in mode 3\")\n        self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),\n                    \"Bonded device TX packet not correct in mode 3\")", "def test_pass_through():\n    (board_id, _) = complete_bamr_board_transfer_to_sut()\n    (recv_board_id, _) = complete_board_transfer_from_sut()\n\n    assert board_id == recv_board_id, \"Board ID mismatch\"", "def test_repairing(self):\n        # Pair the devices.\n        self.log.info(\"Pairing the devices ...\")\n        if not bt_test_utils.pair_pri_to_sec(\n                self.car, self.ph, attempts=1, auto_confirm=False):\n            self.log.error(\"Failed to pair devices.\")\n            return False\n\n        # Timed wait for the profile priorities to propagate.\n        time.sleep(BOND_TO_SDP_WAIT)\n\n        # Set the priority to OFF for ALL car profiles.\n        self.car.log.info(\"Set priorities off ...\")\n        car_bt_utils.set_car_profile_priorities_off(self.car, self.ph)\n\n        # Now unpair the devices.\n        self.log.info(\"Resetting the devices ...\")\n        for ad in self.android_devices:\n            bt_test_utils.clear_bonded_devices(ad)\n        # Give the stack time to unbond.\n        time.sleep(UNBOND_TIMEOUT)\n\n        # Pair them again!\n        self.log.info(\"Pairing them again ...\")\n        if not bt_test_utils.pair_pri_to_sec(\n                self.car, self.ph, attempts=1, auto_confirm=False):\n            self.log.error(\"Failed to pair devices.\")\n            return False\n\n        # Timed wait for the profile priorities to propagate.\n        time.sleep(BOND_TO_SDP_WAIT)\n\n        # Check the default priorities.\n        ph_hfp_p = self.car.droid.bluetoothHfpClientGetPriority(\n            self.ph.droid.bluetoothGetLocalAddress())\n        if ph_hfp_p != BtEnum.BluetoothPriorityLevel.PRIORITY_ON.value:\n            self.hf.log.error(\"HFP priority found: {}, expected: {}.\".format(\n                ph_hfp_p, BtEnum.BluetoothPriorityLevel.PRIORITY_ON.value))\n            return False\n\n        ph_a2dp_p = 
self.car.droid.bluetoothA2dpSinkGetPriority(\n self.ph.droid.bluetoothGetLocalAddress())\n if ph_a2dp_p != BtEnum.BluetoothPriorityLevel.PRIORITY_ON.value:\n self.ph.log.error(\"A2DP priority found: {}, expected {}.\".format(\n ph_a2dp_p, BtEnum.BluetoothPriorityLevel.PRIORITY_ON.value))\n return False\n\n return True", "def testDownlink(self, time_interval):\r\n print \"Testing downlink...\"\r\n first_idx = np.zeros(1)\r\n self.zeroPPS()\r\n Npackets = np.ceil(time_interval * self.data_rate)\r\n print \"Npackets = \", Npackets\r\n count = 0\r\n while count < Npackets:\r\n try:\r\n packet, data, header, saddr = self.parsePacketData()\r\n except TypeError:\r\n continue\r\n print \"Done!\"\r\n if not packet:\r\n print \"No packet\"\r\n continue\r\n else:\r\n packet_count = (np.fromstring(packet[-4:],dtype = '>I'))\r\n print packet_count\r\n count += 1\r\n print \"Count is \", count\r\n if (packet_count - first_idx < 1):\r\n return -1\r\n return 0", "def test_04_streaming_balanced(self):\n balanced_rx = [self.spawn_receiver(self.EB1,\n count=1,\n address=\"balanced/test-address\")\n for _ in range(2)]\n self.EB1.wait_address(\"balanced/test-address\", subscribers=2)\n\n tx = self.spawn_sender(self.EA1,\n count=2,\n address=\"balanced/test-address\")\n out_text, out_error = tx.communicate(timeout=TIMEOUT)\n if tx.returncode:\n raise Exception(\"sender failed: %s %s\" % (out_text, out_error))\n\n for rx in balanced_rx:\n out_text, out_error = rx.communicate(timeout=TIMEOUT)\n if rx.returncode:\n raise Exception(\"receiver failed: %s %s\" % (out_text, out_error))\n\n self._wait_address_gone(self.EA1, \"balanced/test-address\")\n self._wait_address_gone(self.EB1, \"balanced/test-address\")\n self._wait_address_gone(self.INT_A, \"balanced/test-address\")\n self._wait_address_gone(self.INT_B, \"balanced/test-address\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify the RX packets are all correct in the active-backup mode.
def verify_active_backup_rx(self, unbound_port, bond_port, **slaves): pkt_count = 100 pkt_now = {} slave_num = slaves['active'].__len__() if slave_num != 0: active_flag = 1 else: active_flag = 0 pkt_now, summary = self.send_default_packet_to_slave(unbound_port, bond_port, pkt_count=pkt_count, **slaves) self.verify(pkt_now[bond_port][0] == pkt_count * slave_num, "Not correct RX pkt on bond port in mode 1") self.verify(pkt_now[unbound_port][0] == pkt_count * active_flag, "Not correct TX pkt on unbound port in mode 1") for slave in slaves['inactive']: self.verify(pkt_now[slave][0] == 0, "Not correct RX pkt on inactive port in mode 1") for slave in slaves['active']: self.verify(pkt_now[slave][0] == pkt_count, "Not correct RX pkt on active port in mode 1")
[ "def test_active_backup_rx_tx(self):\n bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n time.sleep(5)\n\n slaves = {}\n slaves['active'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = []\n self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves)", "def verify_active_backup_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 0\n pkt_now = {}\n\n if slaves['active'].__len__() != 0:\n primary_port = slaves['active'][0]\n active_flag = 1\n else:\n active_flag = 0\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n self.verify(pkt_now[bond_port][0] == pkt_count * active_flag, \"Not correct RX pkt on bond port in mode 1\")\n if active_flag == 1:\n self.verify(pkt_now[primary_port][0] == pkt_count, \"Not correct TX pkt on primary port in mode 1\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Not correct TX pkt on inactive port in mode 1\")\n for slave in [slave for slave in slaves['active'] if slave != primary_port]:\n self.verify(pkt_now[slave][0] == 0, \"Not correct TX pkt on backup port in mode 1\")", "def test_active_backup_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def verify_broadcast_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Slave TX packet not correct in mode 3\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Slave TX packet not correct in mode 3\")\n self.verify(pkt_now[unbound_port][0] == pkt_count, \"Unbonded port RX packet not correct in mode 3\")\n self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),\n \"Bonded device TX packet not correct in mode 3\")", "def check_available(self):\n self._available = None\n self.last_check_time = datetime.datetime.now()\n if self.device_up:\n self.check_local_ip()\n if 
self.gateway and not self.check_gateway():\n self.status = 'Gateway {} not reachable'.format(self.gateway)\n logger.critical('Gateway {} not reachable'.format(self.gateway))\n ping_ip = self.target_ip\n if ping_ip:\n self.check_test_route()\n (returncode,output) = run('/bin/ping -q -n -c{ping_count:n} -W{timeout:n} -i{ping_interval} -I{device} {target_ip}'.format(\n ping_count = self.ping_count,\n timeout = self.timeout,\n device = self.device,\n target_ip = ping_ip,\n ping_interval=self.ping_interval,\n ))\n if returncode == 0:\n report = REPORT.search(output)\n rtt = RTT.search(output)\n if report:\n self.last_loss = int(report.groupdict()['loss'])\n else:\n self.last_loss = None\n if rtt:\n self.last_rtt = float(rtt.groupdict()['avg'])\n else:\n self.last_rtt = None\n\n self._available = report and rtt and\\\n self.last_loss<=self.max_loss and\\\n self.last_rtt<=self.max_rtt\n if self._available:\n self.status='OK'\n elif self.last_loss>self.max_loss:\n self.status='Too much loss {}%'.format(self.last_loss)\n elif self.last_rtt>self.max_rtt:\n self.status='Too long RTT {}ms'.format(self.last_rtt)\n else:\n self.status = 'ping test failed : {}'.format(output)\n else:\n self._available = True\n else:\n self.status = 'Device {} is down or link state is unknown'.format(self.device)\n self._available = False\n\n self.update_leds()\n return self._available", "def check_rx_tx_messages():\n for msg in Simulator.tx_msgs:\n if msg in Simulator.rx_msgs:\n Simulator.rx_msgs.remove(msg)\n else:\n print(\"ERROR: msg was transmitted but not received\")\n msg.show()\n hexdump(msg)\n if len(Simulator.rx_msgs) > 0:\n print(\"ERROR: msgs were received but not transmitted:\")\n for i in range(len(Simulator.rx_msgs)):\n print(\"msg {}:\".format(i))\n Simulator.rx_msgs[i].show()\n hexdump(Simulator.rx_msgs[i])\n else:\n print(\"SUCCESS: all msgs were successfully delivered!\")", "def test_03_is04_is05_rx_match(self):\n\n test = Test(\"Receivers shown in Connection API matches those shown in Node API\")\n\n valid, result = self.get_is04_resources(\"receivers\")\n if not valid:\n return test.FAIL(result)\n valid, result = self.get_is05_resources(\"receivers\")\n if not valid:\n return test.FAIL(result)\n\n if not self.check_is04_in_is05(\"receivers\"):\n return test.FAIL(\"Unable to find all Receivers from IS-04 in IS-05\")\n\n if not self.check_is05_in_is04(\"receivers\"):\n return test.FAIL(\"Unable to find all Receivers from IS-05 in IS-04\")\n\n return test.PASS()", "def verify_downstream_packet_to_server(ptfadapter, port, exp_pkt):\n packets = ptfadapter.dataplane.packet_queues[(0, port)]\n for packet in packets:\n if six.PY2:\n if exp_pkt.pkt_match(packet):\n return True\n else:\n if exp_pkt.pkt_match(packet[0]):\n return True\n return False", "def verify_round_robin_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 300\n pkt_now = {}\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n if slaves['active'].__len__() == 0:\n self.verify(pkt_now[bond_port][0] == 0, \"Bonding port should not have TX pkt in mode 0 when all slaves down\")\n else:\n self.verify(pkt_now[bond_port][0] == pkt_count, \"Bonding port has error TX pkt count in mode 0\")\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count / slaves['active'].__len__(), \"Active slave has error TX pkt count in mode 0\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Inactive slave has error TX pkt count in mode 0\")", "def 
runTest(self):\n try:\n print(\"Lag l3 load balancing test based on protocol\")\n \n self.recv_dev_port_idxs = self.get_dev_port_indexes(self.servers[11][1].l3_lag_obj.member_port_indexs)\n max_itrs = 99\n rcv_count = [0, 0]\n for i in range(0, max_itrs):\n if i % 2 == 0:\n pkt = simple_tcp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[1][1].mac,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=63)\n else:\n print(\"icmp\")\n pkt = simple_icmp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[1][1].mac,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_icmp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=63)\n self.dataplane.flush()\n send_packet(self, self.dut.port_obj_list[1].dev_port_index, pkt)\n rcv_idx, _ = verify_packet_any_port(\n self, exp_pkt, self.recv_dev_port_idxs)\n print('des_src={}, rcv_port={}'.format(\n self.servers[1][1].ipv4, rcv_idx))\n rcv_count[rcv_idx] += 1\n\n print(rcv_count)\n for i in range(0, 2):\n self.assertTrue(\n (rcv_count[i] >= ((max_itrs/2) * 0.8)), \"Not all paths are equally balanced\")\n finally:\n pass", "def test_known_eth_dst_deletion(self):\n self.rcv_packet(\n 2,\n 0x100,\n {\n \"eth_src\": self.P1_V100_MAC,\n \"eth_dst\": self.UNKNOWN_MAC,\n \"ipv4_src\": \"10.0.0.2\",\n \"ipv4_dst\": \"10.0.0.3\",\n },\n )\n match = {\"in_port\": 3, \"vlan_vid\": self.V100, \"eth_dst\": self.P1_V100_MAC}\n self.assertTrue(\n self.network.tables[self.DP_ID].is_output(match, port=2, vid=self.V100),\n msg=\"Packet not output correctly after mac is learnt on new port\",\n )\n self.assertFalse(\n self.network.tables[self.DP_ID].is_output(match, port=1),\n msg=\"Packet output on old port after mac is learnt on new port\",\n )", "def check_pcaps():\n print_debug(\"check_pcaps() called\")\n\n # Check of the pcaps to make sure none were submitted with TCP packets but no TCP packets have the SYN flag\n # only call if no alerts fired\n if os.path.getsize(JOB_ALERT_LOG) == 0:\n try:\n if os.path.exists(TCPDUMP_BINARY):\n for pcap in PCAP_FILES:\n # check for TCP packets\n if len(subprocess.Popen(\"%s -nn -q -c 1 -r %s -p tcp 2>/dev/null\" % (TCPDUMP_BINARY, pcap), shell=True, stdout=subprocess.PIPE).stdout.read()) > 0:\n # check for SYN packets; this only works on IPv4 packets\n if len(subprocess.Popen(\"%s -nn -q -c 1 -r %s \\\"tcp[tcpflags] & tcp-syn != 0\\\" 2>/dev/null\" % (TCPDUMP_BINARY, pcap), shell=True, stdout=subprocess.PIPE).stdout.read()) == 0:\n # check IPv6 packets too\n if len(subprocess.Popen(\"%s -nn -q -c 1 -r %s \\\"ip6 and tcp and ip6[0x35] & 0x2 != 0\\\" 2>/dev/null\" % (TCPDUMP_BINARY, pcap), shell=True, stdout=subprocess.PIPE).stdout.read()) == 0:\n print_error(\"As Dalton says, \\\"pain don\\'t hurt.\\\" But an incomplete pcap sure can.\"\n \"\\n\\n\"\n \"The pcap file \\'%s\\' contains TCP traffic but does not \"\n \"contain any TCP packets with the SYN flag set.\"\n \"\\n\\n\"\n \"Almost all IDS rules that look for TCP traffic require \"\n \"an established connection.\\nYou will need to provide a more complete \"\n \"pcap if you want accurate results.\"\n \"\\n\\n\"\n \"If you need help crafting a pcap, Flowsynth may be 
able to help --\\n\"\n \"https://github.com/secureworks/flowsynth\"\n \"\\n\\n\"\n \"And, \\\"there's always barber college....\\\"\" % os.path.basename(pcap))\n else:\n print_debug(\"In check_pcaps() -- no tcpdump binary found at %s\" % TCPDUMP_BINARY)\n except Exception as e:\n if not str(e).startswith(\"As Dalton says\"):\n print_debug(\"Error doing TCP SYN check in check_pcaps():\\n%s\" % e)\n\n # check snaplen of pcaps\n try:\n for pcap in PCAP_FILES:\n snaplen_offset = 16\n pcapng = False\n little_endian = False\n snaplen = 65535\n\n # get first 40 bytes of pcap file\n with open(pcap, 'rb') as fh:\n bytes = fh.read(44)\n\n magic = binascii.hexlify(bytes[0:4]).decode('ascii')\n if magic.lower() == '0a0d0d0a':\n # this is pcapng and these aren't the byte-order magic bytes\n snaplen_offset = 40\n pcapng = True\n # get the correct byte-order magic bytes for pcapng\n magic = binascii.hexlify(bytes[8:12]).decode('ascii')\n else:\n # this is libpcap, we have the magic\n pcapng = False\n # now determine endian-ness\n if magic.lower() == 'a1b2c3d4':\n # this is \"big endian\"\n little_endian = False\n elif magic.lower() == '4d3c2b1a' or magic.lower() == 'd4c3b2a1':\n # this is little endian\n little_endian = True\n else:\n print_debug(\"in check_pcaps() - Pcap Byte-Order Magic field not found in file \\'%s\\'. Is this a valid pcap?\" % os.path.basename(pcap))\n continue\n\n # get snaplen\n if little_endian:\n snaplen = struct.unpack('<i', bytes[snaplen_offset:snaplen_offset+4])[0]\n else:\n snaplen = struct.unpack('>i', bytes[snaplen_offset:snaplen_offset+4])[0]\n\n # Python 2.4 doesn't support this so doing it the ugly way\n #print_debug(\"Packet capture file \\'%s\\' is format %s, %s, and has snaplen of %d bytes.\" % (os.path.basename(pcap), ('pcapng' if pcapng else 'libpcap'), ('little endian' if little_endian else 'big endian'), snaplen))\n debug_msg = \"Packet capture file \\'%s\\' is format \" % os.path.basename(pcap)\n if pcapng:\n debug_msg += \"pcapng, \"\n else:\n debug_msg += \"libpcap, \"\n if little_endian:\n debug_msg += \"little endian, and has snaplen of %d bytes.\" % snaplen\n else:\n debug_msg += \"big endian, and has snaplen of %d bytes.\" % snaplen\n print_debug(debug_msg)\n\n if snaplen < 65535:\n print_debug(\"Warning: \\'%s\\' was captured using a snaplen of %d bytes. This may mean you have truncated packets.\" % (os.path.basename(pcap), snaplen))\n\n # validate snaplen\n if snaplen < 1514:\n warning_msg = ''\n if not os.path.getsize(JOB_ERROR_LOG) == 0:\n warning_msg += \"\\n----------------\\n\\n\"\n warning_msg += \"Warning: \\'%s\\' was captured using a snaplen of %d bytes. This may mean you have truncated packets.\" % (os.path.basename(pcap), snaplen)\n if snaplen == 1500:\n warning_msg += \"\\n\\nSome sandboxes (Bluecoat/Norman) will put a hardcoded snaplen of 1500 bytes\\n\"\n warning_msg += \"on pcaps even when the packets are larger than 1500 bytes. This can result in the sensor throwing away these\\n\"\n warning_msg += \"packets and not inspecting them. If this is the case, try saving the file in Wireshark in pcapng format, opening up\\n\"\n warning_msg += \"that pcapng file in Wireshark, and saving it as a libpcap file. This should set the snaplen to 65535.\"\n warning_msg += \"\\n\\nThis is just a warning message about the pcap. 
The job ran successfully and the generated alerts as well as other\\n\"\n warning_msg += \"results have been returned.\"\n print_error(warning_msg)\n except Exception as e:\n if not str(e).startswith(\"Warning:\"):\n print_debug(\"Error doing snaplen check in check_pcaps(): %s\" % e)", "def _is_dropping_pkts(self):\n now = rospy.get_time()\n\n if len(self._dropped_times) < self._drops_per_hour:\n return False\n\n return abs(now - self._dropped_times[0]) < 3600", "def _verify_test_profile(self):\n verify_ok = True\n print(\"[Verifying test profile...]\")\n profile_tx_mac = None\n profile_rx_macs = []\n print(\"Checking TX and RX device MACs...\")\n for section in self.__test_profile.sections():\n if(re.search('^[T|R]X', section)):\n (typ, mac) = section.split()\n if(typ == 'RX'):\n profile_rx_macs.append(mac)\n elif(typ == 'TX'):\n profile_tx_mac = mac\n\n if(self.__tx_dev['mac'] == profile_tx_mac):\n print(\"[ OK ] TX Device MAC\")\n else:\n self.__logger.error(\"[FAIL] TX Device MAC\")\n print(\" Connected TX MAC: %s\" % (self.__tx_dev['mac']))\n print(\" Profile TX MAC : %s\" % (profile_tx_mac))\n verify_ok = False\n\n connected_rx_macs = []\n for rx in self.__rx_devs:\n connected_rx_macs.append(rx['mac'])\n\n spm = set(profile_rx_macs)\n scm = set(connected_rx_macs)\n if((len(spm - scm) == 0) and (len(scm - spm) == 0)):\n print(\"[ OK ] RX Device MACs\")\n else:\n self.__logger.error(\"[FAIL] RX Device MACs\")\n both = set(profile_rx_macs+connected_rx_macs)\n hdr_str = \"{:<17} {:<17}\".format(\"Profile\", \"Connected\")\n print(hdr_str)\n print(\"{:=^{width}}\".format('', width=len(hdr_str)))\n for i in both:\n print(\"{:<17} {:<17}\".format(i if i in profile_rx_macs else '', i if i in connected_rx_macs else ''))\n verify_ok = False\n\n if(verify_ok):\n return True\n else:\n return False", "def autobackup(self):\n try:\n #dbg.prn(dbg.BKP,\"autobackup-->\")\n if((int(time.time()) - self.lastActive)<10): #has been idle for less than 10 seconds, wait some more\n dbg.prn(dbg.BKP,\"busy... idle for:\",(int(time.time())-self.lastActive))\n # the machine is busy (there's most likely a live game going on) make sure there are no events being backed up right now\n if(self.current): \n self.stop(stopAll = True)\n return\n # check if there is a live event\n if(enc.busy()): #there is a live event - wait until it's done in order to start the back up\n self.lastActive = int(time.time())\n return\n if(len(self.events)>0): #events are being backed up - wait until that's done to check for new events\n dbg.prn(dbg.BKP,\"already backing up...\",len(self.events))\n return\n #######################################\n # local backup checks\n\n # get the device that has autobackup folder:\n drives = pu.disk.list()\n self.backupDrive = False\n # look for which one to use for back up\n for drive in drives:\n if(not os.path.exists(drive+backupEvent.autoDir)): #this is not the auto-backup for pxp\n continue\n self.backupDrive = drive.decode('UTF-8') #decoding is required for drives that may have odd mount points (e.g. 
cyrillic letters in the directory name)\n #########################################\n # cloud backup checks\n \n # get all cloud events\n settings = pu.disk.cfgGet(section=\"uploads\")\n if('autoupload' in settings and int(settings['autoupload'])): #automatic cloud upload is enabled\n cloudList = self.getCloudEvents()\n dbg.prn(dbg.BKP, \"cloud events:\",cloudList)\n else:\n dbg.prn(dbg.BKP, \"cloud upload disabled\")\n cloudList = False #automatic upload is disabled\n if(not self.backupDrive): \n return #did not find an auto-backup device - nothing to do if not backing up locally or to cloud\n\n # get all events in the system\n elist = pxp._listEvents(showDeleted=False)\n # go through all events that exist and verify that they're identical on the backup device\n for event in elist:\n if(not('datapath' in event)):\n continue #this event does not have a folder with video/tags - nothing to do here\n #### local backup ####\n if(self.backupDrive):\n # see if this event exists on the backup device\n if(os.path.exists(self.backupDrive+backupEvent.autoDir+event['datapath'])):\n # the event was already backed up\n # check for differences in video (simple size check - less io operations)\n vidSize = pu.disk.dirSize(c.wwwroot+event['datapath']+'/video')\n bkpSize = pu.disk.dirSize(self.backupDrive+backupEvent.autoDir+event['datapath']+'/video')\n if(bkpSize!=vidSize): #there's a mismatch in the video - backup the whole event again\n self.add(hid=event['hid'],auto=True)\n else:\n # the video is identical, check data file\n oldDb = self.backupDrive+backupEvent.autoDir+event['datapath']+'/pxp.db'\n newDb = c.wwwroot+event['datapath']+'/pxp.db'\n md5old = hashlib.md5(open(oldDb, 'rb').read()).hexdigest()\n md5new = hashlib.md5(open(newDb, 'rb').read()).hexdigest()\n if(md5old!=md5new): #the database is different - back up the new database\n self.add(hid=event['hid'],dataOnly=True,auto=True)\n else: #this event doesn't exist on the backup drive - back it up\n dbg.prn(dbg.BKP,\"event doesn't exist\", event)\n self.add(hid=event['hid'],auto=True)\n #end if backupDrive\n if(not type(cloudList) is dict):\n # could not get cloud events: either no internet connection, \n # or this device is deactivated, or the customer is deactivated\n # or uploading to cloud is disabled\n continue \n #### cloud backup ####\n # check if this event exists in the cloud\n if(event['hid'] in cloudList): #the event exists in the cloud, verify its checksum and number of segments (that's what's being uploaded)\n # count segments in the local event\n segs = len(glob.glob(c.wwwroot+event['name']+'/video/*.ts'))\n # get md5 checksum of the local .db\n md5local = hashlib.md5(open(c.wwwroot+event['name']+'/pxp.db', 'rb').read()).hexdigest()\n cfg = pxp._cfgGet()\n if(not cfg): \n return False #the encoder is not initalized yet most likely (???) - if got to this point, the encoder HAS TO BE initialized!!\n try:\n # get details about remote event\n response = pu.io.url(c.cloud+\"ajEvent/ajax\",params={\"v0\":cfg[1],\"v4\":cfg[2],\"id\":cloudList[event['hid']]['id']})\n if(not response): #connection error\n dbg.prn(dbg.BKP,\"could not get info about cloud event:\",cloudList[event['hid']]['id'])\n continue\n data = json.loads(response)\n data = data['entries']\n dbg.prn(dbg.BKP,\"NUM SEGS>>>>>>> remote:\",data['segs'],', local:',segs)\n # TODO \n # -add a check to see if each segment's md5 matches\n # -add resume: i.e. only upload segments that are partially or not uploaded to cloud. 
\n # do not do full re-upload if 1 segment is missing\n if(int(data['segs'])!=segs): #number of segments is different - upload the video file\n self.add(hid=event['hid'], priority=2, cloud=True, remoteParams={'id':cloudList[event['hid']]['id']})\n elif(data['md5']!=md5local): #video is the same, meteadata is different - upload just metadata\n self.add(hid=event['hid'], priority=2, cloud=True, remoteParams={'id':cloudList[event['hid']]['id']}, dataOnly=True)\n except Exception, e:\n dbg.prn(dbg.ERR|dbg.BKP,\"[---]autobackup.url:\",response,e,sys.exc_info()[-1].tb_lineno)\n else: #this event doesn't exist in the cloud yet - upload it (video and metedata)\n self.add(hid=event['hid'], priority=2, cloud=True)\n\n #end for event in elist\n except Exception as e:\n dbg.prn(dbg.ERR|dbg.BKP,\"[---]bkpmgr.autobackup\",e,sys.exc_info()[-1].tb_lineno)", "def test_getRxBytes(self):\n msg = b'12345'\n self.radio.bufferRxMsg(msg, True)\n assert(self.radio.getRxBytes() == msg)", "def test_04_streaming_balanced(self):\n balanced_rx = [self.spawn_receiver(self.EB1,\n count=1,\n address=\"balanced/test-address\")\n for _ in range(2)]\n self.EB1.wait_address(\"balanced/test-address\", subscribers=2)\n\n tx = self.spawn_sender(self.EA1,\n count=2,\n address=\"balanced/test-address\")\n out_text, out_error = tx.communicate(timeout=TIMEOUT)\n if tx.returncode:\n raise Exception(\"sender failed: %s %s\" % (out_text, out_error))\n\n for rx in balanced_rx:\n out_text, out_error = rx.communicate(timeout=TIMEOUT)\n if rx.returncode:\n raise Exception(\"receiver failed: %s %s\" % (out_text, out_error))\n\n self._wait_address_gone(self.EA1, \"balanced/test-address\")\n self._wait_address_gone(self.EB1, \"balanced/test-address\")\n self._wait_address_gone(self.INT_A, \"balanced/test-address\")\n self._wait_address_gone(self.INT_B, \"balanced/test-address\")", "def check_for_received_packet(self):\n return self._wrapper.check_for_received_packet()", "def test_broadcast_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify the TX packets are all correct in the active-backup mode.
def verify_active_backup_tx(self, unbound_port, bond_port, **slaves): pkt_count = 0 pkt_now = {} if slaves['active'].__len__() != 0: primary_port = slaves['active'][0] active_flag = 1 else: active_flag = 0 pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves) self.verify(pkt_now[bond_port][0] == pkt_count * active_flag, "Not correct RX pkt on bond port in mode 1") if active_flag == 1: self.verify(pkt_now[primary_port][0] == pkt_count, "Not correct TX pkt on primary port in mode 1") for slave in slaves['inactive']: self.verify(pkt_now[slave][0] == 0, "Not correct TX pkt on inactive port in mode 1") for slave in [slave for slave in slaves['active'] if slave != primary_port]: self.verify(pkt_now[slave][0] == 0, "Not correct TX pkt on backup port in mode 1")
[ "def test_active_backup_rx_tx(self):\n bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n time.sleep(5)\n\n slaves = {}\n slaves['active'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = []\n self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves)", "def verify_active_backup_rx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n slave_num = slaves['active'].__len__()\n if slave_num != 0:\n active_flag = 1\n else:\n active_flag = 0\n\n pkt_now, summary = self.send_default_packet_to_slave(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n self.verify(pkt_now[bond_port][0] == pkt_count * slave_num, \"Not correct RX pkt on bond port in mode 1\")\n self.verify(pkt_now[unbound_port][0] == pkt_count * active_flag, \"Not correct TX pkt on unbound port in mode 1\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Not correct RX pkt on inactive port in mode 1\")\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Not correct RX pkt on active port in mode 1\")", "def test_active_backup_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def autobackup(self):\n try:\n #dbg.prn(dbg.BKP,\"autobackup-->\")\n if((int(time.time()) - self.lastActive)<10): #has been idle for less than 10 seconds, wait some more\n dbg.prn(dbg.BKP,\"busy... 
idle for:\",(int(time.time())-self.lastActive))\n # the machine is busy (there's most likely a live game going on) make sure there are no events being backed up right now\n if(self.current): \n self.stop(stopAll = True)\n return\n # check if there is a live event\n if(enc.busy()): #there is a live event - wait until it's done in order to start the back up\n self.lastActive = int(time.time())\n return\n if(len(self.events)>0): #events are being backed up - wait until that's done to check for new events\n dbg.prn(dbg.BKP,\"already backing up...\",len(self.events))\n return\n #######################################\n # local backup checks\n\n # get the device that has autobackup folder:\n drives = pu.disk.list()\n self.backupDrive = False\n # look for which one to use for back up\n for drive in drives:\n if(not os.path.exists(drive+backupEvent.autoDir)): #this is not the auto-backup for pxp\n continue\n self.backupDrive = drive.decode('UTF-8') #decoding is required for drives that may have odd mount points (e.g. cyrillic letters in the directory name)\n #########################################\n # cloud backup checks\n \n # get all cloud events\n settings = pu.disk.cfgGet(section=\"uploads\")\n if('autoupload' in settings and int(settings['autoupload'])): #automatic cloud upload is enabled\n cloudList = self.getCloudEvents()\n dbg.prn(dbg.BKP, \"cloud events:\",cloudList)\n else:\n dbg.prn(dbg.BKP, \"cloud upload disabled\")\n cloudList = False #automatic upload is disabled\n if(not self.backupDrive): \n return #did not find an auto-backup device - nothing to do if not backing up locally or to cloud\n\n # get all events in the system\n elist = pxp._listEvents(showDeleted=False)\n # go through all events that exist and verify that they're identical on the backup device\n for event in elist:\n if(not('datapath' in event)):\n continue #this event does not have a folder with video/tags - nothing to do here\n #### local backup ####\n if(self.backupDrive):\n # see if this event exists on the backup device\n if(os.path.exists(self.backupDrive+backupEvent.autoDir+event['datapath'])):\n # the event was already backed up\n # check for differences in video (simple size check - less io operations)\n vidSize = pu.disk.dirSize(c.wwwroot+event['datapath']+'/video')\n bkpSize = pu.disk.dirSize(self.backupDrive+backupEvent.autoDir+event['datapath']+'/video')\n if(bkpSize!=vidSize): #there's a mismatch in the video - backup the whole event again\n self.add(hid=event['hid'],auto=True)\n else:\n # the video is identical, check data file\n oldDb = self.backupDrive+backupEvent.autoDir+event['datapath']+'/pxp.db'\n newDb = c.wwwroot+event['datapath']+'/pxp.db'\n md5old = hashlib.md5(open(oldDb, 'rb').read()).hexdigest()\n md5new = hashlib.md5(open(newDb, 'rb').read()).hexdigest()\n if(md5old!=md5new): #the database is different - back up the new database\n self.add(hid=event['hid'],dataOnly=True,auto=True)\n else: #this event doesn't exist on the backup drive - back it up\n dbg.prn(dbg.BKP,\"event doesn't exist\", event)\n self.add(hid=event['hid'],auto=True)\n #end if backupDrive\n if(not type(cloudList) is dict):\n # could not get cloud events: either no internet connection, \n # or this device is deactivated, or the customer is deactivated\n # or uploading to cloud is disabled\n continue \n #### cloud backup ####\n # check if this event exists in the cloud\n if(event['hid'] in cloudList): #the event exists in the cloud, verify its checksum and number of segments (that's what's being uploaded)\n # count segments 
in the local event\n segs = len(glob.glob(c.wwwroot+event['name']+'/video/*.ts'))\n # get md5 checksum of the local .db\n md5local = hashlib.md5(open(c.wwwroot+event['name']+'/pxp.db', 'rb').read()).hexdigest()\n cfg = pxp._cfgGet()\n if(not cfg): \n return False #the encoder is not initalized yet most likely (???) - if got to this point, the encoder HAS TO BE initialized!!\n try:\n # get details about remote event\n response = pu.io.url(c.cloud+\"ajEvent/ajax\",params={\"v0\":cfg[1],\"v4\":cfg[2],\"id\":cloudList[event['hid']]['id']})\n if(not response): #connection error\n dbg.prn(dbg.BKP,\"could not get info about cloud event:\",cloudList[event['hid']]['id'])\n continue\n data = json.loads(response)\n data = data['entries']\n dbg.prn(dbg.BKP,\"NUM SEGS>>>>>>> remote:\",data['segs'],', local:',segs)\n # TODO \n # -add a check to see if each segment's md5 matches\n # -add resume: i.e. only upload segments that are partially or not uploaded to cloud. \n # do not do full re-upload if 1 segment is missing\n if(int(data['segs'])!=segs): #number of segments is different - upload the video file\n self.add(hid=event['hid'], priority=2, cloud=True, remoteParams={'id':cloudList[event['hid']]['id']})\n elif(data['md5']!=md5local): #video is the same, meteadata is different - upload just metadata\n self.add(hid=event['hid'], priority=2, cloud=True, remoteParams={'id':cloudList[event['hid']]['id']}, dataOnly=True)\n except Exception, e:\n dbg.prn(dbg.ERR|dbg.BKP,\"[---]autobackup.url:\",response,e,sys.exc_info()[-1].tb_lineno)\n else: #this event doesn't exist in the cloud yet - upload it (video and metedata)\n self.add(hid=event['hid'], priority=2, cloud=True)\n\n #end for event in elist\n except Exception as e:\n dbg.prn(dbg.ERR|dbg.BKP,\"[---]bkpmgr.autobackup\",e,sys.exc_info()[-1].tb_lineno)", "def test_incremental_backup_corrupt_full(self):\n fname = self.id().split('.')[3]\n node = self.make_simple_node(\n base_dir=\"{0}/{1}/node\".format(module_name, fname),\n initdb_params=['--data-checksums'],\n pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}\n )\n backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')\n self.init_pb(backup_dir)\n self.add_instance(backup_dir, 'node', node)\n self.set_archiving(backup_dir, 'node', node)\n node.start()\n\n backup_id = self.backup_node(backup_dir, 'node', node)\n file = os.path.join(\n backup_dir, \"backups\", \"node\", backup_id,\n \"database\", \"postgresql.conf\")\n os.remove(file)\n\n try:\n self.validate_pb(backup_dir, 'node')\n # we should die here because exception is what we expect to happen\n self.assertEqual(\n 1, 0,\n \"Expecting Error because of validation of corrupted backup.\\n\"\n \" Output: {0} \\n CMD: {1}\".format(\n repr(self.output), self.cmd))\n except ProbackupException as e:\n self.assertTrue(\n \"INFO: Validate backups of the instance 'node'\\n\" in e.message and\n \"WARNING: Backup file \\\"{0}\\\" is not found\\n\".format(\n file) in e.message and\n \"WARNING: Backup {0} data files are corrupted\\n\".format(\n backup_id) in e.message and\n \"WARNING: Some backups are not valid\\n\" in e.message,\n \"\\n Unexpected Error Message: {0}\\n CMD: {1}\".format(\n repr(e.message), self.cmd))\n\n try:\n self.backup_node(backup_dir, 'node', node, backup_type=\"page\")\n # we should die here because exception is what we expect to happen\n self.assertEqual(\n 1, 0,\n \"Expecting Error because page backup should not be possible \"\n \"without valid full backup.\\n Output: {0} \\n CMD: {1}\".format(\n repr(self.output), 
self.cmd))\n except ProbackupException as e:\n self.assertIn(\n \"ERROR: Valid backup on current timeline is not found. \"\n \"Create new FULL backup before an incremental one.\",\n e.message,\n \"\\n Unexpected Error Message: {0}\\n CMD: {1}\".format(\n repr(e.message), self.cmd))\n\n self.assertEqual(\n self.show_pb(backup_dir, 'node', backup_id)['status'], \"CORRUPT\")\n self.assertEqual(\n self.show_pb(backup_dir, 'node')[1]['status'], \"ERROR\")\n\n # Clean after yourself\n self.del_test_dir(module_name, fname)", "def verify_broadcast_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Slave TX packet not correct in mode 3\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Slave TX packet not correct in mode 3\")\n self.verify(pkt_now[unbound_port][0] == pkt_count, \"Unbonded port RX packet not correct in mode 3\")\n self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),\n \"Bonded device TX packet not correct in mode 3\")", "def test_dmp_capabilities_have_backup_options(self):\n # Setup: Create a request context with mocked out send_* methods and set up the capabilities service\n rc = utils.MockRequestContext()\n capabilities_service = CapabilitiesService()\n workspace_service = WorkspaceService()\n capabilities_service._service_provider = utils.get_mock_service_provider({constants.WORKSPACE_SERVICE_NAME: workspace_service})\n\n # If: I request the dmp capabilities of this server\n capabilities_service._handle_dmp_capabilities_request(rc, None)\n\n # Then: The response should include backup capabilities\n rc.send_response.assert_called_once()\n capabilities_result = rc.send_response.mock_calls[0][1][0]\n features = capabilities_result.capabilities.features\n backup_options_list = [feature for feature in features if feature.feature_name == 'backup']\n # There should be exactly one feature containing backup options\n self.assertEqual(len(backup_options_list), 1)\n backup_options = backup_options_list[0]\n # The backup options should be enabled\n self.assertTrue(backup_options.enabled)\n # And the backup options should contain at least 1 option\n self.assertGreater(len(backup_options.options_metadata), 0)", "def check_pcaps():\n print_debug(\"check_pcaps() called\")\n\n # Check of the pcaps to make sure none were submitted with TCP packets but no TCP packets have the SYN flag\n # only call if no alerts fired\n if os.path.getsize(JOB_ALERT_LOG) == 0:\n try:\n if os.path.exists(TCPDUMP_BINARY):\n for pcap in PCAP_FILES:\n # check for TCP packets\n if len(subprocess.Popen(\"%s -nn -q -c 1 -r %s -p tcp 2>/dev/null\" % (TCPDUMP_BINARY, pcap), shell=True, stdout=subprocess.PIPE).stdout.read()) > 0:\n # check for SYN packets; this only works on IPv4 packets\n if len(subprocess.Popen(\"%s -nn -q -c 1 -r %s \\\"tcp[tcpflags] & tcp-syn != 0\\\" 2>/dev/null\" % (TCPDUMP_BINARY, pcap), shell=True, stdout=subprocess.PIPE).stdout.read()) == 0:\n # check IPv6 packets too\n if len(subprocess.Popen(\"%s -nn -q -c 1 -r %s \\\"ip6 and tcp and ip6[0x35] & 0x2 != 0\\\" 2>/dev/null\" % (TCPDUMP_BINARY, pcap), shell=True, stdout=subprocess.PIPE).stdout.read()) == 0:\n print_error(\"As Dalton says, \\\"pain don\\'t hurt.\\\" But an incomplete pcap sure can.\"\n \"\\n\\n\"\n \"The pcap file \\'%s\\' contains TCP traffic but does not \"\n 
\"contain any TCP packets with the SYN flag set.\"\n \"\\n\\n\"\n \"Almost all IDS rules that look for TCP traffic require \"\n \"an established connection.\\nYou will need to provide a more complete \"\n \"pcap if you want accurate results.\"\n \"\\n\\n\"\n \"If you need help crafting a pcap, Flowsynth may be able to help --\\n\"\n \"https://github.com/secureworks/flowsynth\"\n \"\\n\\n\"\n \"And, \\\"there's always barber college....\\\"\" % os.path.basename(pcap))\n else:\n print_debug(\"In check_pcaps() -- no tcpdump binary found at %s\" % TCPDUMP_BINARY)\n except Exception as e:\n if not str(e).startswith(\"As Dalton says\"):\n print_debug(\"Error doing TCP SYN check in check_pcaps():\\n%s\" % e)\n\n # check snaplen of pcaps\n try:\n for pcap in PCAP_FILES:\n snaplen_offset = 16\n pcapng = False\n little_endian = False\n snaplen = 65535\n\n # get first 40 bytes of pcap file\n with open(pcap, 'rb') as fh:\n bytes = fh.read(44)\n\n magic = binascii.hexlify(bytes[0:4]).decode('ascii')\n if magic.lower() == '0a0d0d0a':\n # this is pcapng and these aren't the byte-order magic bytes\n snaplen_offset = 40\n pcapng = True\n # get the correct byte-order magic bytes for pcapng\n magic = binascii.hexlify(bytes[8:12]).decode('ascii')\n else:\n # this is libpcap, we have the magic\n pcapng = False\n # now determine endian-ness\n if magic.lower() == 'a1b2c3d4':\n # this is \"big endian\"\n little_endian = False\n elif magic.lower() == '4d3c2b1a' or magic.lower() == 'd4c3b2a1':\n # this is little endian\n little_endian = True\n else:\n print_debug(\"in check_pcaps() - Pcap Byte-Order Magic field not found in file \\'%s\\'. Is this a valid pcap?\" % os.path.basename(pcap))\n continue\n\n # get snaplen\n if little_endian:\n snaplen = struct.unpack('<i', bytes[snaplen_offset:snaplen_offset+4])[0]\n else:\n snaplen = struct.unpack('>i', bytes[snaplen_offset:snaplen_offset+4])[0]\n\n # Python 2.4 doesn't support this so doing it the ugly way\n #print_debug(\"Packet capture file \\'%s\\' is format %s, %s, and has snaplen of %d bytes.\" % (os.path.basename(pcap), ('pcapng' if pcapng else 'libpcap'), ('little endian' if little_endian else 'big endian'), snaplen))\n debug_msg = \"Packet capture file \\'%s\\' is format \" % os.path.basename(pcap)\n if pcapng:\n debug_msg += \"pcapng, \"\n else:\n debug_msg += \"libpcap, \"\n if little_endian:\n debug_msg += \"little endian, and has snaplen of %d bytes.\" % snaplen\n else:\n debug_msg += \"big endian, and has snaplen of %d bytes.\" % snaplen\n print_debug(debug_msg)\n\n if snaplen < 65535:\n print_debug(\"Warning: \\'%s\\' was captured using a snaplen of %d bytes. This may mean you have truncated packets.\" % (os.path.basename(pcap), snaplen))\n\n # validate snaplen\n if snaplen < 1514:\n warning_msg = ''\n if not os.path.getsize(JOB_ERROR_LOG) == 0:\n warning_msg += \"\\n----------------\\n\\n\"\n warning_msg += \"Warning: \\'%s\\' was captured using a snaplen of %d bytes. This may mean you have truncated packets.\" % (os.path.basename(pcap), snaplen)\n if snaplen == 1500:\n warning_msg += \"\\n\\nSome sandboxes (Bluecoat/Norman) will put a hardcoded snaplen of 1500 bytes\\n\"\n warning_msg += \"on pcaps even when the packets are larger than 1500 bytes. This can result in the sensor throwing away these\\n\"\n warning_msg += \"packets and not inspecting them. If this is the case, try saving the file in Wireshark in pcapng format, opening up\\n\"\n warning_msg += \"that pcapng file in Wireshark, and saving it as a libpcap file. 
This should set the snaplen to 65535.\"\n warning_msg += \"\\n\\nThis is just a warning message about the pcap. The job ran successfully and the generated alerts as well as other\\n\"\n warning_msg += \"results have been returned.\"\n print_error(warning_msg)\n except Exception as e:\n if not str(e).startswith(\"Warning:\"):\n print_debug(\"Error doing snaplen check in check_pcaps(): %s\" % e)", "def test_valid_transaction(self):\n amount = 700\n before_amount_source, before_amount_destination = self.acc_1.available_cash, self.acc_2.available_cash\n self.c.post('/transfer/', {'source-id': self.acc_1.id, 'destination-id': self.acc_2.id, 'amount': amount}, follow=True)\n self.acc_1.refresh_from_db()\n self.acc_2.refresh_from_db()\n self.assertEqual(before_amount_source-amount, self.acc_1.available_cash)\n self.assertEqual(before_amount_destination+amount, self.acc_2.available_cash)\n self.assertTrue(Transaction.objects.first().success)", "def test_invalid_transaction(self):\n amount = 200\n before_amount_source, before_amount_destination = self.acc_2.available_cash, self.acc_1.available_cash\n self.c.post('/transfer/', {'source-id': self.acc_2.id, 'destination-id': self.acc_1.id, 'amount': amount}, follow=True)\n self.acc_1.refresh_from_db()\n self.acc_2.refresh_from_db()\n self.assertEqual(before_amount_source, self.acc_2.available_cash)\n self.assertEqual(before_amount_destination, self.acc_1.available_cash)\n self.assertFalse(Transaction.objects.first().success)", "def test_partial_key_backup_verification_success(self) -> None:\n # Arrange\n mediator = KeyCeremonyMediator(\"mediator_verification\", CEREMONY_DETAILS)\n KeyCeremonyOrchestrator.perform_round_1(self.GUARDIANS, mediator)\n KeyCeremonyOrchestrator.perform_round_2(self.GUARDIANS, mediator)\n\n # Round 3 - Guardians only\n verification1 = self.GUARDIAN_1.verify_election_partial_key_backup(\n GUARDIAN_2_ID,\n )\n verification2 = self.GUARDIAN_2.verify_election_partial_key_backup(\n GUARDIAN_1_ID,\n )\n\n # Act\n mediator.receive_backup_verifications([verification1])\n\n # Assert\n self.assertFalse(mediator.get_verification_state().all_sent)\n self.assertFalse(mediator.all_backups_verified())\n self.assertIsNone(mediator.publish_joint_key())\n\n # Act\n mediator.receive_backup_verifications([verification2])\n joint_key = mediator.publish_joint_key()\n\n # Assert\n self.assertTrue(mediator.get_verification_state().all_sent)\n self.assertTrue(mediator.all_backups_verified())\n self.assertIsNotNone(joint_key)", "def test_backup_bin_list():\n\tbackup_and_restore(\n\t\tput_data,\n\t\tNone,\n\t\tlambda context: check_data(context,\n\t\t\tTrue, True, True,\n\t\t\tTrue, False,\n\t\t\tTrue, True, True,\n\t\t\tTrue),\n\t\tbackup_opts=[\"--bin-list\", BIN_NAME_1],\n\t\trestore_opts=[\"--wait\"],\n\t\trestore_delay=1\n\t)", "def test_validateBlockTxRequest(self):\n # Test request rejected if start time passed\n contents = {'startTime': time.time() - 1.0, 'length': self.nodeParams.config.commConfig['maxTxBlockSize']}\n assert(validateBlockTxRequest(contents, [], self.nodeParams) == False)\n\n # Test request rejected if block too long \n contents = {'startTime': time.time() + 1.0, 'length': self.nodeParams.config.commConfig['maxTxBlockSize'] + 1}\n assert(validateBlockTxRequest(contents, [], self.nodeParams) == False)\n\n # Test for request acceptance \n contents = {'startTime': time.time() + 1.0, 'length': self.nodeParams.config.commConfig['maxTxBlockSize']}\n assert(validateBlockTxRequest(contents, [], self.nodeParams) == True)", "def 
verify(self):\n for f, i, test in zip(self._final, self._initial, self._flow_tests):\n if test.flow_count is not None:\n print(f)\n print(test)\n TestCase().assertEqual(f.flow_count, test.flow_count)\n TestCase().assertEqual(f.pkts, i.pkts + test.match_num)", "def get_status_is_full_backup(self):\n return self.parsed_status_file['IsFullBackup']", "def test_case01(self):\n self.assertTrue(self.vpnmd.is_alive())\n self.assertTrue(self.tun2socks.is_alive())", "def verify_tx_thread_allocation(self, session, vmname, vnic):\n # for vnic in set(vmUtil.get_vnic_no(session, vmname)):\n _LOGGER.debug(f'Executing command : cat vmfs/volumes/{vmUtil.get_datastore(session, vmname)}/{vmname}/{vmname}.vmx | grep ethernet{vnic}.ctxPerDev -i')\n stdin, stdout, stderr = session.exec_command(f'cat vmfs/volumes/{vmUtil.get_datastore(session, vmname)}/{vmname}/{vmname}.vmx | grep ethernet{vnic}.ctxPerDev -i')\n r = stdout.read().decode()\n # print(f'verify tx :{r}')\n _LOGGER.debug(f'{r}')\n st = re.search('\"(.*?)\"', r)\n if st:\n status = st.group()\n return True if int(status.strip('\"')) == 1 else False\n else:\n # print('nope')\n return False", "def test_match_braintree_state(self):\n SubscriptionStatusManager.match_braintree_state()\n\n # Refresh references\n self.braintree_customer_active = BraintreeUser.objects.get(user=self.user_active)\n self.braintree_customer_pending = BraintreeUser.objects.get(user=self.user_pending)\n self.braintree_customer_past_due = BraintreeUser.objects.get(user=self.user_past_due)\n self.braintree_customer_cancelled = BraintreeUser.objects.get(user=self.user_cancelled)\n self.braintree_customer_expired = BraintreeUser.objects.get(user=self.user_expired)\n\n # Check active subscription - pending cancel state should not have been changed\n self.assertTrue(self.braintree_customer_active.active)\n self.assertTrue(self.braintree_customer_active.pending_cancel)\n self.assertEqual(timezone.make_aware(datetime.datetime(2016,7,24,0,2,0),pytz.utc), self.braintree_customer_active.expiry_date)\n\n # Check pending subscription - again no change to pending cancel\n self.assertTrue(self.braintree_customer_pending.active)\n self.assertFalse(self.braintree_customer_pending.pending_cancel)\n self.assertEqual(timezone.make_aware(datetime.datetime(2016,11,02,0,2,0),pytz.utc), self.braintree_customer_pending.expiry_date)\n\n # Check past due subscription\n self.assertFalse(self.braintree_customer_past_due.active)\n self.assertFalse(self.braintree_customer_past_due.pending_cancel)\n self.assertEqual(timezone.make_aware(datetime.datetime(2016,10,03,0,2,0),pytz.utc), self.braintree_customer_past_due.expiry_date)\n\n # Check cancelled subscription\n self.assertFalse(self.braintree_customer_cancelled.active)\n self.assertFalse(self.braintree_customer_cancelled.pending_cancel)\n self.assertIsNone(self.braintree_customer_cancelled.expiry_date)\n self.assertEqual(\"\", self.braintree_customer_cancelled.subscription_id)\n\n # Check expired subscription\n self.assertFalse(self.braintree_customer_expired.active)\n self.assertFalse(self.braintree_customer_expired.pending_cancel)\n self.assertIsNone(self.braintree_customer_expired.expiry_date)\n self.assertEqual(\"\", self.braintree_customer_expired.subscription_id)", "def check_backup():\n last = last_backup()\n loc = backup_location()\n if not exists(loc):\n makedirs(loc)\n backups = list(scandir(loc))\n if not last or len(backups) == 0:\n return run_backup()\n else:\n now = datetime.now().replace(second=59, microsecond=999999)\n try:\n delta = (now - 
last).seconds / 3600\n if delta > backup_interval():\n return run_backup()\n except ValueError as err:\n return err" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that the packets are received and transmitted correctly in the active-backup mode.
def test_active_backup_rx_tx(self): bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0) self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]) self.dut.send_expect("set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> ") self.start_all_ports() self.dut.send_expect("start", "testpmd> ") time.sleep(5) slaves = {} slaves['active'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]] slaves['inactive'] = [] self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves) self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves)
[ "def verify_active_backup_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 0\n pkt_now = {}\n\n if slaves['active'].__len__() != 0:\n primary_port = slaves['active'][0]\n active_flag = 1\n else:\n active_flag = 0\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n self.verify(pkt_now[bond_port][0] == pkt_count * active_flag, \"Not correct RX pkt on bond port in mode 1\")\n if active_flag == 1:\n self.verify(pkt_now[primary_port][0] == pkt_count, \"Not correct TX pkt on primary port in mode 1\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Not correct TX pkt on inactive port in mode 1\")\n for slave in [slave for slave in slaves['active'] if slave != primary_port]:\n self.verify(pkt_now[slave][0] == 0, \"Not correct TX pkt on backup port in mode 1\")", "def verify_active_backup_rx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n slave_num = slaves['active'].__len__()\n if slave_num != 0:\n active_flag = 1\n else:\n active_flag = 0\n\n pkt_now, summary = self.send_default_packet_to_slave(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n self.verify(pkt_now[bond_port][0] == pkt_count * slave_num, \"Not correct RX pkt on bond port in mode 1\")\n self.verify(pkt_now[unbound_port][0] == pkt_count * active_flag, \"Not correct TX pkt on unbound port in mode 1\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Not correct RX pkt on inactive port in mode 1\")\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Not correct RX pkt on active port in mode 1\")", "def test_active_backup_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def autobackup(self):\n try:\n #dbg.prn(dbg.BKP,\"autobackup-->\")\n if((int(time.time()) - self.lastActive)<10): #has been idle for less than 10 seconds, wait some more\n dbg.prn(dbg.BKP,\"busy... 
idle for:\",(int(time.time())-self.lastActive))\n # the machine is busy (there's most likely a live game going on) make sure there are no events being backed up right now\n if(self.current): \n self.stop(stopAll = True)\n return\n # check if there is a live event\n if(enc.busy()): #there is a live event - wait until it's done in order to start the back up\n self.lastActive = int(time.time())\n return\n if(len(self.events)>0): #events are being backed up - wait until that's done to check for new events\n dbg.prn(dbg.BKP,\"already backing up...\",len(self.events))\n return\n #######################################\n # local backup checks\n\n # get the device that has autobackup folder:\n drives = pu.disk.list()\n self.backupDrive = False\n # look for which one to use for back up\n for drive in drives:\n if(not os.path.exists(drive+backupEvent.autoDir)): #this is not the auto-backup for pxp\n continue\n self.backupDrive = drive.decode('UTF-8') #decoding is required for drives that may have odd mount points (e.g. cyrillic letters in the directory name)\n #########################################\n # cloud backup checks\n \n # get all cloud events\n settings = pu.disk.cfgGet(section=\"uploads\")\n if('autoupload' in settings and int(settings['autoupload'])): #automatic cloud upload is enabled\n cloudList = self.getCloudEvents()\n dbg.prn(dbg.BKP, \"cloud events:\",cloudList)\n else:\n dbg.prn(dbg.BKP, \"cloud upload disabled\")\n cloudList = False #automatic upload is disabled\n if(not self.backupDrive): \n return #did not find an auto-backup device - nothing to do if not backing up locally or to cloud\n\n # get all events in the system\n elist = pxp._listEvents(showDeleted=False)\n # go through all events that exist and verify that they're identical on the backup device\n for event in elist:\n if(not('datapath' in event)):\n continue #this event does not have a folder with video/tags - nothing to do here\n #### local backup ####\n if(self.backupDrive):\n # see if this event exists on the backup device\n if(os.path.exists(self.backupDrive+backupEvent.autoDir+event['datapath'])):\n # the event was already backed up\n # check for differences in video (simple size check - less io operations)\n vidSize = pu.disk.dirSize(c.wwwroot+event['datapath']+'/video')\n bkpSize = pu.disk.dirSize(self.backupDrive+backupEvent.autoDir+event['datapath']+'/video')\n if(bkpSize!=vidSize): #there's a mismatch in the video - backup the whole event again\n self.add(hid=event['hid'],auto=True)\n else:\n # the video is identical, check data file\n oldDb = self.backupDrive+backupEvent.autoDir+event['datapath']+'/pxp.db'\n newDb = c.wwwroot+event['datapath']+'/pxp.db'\n md5old = hashlib.md5(open(oldDb, 'rb').read()).hexdigest()\n md5new = hashlib.md5(open(newDb, 'rb').read()).hexdigest()\n if(md5old!=md5new): #the database is different - back up the new database\n self.add(hid=event['hid'],dataOnly=True,auto=True)\n else: #this event doesn't exist on the backup drive - back it up\n dbg.prn(dbg.BKP,\"event doesn't exist\", event)\n self.add(hid=event['hid'],auto=True)\n #end if backupDrive\n if(not type(cloudList) is dict):\n # could not get cloud events: either no internet connection, \n # or this device is deactivated, or the customer is deactivated\n # or uploading to cloud is disabled\n continue \n #### cloud backup ####\n # check if this event exists in the cloud\n if(event['hid'] in cloudList): #the event exists in the cloud, verify its checksum and number of segments (that's what's being uploaded)\n # count segments 
in the local event\n segs = len(glob.glob(c.wwwroot+event['name']+'/video/*.ts'))\n # get md5 checksum of the local .db\n md5local = hashlib.md5(open(c.wwwroot+event['name']+'/pxp.db', 'rb').read()).hexdigest()\n cfg = pxp._cfgGet()\n if(not cfg): \n return False #the encoder is not initalized yet most likely (???) - if got to this point, the encoder HAS TO BE initialized!!\n try:\n # get details about remote event\n response = pu.io.url(c.cloud+\"ajEvent/ajax\",params={\"v0\":cfg[1],\"v4\":cfg[2],\"id\":cloudList[event['hid']]['id']})\n if(not response): #connection error\n dbg.prn(dbg.BKP,\"could not get info about cloud event:\",cloudList[event['hid']]['id'])\n continue\n data = json.loads(response)\n data = data['entries']\n dbg.prn(dbg.BKP,\"NUM SEGS>>>>>>> remote:\",data['segs'],', local:',segs)\n # TODO \n # -add a check to see if each segment's md5 matches\n # -add resume: i.e. only upload segments that are partially or not uploaded to cloud. \n # do not do full re-upload if 1 segment is missing\n if(int(data['segs'])!=segs): #number of segments is different - upload the video file\n self.add(hid=event['hid'], priority=2, cloud=True, remoteParams={'id':cloudList[event['hid']]['id']})\n elif(data['md5']!=md5local): #video is the same, meteadata is different - upload just metadata\n self.add(hid=event['hid'], priority=2, cloud=True, remoteParams={'id':cloudList[event['hid']]['id']}, dataOnly=True)\n except Exception, e:\n dbg.prn(dbg.ERR|dbg.BKP,\"[---]autobackup.url:\",response,e,sys.exc_info()[-1].tb_lineno)\n else: #this event doesn't exist in the cloud yet - upload it (video and metedata)\n self.add(hid=event['hid'], priority=2, cloud=True)\n\n #end for event in elist\n except Exception as e:\n dbg.prn(dbg.ERR|dbg.BKP,\"[---]bkpmgr.autobackup\",e,sys.exc_info()[-1].tb_lineno)", "def is_alive(dev):\n CRASH_RETRIES = 50\n ETH_P_ALL = 3\n\n def isresp(pkt):\n \"\"\"\n Probe Request: wlan.fc.type_subtype == 0x0004\n Probe Response: wlan.fc.type_subtype == 0x0005\n Authentication frame: wlan.fc.type_subtype == 0x000b\n Association Request: wlan.fc.type_subtype == 0x0000\n Association Response: wlan.fc.type_subtype == 0x0001\n\n Beacon: wlan.fc.type_subtype == 0x0008\n\n :param pkt:\n :return:\n \"\"\"\n header_length = struct.unpack('h', pkt[2:4])[0]\n\n hexdump(pkt)\n\n pkt = pkt[header_length:]\n\n resp = False\n check_frame_type(pkt)\n\n if (len(pkt) >= 30 and pkt[0:1] == b\"\\xb0\"\n and pkt[4:10] == frames.mac2str(AP_MAC)\n and pkt[28:30] == b\"\\x00\\x00\"):\n logging.info(f'received auth response from: {AP_MAC}')\n hexdump(pkt)\n resp = True\n return resp\n\n s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(ETH_P_ALL))\n s.bind((dev, ETH_P_ALL))\n\n logging.info(\"checking aliveness of fuzzed access point {}\".format(AP_MAC))\n\n retries = CRASH_RETRIES\n alive = False\n\n hexdump(AUTH_REQ_SAE)\n\n while retries:\n s.send(AUTH_REQ_SAE)\n logging.info('sent AUTH_REQ_SAE')\n\n start_time = time.time()\n while (time.time() - start_time) < 1:\n ans = s.recv(1024)\n alive = isresp(ans)\n if alive:\n s.send(DEAUTH)\n s.close()\n if retries != CRASH_RETRIES:\n logging.info(\"retried authentication {} times\".format(CRASH_RETRIES - retries))\n return alive\n\n retries -= 1\n\n s.close()\n\n return alive", "def Check_Communications(self):\n self.comm_status = False\n (stdoutdata, stderrdata) = self.runcmd([self.edtsaodir+\"/fclr\"])\n if stdoutdata.split()[1] == 'done' and stderrdata == '':\n self.comm_status = True\n self.bss_relay_status = False\n 
self.relay.openPhidget(403840) # Serial number 403840 is the Vbb control Phidgets relay\n self.relay.waitForAttach(10000)\n if (self.relay.isAttached() and self.relay.getSerialNum() == 403840):\n self.bss_relay_status = True\n self.relay.closePhidget()\n return", "def verify_broadcast_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Slave TX packet not correct in mode 3\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Slave TX packet not correct in mode 3\")\n self.verify(pkt_now[unbound_port][0] == pkt_count, \"Unbonded port RX packet not correct in mode 3\")\n self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),\n \"Bonded device TX packet not correct in mode 3\")", "def check_available(self):\n self._available = None\n self.last_check_time = datetime.datetime.now()\n if self.device_up:\n self.check_local_ip()\n if self.gateway and not self.check_gateway():\n self.status = 'Gateway {} not reachable'.format(self.gateway)\n logger.critical('Gateway {} not reachable'.format(self.gateway))\n ping_ip = self.target_ip\n if ping_ip:\n self.check_test_route()\n (returncode,output) = run('/bin/ping -q -n -c{ping_count:n} -W{timeout:n} -i{ping_interval} -I{device} {target_ip}'.format(\n ping_count = self.ping_count,\n timeout = self.timeout,\n device = self.device,\n target_ip = ping_ip,\n ping_interval=self.ping_interval,\n ))\n if returncode == 0:\n report = REPORT.search(output)\n rtt = RTT.search(output)\n if report:\n self.last_loss = int(report.groupdict()['loss'])\n else:\n self.last_loss = None\n if rtt:\n self.last_rtt = float(rtt.groupdict()['avg'])\n else:\n self.last_rtt = None\n\n self._available = report and rtt and\\\n self.last_loss<=self.max_loss and\\\n self.last_rtt<=self.max_rtt\n if self._available:\n self.status='OK'\n elif self.last_loss>self.max_loss:\n self.status='Too much loss {}%'.format(self.last_loss)\n elif self.last_rtt>self.max_rtt:\n self.status='Too long RTT {}ms'.format(self.last_rtt)\n else:\n self.status = 'ping test failed : {}'.format(output)\n else:\n self._available = True\n else:\n self.status = 'Device {} is down or link state is unknown'.format(self.device)\n self._available = False\n\n self.update_leds()\n return self._available", "def check_connection(self):\n # Send the client an echo signal to ask it to repeat back\n self.send(\"E\", \"z\");\n # Check if \"e\" gets sent back\n if (self.recv(2, \"e\") != \"z\"):\n # If the client didn't confirm, the connection might be lost\n self.__connection_lost();", "def check_pcaps():\n print_debug(\"check_pcaps() called\")\n\n # Check of the pcaps to make sure none were submitted with TCP packets but no TCP packets have the SYN flag\n # only call if no alerts fired\n if os.path.getsize(JOB_ALERT_LOG) == 0:\n try:\n if os.path.exists(TCPDUMP_BINARY):\n for pcap in PCAP_FILES:\n # check for TCP packets\n if len(subprocess.Popen(\"%s -nn -q -c 1 -r %s -p tcp 2>/dev/null\" % (TCPDUMP_BINARY, pcap), shell=True, stdout=subprocess.PIPE).stdout.read()) > 0:\n # check for SYN packets; this only works on IPv4 packets\n if len(subprocess.Popen(\"%s -nn -q -c 1 -r %s \\\"tcp[tcpflags] & tcp-syn != 0\\\" 2>/dev/null\" % (TCPDUMP_BINARY, pcap), shell=True, stdout=subprocess.PIPE).stdout.read()) == 0:\n # check IPv6 packets too\n if 
len(subprocess.Popen(\"%s -nn -q -c 1 -r %s \\\"ip6 and tcp and ip6[0x35] & 0x2 != 0\\\" 2>/dev/null\" % (TCPDUMP_BINARY, pcap), shell=True, stdout=subprocess.PIPE).stdout.read()) == 0:\n print_error(\"As Dalton says, \\\"pain don\\'t hurt.\\\" But an incomplete pcap sure can.\"\n \"\\n\\n\"\n \"The pcap file \\'%s\\' contains TCP traffic but does not \"\n \"contain any TCP packets with the SYN flag set.\"\n \"\\n\\n\"\n \"Almost all IDS rules that look for TCP traffic require \"\n \"an established connection.\\nYou will need to provide a more complete \"\n \"pcap if you want accurate results.\"\n \"\\n\\n\"\n \"If you need help crafting a pcap, Flowsynth may be able to help --\\n\"\n \"https://github.com/secureworks/flowsynth\"\n \"\\n\\n\"\n \"And, \\\"there's always barber college....\\\"\" % os.path.basename(pcap))\n else:\n print_debug(\"In check_pcaps() -- no tcpdump binary found at %s\" % TCPDUMP_BINARY)\n except Exception as e:\n if not str(e).startswith(\"As Dalton says\"):\n print_debug(\"Error doing TCP SYN check in check_pcaps():\\n%s\" % e)\n\n # check snaplen of pcaps\n try:\n for pcap in PCAP_FILES:\n snaplen_offset = 16\n pcapng = False\n little_endian = False\n snaplen = 65535\n\n # get first 40 bytes of pcap file\n with open(pcap, 'rb') as fh:\n bytes = fh.read(44)\n\n magic = binascii.hexlify(bytes[0:4]).decode('ascii')\n if magic.lower() == '0a0d0d0a':\n # this is pcapng and these aren't the byte-order magic bytes\n snaplen_offset = 40\n pcapng = True\n # get the correct byte-order magic bytes for pcapng\n magic = binascii.hexlify(bytes[8:12]).decode('ascii')\n else:\n # this is libpcap, we have the magic\n pcapng = False\n # now determine endian-ness\n if magic.lower() == 'a1b2c3d4':\n # this is \"big endian\"\n little_endian = False\n elif magic.lower() == '4d3c2b1a' or magic.lower() == 'd4c3b2a1':\n # this is little endian\n little_endian = True\n else:\n print_debug(\"in check_pcaps() - Pcap Byte-Order Magic field not found in file \\'%s\\'. Is this a valid pcap?\" % os.path.basename(pcap))\n continue\n\n # get snaplen\n if little_endian:\n snaplen = struct.unpack('<i', bytes[snaplen_offset:snaplen_offset+4])[0]\n else:\n snaplen = struct.unpack('>i', bytes[snaplen_offset:snaplen_offset+4])[0]\n\n # Python 2.4 doesn't support this so doing it the ugly way\n #print_debug(\"Packet capture file \\'%s\\' is format %s, %s, and has snaplen of %d bytes.\" % (os.path.basename(pcap), ('pcapng' if pcapng else 'libpcap'), ('little endian' if little_endian else 'big endian'), snaplen))\n debug_msg = \"Packet capture file \\'%s\\' is format \" % os.path.basename(pcap)\n if pcapng:\n debug_msg += \"pcapng, \"\n else:\n debug_msg += \"libpcap, \"\n if little_endian:\n debug_msg += \"little endian, and has snaplen of %d bytes.\" % snaplen\n else:\n debug_msg += \"big endian, and has snaplen of %d bytes.\" % snaplen\n print_debug(debug_msg)\n\n if snaplen < 65535:\n print_debug(\"Warning: \\'%s\\' was captured using a snaplen of %d bytes. This may mean you have truncated packets.\" % (os.path.basename(pcap), snaplen))\n\n # validate snaplen\n if snaplen < 1514:\n warning_msg = ''\n if not os.path.getsize(JOB_ERROR_LOG) == 0:\n warning_msg += \"\\n----------------\\n\\n\"\n warning_msg += \"Warning: \\'%s\\' was captured using a snaplen of %d bytes. 
This may mean you have truncated packets.\" % (os.path.basename(pcap), snaplen)\n if snaplen == 1500:\n warning_msg += \"\\n\\nSome sandboxes (Bluecoat/Norman) will put a hardcoded snaplen of 1500 bytes\\n\"\n warning_msg += \"on pcaps even when the packets are larger than 1500 bytes. This can result in the sensor throwing away these\\n\"\n warning_msg += \"packets and not inspecting them. If this is the case, try saving the file in Wireshark in pcapng format, opening up\\n\"\n warning_msg += \"that pcapng file in Wireshark, and saving it as a libpcap file. This should set the snaplen to 65535.\"\n warning_msg += \"\\n\\nThis is just a warning message about the pcap. The job ran successfully and the generated alerts as well as other\\n\"\n warning_msg += \"results have been returned.\"\n print_error(warning_msg)\n except Exception as e:\n if not str(e).startswith(\"Warning:\"):\n print_debug(\"Error doing snaplen check in check_pcaps(): %s\" % e)", "def test_dmp_capabilities_have_backup_options(self):\n # Setup: Create a request context with mocked out send_* methods and set up the capabilities service\n rc = utils.MockRequestContext()\n capabilities_service = CapabilitiesService()\n workspace_service = WorkspaceService()\n capabilities_service._service_provider = utils.get_mock_service_provider({constants.WORKSPACE_SERVICE_NAME: workspace_service})\n\n # If: I request the dmp capabilities of this server\n capabilities_service._handle_dmp_capabilities_request(rc, None)\n\n # Then: The response should include backup capabilities\n rc.send_response.assert_called_once()\n capabilities_result = rc.send_response.mock_calls[0][1][0]\n features = capabilities_result.capabilities.features\n backup_options_list = [feature for feature in features if feature.feature_name == 'backup']\n # There should be exactly one feature containing backup options\n self.assertEqual(len(backup_options_list), 1)\n backup_options = backup_options_list[0]\n # The backup options should be enabled\n self.assertTrue(backup_options.enabled)\n # And the backup options should contain at least 1 option\n self.assertGreater(len(backup_options.options_metadata), 0)", "def Check_Communications(self):\n self.comm_status = False\n try:\n self.ser.close()\n self.ser.open()\n if self.ser.isOpen():\n self.ser.flushInput()\n self.ser.write('SYS:ERR?\\r\\n')\n time.sleep(0.1)\n status = int(self.ser.readline().split()[0])\n if status == 0:\n self.comm_status = True\n return\n else:\n self.ser.close()\n return\n except Exception as e:\n print \"No communication to BK Precision Back-Bias supply. 
Exception of type %s and args = \\n\"%type(e).__name__, e.args \n self.ser.close()\n return", "def check_for_received_packet(self):\n return self._wrapper.check_for_received_packet()", "def verify_downstream_packet_to_server(ptfadapter, port, exp_pkt):\n packets = ptfadapter.dataplane.packet_queues[(0, port)]\n for packet in packets:\n if six.PY2:\n if exp_pkt.pkt_match(packet):\n return True\n else:\n if exp_pkt.pkt_match(packet[0]):\n return True\n return False", "def test_pass_through():\n (board_id, _) = complete_bamr_board_transfer_to_sut()\n (recv_board_id, _) = complete_board_transfer_from_sut()\n\n assert board_id == recv_board_id, \"Board ID mismatch\"", "def test_case01(self):\n self.assertTrue(self.vpnmd.is_alive())\n self.assertTrue(self.tun2socks.is_alive())", "def runTest(self):\n try:\n print(\"Lag l3 load balancing test based on protocol\")\n \n self.recv_dev_port_idxs = self.get_dev_port_indexes(self.servers[11][1].l3_lag_obj.member_port_indexs)\n max_itrs = 99\n rcv_count = [0, 0]\n for i in range(0, max_itrs):\n if i % 2 == 0:\n pkt = simple_tcp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[1][1].mac,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=63)\n else:\n print(\"icmp\")\n pkt = simple_icmp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[1][1].mac,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_icmp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=63)\n self.dataplane.flush()\n send_packet(self, self.dut.port_obj_list[1].dev_port_index, pkt)\n rcv_idx, _ = verify_packet_any_port(\n self, exp_pkt, self.recv_dev_port_idxs)\n print('des_src={}, rcv_port={}'.format(\n self.servers[1][1].ipv4, rcv_idx))\n rcv_count[rcv_idx] += 1\n\n print(rcv_count)\n for i in range(0, 2):\n self.assertTrue(\n (rcv_count[i] >= ((max_itrs/2) * 0.8)), \"Not all paths are equally balanced\")\n finally:\n pass", "def testTCPFramerTransactionFull(self):\n msg = b\"\\x00\\x01\\x12\\x34\\x00\\x04\\xff\\x02\\x12\\x34\"\n self._tcp.addToFrame(msg)\n self.assertTrue(self._tcp.checkFrame())\n result = self._tcp.getFrame()\n self.assertEqual(msg[7:], result)\n self._tcp.advanceFrame()", "def should_write_mpack_information(self):\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that packets are received and transmitted correctly in the active-backup mode when the links of all slaves of the bonding device are brought down.
def test_active_backup_all_slaves_down(self):
    bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0)
    self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])
    self.dut.send_expect("set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> ")
    self.start_all_ports()
    self.dut.send_expect("start", "testpmd> ")
    self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down")
    self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "down")
    self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "down")
    try:
        slaves = {}
        slaves['active'] = []
        slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]
        self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves)
        self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves)
    finally:
        self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up")
        self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "up")
        self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "up")
[ "def test_active_backup_rx_tx(self):\n bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n time.sleep(5)\n\n slaves = {}\n slaves['active'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = []\n self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves)", "def test_broadcast_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def verify_active_backup_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 0\n pkt_now = {}\n\n if slaves['active'].__len__() != 0:\n primary_port = slaves['active'][0]\n active_flag = 1\n else:\n active_flag = 0\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n self.verify(pkt_now[bond_port][0] == pkt_count * active_flag, \"Not correct RX pkt on bond port in mode 1\")\n if active_flag == 1:\n self.verify(pkt_now[primary_port][0] == pkt_count, \"Not correct TX pkt on primary port in mode 1\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Not correct TX pkt on inactive port in mode 1\")\n for slave in [slave for slave in slaves['active'] if slave != primary_port]:\n self.verify(pkt_now[slave][0] == 0, \"Not correct TX pkt on backup port in mode 1\")", "def verify_active_backup_rx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n slave_num = slaves['active'].__len__()\n if slave_num != 0:\n active_flag = 1\n else:\n active_flag = 0\n\n pkt_now, summary = self.send_default_packet_to_slave(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n self.verify(pkt_now[bond_port][0] == pkt_count * slave_num, \"Not correct RX pkt on bond port in mode 1\")\n self.verify(pkt_now[unbound_port][0] == pkt_count * active_flag, \"Not correct TX pkt on unbound port in mode 1\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Not correct RX pkt on inactive port in mode 1\")\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Not correct RX pkt on active port in mode 1\")", "def test_xor_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n 
self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def identify_failures():\n global heartbeat_slaves\n while True:\n for slave_ip in heartbeat_slaves.keys():\n if heartbeat_slaves[slave_ip] != -1 and heartbeat_slaves[slave_ip] < time.time():\n print \"%s failed. Expected at time %s but current time is %s\" % (slave_ip, heartbeat_slaves[slave_ip], time.time())\n if heartbeat_slaves[slave_ip] != -1 and heartbeat_slaves[slave_ip] + 30 < time.time(): # 30 second grace period for testing\n heartbeat_lock.acquire()\n slave_ips.remove(slave_ip)\n del heartbeat_slaves[slave_ip]\n print \"Deleted %s backup\" % (slave_ip)\n heartbeat_lock.release()\n time.sleep(1)", "def test_broadcast_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def test_round_robin_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_ROUND_ROBIN, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n self.verify_round_robin_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_round_robin_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def test_autostate_disabled(self, 
duthosts, enum_frontend_dut_hostname):\n\n duthost = duthosts[enum_frontend_dut_hostname]\n dut_hostname = duthost.hostname\n\n # Collect DUT configuration and status\n vlan_members_facts = duthost.get_running_config_facts().get('VLAN_MEMBER')\n if vlan_members_facts is None:\n pytest.skip('No vlan available on DUT {hostname}'.format(hostname=dut_hostname))\n ifs_status = duthost.get_interfaces_status()\n ip_ifs = duthost.show_ip_interface()['ansible_facts']['ip_interfaces']\n\n # Find out all vlans which meet the following requirements:\n # 1. The oper_state of vlan interface is 'up'\n # 2. The oper_state of at least one member in the vlan is 'up'\n vlan_available = []\n for vlan in vlan_members_facts:\n if ip_ifs.get(vlan, {}).get('oper_state') == 'up':\n for member in vlan_members_facts[vlan]:\n if ifs_status.get(member, {}).get('oper') == 'up':\n vlan_available.append(vlan)\n break\n if len(vlan_available) == 0:\n pytest.skip('No applicable VLAN available on DUT {hostname} for this test case'.\n format(hostname=dut_hostname))\n\n # Pick a vlan for test\n vlan = vlan_available[0]\n vlan_members = list(vlan_members_facts[vlan].keys())\n\n try:\n # Shutdown all the members in vlan.\n self.shutdown_multiple_with_confirm(duthost, vlan_members, err_handler=pytest.fail)\n\n # Check whether the oper_state of vlan interface is changed as expected.\n ip_ifs = duthost.show_ip_interface()['ansible_facts']['ip_interfaces']\n if len(vlan_available) > 1:\n # If more than one vlan comply with the above test requirements, then there are members in other vlans\n # that are still up. Therefore, the bridge is still up, and vlan interface should be up.\n pytest_assert(ip_ifs.get(vlan, {}).get('oper_state') == \"up\",\n 'vlan interface of {vlan} is not up as expected'.format(vlan=vlan))\n else:\n # If only one vlan comply with the above test requirements, then all the vlan members across all the\n # vlans are down. 
Therefore, the bridge is down, and vlan interface should be down.\n pytest_assert(ip_ifs.get(vlan, {}).get('oper_state') == \"down\",\n 'vlan interface of {vlan} is not down as expected'.format(vlan=vlan))\n finally:\n # Restore all interfaces to their original admin_state.\n self.restore_interface_admin_state(duthost, ifs_status)", "def verify_broadcast_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Slave TX packet not correct in mode 3\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Slave TX packet not correct in mode 3\")\n self.verify(pkt_now[unbound_port][0] == pkt_count, \"Unbonded port RX packet not correct in mode 3\")\n self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),\n \"Bonded device TX packet not correct in mode 3\")", "def test_round_robin_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_ROUND_ROBIN, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n stat = self.tester.get_port_status(self.tester.get_local_port(self.dut_ports[0]))\n self.dut.send_expect(\"show bonding config %d\" % bond_port, \"testpmd> \")\n self.dut.send_expect(\"show port info all\", \"testpmd> \")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n self.verify_round_robin_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_round_robin_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def test_xor_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[2], self.dut_ports[1])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def test_peer_link_status_change(self, duthost1, duthost2, ptfadapter, ptfhost, collect,\n get_routes, mclag_intf_num, pre_setup_peerlink):\n dst_route1 = ipaddress.IPv4Interface(get_routes[duthost1.hostname][2])\n dst_route2 = ipaddress.IPv4Interface(get_routes[duthost2.hostname][2])\n active_mclag_interfaces = sorted(collect[duthost1.hostname]['ptf_map'].values())[:mclag_intf_num]\n standby_mclag_interfaces = sorted(collect[duthost2.hostname]['ptf_map'].values())[:mclag_intf_num]\n indx = 0\n\n # Check MCLAG status is OK\n check_keepalive_link(duthost1, duthost2, 'OK')\n # Check mclag interfaces on standby have same MAC as active device\n for lag in 
collect[duthost1.hostname]['mclag_interfaces']:\n dut1_sys_id = duthost1.shell(\n \"teamdctl {} state item get team_device.ifinfo.dev_addr\".format(lag))['stdout']\n dut2_sys_id = duthost2.shell(\n \"teamdctl {} state item get team_device.ifinfo.dev_addr\".format(lag))['stdout']\n pytest_assert(dut1_sys_id == dut2_sys_id,\n \"Mclag standby device {} system ID shoule be same as active device, but is {}\"\n .format(lag, dut2_sys_id))\n\n # To be able to predict trough which DUT traffic will traverse,\n # use PortChannel member as source port, not PortChannel\n for mclag_intf1, mclag_intf2 in zip(active_mclag_interfaces, standby_mclag_interfaces):\n indx += 1\n dst_ip1 = str(str(dst_route1.ip + indx))\n dst_ip2 = str(str(dst_route2.ip + indx))\n generate_and_verify_traffic(duthost1, duthost2, ptfadapter, ptfhost, mclag_intf1,\n dst_ip1, duthost1.facts[\"router_mac\"], get_routes, collect)\n generate_and_verify_traffic(duthost1, duthost2, ptfadapter, ptfhost, mclag_intf1,\n dst_ip2, duthost1.facts[\"router_mac\"], get_routes, collect, pkt_action='DROP')\n generate_and_verify_traffic(duthost1, duthost2, ptfadapter, ptfhost, mclag_intf2,\n dst_ip2, duthost1.facts[\"router_mac\"], get_routes, collect)\n generate_and_verify_traffic(duthost1, duthost2, ptfadapter, ptfhost, mclag_intf2,\n dst_ip1, duthost1.facts[\"router_mac\"], get_routes, collect, pkt_action='DROP')", "def test_dmp_capabilities_have_backup_options(self):\n # Setup: Create a request context with mocked out send_* methods and set up the capabilities service\n rc = utils.MockRequestContext()\n capabilities_service = CapabilitiesService()\n workspace_service = WorkspaceService()\n capabilities_service._service_provider = utils.get_mock_service_provider({constants.WORKSPACE_SERVICE_NAME: workspace_service})\n\n # If: I request the dmp capabilities of this server\n capabilities_service._handle_dmp_capabilities_request(rc, None)\n\n # Then: The response should include backup capabilities\n rc.send_response.assert_called_once()\n capabilities_result = rc.send_response.mock_calls[0][1][0]\n features = capabilities_result.capabilities.features\n backup_options_list = [feature for feature in features if feature.feature_name == 'backup']\n # There should be exactly one feature containing backup options\n self.assertEqual(len(backup_options_list), 1)\n backup_options = backup_options_list[0]\n # The backup options should be enabled\n self.assertTrue(backup_options.enabled)\n # And the backup options should contain at least 1 option\n self.assertGreater(len(backup_options.options_metadata), 0)", "def test_failover(self):\n pass", "def verify_round_robin_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 300\n pkt_now = {}\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n if slaves['active'].__len__() == 0:\n self.verify(pkt_now[bond_port][0] == 0, \"Bonding port should not have TX pkt in mode 0 when all slaves down\")\n else:\n self.verify(pkt_now[bond_port][0] == pkt_count, \"Bonding port has error TX pkt count in mode 0\")\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count / slaves['active'].__len__(), \"Active slave has error TX pkt count in mode 0\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Inactive slave has error TX pkt count in mode 0\")", "def test_slave_down(self):\n self._slave_1.refresh_role.side_effect = DbError\n status = self._cluster.update_cluster()\n assert_equals(status, {\n 
'master_down': False,\n 'master_up': False,\n 'slaves_down': [self._slave_1],\n 'slaves_up': [],\n 'out': []\n })\n assert_items_equal([self._slave_2], self._cluster.slaves)\n assert_items_equal([self._lost_1, self._lost_2, self._slave_1],\n self._cluster.lost)\n assert_equals(self._master, self._cluster.master)", "def test_known_eth_dst_deletion(self):\n self.rcv_packet(\n 2,\n 0x100,\n {\n \"eth_src\": self.P1_V100_MAC,\n \"eth_dst\": self.UNKNOWN_MAC,\n \"ipv4_src\": \"10.0.0.2\",\n \"ipv4_dst\": \"10.0.0.3\",\n },\n )\n match = {\"in_port\": 3, \"vlan_vid\": self.V100, \"eth_dst\": self.P1_V100_MAC}\n self.assertTrue(\n self.network.tables[self.DP_ID].is_output(match, port=2, vid=self.V100),\n msg=\"Packet not output correctly after mac is learnt on new port\",\n )\n self.assertFalse(\n self.network.tables[self.DP_ID].is_output(match, port=1),\n msg=\"Packet output on old port after mac is learnt on new port\",\n )", "def test_slave_out_of_sync(self):\n self._slave_1.timestamp = 900\n status = self._cluster.update_cluster()\n assert_equals(status, {\n 'master_down': False,\n 'master_up': False,\n 'slaves_down': [self._slave_1],\n 'slaves_up': [],\n 'out': []\n })\n assert_items_equal([self._slave_2], self._cluster.slaves)\n assert_items_equal([self._lost_1, self._lost_2, self._slave_1],\n self._cluster.lost)\n assert_equals(self._master, self._cluster.master)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Translate the MAC address from its string form into an int.
def translate_mac_str_into_int(self, mac_str):
    mac_hex = '0x'
    for mac_part in mac_str.split(':'):
        mac_hex += mac_part
    return int(mac_hex, 16)
[ "def _convert_char_to_type(type_char):\n # type: (Any) -> TypeCode\n typecode = type_char\n if not isinstance(type_char, int):\n typecode = ord(type_char)\n\n try:\n return TypeCode(typecode)\n except ValueError:\n raise RuntimeError(\n \"Typecode {0} ({1}) isn't supported.\".format(\n type_char, typecode\n )\n )", "def str2type(type, string):\n if type in {'B', 'b', 'H', 'h', 'l', 'L'}:\n return int(string, 0)\n elif type in {'f', 'd'}:\n return float(string)\n raise ValueError('Type is not in accepted types. ')", "def str_to_protocol(prot):\n # if int was original string, return int value\n try: return int(prot)\n except Exception as e: pass\n p = {\n \"icmp\": 1,\n \"igmp\": 2,\n \"tcp\": 6,\n \"udp\": 17,\n \"gre\": 47,\n \"ah\": 51,\n \"icmp6\": 58, \"icmpv6\": 58,\n \"eigrp\": 88,\n \"ospf\": 89, \"ospfigp\": 89,\n \"pim\": 103\n }\n return p.get(prot.lower(), 0)", "def map_edit_type_str_to_int(type_str):\n if not isinstance(type_str, str):\n return None\n\n # use anime as the default value\n mapped_int = EditType.subject.value\n\n try:\n mapped_int = EditType[type_str].value\n except KeyError as keyError:\n logger.warning(keyError)\n logger.warning(\n 'Malformed edit type received: %s, assigning the default value: subject',\n type_str)\n return mapped_int", "def get_oui(mac):\n mac = mac.upper()\n for c in REPLACE_CHARS:\n mac = mac.replace(c,'')\n mac = mac[:6]\n try:\n return oui_table[mac]\n except KeyError:\n print mac\n return 'unknown OUI'", "def _translate_type(type_name):\n if not isinstance(type_name, str):\n raise Exception('Type name must be a string')\n type_name = _sanitize_identifier(type_name)\n\n return _ASN1_BUILTIN_TYPES.get(type_name, type_name)", "def decode_vote_code(cls, code):\n assert(len(code) == 7)\n code_checked = code.lower()\n int25s = [cls.base25.index(c) for c in code]\n codable_int = 0\n for i in range(len(code)):\n codable_int = codable_int + (25**i)*int25s[i]\n return codable_int", "def translate_ip_str_into_int(self, ip_str):\n ip_part_list = ip_str.split('.')\n ip_part_list.reverse()\n num = 0\n ip_int = 0\n for ip_part in ip_part_list:\n ip_part_int = int(ip_part) << (num * 8)\n ip_int += ip_part_int\n num += 1\n return ip_int", "def etherType(data: list):\n HexEType = \"\".join(data)\n strType = \"INCONNU\"\n estIPV4 = False\n if HexEType.lower() == \"0800\":\n strType = \"IPV4\"\n estIPV4 = True\n elif HexEType.lower() == \"0806\":\n strType = \"ARP REQUEST/RESPONSE\"\n elif HexEType.lower() == \"86dd\":\n strType = \"IPV6\"\n\n return f\"Type Ethernet :\\t\\t{strType} (0x{HexEType})\", estIPV4", "def int_atom(atom: str) -> int:\n\n atom = atom.capitalize().strip()\n return NAMES_ELEMENT[atom]", "def toTypeInt(cls, o):\n\n tint = None\n try:\n tint = cls.TYPE_INTEGERS[o]\n except KeyError:\n for m in (cls.TYPE_MAP, cls.TYPECODE_MAP, cls.TYPE_INTEGERS, cls.TYPENAME_MAP):\n try:\n tint = m[o]\n if isinstance(tint, str):\n tint = cls.TYPE_INTEGERS[tint]\n break\n except KeyError:\n pass\n\n return tint", "def _to_int( self, str ):\n tmp = 1\n try:\n tmp = int( str)\n except ValueError:\n pass\n\n return tmp", "def get_typeid(typeinfo):\n return int.from_bytes(hashlib.md5(typeinfo.encode(\"ascii\")).digest()[:8], \"little\")", "def get_device_type_id(name):\n devices = {'ATSHA204A': 0,\n 'ATECC108A': 1, \n 'ATECC508A': 2,\n 'ATECC608A': 3,\n 'UNKNOWN': 0x20 }\n return devices.get(name.upper())", "def getMWTTypeFromStr(type):\n if type:\n if type.lower() == 'vpc':\n return TransitionType.MARK_AS_VPC\n if type.lower() == 'ireflv':\n return 
TransitionType.MARK_AS_IREFLV\n if type.lower() == 'lvc':\n return TransitionType.MARK_AS_LVC\n if type.lower() == 'id':\n return TransitionType.MARK_AS_ID\n return TransitionType.MARK_AS_OTH", "def getint(str, default=None):\r\n if str == '':\r\n if default==None:\r\n raise ValueError(\"None type object is unexpected\")\r\n else:\r\n return default\r\n else:\r\n try:\r\n val = int(str)\r\n return val\r\n except (ValueError, TypeError):\r\n try:\r\n val = int(str, 16)\r\n return val\r\n except:\r\n raise", "def decode(self, string):\n return 2**(len(string)) - 1 + int(string, 2)", "def parse_script_type(t):\n\n if t == 'pay-to-multi-pubkey-hash':\n r = \"P2MS\"\n elif t == 'pay-to-pubkey':\n r = \"P2PK\"\n elif t == 'pay-to-pubkey-hash':\n r = \"P2PKH\"\n elif t == 'pay-to-script-hash':\n r = \"P2PSH\"\n else:\n r = \"unknown\"\n\n return r", "def convert_wire_category(s):\n return WireCategory[first_upper(str(s))]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the hash value with the source and destination MAC.
def mac_hash(self, dest_mac, src_mac):
    dest_port_mac = self.translate_mac_str_into_int(dest_mac)
    src_port_mac = self.translate_mac_str_into_int(src_mac)
    src_xor_dest = dest_port_mac ^ src_port_mac
    xor_value_1 = src_xor_dest >> 32
    xor_value_2 = (src_xor_dest >> 16) ^ (xor_value_1 << 16)
    xor_value_3 = src_xor_dest ^ (xor_value_1 << 32) ^ (xor_value_2 << 16)
    return htons(xor_value_1 ^ xor_value_2 ^ xor_value_3)
[ "def udp_hash(self, dest_port, src_port):\n return htons(dest_port ^ src_port)", "def ipv4_hash(self, dest_ip, src_ip):\n dest_ip_int = self.translate_ip_str_into_int(dest_ip)\n src_ip_int = self.translate_ip_str_into_int(src_ip)\n return htonl(dest_ip_int ^ src_ip_int)", "def generate_hash(ip_address, listening_port, public_key):\n finger_type_test(ip_address, listening_port, public_key)\n concated = \"%s%d%s\" % (ip_address, listening_port, public_key)\n ash = sha256(concated)\n return ash.hexdigest()[:4]", "def _genhash( self, fileref ):\n\t\treturn toolbox.md5( fileref )", "def __generatemac(self):\n mac = virtinst.util.randomMAC()\n\n return mac", "def _source_hash_file(source):\n h = hashlib.sha1()\n h.update(source.encode('utf-8'))\n return h.hexdigest()", "def hashGeneretor(inputString):\n\treturn hashlib.sha256(inputString.encode('utf-8')).hexdigest()", "def stable_hash(self, source, digits=9):\r\n\r\n return int(sha1(source.encode()).hexdigest(), 16) % (10 ** digits)", "def copy(self) -> HashFunction:", "def __get_current_hash__(self):\n hasher = hashlib.sha256()\n hasher.update(self.previous_hash.encode() + self.data.encode())\n return hasher.hexdigest()", "def generate_mac_address(self):\n address = self._call(\"generateMACAddress\")\n return address", "def hash(self):\n\n return hash_barcode(self.data)", "def hash(self):\n return self.wh", "def header_hash(self): \n return hashlib.sha256((str(self.index) + str(self.timestamp) + str(self.tx) + str(self.previous_block)).encode('utf-8')).hexdigest()", "def hash(cls, host, port):\n return str(cls.compile(host, port))", "def get_working_hash(args):\n if args.dense_track:\n param_str = str(args.grid_size)\n else:\n param_str = str(args.corner_thresh) + \\\n str(args.block_size) + \\\n str(args.sobel_size) + \\\n str(args.free_k) + \\\n str(args.nonm_size) + \\\n str(args.nonm_num)\n\n string = bytearray(args.image_path + args.flow_path + param_str, \"utf8\")\n return hashlib.sha1(string).hexdigest()[:8]", "def _generate_hash(cls, recipient_id, email_subject, email_body):\n hash_value = utils.convert_to_hash(\n recipient_id + email_subject + email_body,\n 100)\n\n return hash_value", "def _compute_mac(self):\n\n if self._tag:\n return self._tag\n\n # Step 5 in NIST SP 800-38D, Algorithm 4 - Compute S\n self._pad_cache_and_update()\n self._update(long_to_bytes(8 * self._auth_len, 8))\n self._update(long_to_bytes(8 * self._msg_len, 8))\n s_tag = self._signer.digest()\n\n # Step 6 - Compute T\n self._tag = self._tag_cipher.encrypt(s_tag)[:self._mac_len]\n\n return self._tag", "def get_hash(self):\n return hashlib.md5(next(iter(self.get_clusters())).encode('utf-8') + '-'.join(sorted(host.host_id for host in set(self.hosts))).encode('utf-8')).hexdigest()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Translate the IP address from its string form into an int.
def translate_ip_str_into_int(self, ip_str):
    ip_part_list = ip_str.split('.')
    ip_part_list.reverse()
    num = 0
    ip_int = 0
    for ip_part in ip_part_list:
        ip_part_int = int(ip_part) << (num * 8)
        ip_int += ip_part_int
        num += 1
    return ip_int
[ "def TypeCheck(ipType, number):\r\n transported_type = ipType # tcp, upd, icmp, icmpv6 or numbers\r\n target_dict = {22: \"SSH\", 25: \"SMTP\", 53: \"DNS\", 80: \"HTTP\", 110: \"POP3\", 143: \"IMAP\", 389: \"LDAP\",\r\n 443: \"HTTPS\", 445: \"SMB\", 465: \"SMTPS\", 993: \"IMAPS\", 995: \"POP3S\", 1433: \"MSSQL\", 2049: \"NFS\",\r\n 3306: \"MySQL/Aurora\", 3389: \"RDP\", 5439: \"Redshift\", 5432: \"PostgreSQL\", 1521: \"Oracle-RDS\",\r\n 5985: \"WirnRM-HTTP\", 5986: \"WinRM-HTTPS\", 2007: \"Elastic-Graphics\"}\r\n\r\n if type(number) == int:\r\n if number in target_dict:\r\n transported_type = target_dict.get(number)\r\n elif type(number) == \"tcp\" or type(number) == \"udp\":\r\n transported_type = \"Custom {0} Rule\".format(ipType.upper())\r\n return transported_type\r\n else:\r\n if '-' in str(number):\r\n transported_type = \"Custom {0} Rule\".format(ipType.upper())\r\n\r\n return transported_type", "def ip_str_to_int(s):\n try:\n # IPv4\n n = int.from_bytes(inet_pton(AF_INET, s), 'big')\n return n | 0xffff00000000\n except OSError:\n # IPv6\n return int.from_bytes(inet_pton(AF_INET6, s), 'big')", "def ip2int(ip):\n return struct.unpack('!L',socket.inet_aton(ip))[0]", "def str_to_protocol(prot):\n # if int was original string, return int value\n try: return int(prot)\n except Exception as e: pass\n p = {\n \"icmp\": 1,\n \"igmp\": 2,\n \"tcp\": 6,\n \"udp\": 17,\n \"gre\": 47,\n \"ah\": 51,\n \"icmp6\": 58, \"icmpv6\": 58,\n \"eigrp\": 88,\n \"ospf\": 89, \"ospfigp\": 89,\n \"pim\": 103\n }\n return p.get(prot.lower(), 0)", "def str2type(type, string):\n if type in {'B', 'b', 'H', 'h', 'l', 'L'}:\n return int(string, 0)\n elif type in {'f', 'd'}:\n return float(string)\n raise ValueError('Type is not in accepted types. ')", "def ip_to_int(str_ip_address):\n ip_in_str = str_ip_address.split(\".\")\n if len(ip_in_str) != 4:\n raise Exception(\"Invalid IPv4 address: {}\".format(str_ip_address))\n ip_in_int = map(lambda x: int(x, 10), ip_in_str)\n for addr in ip_in_int:\n if addr < 0 or addr > 255:\n raise Exception(\"Invalid IPv4 address: {}\".format(str_ip_address))\n return ip_in_int", "def int2ip(val):\n return socket.inet_ntoa(struct.pack('!L',val))", "def _to_int( self, str ):\n tmp = 1\n try:\n tmp = int( str)\n except ValueError:\n pass\n\n return tmp", "def map_edit_type_str_to_int(type_str):\n if not isinstance(type_str, str):\n return None\n\n # use anime as the default value\n mapped_int = EditType.subject.value\n\n try:\n mapped_int = EditType[type_str].value\n except KeyError as keyError:\n logger.warning(keyError)\n logger.warning(\n 'Malformed edit type received: %s, assigning the default value: subject',\n type_str)\n return mapped_int", "def number_type(text):\n return int(text)", "def trans_type(_value, _type):\n if _type == 'int':\n return int(_value)\n if _type == 'string':\n return str(_value)\n return _value", "def str_to_int(self, string):\n try:\n return int(string)\n except ValueError:\n return None", "def long2ip(value):\n return iptools.long2ip(value)", "def _to_number(cls, string):\n num = ast.literal_eval(string)\n if isinstance(num, (int, float)):\n return num\n return string", "def __string_to_int(self, address: str) -> int:\n match = self.ADDRESS_RE.match(address)\n if not match:\n raise CouldNotParseAddress(address)\n area = int(match.group(\"area\"))\n main = int(match.group(\"main\"))\n line = int(match.group(\"line\"))\n if area > self.MAX_AREA or main > self.MAX_MAIN or line > self.MAX_LINE:\n raise CouldNotParseAddress(address)\n return (area << 12) 
+ (main << 8) + line", "def __string_to_int(self, address: str) -> int:\n match = self.ADDRESS_RE.match(address)\n if not match:\n raise CouldNotParseAddress(address)\n main = int(match.group(\"main\"))\n middle = (\n int(match.group(\"middle\")) if match.group(\"middle\") is not None else None\n )\n sub = int(match.group(\"sub\"))\n if main > self.MAX_MAIN:\n raise CouldNotParseAddress(address)\n if middle is not None:\n if middle > self.MAX_MIDDLE:\n raise CouldNotParseAddress(address)\n if sub > self.MAX_SUB_LONG:\n raise CouldNotParseAddress(address)\n elif sub > self.MAX_SUB_SHORT:\n raise CouldNotParseAddress(address)\n return (\n (main << 11) + (middle << 8) + sub\n if middle is not None\n else (main << 11) + sub\n )", "def get_ip_cmd(addr_type):\n if addr_type == ADDR_TYPE_IPv4:\n return 'ip'\n else:\n return 'ipv6'", "def icmp_num_type_code(\n self,\n icmp_reported # type: int\n ):\n icmp_num_type = icmp_reported//256 # ICMP Type\n icmp_num_code = icmp_reported%256 # ICMP Code\n \n return (icmp_num_type,icmp_num_code)", "def parse_int(string):\r\n try:\r\n #Attempt parsing string to int\r\n return int(string)\r\n except ValueError:\r\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the hash value with the source and destination IP.
def ipv4_hash(self, dest_ip, src_ip):
    dest_ip_int = self.translate_ip_str_into_int(dest_ip)
    src_ip_int = self.translate_ip_str_into_int(src_ip)
    return htonl(dest_ip_int ^ src_ip_int)
[ "def udp_hash(self, dest_port, src_port):\n return htons(dest_port ^ src_port)", "def mac_hash(self, dest_mac, src_mac):\n dest_port_mac = self.translate_mac_str_into_int(dest_mac)\n src_port_mac = self.translate_mac_str_into_int(src_mac)\n src_xor_dest = dest_port_mac ^ src_port_mac\n xor_value_1 = src_xor_dest >> 32\n xor_value_2 = (src_xor_dest >> 16) ^ (xor_value_1 << 16)\n xor_value_3 = src_xor_dest ^ (xor_value_1 << 32) ^ (xor_value_2 << 16)\n return htons(xor_value_1 ^ xor_value_2 ^ xor_value_3)", "def generate_hash(ip_address, listening_port, public_key):\n finger_type_test(ip_address, listening_port, public_key)\n concated = \"%s%d%s\" % (ip_address, listening_port, public_key)\n ash = sha256(concated)\n return ash.hexdigest()[:4]", "def _genhash( self, fileref ):\n\t\treturn toolbox.md5( fileref )", "def _source_hash_file(source):\n h = hashlib.sha1()\n h.update(source.encode('utf-8'))\n return h.hexdigest()", "def __hash__(self):\n return hash((self.bike.public_key, self.remote))", "def hash(cls, host, port):\n return str(cls.compile(host, port))", "def ip_collision_obfuscation(ip: str) -> str:\n microcelled_ip = microcelling(ip)\n\n if microcelled_ip:\n ip_hash = \"%x\" % binascii.crc32(bytes(microcelled_ip, \"ascii\"))\n else:\n ip_hash = _INVALID_STRING\n\n return ip_hash", "def _node_hash(self, node, port):\n\n return hash(frozenset([node['mgmt_ip'], port]))", "def get_working_hash(args):\n if args.dense_track:\n param_str = str(args.grid_size)\n else:\n param_str = str(args.corner_thresh) + \\\n str(args.block_size) + \\\n str(args.sobel_size) + \\\n str(args.free_k) + \\\n str(args.nonm_size) + \\\n str(args.nonm_num)\n\n string = bytearray(args.image_path + args.flow_path + param_str, \"utf8\")\n return hashlib.sha1(string).hexdigest()[:8]", "def compute_hash(self) -> str:\n # Replace dependencies with their hashes and functions with source.\n computation = self._subs_dependencies_with_hash(self.computation)\n computation = self._subs_tasks_with_src(computation)\n # Return the hash of the resulting computation.\n comp_hash: str = joblib.hash(cloudpickle.dumps(computation))\n return comp_hash", "def get_hash(self):\n return hashlib.md5(next(iter(self.get_clusters())).encode('utf-8') + '-'.join(sorted(host.host_id for host in set(self.hosts))).encode('utf-8')).hexdigest()", "def hash_tag(source_entity, context):\r\n if type(context) == list:\r\n if context:\r\n keys = [source_entity] + context\r\n else:\r\n keys = [source_entity, '']\r\n else:\r\n if context == 'None':\r\n keys = [source_entity, '']\r\n else:\r\n keys = [source_entity, context]\r\n return md5_constructor(':'.join(keys).encode('utf-8')).hexdigest()", "def stable_hash(self, source, digits=9):\r\n\r\n return int(sha1(source.encode()).hexdigest(), 16) % (10 ** digits)", "def get_hash(self, descriptor):", "def _hash(self, flow):\n r = flow.request\n\n _, _, path, _, query, _ = urlparse.urlparse(r.url)\n queriesArray = urlparse.parse_qsl(query, keep_blank_values=True)\n\n key = [\n str(r.port),\n str(r.scheme),\n str(r.method),\n str(path),\n ]\n\n if not self.ignore_content:\n form_contents = r.urlencoded_form or r.multipart_form\n if self.ignore_payload_params and form_contents:\n key.extend(\n p for p in form_contents\n if p[0] not in self.ignore_payload_params\n )\n else:\n key.append(str(r.content))\n\n if not self.ignore_host:\n key.append(r.host)\n\n filtered = []\n ignore_params = self.ignore_params or []\n for p in queriesArray:\n if p[0] not in ignore_params:\n filtered.append(p)\n for p in 
filtered:\n key.append(p[0])\n key.append(p[1])\n\n if self.headers:\n headers = []\n for i in self.headers:\n v = r.headers.get(i)\n headers.append((i, v))\n key.append(headers)\n return hashlib.sha256(repr(key)).digest()", "def hashGeneretor(inputString):\n\treturn hashlib.sha256(inputString.encode('utf-8')).hexdigest()", "def address_hasher(address):\n return hashlib.md5(address).hexdigest()", "def hash32(value): # -> int:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the hash value with the source and destination port.
def udp_hash(self, dest_port, src_port):
    return htons(dest_port ^ src_port)
[ "def hash(cls, host, port):\n return str(cls.compile(host, port))", "def mac_hash(self, dest_mac, src_mac):\n dest_port_mac = self.translate_mac_str_into_int(dest_mac)\n src_port_mac = self.translate_mac_str_into_int(src_mac)\n src_xor_dest = dest_port_mac ^ src_port_mac\n xor_value_1 = src_xor_dest >> 32\n xor_value_2 = (src_xor_dest >> 16) ^ (xor_value_1 << 16)\n xor_value_3 = src_xor_dest ^ (xor_value_1 << 32) ^ (xor_value_2 << 16)\n return htons(xor_value_1 ^ xor_value_2 ^ xor_value_3)", "def generate_hash(ip_address, listening_port, public_key):\n finger_type_test(ip_address, listening_port, public_key)\n concated = \"%s%d%s\" % (ip_address, listening_port, public_key)\n ash = sha256(concated)\n return ash.hexdigest()[:4]", "def ipv4_hash(self, dest_ip, src_ip):\n dest_ip_int = self.translate_ip_str_into_int(dest_ip)\n src_ip_int = self.translate_ip_str_into_int(src_ip)\n return htonl(dest_ip_int ^ src_ip_int)", "def _node_hash(self, node, port):\n\n return hash(frozenset([node['mgmt_ip'], port]))", "def _genhash( self, fileref ):\n\t\treturn toolbox.md5( fileref )", "def stable_hash(self, source, digits=9):\r\n\r\n return int(sha1(source.encode()).hexdigest(), 16) % (10 ** digits)", "def _source_hash_file(source):\n h = hashlib.sha1()\n h.update(source.encode('utf-8'))\n return h.hexdigest()", "def get_hash(self, descriptor):", "def get_hash(self):\n return hashlib.md5(next(iter(self.get_clusters())).encode('utf-8') + '-'.join(sorted(host.host_id for host in set(self.hosts))).encode('utf-8')).hexdigest()", "def get_working_hash(args):\n if args.dense_track:\n param_str = str(args.grid_size)\n else:\n param_str = str(args.corner_thresh) + \\\n str(args.block_size) + \\\n str(args.sobel_size) + \\\n str(args.free_k) + \\\n str(args.nonm_size) + \\\n str(args.nonm_num)\n\n string = bytearray(args.image_path + args.flow_path + param_str, \"utf8\")\n return hashlib.sha1(string).hexdigest()[:8]", "def _hash(self, flow):\n r = flow.request\n\n _, _, path, _, query, _ = urlparse.urlparse(r.url)\n queriesArray = urlparse.parse_qsl(query, keep_blank_values=True)\n\n key = [\n str(r.port),\n str(r.scheme),\n str(r.method),\n str(path),\n ]\n\n if not self.ignore_content:\n form_contents = r.urlencoded_form or r.multipart_form\n if self.ignore_payload_params and form_contents:\n key.extend(\n p for p in form_contents\n if p[0] not in self.ignore_payload_params\n )\n else:\n key.append(str(r.content))\n\n if not self.ignore_host:\n key.append(r.host)\n\n filtered = []\n ignore_params = self.ignore_params or []\n for p in queriesArray:\n if p[0] not in ignore_params:\n filtered.append(p)\n for p in filtered:\n key.append(p[0])\n key.append(p[1])\n\n if self.headers:\n headers = []\n for i in self.headers:\n v = r.headers.get(i)\n headers.append((i, v))\n key.append(headers)\n return hashlib.sha256(repr(key)).digest()", "def compute_hash(self) -> str:\n # Replace dependencies with their hashes and functions with source.\n computation = self._subs_dependencies_with_hash(self.computation)\n computation = self._subs_tasks_with_src(computation)\n # Return the hash of the resulting computation.\n comp_hash: str = joblib.hash(cloudpickle.dumps(computation))\n return comp_hash", "def __hash__(self):\n return hash((self.bike.public_key, self.remote))", "def hash32(value): # -> int:\n ...", "def copy(self) -> HashFunction:", "def _generate_hash(cls, recipient_id, email_subject, email_body):\n hash_value = utils.convert_to_hash(\n recipient_id + email_subject + email_body,\n 100)\n\n return hash_value", 
"def _hash(self: Syscall) -> int:\n return hash(canonical_form.canonicalize(self))", "def hash(self):\n return self.wh" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the hash values according to the balance policy and the number of active slaves.
def policy_and_slave_hash(self, policy, **slaves):
    global S_MAC_IP_PORT
    source = S_MAC_IP_PORT

    global D_MAC_IP_PORT
    dest_mac = D_MAC_IP_PORT[0]
    dest_ip = D_MAC_IP_PORT[1]
    dest_port = D_MAC_IP_PORT[2]

    hash_values = []
    if len(slaves['active']) != 0:
        for src_mac, src_ip, src_port in source:
            if policy == "L2":
                hash_value = self.mac_hash(dest_mac, src_mac)
            elif policy == "L23":
                hash_value = self.mac_hash(dest_mac, src_mac) ^ self.ipv4_hash(dest_ip, src_ip)
            else:
                hash_value = self.ipv4_hash(dest_ip, src_ip) ^ self.udp_hash(dest_port, src_port)

            if policy in ("L23", "L34"):
                hash_value ^= hash_value >> 16
                hash_value ^= hash_value >> 8
            hash_value = hash_value % len(slaves['active'])
            hash_values.append(hash_value)

    return hash_values
[ "def _genhash( self, fileref ):\n\t\treturn toolbox.md5( fileref )", "def get_hash(self, descriptor):", "def _get_hash_partial(self):\n hash_value = 0\n \n # available\n hash_value ^= self.available\n \n # description\n description = self.description\n if (description is not None):\n hash_value ^= hash(description)\n \n # format\n hash_value ^= self.format.value << 1\n \n # name\n name = self.name\n if (description is None) or (description != name):\n hash_value ^= hash(name)\n \n # pack_id\n hash_value ^= self.pack_id\n \n # sort_value\n hash_value ^= self.sort_value << 5\n \n # tags\n tags = self.tags\n if (tags is not None):\n hash_value ^= len(tags) << 9\n \n for tag in tags:\n hash_value ^= hash(tag)\n \n # type\n hash_value ^= self.type.value << 13\n \n # user\n hash_value ^= hash(self.user)\n \n return hash_value", "def hash_value(self) -> int:\n res = 0\n for i in range(BOARD_SIZE):\n res *= 3\n res += self.state[i]\n\n return res", "def generate_hash(value, hash='sha1'):\n sha_obj = getattr(hashlib, hash)(value)\n return sha_obj.hexdigest()", "def hash_value(self) -> int:\n res = 0\n for i in range(BOARD_SIZE):\n res *= 3\n res += int(self.state[i])\n\n return res", "def __get_current_hash__(self):\n hasher = hashlib.sha256()\n hasher.update(self.previous_hash.encode() + self.data.encode())\n return hasher.hexdigest()", "def _make_hash(self, sid, secret):\n return hmac.new(secret, sid, sha).hexdigest()[:8]", "def get_working_hash(args):\n if args.dense_track:\n param_str = str(args.grid_size)\n else:\n param_str = str(args.corner_thresh) + \\\n str(args.block_size) + \\\n str(args.sobel_size) + \\\n str(args.free_k) + \\\n str(args.nonm_size) + \\\n str(args.nonm_num)\n\n string = bytearray(args.image_path + args.flow_path + param_str, \"utf8\")\n return hashlib.sha1(string).hexdigest()[:8]", "def compute_hash(self) -> str:\n # Replace dependencies with their hashes and functions with source.\n computation = self._subs_dependencies_with_hash(self.computation)\n computation = self._subs_tasks_with_src(computation)\n # Return the hash of the resulting computation.\n comp_hash: str = joblib.hash(cloudpickle.dumps(computation))\n return comp_hash", "def hash(self):\n assert self.__hash, \\\n \"Tried to use hash() after spent. See:\\n\" \\\n + TREEPRNG_DOC_URL + \"#the-treeprng-life-cycle\"\n hash = self.__hash.copy()\n hash.update(\"h\")\n self.is_dict = True\n return long(hash.hexdigest(), 16)", "def __getHash(self, hspl):\n subject = hspl.find(\"{%s}subject\" % getHSPLNamespace())\n action = hspl.find(\"{%s}action\" % getHSPLNamespace())\n trafficConstraints = hspl.find(\"{%s}traffic-constraints\" % getHSPLNamespace())\n h = 1\n h = 37 * h + hash(etree.tostring(subject))\n h = 37 * h + hash(etree.tostring(action))\n h = 37 * h + hash(etree.tostring(trafficConstraints))\n return h", "def create_hash():\n hash = hashlib.sha1()\n hash.update(os.urandom(5))\n return hash.hexdigest()", "def __hash__(self):\n return hash(self.__proc_uuid) ^ hash(self.__job_uuid)", "def hash(self):\n return self.wh", "def hash(val):\n h = SHA512.new()\n h.update(Utils.safe_enc(val))\n return h.hexdigest()", "def hash(x):\r\n return (randint(1, 5*c)*x + randint(1, 5*c)) % c", "def get_hash(self):\r\n path = self.files[self.idx_image]\r\n filename = path.split(\"/\")[-1]\r\n with open(path,\"rb\") as f:\r\n hash_object = hashlib.sha512(f.read())\r\n hex_dig = hash_object.hexdigest()\r\n hash = filename + \" \"+ hex_dig\r\n return hash", "def get_hash(self) -> int:\n return encode_base62(id_dec=self.pk)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the hash index corresponding to the given slave port id.
def slave_map_hash(self, port, order_ports):
    if len(order_ports) == 0:
        return None
    else:
        order_ports = order_ports.split()
        return order_ports.index(str(port))
[ "def _node_hash(self, node, port):\n\n return hash(frozenset([node['mgmt_ip'], port]))", "def get_hash(self,job_id):\n time.sleep(3)\n endpoint = '/hash'\n response = requests.get(baseUrl + endpoint + '/' + job_id)\n return response", "def udp_hash(self, dest_port, src_port):\n return htons(dest_port ^ src_port)", "def lookupIDFromHash(mode, hashValue):\n\ttry:\n\t\twith getConnection(mode).cursor() as cursor:\n\t\t\tsql = \"SELECT tradeid FROM trades WHERE hash='{0}'\".format(hashValue)\n\t\t\tcursor.execute(sql)\n\t\t\trow = cursor.fetchone()\n\t\t\tif row == None:\n\t\t\t\tlogger.debug('lookupIDFromHash(): id not found for {0}'.format(hashValue))\n\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\treturn row['tradeid']\n\n\texcept:\n\t\tlogger.exception('lookupIDFromHash(): ')", "def _find_slot(self, key):\n hashkey = hash(key) % self._size\n slot = self._slots[hashkey]\n return slot", "def hashid(self) :\n\t\ttry :\n\t\t\treturn self._hashid\n\t\texcept Exception as e:\n\t\t\traise e", "def find_hashring_node(self, data):\n return", "def get_value(self, key):\r\n index = self.horner_hash(key)\r\n j = 0\r\n for i in range(0, self.table_size):\r\n j = (index + i ** 2) % self.table_size\r\n if self.hash_table[j] and self.hash_table[j].key == key:\r\n return self.hash_table[j].value\r\n return None", "def find_sha256_hash(db, url):\n script, url_id = find_script(db, url, want_code=False)\n if script:\n return (script.get('sha256'), url_id)\n return (None, None)", "def get_hash(self, descriptor):", "def djb2(self, key):\n # 5381 & 33 are prime numbers\n # hashed_var = 5381\n\n # string_bytes = s.encode()\n\n # for b in string_bytes:\n # hash_var = ((hash_var << 5) + hash_var) + b\n\n # return hash_var", "def get_detail_from_port_info(self, key_str, regx_str, port):\n out = self.dut.send_expect(\"show port info %d\" % port, \"testpmd> \")\n find_value = self.get_value_from_str(key_str, regx_str, out)\n return find_value", "def find_line_with_hash(lines, hex_id):\n for line_id, line in enumerate(lines):\n if hex_id in line: \n return (line_id, line)\n\n return None", "def hash(cls, host, port):\n return str(cls.compile(host, port))", "def _get_self_port(self, dev_id, dev_port):\n self_pid = None\n for pmap in self.portmap:\n if pmap[0] == dev_id and pmap[1] == dev_port:\n self_pid = pmap[2]\n break\n if self_pid is None:\n raise Exception(\"Cannot find DeviceID:{0} PortID:{1} in port map.\".format(dev_id, dev_port))\n return self_pid", "def get_candidate(self, id):\n return self.candidate_hash[id]\n #for c in self.candidates:\n # if c.id == id:\n # return c\n #return False", "def policy_and_slave_hash(self, policy, **slaves):\n global S_MAC_IP_PORT\n source = S_MAC_IP_PORT\n\n global D_MAC_IP_PORT\n dest_mac = D_MAC_IP_PORT[0]\n dest_ip = D_MAC_IP_PORT[1]\n dest_port = D_MAC_IP_PORT[2]\n\n hash_values = []\n if len(slaves['active']) != 0:\n for src_mac, src_ip, src_port in source:\n if policy == \"L2\":\n hash_value = self.mac_hash(dest_mac, src_mac)\n elif policy == \"L23\":\n hash_value = self.mac_hash(dest_mac, src_mac) ^ self.ipv4_hash(dest_ip, src_ip)\n else:\n hash_value = self.ipv4_hash(dest_ip, src_ip) ^ self.udp_hash(dest_port, src_port)\n\n if policy in (\"L23\", \"L34\"):\n hash_value ^= hash_value >> 16\n hash_value ^= hash_value >> 8\n hash_value = hash_value % len(slaves['active'])\n hash_values.append(hash_value)\n\n return hash_values", "def lookup_value(self, string):\n index = self.hash_value(string)\n if self.hash_table[index]:\n return index\n else:\n return -1", "def job_hash( 
self, job, hash_by=None ):\n if hash_by is None:\n hash_by = [ \"job\" ]\n hash_bys = util.listify( hash_by )\n for hash_by in hash_bys:\n job_hash = self._try_hash_for_job( job, hash_by )\n if job_hash:\n return job_hash\n\n # Fall back to just hashing by job id, should always return a value.\n return self._try_hash_for_job( job, \"job\" )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that the packets are transmitted correctly in the XOR mode.
def verify_xor_tx(self, unbound_port, bond_port, policy, vlan_tag=False, **slaves):
    pkt_count = 100
    pkt_now = {}

    # Forward the caller's vlan_tag instead of a hard-coded False, so that
    # tagged runs (e.g. the L34 forward case) actually send tagged packets.
    pkt_now, summary = self.send_customized_packet_to_unbound_port(unbound_port, bond_port, policy, vlan_tag=vlan_tag, pkt_count=pkt_count, **slaves)

    hash_values = self.policy_and_slave_hash(policy, **slaves)

    order_ports = self.get_bond_active_slaves(bond_port)
    for slave in slaves['active']:
        slave_map_hash = self.slave_map_hash(slave, order_ports)
        self.verify(pkt_now[slave][0] == pkt_count * hash_values.count(slave_map_hash),
                    "XOR load balance transmit error on the link up port")
    for slave in slaves['inactive']:
        self.verify(pkt_now[slave][0] == 0,
                    "XOR load balance transmit error on the link down port")
[ "def test_xor_tx(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n slaves = {}\n slaves['active'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = []\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)", "def test_simple_xor(self):\n self.assert_to_cnf_transformation(\n 'A xor B',\n '(not B or not A) and (A or B)')", "def test_negated_xor(self):\n self.assert_to_cnf_transformation(\n 'not (A xor B)',\n '(not A or B) and (A or not B)')", "def test_xor(self):\n xor_thousand = self.leet ^ 1000\n self.assertEqual(xor_thousand, bitmath.Bit(1745))\n xor_five_hundred = self.leet ^ 500\n self.assertEqual(xor_five_hundred, bitmath.Bit(1229))", "def test_xor_gate(self):\n inputs = [[1.0, 1.0],\n [1.0, 0.0],\n [0.0, 1.0],\n [0.0, 0.0]]\n output_vector = [[0.0],\n [1.0],\n [1.0],\n [0.0]]\n inputs = np.array(inputs, dtype='float32')\n output_vector = np.array(output_vector)\n net = NeuralNetwork(inputs, output_vector)\n net.train()\n output = net.feed(np.array([[0, 1]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 1)\n output = net.feed(np.array([[1, 0]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 1)\n output = net.feed(np.array([[0, 0]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 0)\n output = net.feed(np.array([[1, 1]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 0)", "def test_xor_l34_forward(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.set_balance_policy_for_bonding_device(bond_port, \"l34\")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n slaves = {}\n slaves['active'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = []\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L34\", False, **slaves)\n self.vlan_strip_and_filter('off', self.dut_ports[0], self.dut_ports[1], self.dut_ports[2], self.dut_ports[3], bond_port)\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L34\", True, **slaves)", "def xor( a, b ):\n return bool( a ) != bool( b )", "def verify_packet(self, packet, context):\n pass", "def run_XOR():\n network = FFBPNetwork(2, 1)\n network.add_hidden_layer(3)\n\n xor_features = [[0, 0], [1, 0], [0, 1], [1, 1]]\n xor_labels = [[0], [1], [1], [0]]\n\n data = NNData(xor_features, xor_labels, 1)\n network.train(data, 10001, order=NNData.Order.RANDOM)", "def test_xor_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[2], self.dut_ports[1])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = 
[self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def test_xor_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def test_bitwise_xor(self, _, a, b, skip_to_glow=False):\n utils.compare_tracing_methods(\n SimpleBitwiseXorModule(),\n a,\n b,\n fusible_ops={\"aten::bitwise_xor\"},\n )", "def checksum(self):\n\n if (sum(self.packet) % 2 == 0) and (self.control_bit == 1) or\\\n (sum(self.packet) % 2 == 1) and (self.control_bit == 0):\n return True\n else:\n return False", "def test_from_bytes_cliff():\n data = bytes([0b00000001])\n packet = Packet12.from_bytes(data)\n assert packet is not None\n assert type(packet) == Packet12\n assert packet.cliff_right is True", "def test_from_bytes_no_cliff():\n data = bytes([0b00000000])\n packet = Packet12.from_bytes(data)\n assert packet is not None\n assert type(packet) == Packet12\n assert packet.cliff_right is False", "def test_problem11(self):\n blocklen = 16\n for i in range(100):\n guess, real = cryptanalysis.encryption_detection_oracle_ecb_cbc(ciphers.black_box1, blocklen, True)\n self.assertEqual(real, guess)", "def mustRetransmit(self):\n if self.syn or self.fin or self.dlen:\n return True\n return False", "def XOR(self,other):\n raise OpNotAllowedError(\"Cannot do operation on Bit instance\")", "def test_control_bit_of_cnot(self):\n\n qr = QuantumRegister(2, \"qr\")\n circuit = QuantumCircuit(qr)\n circuit.cx(qr[0], qr[1])\n circuit.x(qr[0])\n circuit.cx(qr[0], qr[1])\n\n new_pm = PassManager(CommutativeCancellation())\n new_circuit = new_pm.run(circuit)\n expected = QuantumCircuit(qr)\n expected.cx(qr[0], qr[1])\n expected.x(qr[0])\n expected.cx(qr[0], qr[1])\n\n self.assertEqual(expected, new_circuit)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that packets are transmitted correctly in the XOR mode.
def test_xor_tx(self):
    bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)
    self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])
    self.dut.send_expect("set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> ")
    self.start_all_ports()
    self.dut.send_expect("start", "testpmd> ")

    slaves = {}
    slaves['active'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]
    slaves['inactive'] = []

    self.verify_xor_tx(self.dut_ports[3], bond_port, "L2", False, **slaves)
[ "def verify_xor_tx(self, unbound_port, bond_port, policy, vlan_tag=False, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_customized_packet_to_unbound_port(unbound_port, bond_port, policy, vlan_tag=False, pkt_count=pkt_count, **slaves)\n\n hash_values = []\n hash_values = self.policy_and_slave_hash(policy, **slaves)\n\n order_ports = self.get_bond_active_slaves(bond_port)\n for slave in slaves['active']:\n slave_map_hash = self.slave_map_hash(slave, order_ports)\n self.verify(pkt_now[slave][0] == pkt_count * hash_values.count(slave_map_hash),\n \"XOR load balance transmit error on the link up port\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0,\n \"XOR load balance transmit error on the link down port\")", "def test_negated_xor(self):\n self.assert_to_cnf_transformation(\n 'not (A xor B)',\n '(not A or B) and (A or not B)')", "def test_simple_xor(self):\n self.assert_to_cnf_transformation(\n 'A xor B',\n '(not B or not A) and (A or B)')", "def test_xor(self):\n xor_thousand = self.leet ^ 1000\n self.assertEqual(xor_thousand, bitmath.Bit(1745))\n xor_five_hundred = self.leet ^ 500\n self.assertEqual(xor_five_hundred, bitmath.Bit(1229))", "def test_xor_gate(self):\n inputs = [[1.0, 1.0],\n [1.0, 0.0],\n [0.0, 1.0],\n [0.0, 0.0]]\n output_vector = [[0.0],\n [1.0],\n [1.0],\n [0.0]]\n inputs = np.array(inputs, dtype='float32')\n output_vector = np.array(output_vector)\n net = NeuralNetwork(inputs, output_vector)\n net.train()\n output = net.feed(np.array([[0, 1]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 1)\n output = net.feed(np.array([[1, 0]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 1)\n output = net.feed(np.array([[0, 0]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 0)\n output = net.feed(np.array([[1, 1]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 0)", "def test_xor_l34_forward(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.set_balance_policy_for_bonding_device(bond_port, \"l34\")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n slaves = {}\n slaves['active'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = []\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L34\", False, **slaves)\n self.vlan_strip_and_filter('off', self.dut_ports[0], self.dut_ports[1], self.dut_ports[2], self.dut_ports[3], bond_port)\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L34\", True, **slaves)", "def xor( a, b ):\n return bool( a ) != bool( b )", "def verify_packet(self, packet, context):\n pass", "def run_XOR():\n network = FFBPNetwork(2, 1)\n network.add_hidden_layer(3)\n\n xor_features = [[0, 0], [1, 0], [0, 1], [1, 1]]\n xor_labels = [[0], [1], [1], [0]]\n\n data = NNData(xor_features, xor_labels, 1)\n network.train(data, 10001, order=NNData.Order.RANDOM)", "def test_xor_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[2], self.dut_ports[1])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n 
self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def test_bitwise_xor(self, _, a, b, skip_to_glow=False):\n utils.compare_tracing_methods(\n SimpleBitwiseXorModule(),\n a,\n b,\n fusible_ops={\"aten::bitwise_xor\"},\n )", "def test_from_bytes_no_cliff():\n data = bytes([0b00000000])\n packet = Packet12.from_bytes(data)\n assert packet is not None\n assert type(packet) == Packet12\n assert packet.cliff_right is False", "def test_from_bytes_cliff():\n data = bytes([0b00000001])\n packet = Packet12.from_bytes(data)\n assert packet is not None\n assert type(packet) == Packet12\n assert packet.cliff_right is True", "def checksum(self):\n\n if (sum(self.packet) % 2 == 0) and (self.control_bit == 1) or\\\n (sum(self.packet) % 2 == 1) and (self.control_bit == 0):\n return True\n else:\n return False", "def test_xor_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def test_control_bit_of_cnot(self):\n\n qr = QuantumRegister(2, \"qr\")\n circuit = QuantumCircuit(qr)\n circuit.cx(qr[0], qr[1])\n circuit.x(qr[0])\n circuit.cx(qr[0], qr[1])\n\n new_pm = PassManager(CommutativeCancellation())\n new_circuit = new_pm.run(circuit)\n expected = QuantumCircuit(qr)\n expected.cx(qr[0], qr[1])\n expected.x(qr[0])\n expected.cx(qr[0], qr[1])\n\n self.assertEqual(expected, new_circuit)", "def XOR(self,other):\n raise OpNotAllowedError(\"Cannot do operation on Bit instance\")", "def test_problem11(self):\n blocklen = 16\n for i in range(100):\n guess, real = cryptanalysis.encryption_detection_oracle_ecb_cbc(ciphers.black_box1, blocklen, True)\n self.assertEqual(real, guess)", "def mustRetransmit(self):\n if self.syn or self.fin or self.dlen:\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that packets are transmitted correctly in XOR mode when any one slave link of the bonding device is brought down.
def test_xor_tx_one_slave_down(self):
    bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)
    self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[2], self.dut_ports[1])
    self.dut.send_expect("set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> ")
    self.start_all_ports()
    self.dut.send_expect("start", "testpmd> ")
    self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down")

    try:
        slaves = {}
        slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]
        slaves['inactive'] = [self.dut_ports[0]]

        self.verify_xor_tx(self.dut_ports[3], bond_port, "L2", False, **slaves)
    finally:
        self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up")
[ "def test_xor_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def test_xor_tx(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n slaves = {}\n slaves['active'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = []\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)", "def verify_xor_tx(self, unbound_port, bond_port, policy, vlan_tag=False, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_customized_packet_to_unbound_port(unbound_port, bond_port, policy, vlan_tag=False, pkt_count=pkt_count, **slaves)\n\n hash_values = []\n hash_values = self.policy_and_slave_hash(policy, **slaves)\n\n order_ports = self.get_bond_active_slaves(bond_port)\n for slave in slaves['active']:\n slave_map_hash = self.slave_map_hash(slave, order_ports)\n self.verify(pkt_now[slave][0] == pkt_count * hash_values.count(slave_map_hash),\n \"XOR load balance transmit error on the link up port\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0,\n \"XOR load balance transmit error on the link down port\")", "def test_xor_l34_forward(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.set_balance_policy_for_bonding_device(bond_port, \"l34\")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n slaves = {}\n slaves['active'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = []\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L34\", False, **slaves)\n self.vlan_strip_and_filter('off', self.dut_ports[0], self.dut_ports[1], self.dut_ports[2], self.dut_ports[3], bond_port)\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L34\", True, **slaves)", "def test_negated_xor(self):\n self.assert_to_cnf_transformation(\n 'not (A xor B)',\n '(not A or B) and (A or not B)')", "def test_simple_xor(self):\n self.assert_to_cnf_transformation(\n 'A xor B',\n '(not B or not A) and (A or B)')", "def 
test_broadcast_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def test_broadcast_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def verify_bound_mac_opt(self, mode_set):\n mac_address_0_orig = self.get_port_mac(self.dut_ports[0])\n mac_address_1_orig = self.get_port_mac(self.dut_ports[1])\n mac_address_2_orig = self.get_port_mac(self.dut_ports[2])\n mac_address_3_orig = self.get_port_mac(self.dut_ports[3])\n\n bond_port = self.create_bonded_device(mode_set, SOCKET_1)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[1])\n\n mac_address_bond_orig = self.get_port_mac(bond_port)\n self.verify(mac_address_1_orig == mac_address_bond_orig,\n \"Bonded device MAC address not same with first slave MAC\")\n\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now == mac_address_2_now,\n \"NOT all slaves MAC address same with bonding device in mode %d\" % mode_set)\n else:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now != mac_address_2_now,\n \"All slaves should not be the same in mode %d\"\n % mode_set)\n\n new_mac = \"00:11:22:00:33:44\"\n self.set_mac_for_bonding_device(bond_port, new_mac)\n self.start_port(bond_port)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_now == mac_address_2_now == mac_address_bond_now == new_mac,\n \"Set mac failed for bonding device in 
mode %d\" % mode_set)\n elif mode_set == MODE_LACP:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now != new_mac and\n mac_address_2_now != new_mac and\n mac_address_1_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set in [MODE_ACTIVE_BACKUP, MODE_TLB_BALANCE]:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == new_mac and\n mac_address_bond_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n\n self.set_primary_for_bonding_device(bond_port, self.dut_ports[2], False)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac,\n \"Slave MAC changed when set primary slave\")\n\n mac_address_1_orig = mac_address_1_now\n self.remove_slave_from_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n self.verify(mac_address_2_now == mac_address_2_orig,\n \"MAC not back to original after removing the port\")\n\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == mac_address_1_orig,\n \"Bonding device or slave MAC changed after removing the primary slave\")\n\n self.remove_all_slaves(bond_port)\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def verify_broadcast_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Slave TX packet not correct in mode 3\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Slave TX packet not correct in mode 3\")\n self.verify(pkt_now[unbound_port][0] == pkt_count, \"Unbonded port RX packet not correct in mode 3\")\n self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),\n \"Bonded device TX packet not correct in mode 3\")", "def should_demote(self, *args, **kwargs) -> bool:\n return True", "def test_known_eth_dst_deletion(self):\n self.rcv_packet(\n 2,\n 0x100,\n {\n \"eth_src\": self.P1_V100_MAC,\n \"eth_dst\": self.UNKNOWN_MAC,\n \"ipv4_src\": \"10.0.0.2\",\n \"ipv4_dst\": \"10.0.0.3\",\n },\n )\n match = {\"in_port\": 3, \"vlan_vid\": self.V100, \"eth_dst\": self.P1_V100_MAC}\n self.assertTrue(\n self.network.tables[self.DP_ID].is_output(match, port=2, vid=self.V100),\n msg=\"Packet not output correctly after mac is learnt on new port\",\n )\n self.assertFalse(\n self.network.tables[self.DP_ID].is_output(match, port=1),\n msg=\"Packet output on old port after mac is learnt on new port\",\n )", "def test_not_equal_on_equal(self):\n a = objects.SecretData(self.bytes_a, enums.SecretDataType.PASSWORD)\n b = objects.SecretData(self.bytes_a, enums.SecretDataType.PASSWORD)\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def testUnsubscribeAll(self):\r\n nVis = NAOVision(IP, PORT)\r\n nVis._subscribeToVideoProxy(0)\r\n nVis._unsubscribeAll()\r\n\r\n #Testing for bottom client\r\n boolB0 = \"_clientBottom_0\" in self._videoProxy.getSubscribers()\r\n\r\n #Testing for top client\r\n boolT0 = \"_clientTop_0\" in self._videoProxy.getSubscribers()\r\n\r\n #Making sure that none of the two modules exist\r\n boolAll 
= boolB0 and boolT0\r\n\r\n #boolAll should return false if both modules\r\n #don't exist in the subscribers list\r\n self.assertEqual(boolAll, False)", "def test_flipping_network(self):\n # setup environment\n run_command_blocking(netem_change.format(\"corrupt 1%\"))\n\n run_command_blocking(TestbTCPFramework.start_client)\n self.assertTrue(filecmp.cmp(TestbTCPFramework.input_file, \"out.file\"))", "def test_xor_gate(self):\n inputs = [[1.0, 1.0],\n [1.0, 0.0],\n [0.0, 1.0],\n [0.0, 0.0]]\n output_vector = [[0.0],\n [1.0],\n [1.0],\n [0.0]]\n inputs = np.array(inputs, dtype='float32')\n output_vector = np.array(output_vector)\n net = NeuralNetwork(inputs, output_vector)\n net.train()\n output = net.feed(np.array([[0, 1]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 1)\n output = net.feed(np.array([[1, 0]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 1)\n output = net.feed(np.array([[0, 0]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 0)\n output = net.feed(np.array([[1, 1]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 0)", "def test_from_bytes_cliff():\n data = bytes([0b00000001])\n packet = Packet12.from_bytes(data)\n assert packet is not None\n assert type(packet) == Packet12\n assert packet.cliff_right is True", "def verify_round_robin_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 300\n pkt_now = {}\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n if slaves['active'].__len__() == 0:\n self.verify(pkt_now[bond_port][0] == 0, \"Bonding port should not have TX pkt in mode 0 when all slaves down\")\n else:\n self.verify(pkt_now[bond_port][0] == pkt_count, \"Bonding port has error TX pkt count in mode 0\")\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count / slaves['active'].__len__(), \"Active slave has error TX pkt count in mode 0\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Inactive slave has error TX pkt count in mode 0\")", "def test_from_bytes_no_cliff():\n data = bytes([0b00000000])\n packet = Packet12.from_bytes(data)\n assert packet is not None\n assert type(packet) == Packet12\n assert packet.cliff_right is False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that packets are transmitted correctly in XOR mode when all slave links of the bonding device are brought down.
def test_xor_tx_all_slaves_down(self):
    bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)
    self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])
    self.dut.send_expect("set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> ")
    self.start_all_ports()
    self.dut.send_expect("start", "testpmd> ")
    self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down")
    self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "down")
    self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "down")

    try:
        slaves = {}
        slaves['active'] = []
        slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]

        self.verify_xor_tx(self.dut_ports[3], bond_port, "L2", False, **slaves)
    finally:
        self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up")
        self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "up")
        self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "up")
[ "def test_xor_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[2], self.dut_ports[1])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def test_xor_tx(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n slaves = {}\n slaves['active'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = []\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)", "def verify_xor_tx(self, unbound_port, bond_port, policy, vlan_tag=False, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_customized_packet_to_unbound_port(unbound_port, bond_port, policy, vlan_tag=False, pkt_count=pkt_count, **slaves)\n\n hash_values = []\n hash_values = self.policy_and_slave_hash(policy, **slaves)\n\n order_ports = self.get_bond_active_slaves(bond_port)\n for slave in slaves['active']:\n slave_map_hash = self.slave_map_hash(slave, order_ports)\n self.verify(pkt_now[slave][0] == pkt_count * hash_values.count(slave_map_hash),\n \"XOR load balance transmit error on the link up port\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0,\n \"XOR load balance transmit error on the link down port\")", "def test_xor_l34_forward(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.set_balance_policy_for_bonding_device(bond_port, \"l34\")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n slaves = {}\n slaves['active'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = []\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L34\", False, **slaves)\n self.vlan_strip_and_filter('off', self.dut_ports[0], self.dut_ports[1], self.dut_ports[2], self.dut_ports[3], bond_port)\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L34\", True, **slaves)", "def test_broadcast_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n 
self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def test_negated_xor(self):\n self.assert_to_cnf_transformation(\n 'not (A xor B)',\n '(not A or B) and (A or not B)')", "def test_simple_xor(self):\n self.assert_to_cnf_transformation(\n 'A xor B',\n '(not B or not A) and (A or B)')", "def test_broadcast_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def verify_broadcast_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Slave TX packet not correct in mode 3\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Slave TX packet not correct in mode 3\")\n self.verify(pkt_now[unbound_port][0] == pkt_count, \"Unbonded port RX packet not correct in mode 3\")\n self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),\n \"Bonded device TX packet not correct in mode 3\")", "def testUnsubscribeAll(self):\r\n nVis = NAOVision(IP, PORT)\r\n nVis._subscribeToVideoProxy(0)\r\n nVis._unsubscribeAll()\r\n\r\n #Testing for bottom client\r\n boolB0 = \"_clientBottom_0\" in self._videoProxy.getSubscribers()\r\n\r\n #Testing for top client\r\n boolT0 = \"_clientTop_0\" in self._videoProxy.getSubscribers()\r\n\r\n #Making sure that none of the two modules exist\r\n boolAll = boolB0 and boolT0\r\n\r\n #boolAll should return false if both modules\r\n #don't exist in the subscribers list\r\n self.assertEqual(boolAll, False)", "def verify_bound_mac_opt(self, mode_set):\n mac_address_0_orig = self.get_port_mac(self.dut_ports[0])\n mac_address_1_orig = self.get_port_mac(self.dut_ports[1])\n mac_address_2_orig = self.get_port_mac(self.dut_ports[2])\n mac_address_3_orig = self.get_port_mac(self.dut_ports[3])\n\n bond_port = self.create_bonded_device(mode_set, SOCKET_1)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[1])\n\n mac_address_bond_orig = self.get_port_mac(bond_port)\n self.verify(mac_address_1_orig == mac_address_bond_orig,\n \"Bonded device MAC address not same with first slave MAC\")\n\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n 
if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now == mac_address_2_now,\n \"NOT all slaves MAC address same with bonding device in mode %d\" % mode_set)\n else:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now != mac_address_2_now,\n \"All slaves should not be the same in mode %d\"\n % mode_set)\n\n new_mac = \"00:11:22:00:33:44\"\n self.set_mac_for_bonding_device(bond_port, new_mac)\n self.start_port(bond_port)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_now == mac_address_2_now == mac_address_bond_now == new_mac,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set == MODE_LACP:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now != new_mac and\n mac_address_2_now != new_mac and\n mac_address_1_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set in [MODE_ACTIVE_BACKUP, MODE_TLB_BALANCE]:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == new_mac and\n mac_address_bond_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n\n self.set_primary_for_bonding_device(bond_port, self.dut_ports[2], False)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac,\n \"Slave MAC changed when set primary slave\")\n\n mac_address_1_orig = mac_address_1_now\n self.remove_slave_from_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n self.verify(mac_address_2_now == mac_address_2_orig,\n \"MAC not back to original after removing the port\")\n\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == mac_address_1_orig,\n \"Bonding device or slave MAC changed after removing the primary slave\")\n\n self.remove_all_slaves(bond_port)\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def verify_round_robin_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 300\n pkt_now = {}\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n if slaves['active'].__len__() == 0:\n self.verify(pkt_now[bond_port][0] == 0, \"Bonding port should not have TX pkt in mode 0 when all slaves down\")\n else:\n self.verify(pkt_now[bond_port][0] == pkt_count, \"Bonding port has error TX pkt count in mode 0\")\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count / slaves['active'].__len__(), \"Active slave has error TX pkt count in mode 0\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Inactive slave has error TX pkt count in mode 0\")", "def should_demote(self, *args, **kwargs) -> bool:\n return True", "def test_autostate_disabled(self, duthosts, enum_frontend_dut_hostname):\n\n duthost = duthosts[enum_frontend_dut_hostname]\n dut_hostname = duthost.hostname\n\n # Collect DUT configuration and status\n vlan_members_facts = 
duthost.get_running_config_facts().get('VLAN_MEMBER')\n if vlan_members_facts is None:\n pytest.skip('No vlan available on DUT {hostname}'.format(hostname=dut_hostname))\n ifs_status = duthost.get_interfaces_status()\n ip_ifs = duthost.show_ip_interface()['ansible_facts']['ip_interfaces']\n\n # Find out all vlans which meet the following requirements:\n # 1. The oper_state of vlan interface is 'up'\n # 2. The oper_state of at least one member in the vlan is 'up'\n vlan_available = []\n for vlan in vlan_members_facts:\n if ip_ifs.get(vlan, {}).get('oper_state') == 'up':\n for member in vlan_members_facts[vlan]:\n if ifs_status.get(member, {}).get('oper') == 'up':\n vlan_available.append(vlan)\n break\n if len(vlan_available) == 0:\n pytest.skip('No applicable VLAN available on DUT {hostname} for this test case'.\n format(hostname=dut_hostname))\n\n # Pick a vlan for test\n vlan = vlan_available[0]\n vlan_members = list(vlan_members_facts[vlan].keys())\n\n try:\n # Shutdown all the members in vlan.\n self.shutdown_multiple_with_confirm(duthost, vlan_members, err_handler=pytest.fail)\n\n # Check whether the oper_state of vlan interface is changed as expected.\n ip_ifs = duthost.show_ip_interface()['ansible_facts']['ip_interfaces']\n if len(vlan_available) > 1:\n # If more than one vlan comply with the above test requirements, then there are members in other vlans\n # that are still up. Therefore, the bridge is still up, and vlan interface should be up.\n pytest_assert(ip_ifs.get(vlan, {}).get('oper_state') == \"up\",\n 'vlan interface of {vlan} is not up as expected'.format(vlan=vlan))\n else:\n # If only one vlan comply with the above test requirements, then all the vlan members across all the\n # vlans are down. Therefore, the bridge is down, and vlan interface should be down.\n pytest_assert(ip_ifs.get(vlan, {}).get('oper_state') == \"down\",\n 'vlan interface of {vlan} is not down as expected'.format(vlan=vlan))\n finally:\n # Restore all interfaces to their original admin_state.\n self.restore_interface_admin_state(duthost, ifs_status)", "def test_backward(self):\n self.global_block_idx = 0\n self.net = SimpleNet()\n self._check_all(self.net)", "def test_known_eth_dst_deletion(self):\n self.rcv_packet(\n 2,\n 0x100,\n {\n \"eth_src\": self.P1_V100_MAC,\n \"eth_dst\": self.UNKNOWN_MAC,\n \"ipv4_src\": \"10.0.0.2\",\n \"ipv4_dst\": \"10.0.0.3\",\n },\n )\n match = {\"in_port\": 3, \"vlan_vid\": self.V100, \"eth_dst\": self.P1_V100_MAC}\n self.assertTrue(\n self.network.tables[self.DP_ID].is_output(match, port=2, vid=self.V100),\n msg=\"Packet not output correctly after mac is learnt on new port\",\n )\n self.assertFalse(\n self.network.tables[self.DP_ID].is_output(match, port=1),\n msg=\"Packet output on old port after mac is learnt on new port\",\n )", "def test_round_robin_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_ROUND_ROBIN, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], 
self.dut_ports[2]]\n self.verify_round_robin_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_round_robin_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def test_flipping_network(self):\n # setup environment\n run_command_blocking(netem_change.format(\"corrupt 1%\"))\n\n run_command_blocking(TestbTCPFramework.start_client)\n self.assertTrue(filecmp.cmp(TestbTCPFramework.input_file, \"out.file\"))", "def test_bond_check_false():\n bond_length = 3.0\n observed = ga.bond_check(bond_length)\n assert observed == False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enable or disable the VLAN strip and filter options on the specified ports.
def vlan_strip_and_filter(self, action='off', *ports):
    for port_id in ports:
        self.dut.send_expect("vlan set strip %s %d" % (action, port_id), "testpmd> ")
        self.dut.send_expect("vlan set filter %s %d" % (action, port_id), "testpmd> ")
[ "def update_switch_port_vlans_on_device(self, interface, port):", "def _clear_port_vlan_cfg(self, port):\n rid = self.xmlproxy.nb.Ports.find(port)\n prow = self.xmlproxy.nb.Ports.getRow(rid)\n if prow['pvid'] != 1:\n # Set default pvid and try to remove VLAN\n self.xmlproxy.nb.Ports.set.pvid(port, 1)\n # Try to remove VLAN if it is unused.\n vrid = self.xmlproxy.nb.Vlans.find(prow['pvid'])\n if vrid > 0:\n self.xmlproxy.nb.Vlans.delRow(vrid)", "def update_switch_port_vlans(self, interface, port):\n #pass\n interface = str(interface)\n mode = port['switch_port_mode']\n pre_check_data = self.pre_check_update_switch_port_vlans(\n interface, mode, port)\n if not pre_check_data:\n return\n requested_vlan_tags, current_running_config = pre_check_data\n commands = self.update_switch_port_vlans_on_device(interface, port)\n self.post_check_update_switch_port_vlans(\n interface, current_running_config, port, requested_vlan_tags)\n return commands", "def add_vlan(self, vlan_range, port, port_mode, qnq, ctag):\n\n config = inject.instance('config')\n supported_os = config.SUPPORTED_OS\n self._load_vlan_command_templates()\n self.validate_vlan_methods_incoming_parameters(vlan_range, port, port_mode)\n port_name = self.get_port_name(port)\n self.logger.info('Start vlan configuration: vlan {0}; interface {1}.'.format(vlan_range, port_name))\n vlan_config_actions = OrderedDict()\n interface_config_actions = OrderedDict()\n vlan_config_actions['configure_vlan'] = vlan_range\n #vlan_config_actions['state_active'] = []\n #vlan_config_actions['no_shutdown'] = []\n\n self.configure_vlan(vlan_config_actions)\n self.cli.exit_configuration_mode()\n\n interface_config_actions['configure_interface'] = port_name\n interface_config_actions['no_shutdown'] = []\n if supported_os and 'NOS' in supported_os:\n interface_config_actions['switchport'] = []\n if 'trunk' in port_mode and vlan_range == '':\n interface_config_actions['switchport_mode_trunk'] = []\n elif 'trunk' in port_mode and vlan_range != '':\n interface_config_actions['switchport_mode_trunk'] = []\n interface_config_actions['trunk_allow_vlan'] = [vlan_range]\n elif 'access' in port_mode and vlan_range != '':\n if not qnq or qnq is False:\n self.logger.info('qnq is {0}'.format(qnq))\n interface_config_actions['switchport_mode_access'] = []\n interface_config_actions['access_allow_vlan'] = [vlan_range]\n if qnq and qnq is True:\n if not self._does_interface_support_qnq(port_name):\n raise Exception('interface does not support QnQ')\n interface_config_actions['qnq'] = []\n\n self.configure_vlan_on_interface(interface_config_actions)\n result = self.cli.send_command('show running-config interface {0}'.format(port_name))\n self.logger.info('Vlan configuration completed: \\n{0}'.format(result))\n\n return 'Vlan Configuration Completed'", "def set_vlan_port_eth_mode(self, eth, vlan):\n return [\"vlan port %s mode tag vlan %S\" % (eth, vlan)]", "def set_stack_port_up(self, port_no, valve=None):\n self.set_stack_port_status(port_no, 3, valve)", "def open_firewall_port(ceph_node, port, protocol):\n ceph_node.open_firewall_port(port, protocol)", "def validate_vlan_methods_incoming_parameters(self, vlan_range, port, port_mode):\n\n self.logger.info('Vlan Configuration Started')\n if len(port) < 1:\n raise Exception('BrocadeHandlerBase', 'Port list is empty')\n if vlan_range == '' and port_mode == 'access':\n raise Exception('BrocadeHandlerBase', 'Switchport type is Access, but vlan id/range is empty')\n if (',' in vlan_range or '-' in vlan_range) and port_mode == 
'access':\n raise Exception('BrocadeHandlerBase', 'Only one vlan could be assigned to the interface in Access mode')", "def __init__(self, port):\n self.port = port\n self.integration_bridge = cfg.CONF.df.integration_bridge\n self.lport = self.port.get_logical_port()\n self.tap = self._create_tap_device()\n self.is_blocking = True\n self._is_deleted = False", "def plug_port_into_network(self, device_id, host_id, port_id,\n net_id, tenant_id, port_name, device_owner,\n sg, orig_sg, vnic_type, segments=None,\n switch_bindings=None):", "def check_if_port_available_factory(port):\n def check_if_port_available():\n \"\"\"\n Check if a port is in use\n :return bool not_in_use: True if not in use, False if in use\n \"\"\"\n check_port_command = \"netstat -tuna | grep -E \\\"{:d}\\s\\\"\".format(port)\n return not check_nonzero_exit(check_port_command)\n return check_if_port_available", "def toggle(module, curr_switch, toggle_ports, toggle_speed, port_speed, splitter_ports, quad_ports, task, msg):\n output = ''\n cli = pn_cli(module)\n clicopy = cli\n count = 0\n\n for speed in toggle_speed:\n if int(port_speed.strip('g'))/int(speed.strip('g')) >= 4:\n is_splittable = True\n else:\n is_splittable = False\n\n while (count <= 10):\n cli = clicopy\n cli += 'switch %s lldp-show format local-port ' % curr_switch\n cli += 'parsable-delim ,'\n out = run_command(module, cli, task, msg)\n if out:\n local_ports = out.split()\n break\n else:\n time.sleep(3)\n count += 1\n\n if not local_ports:\n module.fail_json(\n unreachable=False,\n failed=True,\n exception='',\n summary='Unable to discover topology',\n task='Fabric creation',\n msg='Fabric creation failed',\n changed=False\n )\n\n _undiscovered_ports = sorted(list(set(toggle_ports) - set(local_ports)),\n key=lambda x: int(x))\n non_splittable_ports = []\n undiscovered_ports = []\n\n for _port in _undiscovered_ports:\n if splitter_ports.get(_port, 0) == 1:\n undiscovered_ports.append(\"%s-%s\" % (_port, int(_port)+3))\n elif splitter_ports.get(_port, 0) == 0:\n undiscovered_ports.append(_port)\n else:\n # Skip intermediate splitter ports\n continue\n if not is_splittable:\n non_splittable_ports.append(_port)\n undiscovered_ports = \",\".join(undiscovered_ports)\n\n if not undiscovered_ports:\n continue\n\n cli = clicopy\n cli += 'switch %s port-config-modify port %s ' % (curr_switch, undiscovered_ports)\n cli += 'disable'\n run_command(module, cli, task, msg)\n\n if non_splittable_ports:\n non_splittable_ports = \",\".join(non_splittable_ports)\n cli = clicopy\n cli += 'switch %s port-config-modify ' % curr_switch\n cli += 'port %s ' % non_splittable_ports\n cli += 'speed %s enable' % speed\n run_command(module, cli, task, msg)\n else:\n cli = clicopy\n cli += 'switch %s port-config-modify ' % curr_switch\n cli += 'port %s ' % undiscovered_ports\n cli += 'speed %s enable' % speed\n run_command(module, cli, task, msg)\n\n time.sleep(10)\n\n # Revert undiscovered ports back to their original speed\n cli = clicopy\n cli += 'switch %s lldp-show format local-port ' % curr_switch\n cli += 'parsable-delim ,'\n local_ports = run_command(module, cli, task, msg)\n _undiscovered_ports = sorted(list(set(toggle_ports) - set(local_ports)),\n key=lambda x: int(x))\n disable_ports = []\n undiscovered_ports = []\n for _port in _undiscovered_ports:\n if _port in quad_ports:\n disable_ports.append(str(_port))\n # dont add to undiscovered ports\n elif splitter_ports.get(_port, 0) == 1:\n splitter_ports_range = set(map(str, (range(int(_port), int(_port)+4))))\n if not 
splitter_ports_range.intersection(set(local_ports)):\n disable_ports.append(\"%s-%s\" % (_port, int(_port)+3))\n undiscovered_ports.append(_port)\n elif splitter_ports.get(_port, 0) == 0:\n disable_ports.append(str(_port))\n undiscovered_ports.append(_port)\n else:\n # Skip intermediate splitter ports\n pass\n\n disable_ports = \",\".join(disable_ports)\n if disable_ports:\n cli = clicopy\n cli += 'switch %s port-config-modify port %s disable' % (curr_switch, disable_ports)\n run_command(module, cli, task, msg)\n\n undiscovered_ports = \",\".join(undiscovered_ports)\n if not undiscovered_ports:\n return 'Toggle completed successfully '\n\n cli = clicopy\n cli += 'switch %s port-config-modify ' % curr_switch\n cli += 'port %s ' % undiscovered_ports\n cli += 'speed %s enable' % port_speed\n run_command(module, cli, task, msg)\n output += 'Toggle completed successfully '\n\n return output", "def set_switch_port(self, vlan, vport_num):\n return [\"switchport vlan %s tag vport %s\" % (vlan, vport_num)]", "def print_port(filter_port, packet_port):\n if filter_port in [packet_port, 'Any', 'any', 'ANY']:\n return True\n return False", "def port_rst_drop():\n os.system(\"sudo iptables -A OUTPUT -p tcp --tcp-flags RST RST --sport 8000 -j DROP\")", "def free_port(self, port):\n \n self.logging.debug(\"Freeing port %d\" %(port))\n try:\n os.remove(self.get_file_name(port))\n except OSError:\n pass", "def mtc_shut_member_ports(line, config_file_first):\n global empty_first_file\n intf = map(int, re.findall('\\d+', line))[0]\n port = map(int, re.findall('\\d+', line))[1]\n if(port!=0):\n config_file_first.write('interface Ethernet' + str(intf) + '/' + str(port) + '\\n')\n config_file_first.write(\"shut\\n\")\n config_file_first.write('interface Ethernet' + str(intf) + '/' + str(port+1) + '\\n')\n config_file_first.write(\"shut\\n\")\n config_file_first.write('interface Ethernet' + str(intf) + '/' + str(port+2) + '\\n')\n config_file_first.write(\"shut\\n\")\n config_file_first.write('interface Ethernet' + str(intf) + '/' + str(port+3) + '\\n')\n config_file_first.write(\"shut\\n\")\n config_file_first.write('interface Ethernet' + str(intf) + '/' + str(port) + '\\n')\n config_file_first.write(\"speed 40000\\n\")\n config_file_first.write(\"no shut\\n\")\n empty_first_file=0\n return 1\n else:\n return 0", "def set_vlan_filter_mode(self, ip_host_num):\n return [\"vlan-filter-mode iphost %s tag-filter untag-filter discard\" % iphost_num]", "def set_port(self, port_name):\r\n global port\r\n port = port_name\r\n print(\"port set to: \" + port)\r\n self.port_menu()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that packets are transmitted correctly in XOR mode when l34 is chosen as the load-balance policy.
def test_xor_l34_forward(self):
    bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)
    self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])
    self.dut.send_expect("set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> ")
    self.set_balance_policy_for_bonding_device(bond_port, "l34")
    self.start_all_ports()
    self.dut.send_expect("start", "testpmd> ")

    slaves = {}
    slaves['active'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]
    slaves['inactive'] = []

    self.verify_xor_tx(self.dut_ports[3], bond_port, "L34", False, **slaves)
    self.vlan_strip_and_filter('off', self.dut_ports[0], self.dut_ports[1], self.dut_ports[2], self.dut_ports[3], bond_port)
    self.verify_xor_tx(self.dut_ports[3], bond_port, "L34", True, **slaves)
[ "def verify_xor_tx(self, unbound_port, bond_port, policy, vlan_tag=False, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_customized_packet_to_unbound_port(unbound_port, bond_port, policy, vlan_tag=False, pkt_count=pkt_count, **slaves)\n\n hash_values = []\n hash_values = self.policy_and_slave_hash(policy, **slaves)\n\n order_ports = self.get_bond_active_slaves(bond_port)\n for slave in slaves['active']:\n slave_map_hash = self.slave_map_hash(slave, order_ports)\n self.verify(pkt_now[slave][0] == pkt_count * hash_values.count(slave_map_hash),\n \"XOR load balance transmit error on the link up port\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0,\n \"XOR load balance transmit error on the link down port\")", "def test_xor_tx(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n slaves = {}\n slaves['active'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = []\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)", "def test_xor(self):\n xor_thousand = self.leet ^ 1000\n self.assertEqual(xor_thousand, bitmath.Bit(1745))\n xor_five_hundred = self.leet ^ 500\n self.assertEqual(xor_five_hundred, bitmath.Bit(1229))", "def test_negated_xor(self):\n self.assert_to_cnf_transformation(\n 'not (A xor B)',\n '(not A or B) and (A or not B)')", "def test_simple_xor(self):\n self.assert_to_cnf_transformation(\n 'A xor B',\n '(not B or not A) and (A or B)')", "def test_xor_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[2], self.dut_ports[1])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def test_xor_gate(self):\n inputs = [[1.0, 1.0],\n [1.0, 0.0],\n [0.0, 1.0],\n [0.0, 0.0]]\n output_vector = [[0.0],\n [1.0],\n [1.0],\n [0.0]]\n inputs = np.array(inputs, dtype='float32')\n output_vector = np.array(output_vector)\n net = NeuralNetwork(inputs, output_vector)\n net.train()\n output = net.feed(np.array([[0, 1]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 1)\n output = net.feed(np.array([[1, 0]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 1)\n output = net.feed(np.array([[0, 0]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 0)\n output = net.feed(np.array([[1, 1]], dtype='float32'))[0][0]\n output = round(output, 3)\n self.assertAlmostEqual(output, 0)", "def runTest(self):\n try:\n print(\"Lag l3 load balancing test based on protocol\")\n \n self.recv_dev_port_idxs = self.get_dev_port_indexes(self.servers[11][1].l3_lag_obj.member_port_indexs)\n 
max_itrs = 99\n rcv_count = [0, 0]\n for i in range(0, max_itrs):\n if i % 2 == 0:\n pkt = simple_tcp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[1][1].mac,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=63)\n else:\n print(\"icmp\")\n pkt = simple_icmp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[1][1].mac,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_icmp_packet(eth_dst=self.servers[11][1].l3_lag_obj.neighbor_mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[11][1].ipv4,\n ip_src=self.servers[1][1].ipv4,\n ip_id=105,\n ip_ttl=63)\n self.dataplane.flush()\n send_packet(self, self.dut.port_obj_list[1].dev_port_index, pkt)\n rcv_idx, _ = verify_packet_any_port(\n self, exp_pkt, self.recv_dev_port_idxs)\n print('des_src={}, rcv_port={}'.format(\n self.servers[1][1].ipv4, rcv_idx))\n rcv_count[rcv_idx] += 1\n\n print(rcv_count)\n for i in range(0, 2):\n self.assertTrue(\n (rcv_count[i] >= ((max_itrs/2) * 0.8)), \"Not all paths are equally balanced\")\n finally:\n pass", "def test_problem11(self):\n blocklen = 16\n for i in range(100):\n guess, real = cryptanalysis.encryption_detection_oracle_ecb_cbc(ciphers.black_box1, blocklen, True)\n self.assertEqual(real, guess)", "def test_xor_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def test_NPNAndALPNNoOverlap(self):\n clientProtocols = [b'h2', b'http/1.1']\n serverProtocols = [b'spdy/3']\n negotiatedProtocol, lostReason = negotiateProtocol(\n serverProtocols=clientProtocols,\n clientProtocols=serverProtocols,\n )\n self.assertIsNone(negotiatedProtocol)\n self.assertEqual(lostReason.type, SSL.Error)", "def verify_packet(self, packet, context):\n pass", "def test_from_bytes_cliff():\n data = bytes([0b00000001])\n packet = Packet12.from_bytes(data)\n assert packet is not None\n assert type(packet) == Packet12\n assert packet.cliff_right is True", "def test_06_allow_forbid_negative_recharges(self):\n\tprint \"...starting test 2.09\"\n\tself.testHandler.handle_maxwell_request(\"voltage:0\")\n\tself.testHandler.handle_maxwell_request(\"phase_load:10\")\n\ttime.sleep(5)\n\tpower=float((self.testHandler.handle_network_request(\"get_active_power\", validity_level=\"medium\")).split(\" \")[0])\n print \"active power = 
\",power,\" watts\"\n\tself.testHandler.handle_network_request(\"set_credit_limit:-100\")\n\t#test for allow or forbid negative recharges\n\tprint\"clearing account\"\n\tself.testHandler.handle_network_request(\"clear_account\", validity_level=\"high\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tassert_equal(a,0.0)\n\tprint\"allowing negative credits\"\n\tself.testHandler.handle_network_request(\"allow_negative_credits\")\n\tself.testHandler.handle_network_request(\"recharge:100\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tself.testHandler.handle_network_request(\"recharge:-120\")\n\ttime.sleep(2)\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tassert_equal(a,-20)\n\tprint\"test for forbid negative recharge strarted\"\n\tprint\"clearing account\"\n\tself.testHandler.handle_network_request(\"clear_account\", validity_level=\"high\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tassert_equal(a,0.0)\n\tprint\"forbiding negative credits and recharging with 100\"\n\tself.testHandler.handle_network_request(\"forbid_negative_credits\")\n\tself.testHandler.handle_network_request(\"recharge:100\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tself.testHandler.handle_network_request(\"recharge:-120\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tassert(a!=-20.0)\n\tassert(a==100.0)\n\tprint'it should not accept the recharge and then print credits'\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a", "def test_promiscuous_pass(self):\n if _debug: TestVLAN._debug(\"test_promiscuous_pass\")\n\n # three element network\n tnet = TNetwork(3)\n tnode1, tnode2, tnode3 = tnet.state_machines\n\n # reach into the network and enable promiscuous mode\n tnet.vlan.nodes[2].promiscuous = True\n\n # make a PDU from node 1 to node 2\n pdu = PDU(b'data', source=1, destination=2)\n\n # node 1 sends the pdu to node 2, node 3 also gets a copy\n tnode1.start_state.send(pdu).success()\n tnode2.start_state.receive(PDU, pduSource=1).success()\n tnode3.start_state.receive(PDU, pduDestination=2).success()\n\n # run the group\n tnet.run()", "async def test_prevent_out_of_order_txs(self):\n\n tx1 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx1 = decode_transaction(tx1)\n stx1 = sign_transaction(tx1, FAUCET_PRIVATE_KEY)\n tx2 = await 
self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10, dtx1.nonce + 1)\n stx2 = sign_transaction(tx2, FAUCET_PRIVATE_KEY)\n\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx2})\n self.assertEqual(resp.code, 400, resp.body)\n\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx1})\n self.assertEqual(resp.code, 200, resp.body)\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx2})\n self.assertEqual(resp.code, 200, resp.body)\n\n # lets the transaction queue processing run before ending the test\n await asyncio.sleep(1)", "def test_length_not_match_error(self, n_status, n_wires):\n with pytest.raises(\n ValueError,\n match=\"Wires length and flipping state length does not match, they must be equal length \",\n ):\n qml.FlipSign(n_status, wires=n_wires)", "def test_from_bytes_no_cliff():\n data = bytes([0b00000000])\n packet = Packet12.from_bytes(data)\n assert packet is not None\n assert type(packet) == Packet12\n assert packet.cliff_right is False", "def test_legacy_vpn_l2tp_ipsec_rsa_strongswan(self):\n vpn = VPN_TYPE.L2TP_IPSEC_RSA\n vpn_profile = self.generate_legacy_vpn_profile(\n vpn, self.vpn_server_addresses[vpn.name][0],\n self.ipsec_server_type[0])\n self.legacy_vpn_connection_test_logic(vpn_profile)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that packets are transmitted correctly in broadcast mode.
def verify_broadcast_tx(self, unbound_port, bond_port, **slaves):
    pkt_count = 100
    pkt_now = {}

    pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)

    for slave in slaves['active']:
        self.verify(pkt_now[slave][0] == pkt_count, "Slave TX packet not correct in mode 3")
    for slave in slaves['inactive']:
        self.verify(pkt_now[slave][0] == 0, "Slave TX packet not correct in mode 3")
    self.verify(pkt_now[unbound_port][0] == pkt_count, "Unbonded port RX packet not correct in mode 3")
    self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),
                "Bonded device TX packet not correct in mode 3")
[ "def test_broadcast(self):\n if _debug: TestVLAN._debug(\"test_broadcast\")\n\n # three element network\n tnet = TNetwork(3)\n tnode1, tnode2, tnode3 = tnet.state_machines\n\n # make a broadcast PDU\n pdu = PDU(b'data', source=1, destination=0)\n if _debug: TestVLAN._debug(\" - pdu: %r\", pdu)\n\n # node 1 sends the pdu, node 2 and 3 each get it\n tnode1.start_state.send(pdu).success()\n tnode2.start_state.receive(PDU, pduSource=1).success()\n tnode3.start_state.receive(PDU, pduSource=1).success()\n\n # run the group\n tnet.run()", "def test_broadcast_call():\n print('\\n', \"testing broadcast call\")\n call.nspv_logout()\n call.nspv_login(wif_real)\n rpc_call = call.nspv_spend(addr_send, 0.1)\n rep = call.type_convert(rpc_call)\n hex_res = rep.get(\"hex\")\n hex = [False, \"norealhexhere\", hex_res]\n retcode_failed = [-1, -2, -3]\n\n # Cae 1 - No hex given\n rpc_call = call.nspv_broadcast(hex[0])\n call.assert_error(rpc_call)\n\n # Case 2 - Non-valid hex, failed broadcast should contain appropriate retcode\n rpc_call = call.nspv_broadcast(hex[1])\n call.assert_in(rpc_call, \"retcode\", retcode_failed)\n\n # Case 3 - Hex of previous transaction\n rpc_call = call.nspv_broadcast(hex[2])\n call.assert_success(rpc_call)\n rep = call.type_convert(rpc_call)\n broadcast_res = rep.get(\"broadcast\")\n expected = rep.get(\"expected\")\n if broadcast_res == expected:\n pass\n else:\n raise AssertionError(\"Aseert equal braodcast: \", broadcast_res, expected)", "def _is_broadcast(self):\n pass", "def test_ipv6network_broadcast_address(self):\n for n in (10**0, 10**6):\n net = ip.IPv6Network('1:2:3:4::/120')\n time1, result1 = timefn(n, lambda: net.broadcast_address)\n enet = eip.IPv6Network('1:2:3:4::/120')\n time2, result2 = timefn(n, lambda: enet.broadcast_address)\n results = (time1, result1), (time2, result2)\n self.report_6n.report(fn_name(), n, results, net)", "def test_ipv4network_broadcast_address(self):\n for n in (10**0, 10**6):\n net = ip.IPv4Network('1.2.3.0/24')\n time1, result1 = timefn(n, lambda: net.broadcast_address)\n enet = eip.IPv4Network('1.2.3.0/24')\n time2, result2 = timefn(n, lambda: enet.broadcast_address)\n results = (time1, result1), (time2, result2)\n self.report_4n.report(fn_name(), n, results, net)", "def send_broadcast_packet(self, broadcast_packet):\n print(\"Send broadcast message: \" + str(broadcast_packet.get_buf()))\n message = broadcast_packet.get_buf()\n self.stream.broadcast_to_none_registers(message, self.stream.get_server_address())", "def test_unicast_block_substituation(self):\n self.UUT.checkMulticast = MagicMock()\n self.UUT.checkMulticast.return_value = False\n\n def sideEffect(name, leg):\n return name\n self.UUT.interface.getActiveParameter.side_effect = sideEffect\n actual = self.UUT.generateUnicastBlock()\n expected = (\n \"\\nm=video {} RTP/AVP 103\\nc=IN IP4 {}\" + RTP_POST_ADDR\n ).format('destination_port', 'destination_ip')\n self.assertEqual(expected, actual)", "def _make_broadcast_socket(self):\n self.broadcast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.broadcast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST,1)", "def test_unicast_block_ignores_when_multicast(self):\n self.UUT.checkMulticast = MagicMock()\n self.UUT.checkMulticast.return_value = True\n expected = \"\"\n actual = self.UUT.generateUnicastBlock()\n self.assertEqual(expected, actual)", "def broadcast(self, message):\r\n print \"OK Broadcast!\", message", "def test_multicast_block_ignores_when_unicast(self):\n self.UUT.checkMulticast = 
MagicMock()\n self.UUT.checkMulticast.return_value = False\n expected = \"\"\n actual = self.UUT.generateMulticastBlock()\n self.assertEqual(expected, actual)", "def valid(packet):\n if is_udp(packet) and is_without_data(packet):\n return True\n return False", "def _send_magic_packets(task, dest_host, dest_port):\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n with contextlib.closing(s) as sock:\n for port in task.ports:\n address = port.address.replace(':', '')\n\n # TODO(lucasagomes): Implement sending the magic packets with\n # SecureON password feature. If your NIC is capable of, you can\n # set the password of your SecureON using the ethtool utility.\n data = 'FFFFFFFFFFFF' + (address * 16)\n packet = bytearray.fromhex(data)\n\n try:\n sock.sendto(packet, (dest_host, dest_port))\n except socket.error as e:\n msg = (_(\"Failed to send Wake-On-Lan magic packets to \"\n \"node %(node)s port %(port)s. Error: %(error)s\") %\n {'node': task.node.uuid, 'port': port.address,\n 'error': e})\n LOG.exception(msg)\n raise exception.WolOperationError(msg)\n\n # let's not flood the network with broadcast packets\n time.sleep(0.5)", "async def test_protocol_factory_udp_broadcast():\n test_url1: str = \"udp+broadcast://localhost\"\n config: dict = {\"COT_URL\": test_url1}\n reader, writer = await pytak.protocol_factory(config)\n assert isinstance(reader, pytak.asyncio_dgram.aio.DatagramServer)\n assert isinstance(writer, pytak.asyncio_dgram.aio.DatagramClient)", "def is_unicast(self):\n return (self.integer & 1 << 40) == 0", "def test_broadcast_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def broadcast():\n message = request.form['broadcast_form_message']\n return sockets.broadcast(message)", "def verify_bound_mac_opt(self, mode_set):\n mac_address_0_orig = self.get_port_mac(self.dut_ports[0])\n mac_address_1_orig = self.get_port_mac(self.dut_ports[1])\n mac_address_2_orig = self.get_port_mac(self.dut_ports[2])\n mac_address_3_orig = self.get_port_mac(self.dut_ports[3])\n\n bond_port = self.create_bonded_device(mode_set, SOCKET_1)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[1])\n\n mac_address_bond_orig = self.get_port_mac(bond_port)\n self.verify(mac_address_1_orig == mac_address_bond_orig,\n \"Bonded device MAC address not same with first slave MAC\")\n\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = 
self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now == mac_address_2_now,\n \"NOT all slaves MAC address same with bonding device in mode %d\" % mode_set)\n else:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now != mac_address_2_now,\n \"All slaves should not be the same in mode %d\"\n % mode_set)\n\n new_mac = \"00:11:22:00:33:44\"\n self.set_mac_for_bonding_device(bond_port, new_mac)\n self.start_port(bond_port)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_now == mac_address_2_now == mac_address_bond_now == new_mac,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set == MODE_LACP:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now != new_mac and\n mac_address_2_now != new_mac and\n mac_address_1_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set in [MODE_ACTIVE_BACKUP, MODE_TLB_BALANCE]:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == new_mac and\n mac_address_bond_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n\n self.set_primary_for_bonding_device(bond_port, self.dut_ports[2], False)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac,\n \"Slave MAC changed when set primary slave\")\n\n mac_address_1_orig = mac_address_1_now\n self.remove_slave_from_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n self.verify(mac_address_2_now == mac_address_2_orig,\n \"MAC not back to original after removing the port\")\n\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == mac_address_1_orig,\n \"Bonding device or slave MAC changed after removing the primary slave\")\n\n self.remove_all_slaves(bond_port)\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def test_bogon_arp_for_controller(self):\n replies = self.rcv_packet(\n 1,\n 0x100,\n {\n \"eth_src\": self.P1_V100_MAC,\n \"eth_dst\": mac.BROADCAST_STR,\n \"arp_code\": arp.ARP_REQUEST,\n \"arp_source_ip\": \"8.8.8.8\",\n \"arp_target_ip\": \"10.0.0.254\",\n },\n )[self.DP_ID]\n # Must be no ARP reply to an ARP request not in our subnet.\n self.assertFalse(ValveTestBases.packet_outs_from_flows(replies))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that packets are transmitted correctly in broadcast mode when any one slave link of the bonding device is brought down.
def test_broadcast_tx_one_slave_down(self):
    bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)
    self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])
    self.dut.send_expect("set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> ")
    self.start_all_ports()
    self.dut.send_expect("start", "testpmd> ")
    self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down")

    try:
        slaves = {}
        slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]
        slaves['inactive'] = [self.dut_ports[0]]

        self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)
    finally:
        self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up")
[ "def test_broadcast_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def verify_broadcast_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Slave TX packet not correct in mode 3\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Slave TX packet not correct in mode 3\")\n self.verify(pkt_now[unbound_port][0] == pkt_count, \"Unbonded port RX packet not correct in mode 3\")\n self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),\n \"Bonded device TX packet not correct in mode 3\")", "def test_broadcast(self):\n if _debug: TestVLAN._debug(\"test_broadcast\")\n\n # three element network\n tnet = TNetwork(3)\n tnode1, tnode2, tnode3 = tnet.state_machines\n\n # make a broadcast PDU\n pdu = PDU(b'data', source=1, destination=0)\n if _debug: TestVLAN._debug(\" - pdu: %r\", pdu)\n\n # node 1 sends the pdu, node 2 and 3 each get it\n tnode1.start_state.send(pdu).success()\n tnode2.start_state.receive(PDU, pduSource=1).success()\n tnode3.start_state.receive(PDU, pduSource=1).success()\n\n # run the group\n tnet.run()", "def test_broadcast_call():\n print('\\n', \"testing broadcast call\")\n call.nspv_logout()\n call.nspv_login(wif_real)\n rpc_call = call.nspv_spend(addr_send, 0.1)\n rep = call.type_convert(rpc_call)\n hex_res = rep.get(\"hex\")\n hex = [False, \"norealhexhere\", hex_res]\n retcode_failed = [-1, -2, -3]\n\n # Cae 1 - No hex given\n rpc_call = call.nspv_broadcast(hex[0])\n call.assert_error(rpc_call)\n\n # Case 2 - Non-valid hex, failed broadcast should contain appropriate retcode\n rpc_call = call.nspv_broadcast(hex[1])\n call.assert_in(rpc_call, \"retcode\", retcode_failed)\n\n # Case 3 - Hex of previous transaction\n rpc_call = call.nspv_broadcast(hex[2])\n call.assert_success(rpc_call)\n rep = call.type_convert(rpc_call)\n broadcast_res = rep.get(\"broadcast\")\n expected = rep.get(\"expected\")\n if broadcast_res == expected:\n pass\n else:\n raise AssertionError(\"Aseert equal braodcast: \", broadcast_res, expected)", "def verify_bound_mac_opt(self, mode_set):\n mac_address_0_orig = self.get_port_mac(self.dut_ports[0])\n mac_address_1_orig = self.get_port_mac(self.dut_ports[1])\n mac_address_2_orig = self.get_port_mac(self.dut_ports[2])\n mac_address_3_orig = self.get_port_mac(self.dut_ports[3])\n\n bond_port 
= self.create_bonded_device(mode_set, SOCKET_1)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[1])\n\n mac_address_bond_orig = self.get_port_mac(bond_port)\n self.verify(mac_address_1_orig == mac_address_bond_orig,\n \"Bonded device MAC address not same with first slave MAC\")\n\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now == mac_address_2_now,\n \"NOT all slaves MAC address same with bonding device in mode %d\" % mode_set)\n else:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now != mac_address_2_now,\n \"All slaves should not be the same in mode %d\"\n % mode_set)\n\n new_mac = \"00:11:22:00:33:44\"\n self.set_mac_for_bonding_device(bond_port, new_mac)\n self.start_port(bond_port)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_now == mac_address_2_now == mac_address_bond_now == new_mac,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set == MODE_LACP:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now != new_mac and\n mac_address_2_now != new_mac and\n mac_address_1_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set in [MODE_ACTIVE_BACKUP, MODE_TLB_BALANCE]:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == new_mac and\n mac_address_bond_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n\n self.set_primary_for_bonding_device(bond_port, self.dut_ports[2], False)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac,\n \"Slave MAC changed when set primary slave\")\n\n mac_address_1_orig = mac_address_1_now\n self.remove_slave_from_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n self.verify(mac_address_2_now == mac_address_2_orig,\n \"MAC not back to original after removing the port\")\n\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == mac_address_1_orig,\n \"Bonding device or slave MAC changed after removing the primary slave\")\n\n self.remove_all_slaves(bond_port)\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def _is_broadcast(self):\n pass", "def exit_func():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sock.sendto(\"exit\", (\"<broadcast>\", IPORT))\n sock.close()", "def test_ipv6network_broadcast_address(self):\n for n in (10**0, 10**6):\n net = ip.IPv6Network('1:2:3:4::/120')\n time1, result1 = timefn(n, lambda: net.broadcast_address)\n enet = eip.IPv6Network('1:2:3:4::/120')\n time2, result2 = timefn(n, lambda: enet.broadcast_address)\n results = (time1, result1), (time2, result2)\n 
self.report_6n.report(fn_name(), n, results, net)", "def test_unicast_block_ignores_when_multicast(self):\n self.UUT.checkMulticast = MagicMock()\n self.UUT.checkMulticast.return_value = True\n expected = \"\"\n actual = self.UUT.generateUnicastBlock()\n self.assertEqual(expected, actual)", "def test_ipv4network_broadcast_address(self):\n for n in (10**0, 10**6):\n net = ip.IPv4Network('1.2.3.0/24')\n time1, result1 = timefn(n, lambda: net.broadcast_address)\n enet = eip.IPv4Network('1.2.3.0/24')\n time2, result2 = timefn(n, lambda: enet.broadcast_address)\n results = (time1, result1), (time2, result2)\n self.report_4n.report(fn_name(), n, results, net)", "def test_known_eth_dst_deletion(self):\n self.rcv_packet(\n 2,\n 0x100,\n {\n \"eth_src\": self.P1_V100_MAC,\n \"eth_dst\": self.UNKNOWN_MAC,\n \"ipv4_src\": \"10.0.0.2\",\n \"ipv4_dst\": \"10.0.0.3\",\n },\n )\n match = {\"in_port\": 3, \"vlan_vid\": self.V100, \"eth_dst\": self.P1_V100_MAC}\n self.assertTrue(\n self.network.tables[self.DP_ID].is_output(match, port=2, vid=self.V100),\n msg=\"Packet not output correctly after mac is learnt on new port\",\n )\n self.assertFalse(\n self.network.tables[self.DP_ID].is_output(match, port=1),\n msg=\"Packet output on old port after mac is learnt on new port\",\n )", "def test_multicast_block_ignores_when_unicast(self):\n self.UUT.checkMulticast = MagicMock()\n self.UUT.checkMulticast.return_value = False\n expected = \"\"\n actual = self.UUT.generateMulticastBlock()\n self.assertEqual(expected, actual)", "def verify_bound_basic_opt(self, mode_set):\n bond_port_0 = self.create_bonded_device(mode_set, SOCKET_0, True)\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[1])\n\n mode_value = self.get_bond_mode(bond_port_0)\n self.verify('%d' % mode_set in mode_value,\n \"Setting bonding mode error\")\n\n bond_port_1 = self.create_bonded_device(mode_set, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[0])\n self.add_slave_to_bonding_device(bond_port_1, True, self.dut_ports[0])\n\n OTHER_MODE = mode_set + 1 if not mode_set else mode_set - 1\n self.set_mode_for_bonding_device(bond_port_0, OTHER_MODE)\n self.set_mode_for_bonding_device(bond_port_0, mode_set)\n\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[2])\n time.sleep(5)\n self.set_primary_for_bonding_device(bond_port_0, self.dut_ports[2])\n\n self.remove_slave_from_bonding_device(bond_port_0, False, self.dut_ports[2])\n primary_now = self.get_bond_primary(bond_port_0)\n self.verify(int(primary_now) == self.dut_ports[1],\n \"Reset primary slave failed after removing primary slave\")\n\n for bond_port in [bond_port_0, bond_port_1]:\n self.remove_all_slaves(bond_port)\n\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "async def test_protocol_factory_udp_broadcast():\n test_url1: str = \"udp+broadcast://localhost\"\n config: dict = {\"COT_URL\": test_url1}\n reader, writer = await pytak.protocol_factory(config)\n assert isinstance(reader, pytak.asyncio_dgram.aio.DatagramServer)\n assert isinstance(writer, pytak.asyncio_dgram.aio.DatagramClient)", "def test_ping_unknown_neighbor(self):\n echo_replies = self.rcv_packet(\n 1,\n 0x100,\n {\n \"eth_src\": self.P1_V100_MAC,\n \"eth_dst\": FAUCET_MAC,\n \"vid\": 0x100,\n \"ipv4_src\": \"10.0.0.1\",\n \"ipv4_dst\": \"10.0.0.99\",\n \"echo_request_data\": self.ICMP_PAYLOAD,\n },\n )[self.DP_ID]\n self.assertTrue(echo_replies)\n out_pkts = ValveTestBases.packet_outs_from_flows(echo_replies)\n self.assertTrue(out_pkts)\n for 
out_pkt in out_pkts:\n pkt = packet.Packet(out_pkt.data)\n exp_pkt = {\n \"arp_source_ip\": \"10.0.0.254\",\n \"arp_target_ip\": \"10.0.0.99\",\n \"opcode\": 1,\n \"eth_src\": FAUCET_MAC,\n \"eth_dst\": self.BROADCAST_MAC,\n }\n self.verify_pkt(pkt, exp_pkt)", "def testUnsubscribeAll(self):\r\n nVis = NAOVision(IP, PORT)\r\n nVis._subscribeToVideoProxy(0)\r\n nVis._unsubscribeAll()\r\n\r\n #Testing for bottom client\r\n boolB0 = \"_clientBottom_0\" in self._videoProxy.getSubscribers()\r\n\r\n #Testing for top client\r\n boolT0 = \"_clientTop_0\" in self._videoProxy.getSubscribers()\r\n\r\n #Making sure that none of the two modules exist\r\n boolAll = boolB0 and boolT0\r\n\r\n #boolAll should return false if both modules\r\n #don't exist in the subscribers list\r\n self.assertEqual(boolAll, False)", "def test_bogon_arp_for_controller(self):\n replies = self.rcv_packet(\n 1,\n 0x100,\n {\n \"eth_src\": self.P1_V100_MAC,\n \"eth_dst\": mac.BROADCAST_STR,\n \"arp_code\": arp.ARP_REQUEST,\n \"arp_source_ip\": \"8.8.8.8\",\n \"arp_target_ip\": \"10.0.0.254\",\n },\n )[self.DP_ID]\n # Must be no ARP reply to an ARP request not in our subnet.\n self.assertFalse(ValveTestBases.packet_outs_from_flows(replies))", "def test_multicast_mroute_ok(self):\n self.setup_lxc12()\n self.lxc12.cmd_multicast_send(group='239.1.1.1', sport=10000, dport=5000, message='hello')\n result = self.fgt.process(line=\"FGT-B1-1:1 check [mroute] multicast vdom=multicast mroute\\n\")\n self.assertFalse(result)", "def _make_broadcast_socket(self):\n self.broadcast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.broadcast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST,1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that packets are transmitted correctly in broadcast mode when all slave links of the bonding device are brought down.
def test_broadcast_tx_all_slaves_down(self):
    bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)
    self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])
    self.dut.send_expect("set portlist %d,%d" % (self.dut_ports[3], bond_port), "testpmd> ")
    self.start_all_ports()
    self.dut.send_expect("start", "testpmd> ")
    self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "down")
    self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "down")
    self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "down")

    try:
        slaves = {}
        slaves['active'] = []
        slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]

        self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)
    finally:
        self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), "up")
        self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), "up")
        self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), "up")
[ "def test_broadcast_tx_one_slave_down(self):\n bond_port = self.create_bonded_device(MODE_BROADCAST, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = [self.dut_ports[1], self.dut_ports[2]]\n slaves['inactive'] = [self.dut_ports[0]]\n\n self.verify_broadcast_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")", "def verify_broadcast_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 100\n pkt_now = {}\n\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count, \"Slave TX packet not correct in mode 3\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Slave TX packet not correct in mode 3\")\n self.verify(pkt_now[unbound_port][0] == pkt_count, \"Unbonded port RX packet not correct in mode 3\")\n self.verify(pkt_now[bond_port][0] == pkt_count * len(slaves['active']),\n \"Bonded device TX packet not correct in mode 3\")", "def test_broadcast(self):\n if _debug: TestVLAN._debug(\"test_broadcast\")\n\n # three element network\n tnet = TNetwork(3)\n tnode1, tnode2, tnode3 = tnet.state_machines\n\n # make a broadcast PDU\n pdu = PDU(b'data', source=1, destination=0)\n if _debug: TestVLAN._debug(\" - pdu: %r\", pdu)\n\n # node 1 sends the pdu, node 2 and 3 each get it\n tnode1.start_state.send(pdu).success()\n tnode2.start_state.receive(PDU, pduSource=1).success()\n tnode3.start_state.receive(PDU, pduSource=1).success()\n\n # run the group\n tnet.run()", "def test_broadcast_call():\n print('\\n', \"testing broadcast call\")\n call.nspv_logout()\n call.nspv_login(wif_real)\n rpc_call = call.nspv_spend(addr_send, 0.1)\n rep = call.type_convert(rpc_call)\n hex_res = rep.get(\"hex\")\n hex = [False, \"norealhexhere\", hex_res]\n retcode_failed = [-1, -2, -3]\n\n # Cae 1 - No hex given\n rpc_call = call.nspv_broadcast(hex[0])\n call.assert_error(rpc_call)\n\n # Case 2 - Non-valid hex, failed broadcast should contain appropriate retcode\n rpc_call = call.nspv_broadcast(hex[1])\n call.assert_in(rpc_call, \"retcode\", retcode_failed)\n\n # Case 3 - Hex of previous transaction\n rpc_call = call.nspv_broadcast(hex[2])\n call.assert_success(rpc_call)\n rep = call.type_convert(rpc_call)\n broadcast_res = rep.get(\"broadcast\")\n expected = rep.get(\"expected\")\n if broadcast_res == expected:\n pass\n else:\n raise AssertionError(\"Aseert equal braodcast: \", broadcast_res, expected)", "def verify_bound_mac_opt(self, mode_set):\n mac_address_0_orig = self.get_port_mac(self.dut_ports[0])\n mac_address_1_orig = self.get_port_mac(self.dut_ports[1])\n mac_address_2_orig = self.get_port_mac(self.dut_ports[2])\n mac_address_3_orig = self.get_port_mac(self.dut_ports[3])\n\n bond_port = self.create_bonded_device(mode_set, SOCKET_1)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[1])\n\n mac_address_bond_orig = self.get_port_mac(bond_port)\n self.verify(mac_address_1_orig == mac_address_bond_orig,\n \"Bonded device MAC address not same with first slave MAC\")\n\n 
self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now == mac_address_2_now,\n \"NOT all slaves MAC address same with bonding device in mode %d\" % mode_set)\n else:\n self.verify(mac_address_1_orig == mac_address_bond_now and\n mac_address_bond_now != mac_address_2_now,\n \"All slaves should not be the same in mode %d\"\n % mode_set)\n\n new_mac = \"00:11:22:00:33:44\"\n self.set_mac_for_bonding_device(bond_port, new_mac)\n self.start_port(bond_port)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n if mode_set in [MODE_ROUND_ROBIN, MODE_XOR_BALANCE, MODE_BROADCAST]:\n self.verify(mac_address_1_now == mac_address_2_now == mac_address_bond_now == new_mac,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set == MODE_LACP:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now != new_mac and\n mac_address_2_now != new_mac and\n mac_address_1_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n elif mode_set in [MODE_ACTIVE_BACKUP, MODE_TLB_BALANCE]:\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == new_mac and\n mac_address_bond_now != mac_address_2_now,\n \"Set mac failed for bonding device in mode %d\" % mode_set)\n\n self.set_primary_for_bonding_device(bond_port, self.dut_ports[2], False)\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac,\n \"Slave MAC changed when set primary slave\")\n\n mac_address_1_orig = mac_address_1_now\n self.remove_slave_from_bonding_device(bond_port, False, self.dut_ports[2])\n mac_address_2_now = self.get_port_mac(self.dut_ports[2])\n self.verify(mac_address_2_now == mac_address_2_orig,\n \"MAC not back to original after removing the port\")\n\n mac_address_1_now = self.get_port_mac(self.dut_ports[1])\n mac_address_bond_now = self.get_port_mac(bond_port)\n self.verify(mac_address_bond_now == new_mac and\n mac_address_1_now == mac_address_1_orig,\n \"Bonding device or slave MAC changed after removing the primary slave\")\n\n self.remove_all_slaves(bond_port)\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def _is_broadcast(self):\n pass", "def test_ipv6network_broadcast_address(self):\n for n in (10**0, 10**6):\n net = ip.IPv6Network('1:2:3:4::/120')\n time1, result1 = timefn(n, lambda: net.broadcast_address)\n enet = eip.IPv6Network('1:2:3:4::/120')\n time2, result2 = timefn(n, lambda: enet.broadcast_address)\n results = (time1, result1), (time2, result2)\n self.report_6n.report(fn_name(), n, results, net)", "def exit_func():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sock.sendto(\"exit\", (\"<broadcast>\", IPORT))\n sock.close()", "def testUnsubscribeAll(self):\r\n nVis = NAOVision(IP, PORT)\r\n nVis._subscribeToVideoProxy(0)\r\n nVis._unsubscribeAll()\r\n\r\n #Testing for bottom client\r\n boolB0 = \"_clientBottom_0\" in self._videoProxy.getSubscribers()\r\n\r\n #Testing for top client\r\n boolT0 = 
\"_clientTop_0\" in self._videoProxy.getSubscribers()\r\n\r\n #Making sure that none of the two modules exist\r\n boolAll = boolB0 and boolT0\r\n\r\n #boolAll should return false if both modules\r\n #don't exist in the subscribers list\r\n self.assertEqual(boolAll, False)", "def test_ipv4network_broadcast_address(self):\n for n in (10**0, 10**6):\n net = ip.IPv4Network('1.2.3.0/24')\n time1, result1 = timefn(n, lambda: net.broadcast_address)\n enet = eip.IPv4Network('1.2.3.0/24')\n time2, result2 = timefn(n, lambda: enet.broadcast_address)\n results = (time1, result1), (time2, result2)\n self.report_4n.report(fn_name(), n, results, net)", "def test_xor_tx_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_XOR_BALANCE, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n\n self.verify_xor_tx(self.dut_ports[3], bond_port, \"L2\", False, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def test_round_robin_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_ROUND_ROBIN, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n self.verify_round_robin_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_round_robin_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def test_unicast_block_ignores_when_multicast(self):\n self.UUT.checkMulticast = MagicMock()\n self.UUT.checkMulticast.return_value = True\n expected = \"\"\n actual = self.UUT.generateUnicastBlock()\n self.assertEqual(expected, actual)", "def verify_bound_basic_opt(self, mode_set):\n bond_port_0 = self.create_bonded_device(mode_set, SOCKET_0, True)\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[1])\n\n mode_value = self.get_bond_mode(bond_port_0)\n self.verify('%d' % mode_set in mode_value,\n \"Setting bonding mode error\")\n\n bond_port_1 = self.create_bonded_device(mode_set, SOCKET_0)\n 
self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[0])\n self.add_slave_to_bonding_device(bond_port_1, True, self.dut_ports[0])\n\n OTHER_MODE = mode_set + 1 if not mode_set else mode_set - 1\n self.set_mode_for_bonding_device(bond_port_0, OTHER_MODE)\n self.set_mode_for_bonding_device(bond_port_0, mode_set)\n\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[2])\n time.sleep(5)\n self.set_primary_for_bonding_device(bond_port_0, self.dut_ports[2])\n\n self.remove_slave_from_bonding_device(bond_port_0, False, self.dut_ports[2])\n primary_now = self.get_bond_primary(bond_port_0)\n self.verify(int(primary_now) == self.dut_ports[1],\n \"Reset primary slave failed after removing primary slave\")\n\n for bond_port in [bond_port_0, bond_port_1]:\n self.remove_all_slaves(bond_port)\n\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()", "def test_active_backup_all_slaves_down(self):\n bond_port = self.create_bonded_device(MODE_ACTIVE_BACKUP, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port, False, self.dut_ports[0], self.dut_ports[1], self.dut_ports[2])\n self.dut.send_expect(\"set portlist %d,%d\" % (self.dut_ports[3], bond_port), \"testpmd> \")\n self.start_all_ports()\n self.dut.send_expect(\"start\", \"testpmd> \")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"down\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"down\")\n\n try:\n slaves = {}\n slaves['active'] = []\n slaves['inactive'] = [self.dut_ports[0], self.dut_ports[1], self.dut_ports[2]]\n self.verify_active_backup_rx(self.dut_ports[3], bond_port, **slaves)\n self.verify_active_backup_tx(self.dut_ports[3], bond_port, **slaves)\n finally:\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[0]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[1]), \"up\")\n self.admin_tester_port(self.tester.get_local_port(self.dut_ports[2]), \"up\")", "def test_known_eth_dst_deletion(self):\n self.rcv_packet(\n 2,\n 0x100,\n {\n \"eth_src\": self.P1_V100_MAC,\n \"eth_dst\": self.UNKNOWN_MAC,\n \"ipv4_src\": \"10.0.0.2\",\n \"ipv4_dst\": \"10.0.0.3\",\n },\n )\n match = {\"in_port\": 3, \"vlan_vid\": self.V100, \"eth_dst\": self.P1_V100_MAC}\n self.assertTrue(\n self.network.tables[self.DP_ID].is_output(match, port=2, vid=self.V100),\n msg=\"Packet not output correctly after mac is learnt on new port\",\n )\n self.assertFalse(\n self.network.tables[self.DP_ID].is_output(match, port=1),\n msg=\"Packet output on old port after mac is learnt on new port\",\n )", "def verify_round_robin_tx(self, unbound_port, bond_port, **slaves):\n pkt_count = 300\n pkt_now = {}\n pkt_now, summary = self.send_default_packet_to_unbound_port(unbound_port, bond_port, pkt_count=pkt_count, **slaves)\n\n if slaves['active'].__len__() == 0:\n self.verify(pkt_now[bond_port][0] == 0, \"Bonding port should not have TX pkt in mode 0 when all slaves down\")\n else:\n self.verify(pkt_now[bond_port][0] == pkt_count, \"Bonding port has error TX pkt count in mode 0\")\n for slave in slaves['active']:\n self.verify(pkt_now[slave][0] == pkt_count / slaves['active'].__len__(), \"Active slave has error TX pkt count in mode 0\")\n for slave in slaves['inactive']:\n self.verify(pkt_now[slave][0] == 0, \"Inactive slave has error TX pkt count in mode 0\")", "def test_ping_unknown_neighbor(self):\n echo_replies = self.rcv_packet(\n 1,\n 0x100,\n {\n \"eth_src\": 
self.P1_V100_MAC,\n \"eth_dst\": FAUCET_MAC,\n \"vid\": 0x100,\n \"ipv4_src\": \"10.0.0.1\",\n \"ipv4_dst\": \"10.0.0.99\",\n \"echo_request_data\": self.ICMP_PAYLOAD,\n },\n )[self.DP_ID]\n self.assertTrue(echo_replies)\n out_pkts = ValveTestBases.packet_outs_from_flows(echo_replies)\n self.assertTrue(out_pkts)\n for out_pkt in out_pkts:\n pkt = packet.Packet(out_pkt.data)\n exp_pkt = {\n \"arp_source_ip\": \"10.0.0.254\",\n \"arp_target_ip\": \"10.0.0.99\",\n \"opcode\": 1,\n \"eth_src\": FAUCET_MAC,\n \"eth_dst\": self.BROADCAST_MAC,\n }\n self.verify_pkt(pkt, exp_pkt)", "def test_multicast_block_ignores_when_unicast(self):\n self.UUT.checkMulticast = MagicMock()\n self.UUT.checkMulticast.return_value = False\n expected = \"\"\n actual = self.UUT.generateMulticastBlock()\n self.assertEqual(expected, actual)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert Python PODs to XML

Takes in a Python POD (dictionary, list or scalar) and returns its XML representation as a string. The return value always needs to be wrapped in an enclosing element.
def to_xmls(foo, indent=1):
    if type(foo) == type({}):
        return __print_dict(foo, indent)
    elif type(foo) == type([]) or type(foo) == type(()):
        return __print_list(foo, indent)
    else:
        return __print_scalar(foo, indent)
[ "def generateXml(obj):\r\n if isinstance(obj, dict) or isinstance(obj,DictMixin):\r\n return getXML_dict(obj, \"item\")\r\n elif isinstance(obj,collections.Iterable):\r\n return \"<list>%s</list>\" % getXML(obj, \"item\")\r\n else:\r\n raise RuntimeError(\"Unable to convert to XML: %s\" % obj)", "def d2xml(d):\n def _d2xml(d, p):\n for k,v in d.items():\n if isinstance(v,dict):\n node = etree.SubElement(p, k)\n _d2xml(v, node)\n elif isinstance(v,list):\n for item in v:\n node = etree.SubElement(p, k)\n _d2xml(item, node)\n elif k == \"__text__\":\n p.text = v\n elif k == \"__tail__\":\n p.tail = v\n else:\n p.set(k, v)\n\n k,v = d.items()[0]\n node = etree.Element(k)\n _d2xml(v, node)\n return node", "def d2xml(d):\n def _d2xml(d, p):\n for k,v in d.items():\n if isinstance(v,dict):\n node = etree.SubElement(p, k)\n _d2xml(v, node)\n elif isinstance(v,list):\n for item in v:\n node = etree.SubElement(p, k)\n _d2xml(item, node)\n elif k == \"__text__\":\n p.text = v\n elif k == \"__tail__\":\n p.tail = v\n else:\n p.set(k, v)\n\n key = list(d.keys())[0]\n root = etree.Element(key)\n _d2xml(d[key], root)\n return root", "def serialize_to_xml_str(obj_pyxb, pretty=True, strip_prolog=False, xslt_url=None):\n return serialize_gen(obj_pyxb, None, pretty, strip_prolog, xslt_url)", "def getXML(obj, objname=None):\r\n if obj == None:\r\n return \"\"\r\n if not objname:\r\n objname = \"item\"\r\n adapt={\r\n dict: getXML_dict,\r\n list: getXML_list,\r\n tuple: getXML_list,\r\n }\r\n if adapt.has_key(obj.__class__):\r\n return adapt[obj.__class__](obj, objname)\r\n else:\r\n return \"<%(n)s>%(o)s</%(n)s>\"%{'n':objname,'o':str(obj)}", "def dict_to_xml(tag, d):\r\n elem = Element(tag)\r\n for key, val in d.items():\r\n child = Element(key)\r\n child.text = str(val)\r\n elem.append(child)\r\n return elem", "def jsonp2xml(json):\n ret = \"\"\n content = None\n for c in [str, int, unicode]:\n if isinstance(json, c):\n return str(json)\n if not isinstance(json, dict):\n raise Exception(\"class type: %s\" % json)\n\n # every tag is a dict.\n # its value can be a string, a list or a dict\n for tag in json.keys():\n tag_list = json[tag]\n\n # if tag_list is a list, then it represent a list of elements\n # ex. 
{index: [{ 'a':'1'} , {'a':'2'} ] }\n # --> <index a=\"1\" /> <index b=\"2\" />\n if isinstance(tag_list, list):\n for t in tag_list:\n # for every element, get the attributes\n # and embed them in the tag named\n attributes = \"\"\n content = \"\"\n if not isinstance(t, dict):\n ret += \"%s\" % t\n else:\n for (attr, value) in t.iteritems():\n # only serializable values are attributes\n if value.__class__.__name__ in 'str':\n attributes = \"\"\"%s %s=\"%s\" \"\"\" % (\n attributes,\n attr,\n cgi.escape(\n stringutils.to_unicode(value), quote=None)\n )\n elif value.__class__.__name__ in ['int', 'unicode', 'bool', 'long']:\n attributes = \"\"\"%s %s=\"%s\" \"\"\" % (\n attributes, attr, value)\n # other values are content\n elif isinstance(value, dict):\n content += ResponseHelper.jsonp2xml(value)\n elif isinstance(value, list):\n content += ResponseHelper.jsonp2xml(\n {attr: value})\n if content:\n ret += \"<%s%s>%s</%s>\" % (\n tag, attributes, content, tag)\n else:\n ret += \"<%s%s/>\" % (tag, attributes)\n elif isinstance(tag_list, dict):\n attributes = \"\"\n content = \"\"\n\n for (attr, value) in tag_list.iteritems():\n # only string values are attributes\n if not isinstance(value, dict) and not isinstance(value, list):\n attributes = \"\"\"%s %s=\"%s\" \"\"\" % (\n attributes, attr, value)\n else:\n content += ResponseHelper.jsonp2xml({attr: value})\n if content:\n ret += \"<%s%s>%s</%s>\" % (tag, attributes, content, tag)\n else:\n ret += \"<%s%s/>\" % (tag, attributes)\n\n # Log the source and destination of the response\n ResponseHelper.log.debug(\"ret object is %s\" % ret.__class__)\n if dump_response:\n ResponseHelper.log.debug(\n \"\\n\\njsonp2xml: %s\\n--->\\n%s \\n\\n\" % (json, ret))\n\n return ret.replace(\"isDir=\\\"True\\\"\", \"isDir=\\\"true\\\"\")", "def json2xml(json_obj: Dict[str, str]) -> str:\n result_list = []\n\n json_obj_type = type(json_obj)\n\n if json_obj_type is dict:\n count = 0\n for tag_name in json_obj:\n sub_obj = json_obj[tag_name]\n result_list.append(\"<entry lxnm:entryID='%s' xmlns:lxnm='http://www.lexonomy.eu/'>\" % (count))\n result_list.append(\"<headword xml:space='preserve'>%s</headword>\" % (tag_name))\n result_list.append('<sense>')\n result_list.append(\"<translation xml:space='preserve'>%s</translation>\" % (str(sub_obj)))\n result_list.append('</sense>')\n result_list.append('</entry>')\n count +=1\n return \"\".join(result_list)\n\n return \"%s%s\" % (json_obj)", "def serialize_gen(\n obj_pyxb, encoding=\"utf-8\", pretty=False, strip_prolog=False, xslt_url=None\n):\n assert d1_common.type_conversions.is_pyxb(obj_pyxb)\n assert encoding in (None, \"utf-8\", \"UTF-8\")\n try:\n obj_dom = obj_pyxb.toDOM()\n except pyxb.ValidationError as e:\n raise ValueError(\n 'Unable to serialize PyXB to XML. error=\"{}\"'.format(e.details())\n )\n except pyxb.PyXBException as e:\n raise ValueError('Unable to serialize PyXB to XML. 
error=\"{}\"'.format(str(e)))\n\n if xslt_url:\n xslt_processing_instruction = obj_dom.createProcessingInstruction(\n \"xml-stylesheet\", 'type=\"text/xsl\" href=\"{}\"'.format(xslt_url)\n )\n root = obj_dom.firstChild\n obj_dom.insertBefore(xslt_processing_instruction, root)\n\n if pretty:\n xml_str = obj_dom.toprettyxml(indent=\" \", encoding=encoding)\n # Remove empty lines in the result caused by a bug in toprettyxml()\n if encoding is None:\n xml_str = re.sub(r\"^\\s*$\\n\", r\"\", xml_str, flags=re.MULTILINE)\n else:\n xml_str = re.sub(b\"^\\s*$\\n\", b\"\", xml_str, flags=re.MULTILINE)\n else:\n xml_str = obj_dom.toxml(encoding)\n if strip_prolog:\n if encoding is None:\n xml_str = re.sub(r\"^<\\?(.*)\\?>\", r\"\", xml_str)\n else:\n xml_str = re.sub(b\"^<\\?(.*)\\?>\", b\"\", xml_str)\n\n return xml_str.strip()", "def ConvertDictToXml(xmldict):\r\n\r\n roottag = list(xmldict)[0]\r\n root = ElementTree.Element(roottag)\r\n _ConvertDictToXmlRecurse(root, xmldict[roottag])\r\n\r\n return ElementTree.tostring(root)", "def map_to_xml(mapping, root=None, command=None):\n envelope = None\n\n if root is None:\n envelope, root = get_envelope(command)\n\n for tag, value in mapping:\n tag = ElementTree.Element(tag)\n\n if type(value) == tuple:\n # Allow for nesting.\n value = map_to_xml(value, tag)\n elif type(value) == list:\n # This conditional lets us expand lists into multiple elements with\n # the same name:\n #\n # ((\"test\", ((\"test_child\", [1, 2, 3]),)),)\n #\n # will be serialized as:\n #\n # <test>\n # <test_child>1</test_child>\n # <test_child>2</test_child>\n # <test_child>3</test_child>\n # </test>\n value_list = tuple((tag.tag, value) for value in value)\n value = map_to_xml(value_list, root)\n continue\n elif type(value) == dict:\n # This conditional expands dicts into name/value pairs, as required\n # by some Silverpop method:\n #\n # ((\"COLUMN\", {\"a\": 1}),)\n #\n # will be serialized as:\n #\n # <COLUMN>\n # <NAME>a</NAME>\n # <VALUE>1</VALUE>\n # </COLUMN>\n value_list = ()\n for column_name, column_value in six.iteritems(value):\n value_list += (((tag.tag), ((\"NAME\", column_name), (\"VALUE\", column_value))),)\n\n value = map_to_xml(value_list, root)\n continue\n\n elif not type(value) == bool:\n # If the value isn't True/False, we can set the node's text value.\n # If the value is True, the tag will still be appended but will be\n # self-closing.\n tag.text = u\"%s\" % (value)\n\n if value:\n root.append(tag)\n\n if envelope is not None:\n root = envelope\n return ElementTree.tostring(root)", "def xml(self):\n return oxml_tostring(self, encoding='UTF-8', standalone=True)", "def convert_dict_to_xml(xmldict):\n\n roottag = xmldict.keys()[0]\n root = ETree.Element(roottag)\n _convert_dict_to_xml_recurse(root, xmldict[roottag])\n return root", "def obj_to_xml(obj):\n # TODO convert object to xml without default namespace gracefully.\n try:\n\n xml = obj.toxml('utf-8')\n except pyxb.ValidationError as e:\n raise ChargebackError(e.details())\n xml = xml.replace(b'ns1:', b'')\n xml = xml.replace(b':ns1', b'')\n return xml", "def wrap(x):\n\n if isinstance(x, dict):\n return XmlDictObject(\n (k, XmlDictObject.Wrap(v)) for (k, v) in iter(x.items()))\n elif isinstance(x, list):\n return [XmlDictObject.Wrap(v) for v in x]\n else:\n return x", "def _get_xml_value(value):\n retval = []\n if isinstance(value, dict):\n for key, value in value.items():\n retval.append('<' + xml_escape(text_type(key)) + '>')\n retval.append(_get_xml_value(value))\n retval.append('</' + 
xml_escape(text_type(key)) + '>')\n elif isinstance(value, list):\n for key, value in enumerate(value):\n retval.append('<child order=\"' + xml_escape(text_type(key)) + '\">')\n retval.append(_get_xml_value(value))\n retval.append('</child>')\n elif isinstance(value, bool):\n retval.append(xml_escape(text_type(value).lower()))\n elif isinstance(value, binary_type):\n retval.append(xml_escape(value.encode('utf-8')))\n elif isinstance(value, text_type):\n retval.append(xml_escape(value))\n else:\n retval.append(xml_escape(text_type(value)))\n return \"\".join(retval)", "def wrap_arguments(args=None):\r\n if args is None:\r\n args = []\r\n\r\n tags = []\r\n for name, value in args:\r\n tag = \"<{name}>{value}</{name}>\".format(\r\n name=name, value=escape(\"%s\" % value, {'\"': \"&quot;\"}))\r\n # % converts to unicode because we are using unicode literals.\r\n # Avoids use of 'unicode' function which does not exist in python 3\r\n tags.append(tag)\r\n\r\n xml = \"\".join(tags)\r\n return xml", "def get_ep_properties_as_xml(uc, object_id, ver='2.3', c=False, **kwargs):\r\n obj_props = get_ep_object_properties(uc, object_id, ver, c, **kwargs)\r\n xml_element = Element(obj_props.descriptive_object_type_id())\r\n for (key, val) in obj_props.as_dict().items():\r\n if c:\r\n if key == 'Comments':\r\n comment_section = Element('Comments')\r\n for comment in val:\r\n comment_element = dict_to_xml('Comment', comment)\r\n comment_section.append(comment_element)\r\n xml_element.append(comment_section)\r\n elif key == 'Tags':\r\n if val is not None:\r\n tag_section = Element('Tags')\r\n for tag in val:\r\n tag_element = dict_to_xml('Tag', tag)\r\n tag_section.append(tag_element)\r\n xml_element.append(tag_section)\r\n elif key == 'Permissions':\r\n perms_element = Element('Permissions')\r\n perms = []\r\n for permission_code in val:\r\n for (k, v) in OBJRIGHT_T.items():\r\n if v == permission_code:\r\n perms.append(k)\r\n perms_element.text = str(perms)\r\n xml_element.append(perms_element)\r\n else:\r\n child = Element(key)\r\n child.text = str(val)\r\n xml_element.append(child)\r\n return xml_element", "def _make_xml(self, tag, value, parent):\n if '@' == tag[:1] and isinstance(value, dict):\n tag = tag[1:]\n\n if parent is None:\n if self._root is None:\n el = ET.Element(tag, value)\n self._root = el\n else:\n el = self._root\n self._root = None\n\n else:\n el = parent if tag == parent.tag else parent.find(tag)\n if el is None:\n # Element first add\n el = ET.SubElement(parent, tag, value)\n else:\n # Save attributes\n el.attrib.update(value)\n\n return el\n\n stag = '#' + tag\n if stag in value:\n if isinstance(value[stag], dict):\n el = ET.Element(tag, value[stag])\n else:\n el = ET.Element(tag)\n\n del value[stag]\n\n else:\n if parent is None:\n if self._root is None:\n el = ET.Element(tag)\n self._root = el\n else:\n el = self._root\n self._root = None\n\n else:\n el = parent.find(tag)\n if el is None:\n # Element first add\n el = ET.SubElement(parent, tag)\n\n if isinstance(value, dict):\n self._parse_dict(value, el)\n else:\n el.text = value\n\n return el" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a noisy subgraph matching problem.
def smp_noisy():
    adj0 = csr_matrix([[0, 0, 0], [1, 0, 0], [0, 0, 0]])
    adj1 = csr_matrix([[0, 0, 0], [0, 0, 0], [0, 1, 0]])
    nodelist = pd.DataFrame(['a', 'b', 'c'], columns=[Graph.node_col])
    edgelist = pd.DataFrame([['b', 'a', 'c1'], ['c', 'b', 'c2']],
                            columns=[Graph.source_col, Graph.target_col, Graph.channel_col])
    tmplt = Graph([adj0, adj1], ['c1', 'c2'], nodelist, edgelist)
    adj2 = csr_matrix(np.zeros((3, 3)))
    edgelist2 = pd.DataFrame([['b', 'a', 'c1']],
                             columns=[Graph.source_col, Graph.target_col, Graph.channel_col])
    world = Graph([adj0.copy(), adj2], ['c1', 'c2'], nodelist, edgelist2)
    smp = MatchingProblem(tmplt, world, global_cost_threshold=1, local_cost_threshold=1)
    return smp
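Editorial note on the fixture above: the "noise" is visible by comparing the two edge lists. The template carries a c2 edge from c to b that the world graph lacks, and the cost thresholds of 1 budget for exactly that one discrepancy. Spelled out with plain sets, values copied from the fixture:

tmplt_edges = {('b', 'a', 'c1'), ('c', 'b', 'c2')}  # template edge list
world_edges = {('b', 'a', 'c1')}                    # world edge list
assert tmplt_edges - world_edges == {('c', 'b', 'c2')}  # the single missing edge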
[ "def test_subgraph(self):\n self.list_motif = []\n n1 = Nucleotid(0, 1, [4], [\"tHS\"])\n n2 = Nucleotid(1, 2, [3], [\"cWW\"])\n n3 = Nucleotid(1, 3, [4], [])\n self.list_motif.append(n1)\n self.list_motif.append(n2)\n self.list_motif.append(n3)\n # self.search_for_sub(self.list_motif, 0, [], 0)\n condidat = self.is_condidat(n1)\n print(\" size of motif: \" + str(len(self.list_motif)))\n print(\"condidat: \" + str(condidat))\n for c in condidat:\n res = self.sub_graph(\n c.index_chain, n1.index_chain, [])\n # if(len(res) == len(self.list_motif)):\n print(\n \"========================================================================================\")\n print(res)\n print(\n \"========================================================================================\")\n self.draw_list()\n\n # res = self.is_condidat(n2)\n # self.detect_all_motif()\n # print(self.subgraph)\n # print(self.list_nucleotide)", "def test_restricted_induced_subgraph_chains(self):\n hide_nodes = [3, 4, 5]\n hide_edges = [(6, 7)]\n RG = nx.restricted_view(self.G, hide_nodes, hide_edges)\n nodes = [4, 5, 6, 7, 8]\n SG = nx.induced_subgraph(RG, nodes)\n SSG = RG.subgraph(nodes)\n assert_is(RG._graph, self.G)\n assert_is(SSG._graph, self.G)\n assert_is(SG._graph, RG)\n assert_edges_equal(SG.edges, SSG.edges)\n # should be same as morphing the graph\n CG = self.G.copy()\n CG.remove_nodes_from(hide_nodes)\n CG.remove_edges_from(hide_edges)\n assert_edges_equal(CG.edges(nodes), SSG.edges)\n CG.remove_nodes_from([0, 1, 2, 3])\n assert_edges_equal(CG.edges, SSG.edges)\n # switch order: subgraph first, then restricted view\n SSSG = self.G.subgraph(nodes)\n RSG = nx.restricted_view(SSSG, hide_nodes, hide_edges)\n assert_is_not(RSG._graph, self.G)\n assert_edges_equal(RSG.edges, CG.edges)", "def subdMatchTopology(frontOfChain=bool):\n pass", "def test_densest_subgraph(self):\n graph_data = self.get_file(\"clique_10.csv\")\n schema = [('src', str), ('dst', str)]\n\n # Set up the frames for the graph, nodes is the union of the src and\n # dst\n # edges need to be both directions\n\n # set up the initial frame\n self.frame = self.context.frame.import_csv(graph_data, schema=schema)\n\n # reverse the edges\n self.frame2 = self.frame.copy()\n self.frame2.add_columns(\n lambda x: [x[\"dst\"], x[\"src\"]], [(\"src2\", str), (\"dst2\", str)])\n self.frame2.drop_columns([\"src\", \"dst\"])\n self.frame2.rename_columns({\"src2\": \"src\", \"dst2\": \"dst\"})\n\n # set up 2 frames to build the union frame for nodes\n self.vertices = self.frame.copy()\n self.vertices2 = self.frame.copy()\n\n # get the src and make it id's\n self.vertices.rename_columns({\"src\": \"id\"})\n self.vertices.drop_columns([\"dst\"])\n\n # get the dst and make it id's\n self.vertices2.rename_columns({\"dst\": \"id\"})\n self.vertices2.drop_columns([\"src\"])\n\n # append the src and dst (now called id)\n self.vertices.append(self.vertices2)\n\n # drop the duplicates\n self.vertices.drop_duplicates()\n self.vertices.sort(\"id\")\n\n self.frame.append(self.frame2)\n\n self.frame.add_columns(lambda x: 2, (\"value\", int))\n\n self.graph = self.context.graph.create(self.vertices, self.frame)\n\n subgraph = self.graph.densest_subgraph()\n\n self.assertAlmostEqual(subgraph.density, 9.0)\n\n subgraph_vertices = subgraph.sub_graph.create_vertices_frame()\n subgraph_vertices_pandas = list(\n subgraph_vertices.to_pandas(subgraph_vertices.count())[\"id\"])\n\n known_values = [u'k_10_2', u'k_10_3', u'k_10_4',\n u'k_10_10', u'k_10_5', u'k_10_6',\n u'k_10_7', u'k_10_8', 
u'k_10_9', u'k_10_1']\n\n self.assertItemsEqual(known_values, subgraph_vertices_pandas)", "def _patch_subgraph_hole(*, subgraph, graph, start, end):\n shortest_path = nx.algorithms.shortest_path(\n graph, start, end, weight=_DISTANCE_KEY\n )\n for node in shortest_path[1:-1]:\n subgraph.add_node(node)\n for edge in zip(shortest_path[:-1], shortest_path[1:]):\n subgraph.add_edge(*edge, **graph.edges[edge])", "def subgraph(self, nbunch):\n bunch =nbunch# self.nbunch_iter(nbunch)\n # create new graph and copy subgraph into it\n H = self.__class__()\n # copy node and attribute dictionaries\n for n in bunch:\n # print n\n H.nei[n] = Set([])\n for n_i in self.nei[n]:\n if n_i in bunch:\n if n_i not in H.rev_nei:\n H.rev_nei[n_i] = Set([])\n H.nei[n].add(n_i)\n H.rev_nei[n_i] = Set([])\n H.rev_nei[n_i].add(n)\n return H", "def find_subgraphs_matching_pattern(graph: nx.DiGraph, pattern_graph: GraphPattern) -> List[List[str]]:\n\n def are_nodes_matching(node_1, node_2):\n for attr in node_2:\n if attr == GraphPattern.LABEL_ATTR:\n continue\n if attr == GraphPattern.METATYPE_ATTR:\n # GraphPattern.ANY_PATTERN_NODE_TYPE and GraphPattern.NON_PATTERN_NODE_TYPE\n # are matched to any node type.\n\n if (\n GraphPattern.ANY_PATTERN_NODE_TYPE in node_2[attr]\n or GraphPattern.NON_PATTERN_NODE_TYPE in node_2[attr]\n ):\n continue\n # Torch and TF pattern mapping based on 'type' section,\n # While ONNX mapping based on metatypes -\n # to support all of them, we need to check the existane of the attributes\n if GraphPattern.NODE_TYPE_ATTR in node_1:\n if node_1[GraphPattern.NODE_TYPE_ATTR] in node_2[attr]:\n continue\n if node_1[attr] not in node_2[attr]:\n return False\n return True\n\n def are_edges_matching(edge_1, edge_2):\n for attr in edge_2:\n if edge_1[attr] not in edge_2[attr]:\n return False\n return True\n\n subgraphs = [] # type: List[List[str]]\n visited_nodes = set() # type: Set[str]\n patterns = [] # type: List[nx.DiGraph]\n for c in nx.weakly_connected_components(pattern_graph.graph):\n patterns.append(pattern_graph.graph.subgraph(c))\n\n def sort_patterns(pattern: nx.DiGraph):\n \"\"\"\n Sort patterns by their length,\n keeping in mind that if node type is GraphPattern.NON_PATTERN_NODE_TYPE it shouldn't count.\n \"\"\"\n pattern_len = len(pattern)\n for node in pattern.nodes:\n if GraphPattern.NON_PATTERN_NODE_TYPE in pattern_graph.graph.nodes.get(node)[GraphPattern.METATYPE_ATTR]:\n pattern_len -= 1\n return pattern_len\n\n # Get all patterns sorted by their lengths\n # as we want match the longest patterns first\n\n patterns = sorted(patterns, key=sort_patterns, reverse=True)\n\n for pattern in patterns:\n matcher = ism.DiGraphMatcher(graph, pattern, node_match=are_nodes_matching, edge_match=are_edges_matching)\n for subgraph in matcher.subgraph_isomorphisms_iter():\n # Bottleneck that need to sort by id for result consistency\n pattern_subgraph = list(\n nx.lexicographical_topological_sort(graph.subgraph(subgraph), key=lambda x: int(x.split()[0]))\n )\n\n full_subgraph_with_non_pattern_nodes = pattern_subgraph[:]\n outside_pattern_nodes = []\n\n # If some nodes are outside the pattern - remove them from pattern_subgraph\n\n for node, pattern_node_id in matcher.mapping.items():\n pattern_node = pattern_graph.graph.nodes[pattern_node_id]\n pattern_node_types = pattern_node.get(GraphPattern.METATYPE_ATTR)\n if GraphPattern.NON_PATTERN_NODE_TYPE in pattern_node_types:\n outside_pattern_nodes.append(node)\n for node in outside_pattern_nodes:\n pattern_subgraph.remove(node)\n\n is_visited_node = 
any(node in visited_nodes for node in pattern_subgraph)\n if is_visited_node:\n continue\n if is_subgraph_has_inner_outgoing_edges(graph, full_subgraph_with_non_pattern_nodes, pattern_subgraph):\n continue\n visited_nodes.update(pattern_subgraph)\n subgraphs.append(pattern_subgraph)\n\n return subgraphs if subgraphs else []", "def partition_girvan_newman(sub_graph):\n return a1.partition_girvan_newman(sub_graph, 3)", "def make_sub_graph(metadata, relevant_fields):\n g = metadata.graph\n sub_graph = DiGraph()\n copy = dict()\n vertices_to_keep = set(relevant_fields.keys())\n\n # Copy relevant vertices from g\n for u in vertices_to_keep: \n copy_u = Table.make_table_from_fields(u, relevant_fields[u])\n copy[u] = copy_u\n sub_graph.add_node(copy_u) # no data on nodes\n\n # Copy relevant arcs from g\n for u, v in g.edges():\n try:\n copy_u, copy_v = copy[u], copy[v]\n except:\n continue\n\n sub_graph.add_edge(copy_u, copy_v, deepcopy(g.edge[u][v]))\n Log.debug(\"Adding copy of : %s\" % metadata.print_arc(u, v))\n\n return sub_graph", "def _remove_duplicate_paths(subgraph):\n for *edge, _ in sorted(\n subgraph.edges(data=_WEIGHT_KEY), key=lambda e: -e[2]\n ):\n shortest_path = nx.algorithms.shortest_path(\n subgraph, *edge, weight=_WEIGHT_KEY\n )\n if len(shortest_path) > 2:\n subgraph.remove_edge(*edge)", "def subgraph_extract(g, v1, v2):\n thres_min = 4\n thres_max = 8\n rad = math.ceil(nx.shortest_path_length(g, source=v1, target=v2) / 2)\n rad = max(rad, thres_min)\n rad = min(rad, thres_max)\n \n nodes1 = list(nx.single_source_shortest_path_length(g, v1, cutoff=rad).keys())\n nodes2 = list(nx.single_source_shortest_path_length(g, v2, cutoff=rad).keys())\n \n g1 = g.subgraph(nodes1)\n g2 = g.subgraph(nodes2)\n return g1, g2", "def find_subseq_violation(self):\n\n # Iterate through all edge pairs\n # If determinism condition present for any pair, (v==w and s==t)\n # return edges\n # Else return None\n \n graph = self.graph\n states = graph.states()\n for state in states:\n neighbors = graph[state]\n # print(len(neighbors))\n for neighbor_1 in neighbors:\n for neighbor_2 in neighbors:\n if neighbor_1 != neighbor_2:\n # print(\"Yo\")\n edge_1 = graph[state][neighbor_1]\n edge_2 = graph[state][neighbor_2]\n if edge_1['input'] == edge_2['input'] and edge_1['output'] == edge_2['output']:\n return((state, edge_1['input'], edge_1['output'], neighbor_1, edge_2['output'], neighbor_2))", "def find_segues(g1, g2, pre_filtering=pre, post_filtering=post, nodes_types_to_filter=nodes_types_to_filter_strict, nodes_types_to_segue_not_equal=get_dict()):\n # First, merges g1 and g2 in a unique graph.\n # Then, find segues as paths from the source node of g1 and the source node of g2.\n # Then, filters out undesired nodes\n # Finally, converts paths to the dictionary form.\n\n # Efficient structure where to store the merged graph\n g = defaultdict(set)\n\n # Map back a tuple of nodes ids in g to a list of nodes in g1 (dictionary 0) and g2 (dictionary 1)\n # A series of identical nodes in g can be mapped to more nodes in one of the starting graphs, we are in a multigraph scenario.\n map_back = {'g1': {}, 'g2': {}}\n\n # Tells whether an edge in g was from g1 or g2 or\n # if it was induced, i.e. 
resulting from the application of a compare functio to nodes from g1 and g2\n edges = {'g1': set(), 'g2': set(), 'induced': set()}\n\n # An induced edge is added as the result of the application of a compare function to two nodes\n # In induced_edges_infos we store these information\n induced_edges_infos = defaultdict(list)\n\n # Here we merge graphs\n\n # Every node in g1 and g2 is represented by a string, which is the conversion of its fields to text (mergiable_id)\n # This automatically implements the equal compare function, as equal nodes will converge into the same node in g\n for idx, addend in enumerate((g1, g2)):\n id_sub_graph = f\"source_{idx}\"\n stack = [((f\"source_{idx}\",), iter(addend['source']))]\n while stack:\n children = stack[-1]\n child = next(children[1], None)\n if child is None:\n stack.pop()\n else:\n child_id = addend.nodes()[child]['mergiable_id']\n child_id += f\"__{idx}\" if addend.nodes()[child]['type'] in nodes_types_to_filter else \"\"\n\n if idx == 0:\n g[children[0][-1]].add(child_id)\n edges['g1'].add((children[0][-1], child_id))\n else:\n g[child_id].add(children[0][-1])\n edges['g2'].add((child_id, children[0][-1]))\n\n key = children[0]+(child_id,)\n if key in map_back[f'g{idx+1}']:\n map_back[f'g{idx+1}'][key].append(child)\n else:\n map_back[f'g{idx+1}'][key] = [child]\n\n stack.append((children[0]+(child_id,), iter(addend[child])))\n\n # Now we add edges stemming for compare functions different from equal\n compareble_nodes_without_equal = [k for k, v in nodes_types_to_segue_not_equal.items()]\n # Every key in d is a tuple of types, so broadcasting to type_1 and type_2\n for type_1, type_2 in compareble_nodes_without_equal:\n\n nodes_type_1 = [g1.nodes()[node_id] for node_id in g1.nodes() if g1.nodes()[node_id]['type'] == type_1]\n nodes_type_2 = [g2.nodes()[node_id] for node_id in g2.nodes() if g2.nodes()[node_id]['type'] == type_2]\n\n for compare_function in [f for f in d[(type_1, type_2)] if f.__name__ != 'equal']:\n\n nodes_type_1_filtered = [n for n in nodes_type_1 if pre(n, compare_function)]\n nodes_type_2_filtered = [n for n in nodes_type_2 if pre(n, compare_function)]\n\n for n1, n2 in itertools.product(nodes_type_1_filtered, nodes_type_2_filtered):\n result = compare_function(n1, n2)\n if result['outcome'] == True:\n\n # Add the edge\n id_1 = f\"{n1['mergiable_id']}__0\" if n1['type'] not in compareble_nodes_with_equal else n1['mergiable_id']\n id_2 = f\"{n2['mergiable_id']}__1\" if n2['type'] not in compareble_nodes_with_equal else n2['mergiable_id']\n g[id_1].add(id_2)\n edges['induced'].add((id_1, id_2))\n\n # Store the result of the compare function application in a dictionary\n result.pop('outcome')\n result['compare_function'] = compare_function.__name__\n induced_edges_infos[(n1['id'], n2['id'])].append(result)\n\n # Find paths in graph\n paths = list(_all_simple_paths_graph(g, 'source_0', {'source_1'}, 50))\n\n # Convert paths to dictionary-shaped segues\n segues = []\n\n # Find out which is the last node that belongs to g1 and which is the first that belongs to g2\n # middle_leg is len==2 tuple which has as values such information\n for j, path in enumerate(paths):\n for idx in range(2, len(path)):\n if tuple(path[:idx]) not in map_back['g1']:\n idx = idx-2\n middle_leg = (path[idx], path[idx+1])\n break\n\n if (tuple(path[idx:][::-1]) in map_back['g2']):\n # Compare function == equal\n for id_1, id_2 in itertools.product(map_back['g1'][tuple(path[0:idx+1])], map_back['g2'][tuple(path[idx:][::-1])]):\n\n segue = {'n1': 
g1._node[id_1],\n 'n2': g2._node[id_2],\n 'value': g1._node[id_1]['value'],\n 'compare_function': 'equal'}\n\n if check_filters(segue, pre_filtering, post_filtering) == True:\n segues.append(segue)\n\n elif middle_leg in edges['induced']:\n # Compare function != equal\n for id_1, id_2 in itertools.product(map_back['g1'][tuple(path[0:idx+1])], map_back['g2'][tuple(path[idx+1:][::-1])]):\n\n candidated_segues = iter([{**{'n1': g1._node[id_1], 'n2': g2._node[id_2]}, **induced_edge_infos}\n for induced_edge_infos in induced_edges_infos[(id_1, id_2)]])\n\n for segue in candidated_segues:\n if check_filters(segue, pre_filtering, post_filtering) == True:\n segues.append(segue)\n\n else:\n # spurious path to be discarded, valid segues enter either the if or elif branch\n pass\n\n return segues", "def GenSubgraphs(G, maxSubgraphSize):\n # Each node already has a 0-based nodeIdx. \n nodeSubsets = set(frozenset([i]) for i in range(len(G.nodes)))\n allSubsets = nodeSubsets.copy()\n totalSubsets = len(nodeSubsets)\n for k in range(1, maxSubgraphSize):\n nodeSubsets = ExpandSubsets(G, nodeSubsets)\n #print(\"%d subsets of size %d.\" % (len(nodeSubsets), k + 1))\n allSubsets |= nodeSubsets\n totalSubsets += len(nodeSubsets)\n assert len(allSubsets) == totalSubsets \n return allSubsets", "def obtain_rules_discard(df_anomalies_no_sub, df_anomalies_yes_sub, X_train, sc,\n n_vertex_numerical, numerical_cols, categorical_cols,\n clustering_algorithm, use_inverse):\n\n def hyper_limits(vectors_bound_all, df_anomalies_yes_sub, numerical_cols):\n limits = obtain_limits(vectors_bound_all)\n df_anomalies_yes_sub[\"outside_hcube\"] = df_anomalies_yes_sub.apply(\n lambda x: function_check(x, limits, numerical_cols), axis=1)\n return df_anomalies_yes_sub, limits\n\n if clustering_algorithm == \"kprototypes\":\n feature_cols = numerical_cols + categorical_cols\n else:\n feature_cols = numerical_cols\n\n # Tolerance param\n max_iters = MAX_ITERS\n\n # Obtain vertices\n n = 0\n check = True\n\n # Drop duplicates\n df_anomalies_no_sub.drop_duplicates(inplace=True)\n df_anomalies_yes_sub.drop_duplicates(inplace=True)\n df_final = []\n \n # Ñapa: duplicate datapoints if below 2\n if len(df_anomalies_no_sub)<2:\n df_anomalies_no_sub = df_anomalies_no_sub.append(df_anomalies_no_sub)\n df_anomalies_no_sub = df_anomalies_no_sub.reset_index(drop=True)\n \n if len(df_anomalies_yes_sub)<2:\n df_anomalies_yes_sub = df_anomalies_yes_sub.append(df_anomalies_no_sub)\n df_anomalies_yes_sub = df_anomalies_yes_sub.reset_index(drop=True)\n \n # Data used -- start using all and 1 cluster\n dct_subdata = {\"data\": df_anomalies_no_sub, \"n_clusters\": 1}\n list_subdata = [dct_subdata]\n\n # Check until all non anomalous data is used for rule inferring\n j = 0\n while check:\n # When there is no data to infer rules, finish\n if len(list_subdata) == 0:\n break\n list_original = list_subdata.copy()\n list_subdata = [] # Reset list\n # For each subdata space, use two clusters to try and infer rules\n for dct_subdata in list_original:\n # Load data\n df_anomaly_no = dct_subdata['data']\n n = dct_subdata['n_clusters']\n j += 1\n\n # Check tolerance\n if j >= max_iters:\n check=False\n break\n # If there is only one point left, skip it\n elif n > len(df_anomaly_no):\n continue\n\n # Rules\n print(\"Iteration {0} | nº clusters used {1}\".format(j, n))\n # Returns n_vertex_numerical datapoints\n # if n_vertex_numerical > len(df_anomalies_no) for each cluster;\n # else returns df_anomalies_no\n dict_vectors_bound_all = obtain_vertices(\n 
df_anomaly_no,\n X_train,\n sc,\n n_vertex_numerical,\n numerical_cols,\n categorical_cols,\n clustering_algorithm,\n n_clusters=n)\n\n # For each cluster in that subdata\n for key, value in dict_vectors_bound_all.items():\n vectors_bound_all = value[0].copy()\n df_anomalies_yes_sub, limits = hyper_limits(\n vectors_bound_all, df_anomalies_yes_sub, feature_cols)\n list_check = list(\n df_anomalies_yes_sub[\"outside_hcube\"].unique())\n\n # Recover original indexes\n df_anomaly_iter = value[2]\n df_aux = df_anomaly_no.copy().reset_index()\n cols_merge = [\n column for column in list(df_anomaly_iter.columns)\n if column != \"distances\"\n ]\n df_anomaly_iter = df_anomaly_iter[cols_merge]\n df_anomaly_iter = df_anomaly_iter.merge(\n df_aux,\n how=\"left\",\n left_on=cols_merge,\n right_on=cols_merge)\n df_anomaly_iter.index = df_anomaly_iter['index']\n del df_anomaly_iter['index']\n\n # If there are points that belong to the other class,\n # retrain with one more cluster\n if False in list_check:\n dct_subdata = {'data': df_anomaly_iter, 'n_clusters': 2}\n list_subdata.append(dct_subdata)\n # When there are no points from the other class,\n # turn into rules (and do not use those points again)\n elif len(df_anomaly_no)==1.:\n df_final.append(limits)\n else:\n df_final.append(limits)\n\n return df_final", "def indrect_graph_matching(costs: Dict, probs: Dict, p_t: np.ndarray,\n idx2nodes: Dict, ot_hyperpara: Dict, weights: Dict = None) -> Tuple[List, List, List]:\n cost_t, trans, _ = Gwl.gromov_wasserstein_barycenter(costs, probs, p_t, ot_hyperpara, weights)\n set_idx, set_name, set_confidence = node_set_assignment(trans, probs, idx2nodes)\n return set_idx, set_name, set_confidence", "def test_maximal_matching_combined_qubo(self):\n\n G = nx.complete_graph(5)\n delta = max(G.degree(node) for node in G) # maximum degree\n A = 1 # magnitude arg for _matching_qubo\n B = .75 * A / (delta - 2.) # magnitude arg for _maximal_matching_qubo\n\n edge_mapping = {edge: idx for idx, edge in enumerate(G.edges())}\n edge_mapping.update({(e1, e0): idx for (e0, e1), idx in edge_mapping.items()})\n inv_edge_mapping = {idx: edge for edge, idx in edge_mapping.items()}\n\n Qm = _matching_qubo(G, edge_mapping, magnitude=A)\n Qmm = _maximal_matching_qubo(G, edge_mapping, magnitude=B)\n\n Q = defaultdict(float)\n for edge, bias in Qm.items():\n Q[edge] += bias\n for edge, bias in Qmm.items():\n Q[edge] += bias\n Q = dict(Q)\n\n # now for each combination of edges, we check that if the combination\n # is a maximal matching, and if so that is has ground energy, else\n # there is an infeasible gap\n ground_energy = -1. 
* B * len(G.edges()) # from maximal matching\n infeasible_gap = float('inf')\n for edge_vars in powerset(set(edge_mapping.values())):\n\n # get the matching from the variables\n potential_matching = {inv_edge_mapping[v] for v in edge_vars}\n\n # get the sample from the edge_vars\n sample = {v: 0 for v in edge_mapping.values()}\n for v in edge_vars:\n sample[v] = 1\n\n if dnx.is_maximal_matching(G, potential_matching):\n # print potential_matching, qubo_energy(Q, sample)\n self.assertLess(abs(qubo_energy(sample, Q) - ground_energy), 10**-8)\n else:\n en = qubo_energy(sample, Q)\n\n gap = en - ground_energy\n if gap < infeasible_gap:\n infeasible_gap = gap\n\n self.assertLessEqual(B - infeasible_gap, 10**-8)\n\n #\n # Another graph, Chimera tile this time\n #\n\n G = dnx.chimera_graph(1, 1, 4)\n delta = max(G.degree(node) for node in G) # maximum degree\n A = 1 # magnitude arg for _matching_qubo\n B = .95 * A / (delta - 2.) # magnitude arg for _maximal_matching_qubo\n\n edge_mapping = {edge: idx for idx, edge in enumerate(G.edges())}\n edge_mapping.update({(e1, e0): idx for (e0, e1), idx in edge_mapping.items()})\n inv_edge_mapping = {idx: edge for edge, idx in edge_mapping.items()}\n\n Qm = _matching_qubo(G, edge_mapping, magnitude=A)\n Qmm = _maximal_matching_qubo(G, edge_mapping, magnitude=B)\n\n Q = defaultdict(float)\n for edge, bias in Qm.items():\n Q[edge] += bias\n for edge, bias in Qmm.items():\n Q[edge] += bias\n Q = dict(Q)\n\n # now for each combination of edges, we check that if the combination\n # is a maximal matching, and if so that is has ground energy, else\n # there is an infeasible gap\n ground_energy = -1. * B * len(G.edges()) # from maximal matching\n infeasible_gap = float('inf')\n for edge_vars in powerset(set(edge_mapping.values())):\n\n # get the matching from the variables\n potential_matching = {inv_edge_mapping[v] for v in edge_vars}\n\n # get the sample from the edge_vars\n sample = {v: 0 for v in edge_mapping.values()}\n for v in edge_vars:\n sample[v] = 1\n\n if dnx.is_maximal_matching(G, potential_matching):\n # print potential_matching, qubo_energy(Q, sample)\n self.assertLess(abs(qubo_energy(sample, Q) - ground_energy), 10**-8)\n else:\n en = qubo_energy(sample, Q)\n\n gap = en - ground_energy\n if gap < infeasible_gap:\n infeasible_gap = gap\n\n self.assertLessEqual(B - infeasible_gap, 10**-8)", "def find_missing_adjacencies(server, uuid, instance, body, known_edges=None, cc=None, svs=None, search_distance=1, connect_non_adjacent=False):\n BLOCK_TABLE_COLS = ['z', 'y', 'x', 'sv_a', 'sv_b', 'cc_a', 'cc_b', 'detected', 'applied']\n\n assert (known_edges is None) != (cc is None), \\\n \"Provide known_edges or cc (not both)\"\n\n if svs is None:\n # We could compute the supervoxel list ourselves from\n # the labelindex, but dvid can do it faster.\n svs = fetch_supervoxels(server, uuid, instance, body)\n\n if cc is None:\n known_edges = np.asarray(known_edges, np.uint64)\n cc = connected_components_nonconsecutive(known_edges, svs)\n\n orig_num_cc = final_num_cc = cc.max()+1\n if orig_num_cc == 1:\n return np.zeros((0,2), np.uint64), orig_num_cc, final_num_cc, pd.DataFrame(columns=BLOCK_TABLE_COLS)\n\n labelindex = fetch_labelindex(server, uuid, instance, body, format='protobuf')\n encoded_block_coords = np.fromiter(labelindex.blocks.keys(), np.uint64, len(labelindex.blocks))\n coords_zyx = decode_labelindex_blocks(encoded_block_coords)\n\n cc_mapper = LabelMapper(svs, cc)\n sv_adj_found = []\n cc_adj_found = set()\n block_tables = {}\n searched_block_svs = 
{}\n\n for coord_zyx, sv_counts in zip(coords_zyx, labelindex.blocks.values()):\n # Given the supervoxels in this block, what CC adjacencies\n # MIGHT we find if we were to inspect the segmentation?\n block_svs = np.fromiter(sv_counts.counts.keys(), np.uint64)\n block_ccs = cc_mapper.apply(block_svs)\n possible_cc_adjacencies = set(combinations(sorted(set(block_ccs)), 2))\n\n # We only aim to find (at most) a single link between each CC pair.\n # That is, we don't care about adjacencies between CC that we've already linked so far.\n possible_cc_adjacencies -= cc_adj_found\n if not possible_cc_adjacencies:\n continue\n\n searched_block_svs[(*coord_zyx,)] = block_svs\n\n # Not used in the search; only returned for debug purposes.\n block_adj_table = _init_adj_table(coord_zyx, block_svs, cc_mapper)\n\n block_vol = fetch_block_vol(server, uuid, instance, coord_zyx, block_svs)\n if search_distance > 0:\n # It would be nice to do a proper spherical dilation,\n # but apparently dilation() is special-cased to be WAY\n # faster with a square structuring element, and we prefer\n # speed over cleaner dilation.\n # footprint = skimage.morphology.ball(dilation)\n radius = search_distance // 2\n footprint = np.ones(3*(1+2*radius,), np.uint8)\n dilated_block_vol = dilation(block_vol, footprint)\n\n # Since dilation is a max-filter, we might have accidentally\n # erased small, low-valued supervoxels, erasing the adjacendies.\n # Overlay the original volume to make sure they still count.\n block_vol = np.where(block_vol, block_vol, dilated_block_vol)\n\n sv_adjacencies = compute_label_adjacencies(block_vol)\n sv_adjacencies['cc_a'] = cc_mapper.apply( sv_adjacencies['sv_a'].values )\n sv_adjacencies['cc_b'] = cc_mapper.apply( sv_adjacencies['sv_b'].values )\n\n # Normalize\n # Note: This might swap only cc (or sv) without swapping sv (or cc),\n # but that doesn't matter here.\n swap_cc = sv_adjacencies.eval('cc_a > cc_b')\n swap_sv = sv_adjacencies.eval('sv_a > sv_b')\n sv_adjacencies.loc[swap_cc, ['cc_a', 'cc_b']] = sv_adjacencies.loc[swap_cc, ['cc_b', 'cc_a']]\n sv_adjacencies.loc[swap_sv, ['sv_a', 'sv_b']] = sv_adjacencies.loc[swap_sv, ['sv_b', 'sv_a']]\n\n found_new_adj = False\n for row in sv_adjacencies.query('cc_a != cc_b').itertuples(index=False):\n sv_adj = (row.sv_a, row.sv_b)\n cc_adj = (row.cc_a, row.cc_b)\n\n block_adj_table.loc[sv_adj, 'detected'] = True\n if cc_adj in cc_adj_found:\n continue\n\n found_new_adj = True\n cc_adj_found.add( cc_adj )\n sv_adj_found.append( sv_adj )\n block_adj_table.loc[sv_adj, 'applied'] = True\n\n block_tables[(*coord_zyx,)] = block_adj_table\n\n # If we made at least one change and we've\n # finally unified all components, then we're done.\n if found_new_adj:\n final_num_cc = connected_components(np.array(list(cc_adj_found), np.uint64), orig_num_cc).max()+1\n if final_num_cc == 1:\n break\n\n # If we couldn't connect everything via direct adjacencies,\n # we can just add edges for any supervoxels that share a block.\n if final_num_cc > 1 and connect_non_adjacent:\n for coord_zyx, block_svs in searched_block_svs.items():\n block_ccs = cc_mapper.apply(block_svs)\n\n # We only need one SV per connected component,\n # so load them into a dict.\n selected_svs = dict(zip(block_ccs, block_svs))\n for (sv_a, sv_b) in combinations(sorted(selected_svs.values()), 2):\n (cc_a, cc_b) = cc_mapper.apply(np.array([sv_a, sv_b], np.uint64))\n if cc_a > cc_b:\n cc_a, cc_b = cc_b, cc_a\n\n if (cc_a, cc_b) not in cc_adj_found:\n if sv_a > sv_b:\n sv_a, sv_b = sv_b, sv_a\n\n 
cc_adj_found.add( (cc_a, cc_b) )\n sv_adj_found.append( (sv_a, sv_b) )\n\n block_tables[(*coord_zyx,)].loc[(sv_a, sv_b), 'applied'] = True\n\n final_num_cc = connected_components(np.array(list(cc_adj_found), np.uint64), orig_num_cc).max()+1\n\n if len(block_tables) == 0:\n block_table = pd.DataFrame(columns=BLOCK_TABLE_COLS)\n else:\n block_table = pd.concat(block_tables.values(), sort=False).reset_index()\n block_table = block_table[BLOCK_TABLE_COLS]\n\n if sv_adj_found:\n new_edges = np.array(sv_adj_found, np.uint64)\n else:\n new_edges = np.zeros((0,2), dtype=np.uint64)\n\n return new_edges, int(orig_num_cc), int(final_num_cc), block_table", "def _find_dupe_verts(base, bf, group, freq, bkdns):\n #find redundant vertices\n base_edges = base.edges\n base_edge_corr, base_face_corr = base.faces_by_edge(base_edges)\n l0 = []\n l1 = []\n for i in range(len(base_edges)):\n edge = base_edges[i]\n index = base_edge_corr == i\n facex = base_face_corr[index]\n fn = len(facex)\n if fn > 2:\n warnings.warn(\"More than 2 faces meet at a single edge. \"\n \"Choosing 2 faces arbitrarily...\")\n facex = facex[:2]\n elif fn < 2:#external edge, skip it\n continue\n index_0 = bf == facex[0]\n index_1 = bf == facex[1]\n faces = [base.faces[facex[0]], base.faces[facex[1]]]\n match = _stitch(edge, faces, bkdns, freq)\n lx0 = np.argwhere(index_0)[match[..., 0]].flatten()\n lx1 = np.argwhere(index_1)[match[..., 1]].flatten()\n l0.extend(lx0)\n l1.extend(lx1)\n matches = np.stack([l0, l1], axis=-1)\n #TODO replace this with np.unique when 1.13 comes out\n matches = np.array(sorted(list({tuple(sorted(t)) for t in matches})))\n vno = len(group)\n conns = sparse.coo_matrix((np.ones(len(matches)),\n (matches[:, 0], matches[:, 1])),\n shape=(vno, vno))\n ncp, cp = csgraph.connected_components(conns)\n verts = np.arange(vno, dtype=int)\n for i in range(ncp):\n component = np.argwhere(cp == i).flatten()\n gp = group[component]\n order = np.argsort(gp)\n component = component[order]\n v = verts[component[0]]\n verts[component] = v\n unique_index = verts == np.arange(len(verts))\n renumbered = xmath.renumber(unique_index)\n return renumbered[verts], unique_index" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test Circuit with some example instructions.
def test_circuit():
    instructions = """\
123 -> x
456 -> y
x AND y -> d
x OR y -> e
x LSHIFT 2 -> f
y RSHIFT 2 -> g
NOT x -> h
NOT y -> i
"""
    expected = dict(
        [
            ("d", 72),
            ("e", 507),
            ("f", 492),
            ("g", 114),
            ("h", 65412),
            ("i", 65079),
            ("x", 123),
            ("y", 456),
        ]
    )
    circuit = Circuit(instructions)
    circuit.build()
    # Ensure each wire has a value
    assert circuit._wires == expected
[ "def test_control_bit_of_cnot(self):\n\n qr = QuantumRegister(2, \"qr\")\n circuit = QuantumCircuit(qr)\n circuit.cx(qr[0], qr[1])\n circuit.x(qr[0])\n circuit.cx(qr[0], qr[1])\n\n new_pm = PassManager(CommutativeCancellation())\n new_circuit = new_pm.run(circuit)\n expected = QuantumCircuit(qr)\n expected.cx(qr[0], qr[1])\n expected.x(qr[0])\n expected.cx(qr[0], qr[1])\n\n self.assertEqual(expected, new_circuit)", "def test_additional_examples_1(self):\n self._test(\"Additional Examples 1\")", "def test_circuits(self):\n test_amps = [-0.5, 0, 0.5]\n rabi = RoughXSXAmplitudeCal([0], self.cals, amplitudes=test_amps, backend=self.backend)\n\n circs = rabi._transpiled_circuits()\n\n for circ, amp in zip(circs, test_amps):\n self.assertEqual(circ.count_ops()[\"Rabi\"], 1)\n\n d0 = pulse.DriveChannel(0)\n with pulse.build(name=\"x\") as expected_x:\n pulse.play(pulse.Drag(160, amp, 40, 0), d0)\n\n self.assertEqual(circ.calibrations[\"Rabi\"][((0,), (amp,))], expected_x)", "def test_retrieve_instructions(self):\n pass", "def test_nested_control_flow(self):\n level2_test = QuantumCircuit(2, 1)\n level2_test.cz(0, 1)\n level2_test.cz(0, 1)\n level2_test.cz(0, 1)\n level2_test.measure(0, 0)\n\n level1_test = QuantumCircuit(2, 1)\n level1_test.for_loop((0,), None, level2_test.copy(), level1_test.qubits, level1_test.clbits)\n level1_test.h(0)\n level1_test.h(0)\n level1_test.measure(0, 0)\n\n test = QuantumCircuit(2, 1)\n test.while_loop((test.clbits[0], True), level1_test.copy(), test.qubits, test.clbits)\n test.measure(0, 0)\n\n level2_expected = QuantumCircuit(2, 1)\n level2_expected.cz(0, 1)\n level2_expected.measure(0, 0)\n\n level1_expected = QuantumCircuit(2, 1)\n level1_expected.for_loop(\n (0,), None, level2_expected.copy(), level1_expected.qubits, level1_expected.clbits\n )\n level1_expected.measure(0, 0)\n\n expected = QuantumCircuit(2, 1)\n expected.while_loop(\n (expected.clbits[0], True), level1_expected.copy(), expected.qubits, expected.clbits\n )\n expected.measure(0, 0)\n\n passmanager = PassManager([CommutationAnalysis(), CommutativeCancellation()])\n new_circuit = passmanager.run(test)\n self.assertEqual(new_circuit, expected)", "def test_additional_examples_2(self):\n self._test(\"Additional Examples 2\")", "def test_target_basis_01(self):\n circuit = QuantumCircuit(1)\n circuit.s(0)\n circuit.z(0)\n circuit.t(0)\n circuit.rz(np.pi, 0)\n theta = Parameter(\"theta\")\n target = Target(num_qubits=2)\n target.add_instruction(CXGate())\n target.add_instruction(PhaseGate(theta))\n target.add_instruction(SXGate())\n passmanager = PassManager()\n passmanager.append(CommutativeCancellation(target=target))\n new_circuit = passmanager.run(circuit)\n expected = QuantumCircuit(1)\n expected.rz(11 * np.pi / 4, 0)\n expected.global_phase = 11 * np.pi / 4 / 2 - np.pi / 2\n\n self.assertEqual(new_circuit, expected)", "def test_assemble_single_circuit(self):\n q = QuantumRegister(2, name='q')\n c = ClassicalRegister(2, name='c')\n circ = QuantumCircuit(q, c, name='circ')\n circ.h(q[0])\n circ.cx(q[0], q[1])\n circ.measure(q, c)\n\n run_config = RunConfig(shots=2000, memory=True)\n qobj = assemble_circuits(circ, run_config=run_config)\n self.assertIsInstance(qobj, Qobj)\n self.assertEqual(qobj.config.shots, 2000)\n self.assertEqual(qobj.config.memory, True)\n self.assertEqual(len(qobj.experiments), 1)\n self.assertEqual(qobj.experiments[0].instructions[1].name, 'cx')", "def test_basic_classical_wires(self):\n original = QuantumCircuit(2, 1)\n original.x(0).c_if(original.cregs[0], 0)\n 
original.x(1).c_if(original.cregs[0], 0)\n # This transpilation shouldn't change anything, but it should succeed. At one point it was\n # triggering an internal logic error and crashing.\n transpiled = PassManager([CommutativeCancellation()]).run(original)\n self.assertEqual(original, transpiled)", "def test_circuit_init(self):\n circuit, target = self.simple_circuit_no_measure()\n op = SuperOp(circuit)\n target = SuperOp(target)\n self.assertEqual(op, target)", "def test_circuits(self):\n\n drag = FineDrag([0], Gate(\"Drag\", num_qubits=1, params=[]))\n drag.set_experiment_options(schedule=self.schedule)\n drag.backend = FakeArmonkV2()\n for circuit in drag.circuits()[1:]:\n for idx, name in enumerate([\"Drag\", \"rz\", \"Drag\", \"rz\"]):\n self.assertEqual(circuit.data[idx][0].name, name)", "def test_instance_calling(self):\n\n\n class exampleInst(Instance):\n def __call__(self, number):\n return number*2\n\n self.example.setInstanceClass(exampleInst)\n\n conceptPatterns = [\n \"%(20)\",\n \"@(20)\",\n \"#example(20)\",\n \"#[something]example(20)\"\n ]\n\n for logic in conceptPatterns:\n node = self.factory.constructTree(logic)\n scenario = {logic[:logic.index(\"(\")]: self.example.instance()}\n self.assertEqual(node.eval(scenario = scenario), 40)", "def test_ConvE():\n testing_function('conve')", "def test_case_control_11(self):\n lines = [\n 'SET 100 = 100',\n 'DISP = 100',\n 'SUBCASE 1',\n ' SPC = 1',\n ' LOAD = 1',\n 'SUBCASE 2',\n ' SPC = 2',\n ' LOAD = 2',\n ' DISP = ALL',\n 'SUBCASE 3',\n ' SET 100 = 100, 101',\n ' SPC = 3',\n ' LOAD = 3',\n ' DISP = 100',\n 'SUBCASE 4',\n ' SPC = 3',\n ' LOAD = 3',\n ' DISP = 100',\n ]\n\n deck = CaseControlDeck(lines)\n\n default = deck.subcases[0]\n sc3 = deck.subcases[3]\n sc4 = deck.subcases[4]\n\n assert default.params['SET 100'] == [[100], 100, 'SET-type']\n assert sc3.params['SET 100'] == [[100, 101], 100, 'SET-type']\n assert sc4.params['SET 100'] == [[100], 100, 'SET-type']", "def test_coin_info(self):", "def test_mpx_registers_with_example_code(self):\n self.build()\n self.mpx_registers_with_example_code()", "def test_basis_02(self):\n circuit = QuantumCircuit(1)\n circuit.s(0)\n circuit.z(0)\n circuit.t(0)\n passmanager = PassManager()\n passmanager.append(CommutativeCancellation(basis_gates=[\"cx\", \"rz\", \"sx\"]))\n new_circuit = passmanager.run(circuit)\n\n expected = QuantumCircuit(1)\n expected.rz(7 * np.pi / 4, 0)\n expected.global_phase = 7 * np.pi / 4 / 2\n self.assertEqual(new_circuit, expected)", "def test_demo(self):\n self.cbct.run_demo(show=False)", "def visitCase(self, testCase):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get map figure coordinates for a position.
def _figure_coordinates(self, position):
    position = np.array(position)
    scaled = np.atleast_2d((position - self._origin) / self._resolution)
    # flip array in left-right direction
    return np.fliplr(scaled).astype(np.uint16).reshape(position.shape)
[ "def _get_plot_coordinates(self) -> Tuple[int, int]:\n return self._x0 + AXIS_SPACE_PX, self._y0 # y does not need to be added AXIS_SPACE_PX, since it is at bottom", "def obj_coords(self, soma_id, soma_map, soma_config):\n query = { \"map\": soma_map,\n \"config\": soma_config,\n \"id\": soma_id\n } \n\n res = self.find_projection(query, {\"pose\": 1})\n\n if res.count() == 0:\n return None\n return res[0]['pose']['position']['x'], res[0]['pose']['position']['y'], \\\n res[0]['pose']['position']['z']", "def get_coordinates(self, offset):\n x = self.position.x + self.margin.left + offset.x\n y = self.position.y + self.margin.top + offset.y\n return x, y", "def get_coordinates(self):\n return self.coordinates", "def _get_pos(self):\r\n \r\n return (self.rect.midbottom[0]-(MAP_TILE_WIDTH/2))/MAP_TILE_WIDTH, (self.rect.midbottom[1]-(MAP_TILE_HEIGHT))/MAP_TILE_HEIGHT", "def mkmapcoords(self, pts):\n return(mk_mapcoords(pts, self.vol_verts, self.element, self.dim))", "def getCoords(self): # real signature unknown; restored from __doc__\n pass", "def get_element_location(self, value):\n try:\n location = self.get_element(value).location_in_view\n x = location['x']\n y = location['y']\n return x, y\n except AttributeError as msg:\n raise msg", "def get_coord(self):\n return self._coord", "def position(self):\n return self.polargraph.position()", "def position(self):\n return (self.center, self.height)", "def coord(self):\n return self._coord", "def coordinates(self):\n\t\tplayer = self.value['Map']['World']['Player']\n\t\treturn player['X'], player['Y']", "def calc_pos(self, gridpos):\n x,y = gridpos\n x = self.x_offset + self.x_u * x\n y = self.y_offset + self.y_u * y\n return x, y", "def calculate_xy(self):\n x_p = self.offset * self.grid_size * 2\n # multiply by -1 to draw the diagram from top to bottom\n y_p = self.order * self.grid_size * 2 * -1\n return x_p, y_p", "def get_position(self):\n return self._rect.x, self._rect.y", "def pos(self):\n return Point(*self.position())", "def position(self):\n self._pedbcomponents._edb.Geometry.PointData(self._pedbcomponents._edb_value(0.0),\n self._pedbcomponents._edb_value(0.0))\n if is_ironpython:\n out = self.pin.GetPositionAndRotationValue()\n else:\n out = self.pin.GetPositionAndRotationValue(\n self._pedbcomponents._edb.Geometry.PointData(self._pedbcomponents._edb_value(0.0),\n self._pedbcomponents._edb_value(0.0)),\n self._pedbcomponents._edb_value(0.0),)\n if out[0]:\n return [out[1].X.ToDouble(), out[1].Y.ToDouble()]", "def coordinates(self, gid):\n return self._geod[gid]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether a position is free in the map.
def _is_free(self, position):
    index = self._figure_coordinates(position)
    return self._map[tuple(index)] == FREE
[ "def _is_free(self, pos: int) -> bool:\n row_idx, col_idx = self._index(pos)\n return self._board[row_idx][col_idx] is None", "def is_costmap_free(self, x, y):\n x_idx = int((x - self.map_orig_x)/self.map_resolution)\n y_idx = int((y - self.map_orig_y)/self.map_resolution)\n\n if self.costmap[y_idx, x_idx] == 0:\n return True\n else:\n return False", "def is_free(self, check_address):\n for row in self.entries:\n if check_address.collides(row.address):\n return False\n return True", "def free_space(self, coord):\n \n # Verify that the coordinates are within bounds\n if (coord[0] < self.arena_left or coord[0] > self.arena_right or\n coord[1] < self.arena_bottom or coord[1] > self.arena_top):\n return False\n \n # Check whether the destination intersects any blocks\n if self._game.blocked(coord) == True:\n return False\n \n # If we made it past both tests, then the position must be free\n return True", "def __is_pos_valid(self, x, y, map):\n cell_radius = int((self.ROBOT_RADIUS + 0.1)/map.info.resolution)\n y_index = int((y-map.info.origin.position.y)/map.info.resolution)\n x_index = int((x-map.info.origin.position.x)/map.info.resolution)\n\n for i in range(x_index-cell_radius, x_index+cell_radius, 1):\n for j in range(y_index-cell_radius, y_index+cell_radius, 1):\n index = j * map.info.width + i\n if index >= len(map.data):\n return False\n try:\n val = map.data[index]\n except IndexError:\n print(\"IndexError: index: %d, map_length: %d\"%(index, len(map.data)))\n return False\n if val != 0:\n return False\n return True", "def at_free_pos(field: LikelihoodField, pos: Vector2) -> bool:\n if (dist := closest_to_pos(field, pos)) is None:\n return False\n\n return dist > 0.0", "def is_free(board, field):\n x, y = field\n return board[x][y] == FREE", "def isOccupied(self, position):\n return self.isBoard(position) and self.board[position[0]][position[1]] > 0", "def free_cell(board, position):\n if position in board:\n if board[position] is True:\n del board[position]", "def is_empty(self, pos):\n lowx, highx, lowy, highy = self.pos_to_coords(pos)\n return not self.grid[lowx:highx, lowy:highy, :].any()", "def isSafe(self):\r\n for spots in self.safe_places:\r\n if self.pos == spots:\r\n return True", "def is_free(self, item_id):\n self.claimed(item_id) is None", "def cellOccupied(self, position):\n if position in self.images:\n return True\n else:\n return False", "def are_there_free_stations(self):\n return any([station.free for station in self.pool])", "def position_in_bounds(self, position):\n if position[0] < 0 or position[0] >= self.n:\n return False\n if position[1] < 0 or position[1] >= self.m:\n return False\n return True", "def pos_full(self, pos):\n return bool(self[pos])", "def is_valid_position(self, piece):\r\n for (x, y) in piece.get_template():\r\n if x < 0 or x > 9 or y > 19 or \\\r\n (0 <= x <= 9 and 0 <= y <= 19 and self.grid[y][x]):\r\n return False\r\n return True", "def is_in_memory(self):\n for r in self.pa_range():\n if not r.is_mapped():\n return False\n return True", "def is_free(self) -> bool:\n return self.price_overview.final == 0", "def is_collision_free(x, y, obstacle_map, granularity):\n if collision_cache.get(y, False):\n return False\n\n if is_obstacle_space(y, obstacle_map):\n collision_cache[y] = True\n return False\n\n x = np.array(x)\n y = np.array(y)\n d = np.asscalar(cartesian_distance(x, y))\n unit_vector = (y - x) / d\n floor = int(np.floor(d / granularity))\n\n for i in range(floor):\n _m = x + i * granularity * unit_vector\n\n if 
collision_cache.get(tuple(_m), False):\n return False\n\n # can be skipped as the hit ratio is not that much,\n # so time for cache checking adds up\n if free_space_cache.get(tuple(_m), False):\n continue\n\n if is_obstacle_space(_m, obstacle_map):\n collision_cache[tuple(_m)] = True\n return False\n\n free_space_cache[tuple(_m)] = True\n\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inflate the obstacles in map by a given radius.
def _inflate_map(self, og, radius):
    new_map = copy(og)
    shape = og.shape
    new_radius = radius / self._resolution
    obstacles = np.nonzero(og == OCCUPIED)
    for i in range(np.size(obstacles[0])):
        x = obstacles[0][i]
        y = obstacles[1][i]
        rr, cc = circle(int(x), int(y), int(new_radius), shape)
        new_map[rr, cc] = OCCUPIED
    return new_map
[ "def inflate_map(self, grid_map):\n\n\n \"\"\"\n Fill in your solution here\n \"\"\"\n\n width = grid_map.get_width()\n height = grid_map.get_height()\n radius = self.radius\n #fill in the C space cells whose distance to occupied cells <= robot radius\n for x_grid in range(width):\n for y_grid in range(height):\n\n if grid_map[x_grid, y_grid] == self.occupied_space:\n x_0 = x_grid - radius\n y_0 = y_grid - radius\n\n for delta_x in range(2 * radius + 1):\n for delta_y in range(2 * radius + 1):\n x_check = x_0 + delta_x\n y_check = y_0 + delta_y\n if sqrt((x_check - x_grid)**2 + (y_check - y_grid)**2) <= radius and grid_map[x_check, y_check] != self.occupied_space:\n self.add_to_map(grid_map, x_check, y_check, self.c_space)\n\n\n # Return the inflated map\n return grid_map", "def _update_map(self):\n # Remove all new obstacles outside of radius\n new_obstacles_temp = [obstacle_i for obstacle_i in self.new_obstacles if self._check_if_within(obstacle_i)]\n # Remove all old obstacles outside of radius\n self.map[:] = [obstacle_i for obstacle_i in self.map if self._check_if_within(obstacle_i)]\n original_length = len(self.map)\n\n for new_obstacle_i in new_obstacles_temp:\n already_exists = False\n for j, old_obstacle_j in enumerate(self.map[:original_length]):\n if new_obstacle_i.name == old_obstacle_j.name:\n already_exists = True\n break\n\n if not already_exists:\n self.map.append(new_obstacle_i)\n \n return", "def obstacles_geometry(self):\n return [(self.__class__.state_to_env(self._origin, o.row, o.col), o.radius) for o in self._obstacles]", "def create_obstacle_map_update(self) -> set:\n x = self.x_size\n y = self.y_size\n\n # create the top border of the map\n obstacle = {(i, 0) for i in range(x)}\n # create the bottom border of the map\n obstacle.update({(i, y - 1) for i in range(x)})\n # Create left vertical border of the map\n obstacle.update({(0,i) for i in range(y)})\n # Create the right vertical border of the map\n obstacle.update({(x - 1, i) for i in range(y)})\n\n # add static walls:\n for i in range(10, 21):\n obstacle.add((i, 15))\n for i in range(15):\n obstacle.add((20, i))\n\n for i in range(15, 30):\n obstacle.add((30, i))\n for i in range(16):\n obstacle.add((40, i))\n\n # TODO: clean up the above magic number values with\n # something less brittle:\n '''\n walls = self.APRIORI_STATIC_OBSTACLES\n\n for i in range(walls[0][0][0], walls[0][1][0])\n obstacle.add(i, walls[0][0][1])\n\n for i in range(walls[1][0][0], walls[1][0][0])\n obstacle.add(i, walls[0][0][1])\n '''\n\n\n return obstacle", "def extract_neighborhood(x, y, arr, radius):\n if x < radius or y < radius or x>=480-radius or y>=640-radius:\n return np.ones((radius*2+1,radius*2+1)).ravel()\n return arr[(x - radius) : (x + radius + 1), (y - radius) : (y + radius + 1)].ravel()", "def set_obstacles(self):\n position = self.normalize_position(self.robot.get_position())\n safe_area = self.get_area_indexes(position, 9)\n\n count = self.obstacles_count\n while count > 0:\n position = random.randint(0, self.height * self.width - 1)\n if position not in safe_area:\n area = self.get_area_indexes(position,\n random.choice([1, 2, 3, 4]))\n for idx in area:\n if (0 <= idx < self.width * self.height\n and idx not in safe_area):\n self.field[idx] = self.WALL_SIGN\n count -= 1", "def recompute_fov(fov_map, x, y, radius, light_walls=True, algorithm=0):\n libtcod.map_compute_fov(fov_map, x, y, radius, light_walls, algorithm)", "def get_subGrid_map(self):\n spatial_map = np.zeros(self.shape) # define velocity field over UK\n 
rho_boundaries = {}\n # rho_boundaries : a dictionary with the form {i: [rho_low, rho_high} where is the index in rho-space\n for i in range(len(self.rho_space) - 1):\n rho_boundaries[i] = [self.rho_space[i], self.rho_space[i + 1]]\n max_density = rho_boundaries[i][1] # maximum density in data\n for i, row in enumerate(self.domain):\n for j, col in enumerate(row):\n d_ij = self.domain[i, j] # density value at point i,j\n if np.isnan(d_ij): # if sea, then pass\n pass\n else: # if land region: map rho_ij to a velocity-space value\n for rho_box in rho_boundaries: # iterate through rho-space $ check against if == density_ij\n boundary = rho_boundaries[rho_box]\n # If density in the range interval then set map location density_ij == velocity(density)\n if boundary[0] <= d_ij < boundary[1]:\n spatial_map[i, j] = self.velocity[rho_box]\n # CHECK if density bigger than rho given space\n # - cap at highest given rho space boundary mapping\n elif d_ij > max_density: # if density above max density, cap to max value\n spatial_map[i, j] = self.velocity[len(rho_boundaries) - 1]\n return spatial_map", "def _prep_tiles(self):\r\n # todo: write this. expected output is a flat iterable.\r\n # todo: explore turning flatten() into generator\r\n\r\n if self._bounds and not self._tiles:\r\n # build tile list from bounds\r\n self._zoom = self._detail + Pin.find_span_zoom(self._bounds)\r\n self._tiles = Tile.from_pins(self._bounds, self._zoom) # get the tiles covering the span\r\n Tile.new_tile_q.join() # wait for tiles to arrive\r\n\r\n if self._tiles and not self._bounds:\r\n sw_pin = Pin.from_tile_coord(np.min(self._X), np.max(self._Y) + 1, self._zoom)\r\n ne_pin = Pin.from_tile_coord(np.max(self._X) + 1, np.min(self._Y), self._zoom)\r\n self._bounds = sw_pin, ne_pin\r\n\r\n assert all(isinstance(t, Tile) for t in self._tiles), f'{self._tiles}' # all objects must be tiles\r\n self._X, self._Y, zooms = np.asarray(list(self._tiles)).T # asarray won't work on sets. 
ugh.\r\n assert all(zooms == zooms[0]) # all zooms must be the same\r\n self._zoom = zooms[0]", "def expand_boundaries(boundaries, r=0):\n if r == 0:\n return boundaries\n expanded_boundaries = dilation(boundaries, d_ks[r])\n ### save_image(expanded_boundaries, f'expanded_boundaries_{r}_{boundaries.float().mean():.2f}.png', normalize=True)\n return expanded_boundaries", "def updateDecisionVariableBounds(self, radius):\n for var in self.decision_variables:\n var.setlb(\n maxIgnoreNone(\n value(var) - radius, self.initial_decision_bounds[var.name][0]\n )\n )\n var.setub(\n minIgnoreNone(\n value(var) + radius, self.initial_decision_bounds[var.name][1]\n )\n )", "def __get_repulsive_force(self, robot_cell, robot_map):\n circle = filled_midpoint_circle(robot_cell.x, robot_cell.y, self.__radius_obs)\n closest_obstacles = [None] * self.__max_obs\n min_dists = [inf] * self.__max_obs\n for point in circle:\n if robot_map.is_in_bound(point) and robot_map.grid[point.x][point.y] >= 0.75:\n dist = hypot(robot_cell.x - point.x, robot_cell.y - point.y)\n for i in range(self.__max_obs):\n if dist < min_dists[i]:\n for ii in range(self.__max_obs - 1, i + 2, -1):\n min_dists[ii] = min_dists[ii - 1]\n closest_obstacles[ii] = closest_obstacles[ii - 1]\n min_dists[i] = dist\n closest_obstacles[i] = point\n break\n result = {'x': 0, 'y': 0}\n for obstacle in closest_obstacles:\n if obstacle != None:\n dist = hypot(robot_cell.x - obstacle.x, robot_cell.y - obstacle.y)\n rep_factor = min(0.9, abs(self.__radius_obs - dist) / self.__radius_obs)\n length = -2 * log10(1 - rep_factor) * self.__weight_rep\n dx = obstacle.x - robot_cell.x\n dy = obstacle.y - robot_cell.y\n angle = atan2(dy, dx)\n result['x'] += -length * cos(angle)\n result['y'] += -length * sin(angle)\n return result", "def seafloor_grid(depths, lat, lon):", "def __init__(self, target_idx, radius):\r\n\r\n self.target_idx = target_idx\r\n self.radius = radius\r\n self.turbines = []", "def create_2d_circle_kernel(radius):\n return np.array([ np.sqrt( x * x + y * y ) <= float(radius) for y in xrange(-radius, radius+1) for x in xrange(-radius, radius+1)], dtype=np.float32).reshape( radius*2+1, radius*2+1 )", "def calc_cspace(self,msg):\n try:\n padding = 3\n THRESH = 50 #Threshold for shading of a cell\n worldCoordinates = [] #Initialize world coordinate list of obstacles\n self.c_space = deepcopy(self.map)\n cspaceMap = list(self.c_space.data) #Create a copy of existing map to expand obstacles with. Make list so its changeable\n rospy.loginfo(\"Calculating C-Space\")\n\n ## Determine cspace for each layer of padding\n for i in range(padding):\n #print(i)\n ## Go through each cell in the occupancy grid (range used to start on row/col 0)\n for y in range(self.map.info.height):\n for x in range(self.map.info.width):\n ## Inflate the obstacles where necessary\n if self.c_space.data[self.grid_to_index(x, y)] >= THRESH: \n cspaceMap[self.grid_to_index(x, y)] = 100 #Set to 100 to make it 100% an obstacle\n neighbors = self.all_neighbors_of_8(x, y, self.map) #Get all walkable cells that neighbor main cell\n for each in neighbors:\n cspaceMap[self.grid_to_index(each[0], each[1])] = 100 #Set cell to an obstacle in the map copy\n #cspaceMap = tuple(cspaceMap)\n self.c_space.data = deepcopy(cspaceMap) #Set the mapdata to the new map for use in recursion. 
\n \n ## Convert cspace coordinates to world coordinates (to avoid duplicates)\n for y in range(self.map.info.height):\n for x in range(self.map.info.width):\n if cspaceMap[self.grid_to_index(x, y)] >= THRESH: #If an obstacle is detected\n worldCoordinates.append(self.grid_to_world(x, y)) #append to list in order\n \n ## Create a GridCells message and publish it\n ## This is used only for Rviz Visualization\n msg = GridCells() #Create GridCells Object\n msg.cell_height = self.map.info.resolution #dims are equal to map resolution\n msg.cell_width = self.map.info.resolution\n msg.cells = worldCoordinates #Set cell data to the world coordinates of obstacles\n msg.header.frame_id = self.map.header.frame_id #Copy over frame id\n self.pubCSpace.publish(msg) #Publish to topic\n\n occGridCSpace = deepcopy(self.c_space) #Create new Occupancy Grid Object\n occGridCSpace.data = cspaceMap\n self.c_space = occGridCSpace\n\n ## Return the C-space\n #rospy.sleep(.25)\n return occGridCSpace\n except Exception as e:\n print('failed on calc_cspace')\n print(e)\n return None", "def _get_cells_in_city(self, center: IntVector2D, radius: int, city_orientation: int,\n vector_field: IntVector2DArray) -> IntVector2DArray:\n x_range = np.arange(center[0] - radius, center[0] + radius + 1)\n y_range = np.arange(center[1] - radius, center[1] + radius + 1)\n x_values = np.repeat(x_range, len(y_range))\n y_values = np.tile(y_range, len(x_range))\n city_cells = list(zip(x_values, y_values))\n for cell in city_cells:\n vector_field[cell] = align_cell_to_city(center, city_orientation, cell)\n return city_cells", "def tot_hidden_area(radius_planet, radius_in, radius_out, x_star, y_star, ring_inclination, star_planet_intersections, star_disk_intersections_in, star_disk_intersections_out, disk_planet_intersections_in, disk_planet_intersections_out, opacity, tol=10**-10):\n #Planet hidden area\n planet_area = planet_hidden_area(radius_planet, x_star, y_star, star_planet_intersections, tol)\n #Disks hidden area\n disk_in_area = disk_hidden_area(radius_in, ring_inclination, x_star, y_star, star_disk_intersections_in, tol)\n disk_out_area = disk_hidden_area(radius_out, ring_inclination, x_star, y_star, star_disk_intersections_out, tol)\n #Double hidden area\n #Initial values assuming no intersections\n double_area_in = np.minimum(planet_area,disk_in_area)\n double_area_out = np.minimum(planet_area,disk_out_area)\n #When there are intersections, call the algorithm to find the double hidden area.\n calcin = np.logical_and(np.logical_and(planet_area>0,disk_in_area>0),np.any(np.logical_not(np.isnan(disk_planet_intersections_in)),(1,2)))\n star, planet, disk, dha_border_in = handler(radius_planet[calcin], radius_in[calcin], ring_inclination, x_star[calcin], y_star[calcin], star_planet_intersections[calcin], star_disk_intersections_in[calcin], disk_planet_intersections_in[calcin], tol)\n double_area_in[calcin] = double_hidden_area((star, planet, disk), dha_border_in, tol)\n calcout = np.logical_and(np.logical_and(planet_area>0,disk_out_area>0),np.any(np.logical_not(np.isnan(disk_planet_intersections_out)),(1,2)))\n star, planet, disk, dha_border_out = handler(radius_planet[calcout], radius_out[calcout], ring_inclination, x_star[calcout], y_star[calcout], star_planet_intersections[calcout], star_disk_intersections_out[calcout], disk_planet_intersections_out[calcout], tol)\n double_area_out[calcout] = double_hidden_area((star, planet, disk), dha_border_out, tol)\n #Conclusions\n ring_area = 
(disk_out_area-double_area_out)-(disk_in_area-double_area_in)\n hidden_area = opacity*ring_area+planet_area\n return hidden_area", "def generate_local_costmap_and_obstacles(self):\n rospy.wait_for_message('/scan', LaserScan)\n\n # Initilize point cloud for transformations of obstacles\n pointcloud_hokuyo = PointCloud()\n pointcloud_hokuyo.header.frame_id = 'hokuyo_link'\n\n # Initilize point cloud for transformations regarding the visualization of the local map.\n pointcloud_local_costmap = PointCloud()\n pointcloud_local_costmap.header.frame_id = 'hokuyo_link'\n\n # Set robot position to the middle of the grid map\n local_costmap_middle = int(self.local_costmap.info.height/2)\n robot_pos = (local_costmap_middle, local_costmap_middle)\n\n while not rospy.is_shutdown():\n start = time.time()\n\n # Get current values from subscribed topics\n ranges = self.scan.ranges \n current_pose = self.current_pose\n min_angle = self.scan.angle_min\n angle_inc = self.scan.angle_increment\n\n # Clear point clouds\n pointcloud_hokuyo.points.clear()\n pointcloud_local_costmap.points.clear()\n\n # Clear local costmap\n local_costmap = np.zeros((self.local_costmap.info.height, self.local_costmap.info.height), dtype=np.int8)\n\n for idx, element in enumerate(ranges):\n # Check if element would be in local_costmap\n if element < self.lc_length/2:\n angle = min_angle + idx * angle_inc\n\n # Get position of the sensed element in the frame of the laser scanner\n dx = np.cos(angle) * element\n dy = np.cos(np.pi/2 - angle) * element\n\n # Get position of the sensed element for visualization of the local costmap\n dx_local_map = np.cos(np.pi/2 - (angle + current_pose[2])) * element\n dy_local_map = np.sin(np.pi/2 - (angle + current_pose[2])) * element \n\n point_hokuyo_frame = Point()\n point_hokuyo_frame.x = dx\n point_hokuyo_frame.y = dy\n point_hokuyo_frame.z = 0\n\n point_local_costmap = Point()\n point_local_costmap.x = dx_local_map\n point_local_costmap.y = dy_local_map\n point_local_costmap.z = 0\n\n pointcloud_hokuyo.points.append(point_hokuyo_frame)\n pointcloud_local_costmap.points.append(point_local_costmap)\n\n # Transform point cloud into 'map' frame\n self.listener.waitForTransform('/hokuyo_link', '/base_link', rospy.Time(0), rospy.Duration(10))\n pointcloud_hokuyo.header.stamp = self.listener.getLatestCommonTime('/hokuyo_link', '/base_link')\n pointcloud_local_costmap.header.stamp = self.listener.getLatestCommonTime('/hokuyo_link', '/base_link')\n pointcloud_local_costmap = self.listener.transformPointCloud('/base_link', pointcloud_local_costmap)\n pointcloud_base_link = self.listener.transformPointCloud('/base_link', pointcloud_hokuyo)\n\n self.listener.waitForTransform('/odom', '/base_link', rospy.Time(0), rospy.Duration(10))\n pointcloud_base_link.header.stamp = self.listener.getLatestCommonTime('/base_link', '/odom')\n pointcloud_odom = self.listener.transformPointCloud('/odom', pointcloud_base_link)\n\n self.listener.waitForTransform('/map', '/odom', rospy.Time(0), rospy.Duration(10))\n pointcloud_odom.header.stamp = self.listener.getLatestCommonTime('/odom', '/map')\n pointcloud_map = self.listener.transformPointCloud('/map', pointcloud_odom)\n\n # Add points of the local costmap that have been transformed for visualization purposes\n for point in pointcloud_local_costmap.points:\n point = (int(np.floor(point.x / self.local_costmap.info.resolution)), \\\n int(np.floor(point.y / self.local_costmap.info.resolution)))\n try:\n local_costmap[robot_pos[0] + point[0], robot_pos[1] + point[1]] = 100\n 
except:\n pass\n\n # Publish local_costmap with robot in its center\n self.local_costmap.header.stamp = rospy.Time.now()\n self.local_costmap.info.origin.position.x = current_pose[0] - self.lc_length / 2\n self.local_costmap.info.origin.position.y = current_pose[1] - self.lc_length / 2\n self.local_costmap.data = local_costmap.ravel()\n self.pub_local_costmap.publish(self.local_costmap)\n\n # Publish local_obstacles\n self.local_obstacles = pointcloud_map\n self.pub_local_obstacles.publish(self.local_obstacles)\n\n end = time.time()\n # To ensure that desired frequency does not get affected by computation time.\n rospy.sleep((1/self.lc_freq) - end + start)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw a random sample from the configuration space.
def _draw_sample(self):
    sample = np.random.random_sample(2) * 10
    return sample
[ "def draw_sample(self):\n m = self.data[1].shape[0]\n select = np.random.choice(m,self.mtrain,replace=False)\n return tuple([d[select,:] for d in self.data])", "def draw_sample(self):\n return self.sample_fn(self.output_components)", "def generate_sample(self):\n\n rand_angles = self.generate_random_angles()\n rand_lengths = self.generate_random_lengths()\n\n random_config = make_robot_config_from_ee1(self.config.points[0][0], self.config.points[0][1], rand_angles,\n rand_lengths, self.config.ee1_grappled, self.config.ee2_grappled)\n\n test = test_obstacle_collision(random_config, self.spec, self.obstacles)\n\n if test and test_self_collision(random_config, self.spec) and test_environment_bounds(random_config):\n return random_config\n else:\n return self.generate_sample()", "def sample(self, size):", "def sample(self):\n if self.params is not None:\n self.value = np.random.choice(self.params)", "def draw_uniform_sample(choices: List[T], n: int) -> List[T]:\n return random.default_rng().choice(a=choices, size=n)", "def draw_sample(self):\n width = len(self.data)\n col = \"#4287f5\"\n chunk = width\n steps = chunk / 800\n beak = 20\n x = 1\n pos = 0\n last = 60\n self.canvas.delete(\"all\")\n while pos < chunk:\n y = float(self.data[pos]) + 0.5\n y = 120 - int((120 * y) / 1)\n self.canvas.create_line(x - 1, last, x, y, fill=col)\n last = y\n x += 1\n pos += int(steps)", "def sample(self):\n sample_x = self.x_range[0] + (self.x_range[1] - self.x_range[0]) * np.random.rand()\n sample_y = self.y_range[0] + (self.y_range[1] - self.y_range[0]) * np.random.rand()\n sampled_state = StateSamplerPosition.state_tuple(np.array([sample_x, sample_y]))\n return sampled_state", "def draw_sample(self, t=None):\n raise NotImplementedError(\"This method draw_sample(t) has to be implemented in the class inheriting from Arm.\")", "def sample(self):\n return np.random.dirichlet(self.alpha)", "def visualize_sample(self, x, y):\n print(\"Sample visualization not implemented for the current class.\")", "def sample(self, shape=(), seed=None):\n raise TypeError(\"cannot sample from a flat distribution\")", "def sample(self, shape=(), seed=None):\n raise TypeError(\"cannot sample from a half flat distribution\")", "def draw_random_sample(choices, probabilities, n):\n # sets up an index list for the chosen particles, and makes bins for the probabilities\n values = np.array(range(len(choices)))\n probs = np.array(probabilities)\n bins = np.add.accumulate(probs)\n inds = values[np.digitize(random_sample(n), bins)] # chooses the new particles based on the probabilities of the old ones\n samples = []\n for i in inds:\n samples.append(deepcopy(choices[int(i)])) # makes the new particle cloud based on the chosen particles\n return samples", "def randomize(self):\n\n # initialize, clear lattice\n\n self._lattice = []\n for n in range(self.size):\n self._lattice.append([ random.choice(self._dictionary) for x in range(self.size) ])", "def randomSample(tree):\r\n\r\n\t# Take an initial sample\r\n\tsample = Node((uniform(-pi, pi), uniform(-2, 2)))\r\n\r\n\twhile existsInTree(tree, sample): # sample again until we haven't see said sample\r\n\t\tsample = Node((uniform(-pi, pi), uniform(-2, 2)))\r\n\r\n\treturn sample", "def random(self):\r\n if self.ate_apple:\r\n self.x = 20 * random.randint(0, 23)\r\n self.y = 20 * random.randint(3, 23)", "def sample(self, n):\n\n\t\treturn torch.distributions.Gamma(self.shapes, self.rates).sample([n])", "def sample_uniform(self, N):\n np.random.seed()\n return np.random.dirichlet([1]*self.k, 
N)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Trains one logistic classifier per review group. Saves the trained classifiers within self.models.
def train(self, x_train, y_train):
    # check if vectorizer has been created before, if so load from file
    if check_persisted(f"{self.env['store_misc']}/tfidf", f'{self.vectorizer_hash}_X', self.load_fresh):

        vec = load(f"{self.env['store_misc']}/tfidf", f'{self.vectorizer_hash}_vec')
        X = load(f"{self.env['store_misc']}/tfidf", f'{self.vectorizer_hash}_X')

    else:
        # get the tokenized papers
        tokenized_papers = list(x_train[self.tokens_col])

        vec = TfidfVectorizer(ngram_range=self.ngram_range,
                              max_features=self.max_vocab_f,
                              strip_accents='unicode')

        # generate term document matrix (model inputs)
        X = vec.fit_transform(tokenized_papers)

        save(vec, f"{self.env['store_misc']}/tfidf", f'{self.vectorizer_hash}_vec', persist=True)
        save(X, f"{self.env['store_misc']}/tfidf", f'{self.vectorizer_hash}_X', persist=True)

    self.vectorizer = vec

    # discard fold ID column from labels
    review_groups = [col for col in y_train.columns if not col == 'k']

    for review_group in tqdm(review_groups, desc='Train Review Groups'):

        # pull label column
        labels = y_train[review_group]

        # logistic classifier
        classifier = SGDClassifier(loss="log", alpha=self.alpha).fit(X, labels)

        # save the model in dictionary of models
        self.models[review_group] = classifier
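A minimal companion sketch of how the artifacts saved by train() might be consumed at prediction time; the helper name predict_proba_by_group and its tokenized_papers argument are illustrative assumptions, not part of the original class.

def predict_proba_by_group(self, tokenized_papers):
    # Illustrative assumption: re-use the TF-IDF vocabulary fitted in train()
    # (transform only, no re-fit), then score with each per-group classifier.
    X_new = self.vectorizer.transform(tokenized_papers)
    # SGDClassifier(loss="log") is logistic regression, so predict_proba exists;
    # column 1 holds the positive-class probability for each review group.
    return {group: clf.predict_proba(X_new)[:, 1]
            for group, clf in self.models.items()}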
[ "def exec_classifiers(self, dataset):\n f = Features()\n pt = param_tuning.ParamTuning()\n\n start_time = time.time()\n Xtrain, Xtest, ytrain, ytest = self._load_and_split_data(dataset)\n print(\"Loaded train/test datasets in {} sec.\".format(time.time() - start_time))\n\n fX_train = f.build(Xtrain)\n fX_test = f.build(Xtest)\n print(\"Build features from train/test data in {} sec\".format(time.time() - start_time))\n\n for clf in config.MLConf.clf_custom_params:\n print('Method {}'.format(clf))\n print('=======', end='')\n print(len(clf) * '=')\n\n tot_time = time.time(); start_time = time.time()\n # 1st phase: train each classifier on the whole train dataset (no folds)\n # estimator = pt.clf_names[clf][0](**config.MLConf.clf_custom_params[clf])\n estimator = pt.clf_names[clf][0](random_state=config.seed_no)\n estimator.set_params(**config.MLConf.clf_custom_params[clf])\n estimator = pt.trainClassifier(fX_train, ytrain, estimator)\n\n print(\"Finished training model on dataset; {} sec.\".format(time.time() - start_time))\n\n start_time = time.time()\n # 2nd phase: test each classifier on the test dataset\n res = pt.testClassifier(fX_test, ytest, estimator)\n self._print_stats(clf, res['metrics'], res['feature_imp'], start_time)\n # if not os.path.exists('output'):\n # os.makedirs('output')\n # np.savetxt(f'output/{clf}_default_stats.csv', res['metrics']['stats'], fmt=\"%u\")\n\n print(\"The whole process took {} sec.\\n\".format(time.time() - tot_time))", "def TrainModel(self):\n # Initialize the Needed Classifier\n self.classifier = NaiveBayesClassifier.train(self.train_features)\n #self.classifier = MaxentClassifier.train(self.train_features,algorithm=\"iis\")", "def batch_classify(X_train, Y_train, X_test, Y_test, no_classifiers=5, verbose=True):\n dict_models = {}\n for classifier_name, classifier in list(dict_classifiers.items())[:no_classifiers]:\n #t_start = time.clock()\n t_start = time.perf_counter()\n classifier.fit(X_train, Y_train)\n\n\n if(classifier_name == \"Naive Bayes\"):\n with open('modello-Ing(clf-bayesian-FINALE).pickle', 'wb') as f:\n pickle.dump(classifier, f, pickle.HIGHEST_PROTOCOL)\n print(\"Termine serializzazione classificato bayesiano.\")\n\n if (classifier_name == \"Nearest Neighbors\"):\n with open('modello-Ing(clf-KNN-FINALE).pickle', 'wb') as f:\n pickle.dump(classifier, f, pickle.HIGHEST_PROTOCOL)\n print(\"Termine serializzazione classificato KNN.\")\n\n\n t_end = time.perf_counter()\n t_diff = t_end - t_start\n train_score = classifier.score(X_train, Y_train)\n test_score = classifier.score(X_test, Y_test)\n\n dict_models[classifier_name] = {'model': classifier, 'train_score': train_score, 'test_score': test_score,\n 'train_time': t_diff}\n if verbose:\n print(\"trained {c} in {f:.2f} s\".format(c=classifier_name, f=t_diff))\n return dict_models", "def train_all(self, classifier, name: str, save=False) -> None:\n\n train = self.features[self.features_list]\n target = self.features['stressed']\n scaler = StandardScaler().fit(train)\n train_scaled = scaler.transform(train)\n print(f'Currently Training {name} on all data')\n clf = classifier.fit(train_scaled, target)\n\n self.scaler = scaler\n self.classifier = clf\n self.clf_name = name\n\n if save:\n joblib.dump(scaler, 'models/scaler.pkl')\n joblib.dump(clf, f'models/classifier_{name}.pkl')", "def update_model(batch_size=10000):\n \n clf = pkl.load(\n open(path.join('pkl_objects', 'classifier.pkl'), 'rb')\n )\n\n conn = sqlite3.connect('reviews.sqlite')\n c = conn.cursor()\n c.execute(\"SELECT * FROM 
review_db\")\n\n results = c.fetchmany(batch_size)\n while results:\n data = np.array(results)\n X = data[:, 0]\n y = data[:, 1].astype(int)\n \n classes = np.array([0, 1])\n X_train = vect.transform(X)\n clf.partial_fit(X_train, y, classes=classes)\n results = c.fetchmany(batch_size)\n conn.close()\n pkl.dump(clf,\n open(path.join('pkl_objects', 'classifier.pkl'), 'wb'),\n protocol=4\n )\n return None", "def train():\n # get parameters from request\n #parameters = request.get_json()\n\n #adaboost, vec, selector, trainingError, validationError = training()\n # persist model\n #joblib.dump(adaboost, 'model.pkl')\n #joblib.dump(vec, 'vectorizer.pkl')\n #joblib.dump(selector, 'selector.pkl')\n\n #use existing models\n\n #Classifier 2 stuff -------------------------------------------------------\n # raw_train_data, train_label = get_data('train.csv', get_label=True)\n # train_data = process_raw_data(raw_train_data)\n # FEATURE_SIZE = train_data.shape[-1]\n #\n # model = XGBClassifier(n_estimators=2500, learning_rate=0.01, max_depth=2)\n # model.fit(train_data, train_label)\n # joblib.dump(model, 'xgboost.pkl')\n model = joblib.load('xgboost.pkl')\n\n retry = 5\n data, recentTweets = getTweetsData()\n if len(recentTweets) < 2 and retry > 0:\n data, recentTweets = getTweetsData()\n retry -= 1\n processed = process_raw_data(data)\n\n preds = model.predict(processed).tolist()\n predsUrl = preds[:]\n for i in range(len(preds)):\n if preds[i] > 0:\n preds[i] = \"Donald J. Trump\"\n predsUrl[i] = \"https://pbs.twimg.com/profile_images/874276197357596672/kUuht00m_400x400.jpg\"\n else:\n preds[i] = \"White House Staff\"\n predsUrl[i] = \"https://abs.twimg.com/sticky/default_profile_images/default_profile_400x400.png\"\n #End Classifier 2 stuff ----------------------------------------------------\n\n #1 is Trump\n #-1 is Staff\n return jsonify({'accuracy': 'not used',\n 'tweetOne': recentTweets[0],\n 'tweetOnePred': preds[0],\n 'tweetOneUrl': predsUrl[0],\n\n 'tweetTwo': recentTweets[1],\n 'tweetTwoPred': preds[1],\n 'tweetTwoUrl': predsUrl[1],\n\n 'tweetThree': recentTweets[2],\n 'tweetThreePred': preds[2],\n 'tweetThreeUrl': predsUrl[2],\n\n 'tweetFour': recentTweets[3],\n 'tweetFourPred': preds[3],\n 'tweetFourUrl': predsUrl[3],\n\n 'tweetFive': recentTweets[4],\n 'tweetFivePred': preds[4],\n 'tweetFiveUrl': predsUrl[4],\n\n 'tweetSix': recentTweets[5],\n 'tweetSixPred': preds[5],\n 'tweetSixUrl': predsUrl[5],\n\n 'tweetSeven': recentTweets[6],\n 'tweetSevenPred': preds[6],\n 'tweetSevenUrl': predsUrl[6],\n\n 'tweetEight': recentTweets[7],\n 'tweetEightPred': preds[7],\n 'tweetEightUrl': predsUrl[7],\n\n 'tweetNine': recentTweets[8],\n 'tweetNinePred': preds[8],\n 'tweetNineUrl': predsUrl[8],\n\n 'tweetTen': recentTweets[9],\n 'tweetTenPred': preds[9],\n 'tweetTenUrl': predsUrl[9],\n\n 'tweet11': recentTweets[10],\n 'tweet11Pred': preds[10],\n 'tweet11Url': predsUrl[10],\n\n 'tweet12': recentTweets[11],\n 'tweet12Pred': preds[11],\n 'tweet12Url': predsUrl[11],\n\n 'tweet13': recentTweets[12],\n 'tweet13Pred': preds[12],\n 'tweet13Url': predsUrl[12],\n\n 'tweet14': recentTweets[13],\n 'tweet14Pred': preds[13],\n 'tweet14Url': predsUrl[13],\n\n 'tweet15': recentTweets[14],\n 'tweet15Pred': preds[14],\n 'tweet15Url': predsUrl[14]})", "def train(self, reviews, save_filename):\n # pre-process the data\n if self.corpus is None:\n self._preprocess(reviews)\n\n # Specify temporary file name to save model weights\n temp_filename = 'topic_model'\n\n start = time.time()\n with warnings.catch_warnings():\n 
warnings.simplefilter('ignore')\n topic_model = gensim.models.ldamulticore.LdaMulticore(\n corpus= self.corpus,\n num_topics= self.n_topics,\n id2word= self.id2word,\n chunksize=10000,\n workers= None, # Num. Processing Cores - 1\n passes=50,\n eval_every = 1,\n per_word_topics=True)\n topic_model.save(temp_filename)\n self.model = topic_model\n end = time.time()\n\n print(f'Training Completed. The modeling process took {(end-start)/60.} minutes')\n\n # Move saved files to desired location\n shutil.move(temp_filename, save_filename)\n shutil.move(temp_filename + '.state', save_filename + '.state')\n shutil.move(temp_filename + '.id2word', save_filename + '.id2word')\n shutil.move(temp_filename + '.expElogbeta.npy', save_filename + '.expElogbeta.npy')", "def SingleView_train(self,n_cv,view): \n self.tune_parameters(n_cv)\n classifiers = []\n models_copy = copy.deepcopy(self.models)\n Single_view = view#self.Labeled_pool\n for i, model in enumerate(models_copy):\n classifiers.append(model.fit(Single_view, self.y_labeled).best_estimator_) \n self.classifiers = copy.deepcopy(classifiers)", "def train_model(self):\n \n self.predictor = LogisticRegression().fit(self.x_train, self.y_train)\n return", "def __save(self):\r\n self.logger.info(f\"Saving classification head of model '{self}'...\")\r\n self._model.classifier.load_state_dict(self._classifier.state_dict())\r\n self._model.save_pretrained(self.path_store)\r\n self.logger.info(\"--> Successful\")", "def classifier_save(self, path=\"../../../datasets/logistic_regression_classifier\"):\n \n with open(path, 'wb') as f:\n pickle.dump(self.classifier, f)", "def train():\n # get data from files\n tourism_file = open('classifier/tourism.txt', 'r')\n nontourism_file = open('classifier/nontourism.txt', 'r')\n\n # retrieve features\n data_set = process_data(tourism_file, nontourism_file)\n training_set = data_set[0]\n test_set = data_set[1]\n datamixed = data_set[2]\n size = data_set[3]\n feature_set = data_set[4]\n\n # classifiers\n classifier_nb = NaiveBayesClassifier\n classifier_lr = SklearnClassifier(LogisticRegression())\n classifier_svm = SklearnClassifier(LinearSVC())\n\n # get best classifier from cross-validation\n classifier = cross_validate(classifier_svm, training_set, test_set)['classifier'] # set classifier\n return classifier", "def train_model(x_train, y_train):\r\n\r\n best_models = []\r\n model_names = []\r\n\r\n for i in range(len(classifiers)):\r\n\r\n model = classifiers[i]\r\n grid_search = GridSearchCV(model, param_grids[i], cv=5)\r\n\r\n grid_search.fit(x_train, y_train)\r\n best_models.append(grid_search.best_estimator_)\r\n\r\n name = model.__class__.__name__\r\n model_names.append(name)\r\n\r\n estimators = [('knn', best_models[0]), ('SVC', best_models[1]), ('DT', best_models[2]), ('LA', best_models[3]),\r\n ('QA', best_models[4])]\r\n\r\n ensemble = VotingClassifier(estimators, voting='hard')\r\n ensemble.fit(x_train, y_train)\r\n return ensemble", "def train(self, training_set, target_set, tuning=False):\n if tuning:\n # For cross validation:\n ranges = [(0.0001, 0.001, 0.01, 0.1), (0.1, 0.2, 0.5, 1, 2)]\n self.cross_validation(ranges, training_set, np.expand_dims(target_set, axis=1), k=5, ratio_validation=0.1)\n # When cross_val is done, we update our parameters:\n self.hyperparams = self.best_params\n # Each time, train is called, we re-init a SGDClassifier object so that we could use correct parameters:\n self.reinit()\n self.logistic_classifier = self.logistic_classifier.fit(training_set, target_set)", "def 
train_model_track1(train_pool, validation_pool, validation, test_private, features, data_path):\n\n cat = CatBoostClassifier(iterations=3000,\n loss_function='Logloss',\n l2_leaf_reg=2,\n random_seed=100,\n scale_pos_weight=11.92984045,\n eval_metric='AUC',\n use_best_model=True,\n early_stopping_rounds=100,\n max_depth=7,\n max_bin=100\n )\n\n cat.fit(train_pool, eval_set=validation_pool)\n valid_pred_prob = cat.predict_proba(validation.loc[:, features].values)[:, 1]\n valid_score_90 = scoring.rejection90(validation.label.values, valid_pred_prob,\n sample_weight=validation.weight.values)\n # 0.771923225\n print(f\"Score at rejection 90 {valid_score_90}\")\n predictions = cat.predict_proba(test_private.loc[:, features].values)[:, 1]\n prediction_file = os.path.join(data_path, \"test_private.csv\")\n print(f\"Track 1 prediction on private test data is present at {prediction_file}\")\n pd.DataFrame(data={\"prediction\": predictions}, index=test_private.index).to_csv(prediction_file,\n index_label=utils.ID_COLUMN)\n model_file = os.path.join(data_path, 'track_1_best_mode.cbm')\n print(f\"Track 1 best model is saved at {model_file}\")\n cat.save_model(model_file, format='cbm')", "def train( self, trainingData, trainingLabels):\n\n self.features = trainingData[0].keys()\n \"*** YOUR CODE HERE ***\"\n m = len(trainingData)\n n = int(self.ratio*m)\n trainingDataset = []\n trainingLabelsDataset = []\n\n for i in range(self.num_classifiers):\n trainingDataset.append([])\n trainingLabelsDataset.append([])\n for j in range(n):\n choice = random.choice(range(m))\n trainingDataset[i].append(trainingData[choice])\n trainingLabelsDataset[i].append(trainingLabels[choice])\n\n for i in range(self.num_classifiers):\n self.classifiers[i].train(trainingDataset[i], trainingLabelsDataset[i])\n # util.raiseNotDefined()", "def train(self):\r\n self.is_train = True\r\n for module in self._submodules.values():\r\n module.train()", "def ml_classification(x_train, y_train, x_test, y_test, cross_validation=False):\n from time import time\n from sklearn.naive_bayes import GaussianNB\n from sklearn.svm import SVC\n from sklearn.tree import DecisionTreeClassifier\n from sklearn.neighbors import KNeighborsClassifier\n from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier\n from sklearn.metrics import accuracy_score\n\n from sklearn.model_selection import KFold\n from sklearn.base import clone\n\n classifiers = (GaussianNB(), SVC(\n kernel=\"rbf\", ), DecisionTreeClassifier(), KNeighborsClassifier(\n n_neighbors=10), AdaBoostClassifier(), RandomForestClassifier(100))\n\n names = [\n \"Naive Bayes\", \"SVM\", \"Decision Trees\", \"KNeighbors\", \"AdaBoost\",\n \"Random Forest\"\n ]\n\n for idx, clf in enumerate(classifiers):\n\n clf_cv = clone(clf)\n\n print(\"\\n\", names[idx], \"\\n\", \"-\" * 20)\n\n t0 = time()\n # Fitting the model without cross validation\n clf.fit(x_train, y_train[:, 0])\n train_time = time() - t0\n y_pred = clf.predict(x_test)\n accuracy = accuracy_score(y_pred, y_test[:, 0])\n\n if cross_validation:\n k_fold = KFold(n_splits=10)\n\n t0 = time()\n # Fitting the model with cross validation\n for id_train, id_test in k_fold.split(x_train):\n # print(y_train[id_train, 0].shape)\n clf_cv.fit(x_train[id_train], y_train[id_train, 0])\n train_time_cv = time() - t0\n\n y_pred_cv = clf_cv.predict(x_test)\n accuracy_cv = accuracy_score(y_pred_cv, y_test[:, 0])\n\n print(\"Test Accuracy: \\t {:.3f}\".format(accuracy))\n if cross_validation:\n print(\"Test Accuracy CV:\\t 
{:.3f}\".format(accuracy_cv))\n\n print(\"Training Time: \\t {:.1f} ms\".format(train_time * 1000))\n if cross_validation:\n print(\n \"Training Time CV: \\t {:.1f} ms\".format(train_time_cv * 1000))", "def update_classifier(): #\n clf_pickle = open(\"text_clf.pickle\", \"rb\") # Setting up connection to saved classifier pickle file\n global text_clf\n text_clf = pickle.load(clf_pickle) # loading classifier to a local variable\n clf_pickle.close()\n print(\"SUCCESS : Using new pickle file for chatbot\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute a class saliency map using the model for images X and labels y.
def compute_saliency_maps(X, y, model):
    # Make input tensor require gradient
    X.requires_grad_()

    saliency = None
    ##############################################################################
    # TODO: Implement this function. Perform a forward and backward pass through #
    # the model to compute the gradient of the correct class score with respect  #
    # to each input image. You first want to compute the loss over the correct   #
    # scores (we'll combine losses across a batch by summing), and then compute  #
    # the gradients with a backward pass.                                        #
    # Hint: X.grad.data stores the gradients                                     #
    ##############################################################################
    # Replace "pass" statement with your code

    # Make a forward pass of X (which contains N images) through the model.
    # The output (scores) has shape (N, C): for each image, get its unnormalized
    # scores (for each class of the dataset), e.g. C=1000 for a model trained on ImageNet.
    scores = model(X)

    # Get the -unnormalized- score of the correct class for each image.
    # "cscores" has shape (N,)
    cscores = scores.gather(1, y.view(-1, 1)).squeeze()

    # Compute the loss over the correct scores.
    # As mentioned above, the loss is the sum of the correct class scores across the batch.
    loss = torch.sum(cscores)

    # Apply the backward pass, which computes the gradient of the loss
    # w.r.t. our model's parameters (among others, the input X).
    loss.backward()

    # Note that we can apply the backward pass directly from "cscores" by using:
    # >>> cscores.backward(gradient=torch.ones_like(y))
    # The reason: the sub-computational graph for the "sum" method is:
    # -----
    # Forward pass:               cscores ---> [sum] ---> loss
    # Backward pass (gradients):  [1, ..., 1] <---------- 1
    # -----
    # That is, we can directly start from the "cscores" gradient, which is a tensor of
    # ones with shape (N,). Actually: ones_like(y) == ones_like(cscores)

    # Compute the absolute value of the X gradients.
    # Saliency maps require nonnegative values (gradients).
    # For now, "saliency" has shape (N, 3, H, W)
    saliency = X.grad.abs()

    # Take the maximum value over the 3 input channels (for each of N images).
    # Now, "saliency" has shape (N, H, W)
    saliency = torch.max(saliency, dim=1).values

    ##############################################################################
    #                             END OF YOUR CODE                               #
    ##############################################################################
    return saliency
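A short usage sketch, assuming a preprocessed image batch X of shape (N, 3, H, W), integer labels y, a pretrained model, and matplotlib for display; none of these names come from the row above.

import matplotlib.pyplot as plt

model.eval()                  # inference mode: freeze dropout/batch-norm behavior
for p in model.parameters():
    p.requires_grad = False   # gradients are only needed w.r.t. the input X

saliency = compute_saliency_maps(X, y, model)    # shape (N, H, W)
plt.imshow(saliency[0].cpu(), cmap='hot')
plt.axis('off')
plt.show()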
[ "def classifier_saliency_maps(X, y, model):\n # Make sure the model is in \"test\" mode\n model.eval()\n\n # Make input tensor require gradient\n X.requires_grad_()\n\n scores = model(X)\n correct_class_scores = scores.gather(1, y.view(-1,1)).squeeze()\n dummy_loss = torch.sum(correct_class_scores)\n dummy_loss.backward()\n\n saliency = torch.max(torch.abs(X.grad), dim=1).values\n return saliency", "def compute_saliency_maps(X, y, model):\n # Make input tensor require gradient\n \n ##############################################################################\n # TODO: Implement this function. Perform a forward and backward pass through #\n # the model to compute the gradient of the correct class score with respect #\n # to each input image. You first want to compute the loss over the correct #\n # scores (we'll combine losses across a batch by summing), and then compute #\n # the gradients with a backward pass. #\n # Hint: X.grad.data stores the gradients #\n\n X.requires_grad_()\n N = X.shape[0]\n score_max = None\n for i in range(N):\n scores = model(X[i][None])\n score_max_index = scores.argmax()\n if i == 0:\n score_max = scores[0,score_max_index]\n else:\n score_max += scores[0,score_max_index]\n\n score_max.backward()\n saliency, _ = torch.max(X.grad.data.abs(),dim=1)\n\n return saliency", "def plot_saliency_maps(model, plot_prefix, class_name):\n\tloaded_image = keras.preprocessing.image.load_img('Images/' + \n\t\tclass_name + '/' + class_name + '_1.jpeg',target_size=(224,224))\n\t# preprocess image to get it into the right format for the model\n\timage = keras.preprocessing.image.img_to_array(loaded_image)\n\timage = image.reshape((1, *image.shape))\n\ty_pred = model.predict(image)\n\timage_var = tf.Variable(image, dtype=float)\n\n\twith tf.GradientTape() as tape:\n\t\tpred = model(image_var, training=False)\n\t\tclass_idxs_sorted = np.argsort(pred.numpy().flatten())[::-1]\n\t\tloss = pred[0][class_idxs_sorted[0]]\n\tgrads = tape.gradient(loss, image_var)\n\tdgrad_abs = tf.math.abs(grads)\n\tdgrad_max = np.max(dgrad_abs, axis=3)[0]\n\t# normalize to range between 0 and 1\n\tarr_min, arr_max = np.min(dgrad_max), np.max(dgrad_max)\n\tgrad_eval = (dgrad_max - arr_min) / (arr_max - arr_min + 1e-18)\n\tfig, axes = plt.subplots(1,2,figsize=(14,5))\n\taxes[0].imshow(loaded_image)\n\taxes[1].imshow(loaded_image)\n\ti = axes[1].imshow(grad_eval, cmap=\"jet\", alpha=0.8) # , alpha=0.8\n\tcolorbar = fig.colorbar(i)\n\tcolorbar.set_label('Saliency', rotation=270)\n\tplt.title('Saliency map for ' + class_name + '_1')\n\tplt.tight_layout()\n\tplt.savefig(plot_prefix + '_' + class_name + '_1_saliency.png')\n\tplt.show()", "def get_saliency_maps(model_object, target_class, predictor_matrix):\n\n loss_tensor = K.mean(\n (model_object.layers[-1].output[..., target_class] - 1) ** 2)\n\n gradient_tensor = K.gradients(loss_tensor, [model_object.input])[0]\n gradient_tensor = gradient_tensor / K.maximum(\n K.std(gradient_tensor), K.epsilon()\n )\n\n inputs_to_gradients_function = K.function(\n [model_object.input, K.learning_phase()], [gradient_tensor])\n\n saliency_matrix = inputs_to_gradients_function([predictor_matrix, 0])[0]\n return -1 * saliency_matrix", "def map_class(fine_labels, mapping):\n return np.array([mapping[l] for l in fine_labels])", "def computeSaliency(self, image, saliencyMap=...) 
-> Tuple[retval, saliencyMap]:\n ...", "def saliency_map(self, input, class_idx=None, retain_graph=False):\n b, c, h, w = input.size()\n\n logit = self.model(input)\n if class_idx is None:\n score = logit[:, logit.max(1)[-1]].squeeze()\n else:\n score = logit[:, class_idx].squeeze() \n \n self.model.zero_grad()\n score.backward(retain_graph=retain_graph)\n gradients = self.gradients['value'] # dS/dA\n activations = self.activations['value'] # A\n b, k, u, v = gradients.size()\n\n alpha_num = gradients.pow(2)\n alpha_denom = gradients.pow(2).mul(2) + \\\n activations.mul(gradients.pow(3)).view(b, k, u*v).sum(-1, keepdim=True).view(b, k, 1, 1)\n alpha_denom = torch.where(alpha_denom != 0.0, alpha_denom, torch.ones_like(alpha_denom))\n\n alpha = alpha_num.div(alpha_denom+1e-7)\n positive_gradients = F.relu(score.exp()*gradients) # ReLU(dY/dA) == ReLU(exp(S)*dS/dA))\n weights = (alpha*positive_gradients).view(b, k, u*v).sum(-1).view(b, k, 1, 1)\n\n saliency_map = (weights*activations).sum(1, keepdim=True)\n saliency_map = F.relu(saliency_map)\n saliency_map = F.upsample(saliency_map, size=(h,w), mode='bilinear', align_corners=False)\n saliency_map_min, saliency_map_max = saliency_map.min(), saliency_map.max()\n saliency_map = (saliency_map-saliency_map_min).div(saliency_map_max-saliency_map_min).data\n\n self.saliency_map = saliency_map", "def _convert_labels(self, y_user):\n print self._user2classif_lut\n return [self._user2classif_lut[x] for x in y_user]", "def predict(self, imageSource):\n targetImage = matplotlib.image.imread(imageSource).flatten()\n targetProjectedImage = self.project(targetImage)\n distances=[]\n classesArray=[]\n \n for i in range(len(self.classProjected)):\n distance = np.linalg.norm(targetProjectedImage - self.classProjected[i])\n distances.append(distance)\n classesArray.append(self.classes[i])\n \n predictedClass=argsortTwoArrays(distances,classesArray)[0]\n return predictedClass", "def classify(model, input_paths, image_dim=IMAGE_DIM):\n images, image_paths = load_images(input_paths, (image_dim, image_dim))\n probs = classify_nd(model, images)\n return dict(zip(image_paths, probs))", "def predict_label(image):\n \n img = load_img(\n image, target_size=(SHAPE[0, SHAPE[1])\n )\n\n img_array = img_to_array(img)\n img_array = np.expand_dims(img_array, 0) # Create a batch\n\n # get the weights for each class\n predictions = model.predict(img_array)\n \n # get the confidence score for the prediction\n score = tf.nn.softmax(predictions[0])\n\n # get the label for the predicted clas : 0/1\n # depending on which class has the higher score\n label = np.argmax(score)\n\n # generating class name for the label\n if label == 1 : cls = 'signature'\n else : cls = 'no_signature'\n \n return label", "def map_labels(labels: np.ndarray) -> list:\n maped = [0.0 if x == 'Iris-setosa' else 1.0 if x == 'Iris-versicolor' else 2.0 for x in labels]\n return maped", "def compute_labels(inputs, labels, threshold=0.71):\n global model\n\n outputs = model(**inputs, labels=labels)\n logits = outputs[:2][1]\n\n return map_logit(logits.detach().numpy()[0], threshold=threshold)", "def cal_confusion_matrices(self) -> Dict[str, Dict]:\n for true_labels, pred_labels in zip(self.sents_true_labels, self.sents_pred_labels):\n for true_label in true_labels: \n entity_type = true_label['label']\n prediction_hit_count = 0 \n for pred_label in pred_labels:\n if pred_label['label'] != entity_type:\n continue\n if pred_label['start_idx'] == true_label['start_idx'] and pred_label['end_idx'] == 
true_label['end_idx'] and pred_label['text'] == true_label['text']: # TP\n self.confusion_matrices[entity_type]['TP'] += 1\n prediction_hit_count += 1\n elif ((pred_label['start_idx'] == true_label['start_idx']) or (pred_label['end_idx'] == true_label['end_idx'])) and pred_label['text'] != true_label['text']: # boundry error, count FN, FP\n self.confusion_matrices[entity_type]['FP'] += 1\n self.confusion_matrices[entity_type]['FN'] += 1\n prediction_hit_count += 1\n if prediction_hit_count != 1: # FN, model cannot make a prediction for true_label\n self.confusion_matrices[entity_type]['FN'] += 1\n prediction_hit_count = 0 # reset to default", "def label(tile,tileNet,ClassCoordinates,raster):\r\n tile=extractTile(raster,tile)\r\n labelVector=tileNet.encode(tile)\r\n labelVector=labelVector.detach().numpy()\r\n label=ClassCoordinates.knn.predict(labelVector)\r\n return(label)", "def svm_classify(train_image_feats, train_labels, test_image_feats, lambda_value=591.0):\r\n\t# categories\r\n\tcategories = list(set(train_labels))\r\n\r\n\t# construct 1 vs all SVMs for each category\r\n\t# print(\"lambda:\", lambda_value)\r\n\tsvms = {cat: LinearSVC(random_state=0, tol=1e-5, loss='hinge', C=lambda_value) for cat in categories}\r\n\r\n\ttest_labels = []\r\n\r\n\t#############################################################################\r\n\t# TODO: YOUR CODE HERE #\r\n\t#############################################################################\r\n\tnum_test_points = test_image_feats.shape[0]\r\n\tpredictions = []\r\n\tW_mat = []\r\n\tb_vec = []\r\n\t# Iterate through categories and train each SVM:\r\n\tfor cat_ in svms:\r\n\t\t# obtain targets to train SVM:\r\n\t\ty, w_s = get_targets(cat_, train_labels)\r\n\t\tsvms[cat_].fit(train_image_feats, y, sample_weight=w_s)\r\n\t\tW_mat.append(svms[cat_].coef_)\r\n\t\tb_vec.append(svms[cat_].intercept_)\r\n\t\t# predictions.append(np.expand_dims(svms[cat_].decision_function(test_image_feats), -1))\r\n\r\n\tW_mat = np.concatenate(W_mat, 0)\r\n\tb_vec = np.expand_dims(np.concatenate(b_vec, -1),-1)\r\n\tpredictions = W_mat.dot(test_image_feats.T) + b_vec\t\r\n\tpredicted_indices = np.argmax(predictions, axis=0)\t\r\n\ttest_labels = [categories[index] for index in predicted_indices]\t\r\n\t#############################################################################\r\n\t# END OF YOUR CODE #\r\n\t#############################################################################\r\n\r\n\treturn test_labels", "def _do_saliency_calculations(model_object, loss_tensor, input_matrices):\n\n if isinstance(model_object.input, list):\n input_tensors = model_object.input\n else:\n input_tensors = [model_object.input]\n\n gradient_tensors = K.gradients(loss_tensor, input_tensors)\n num_input_tensors = len(input_tensors)\n\n for i in range(num_input_tensors):\n gradient_tensors[i] /= K.maximum(\n K.std(gradient_tensors[i]), K.epsilon()\n )\n\n inputs_to_gradients_function = K.function(\n input_tensors + [K.learning_phase()], gradient_tensors\n )\n\n saliency_matrices = [None] * num_input_tensors\n num_examples = input_matrices[0].shape[0]\n\n for i in range(num_examples):\n if numpy.mod(i, 100) == 0:\n print((\n 'Have computed saliency maps for {0:d} of {1:d} examples...'\n ).format(\n i, num_examples\n ))\n\n these_input_matrices = [a[[i], ...] 
for a in input_matrices]\n these_saliency_matrices = inputs_to_gradients_function(\n these_input_matrices + [0]\n )\n\n if saliency_matrices[0] is None:\n for j in range(num_input_tensors):\n these_dim = (\n (num_examples,) + these_saliency_matrices[j].shape[1:]\n )\n saliency_matrices[j] = numpy.full(these_dim, numpy.nan)\n\n for j in range(num_input_tensors):\n saliency_matrices[j][i, ...] = these_saliency_matrices[j][0, ...]\n\n print('Have computed saliency maps for all {0:d} examples!'.format(\n num_examples\n ))\n\n for j in range(num_input_tensors):\n saliency_matrices[j] *= -1\n\n return saliency_matrices", "def create_mapping(original_labels, cluster_labels):\n\n original_labels = np.array(original_labels, dtype=np.int)\n cluster_labels - np.array(cluster_labels, dtype=np.int)\n class_ids = np.unique(original_labels)\n cluster_ids = np.unique(cluster_labels)\n mapping = {}\n for cluster_id in cluster_ids:\n original_labels_in_cluster = original_labels[cluster_labels == cluster_id]\n map_to_id = np.bincount(original_labels_in_cluster).argmax()\n mapping[cluster_id] = map_to_id\n\n return mapping", "def get_saliency_map(session, features, saliency_method, label,\n input_tensor_name, output_tensor_name, graph=None):\n if graph is None:\n graph = tf.get_default_graph()\n label_placeholder = tf.placeholder(tf.int32)\n else:\n with graph.as_default():\n label_placeholder = tf.placeholder(tf.int32)\n output_tensor = graph.get_tensor_by_name(\n output_tensor_name)[0][label_placeholder]\n input_tensor = graph.get_tensor_by_name(input_tensor_name)\n if saliency_method == 'integrated_gradients':\n # Integrated Gradients is used on the first layer activations.\n # We run IG for 200 steps because empirically we find with these many steps,\n # the IG scores converges.\n return integrated_gradients.IntegratedGradients(\n graph=graph, session=session, y=output_tensor, x=input_tensor).GetMask(\n x_value=features, feed_dict={label_placeholder: label}, x_steps=200)\n elif saliency_method == 'integrated_gradients_black_white_baselines':\n # Integrated Gradients (Black + White baselines) is used on the input.\n # Computes 2 saliency maps using a black image and a white image as a\n # baseline separately and returns their mean average.\n # We run IG for 200 steps because empirically we find with these many steps,\n # the IG scores converges.\n saliency_maps = []\n for baseline in [\n np.min(features) * np.ones_like(features), # black baseline\n np.max(features) * np.ones_like(features), # white baseline\n ]:\n saliency_maps.append(\n integrated_gradients.IntegratedGradients(\n graph=graph, session=session, y=output_tensor,\n x=input_tensor).GetMask(\n x_value=features,\n x_baseline=baseline,\n feed_dict={label_placeholder: label},\n x_steps=200))\n return np.mean(saliency_maps, axis=0)\n elif saliency_method == 'xrai':\n return xrai.XRAI(\n graph=graph, session=session, y=output_tensor, x=input_tensor).GetMask(\n x_value=features, feed_dict={label_placeholder: label})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate an adversarial attack that is close to X, but that the model classifies as target_y.
def make_adversarial_attack(X, target_y, model, max_iter=100, verbose=True):
    # Initialize our adversarial attack to the input image, and make it require gradient
    X_adv = X.clone()
    X_adv = X_adv.requires_grad_()

    learning_rate = 1
    ##############################################################################
    # TODO: Generate an adversarial attack X_adv that the model will classify    #
    # as the class target_y. You should perform gradient ascent on the score     #
    # of the target class, stopping when the model is fooled.                    #
    # When computing an update step, first normalize the gradient:               #
    #   dX = learning_rate * g / ||g||_2                                         #
    #                                                                            #
    # You should write a training loop.                                          #
    #                                                                            #
    # HINT: For most examples, you should be able to generate an adversarial     #
    # attack in fewer than 100 iterations of gradient ascent.                    #
    # You can print your progress over iterations to check your algorithm.       #
    ##############################################################################
    # Replace "pass" statement with your code

    # Training loop: apply gradient ascent at most max_iter times.
    for epoch in range(max_iter):
        # Forward pass, "scores" shape is (1, 1000)
        scores = model(X_adv)

        # Get the predicted class (pred) and its score (pred_score).
        pred_score, pred = torch.max(scores, axis=1)
        pred_score, pred = pred_score.item(), pred.item()

        # Get the "target_y" score.
        target_score = scores[:, target_y].squeeze()

        # Display some information about the current epoch (iteration).
        if verbose:
            print('Iteration %2d: target score %.3f, max score %.3f'
                  % (epoch + 1, target_score.item(), pred_score))

        # Check if the model is fooled, i.e. "predicted class" equals "target_y".
        if pred == target_y:
            if verbose:
                print('\nThe model is fooled.')
            break

        # Apply the backward pass: compute the gradient of "target score" w.r.t.
        # the model's trainable parameters (among others, "X_adv").
        target_score.backward()

        # Normalize the gradient (note that the L2 norm is used in the division).
        X_adv.grad *= learning_rate / torch.linalg.norm(X_adv.grad)

        # Compute an update step: apply the gradient ascent.
        # Note that an addition is used (+=) instead of subtraction (-=), because
        # the goal is to maximize the "target_y" predicted score.
        X_adv.data += X_adv.grad.data

        # Re-initialize the gradient of "X_adv" to zero (for the next epoch).
        X_adv.grad.data.zero_()

    ##############################################################################
    #                             END OF YOUR CODE                               #
    ##############################################################################
    return X_adv
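An illustrative call, assuming an ImageNet-style classifier and a single preprocessed image; the class index and the random stand-in tensor are placeholders, not values from the row above.

import torch

target_y = 6                             # hypothetical target class index
X = torch.randn(1, 3, 224, 224)          # stands in for a preprocessed image
X_adv = make_adversarial_attack(X, target_y, model, max_iter=100, verbose=True)
# The perturbation should remain visually negligible even once the model is fooled:
print('max pixel change:', (X_adv - X).abs().max().item())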
[ "def make_adversarial_attack(x, y_target, model, max_iter=100, verbose=True):\n # Initialize our adversarial attack to the input image, and make it require gradient\n \n \n ##############################################################################\n # TODO: Generate an adversarial attack X_adv that the model will classify #\n # as the class target_y. You should perform gradient ascent on the score #\n # of the target class, stopping when the model is fooled. #\n # When computing an update step, first normalize the gradient: #\n # dX = learning_rate * g / ||g||_2 #\n # #\n # You should write a training loop. #\n # #\n # HINT: For most examples, you should be able to generate an adversarial #\n # attack in fewer than 100 iterations of gradient ascent. #\n # You can print your progress over iterations to check your algorithm. #\n ##############################################################################\n loss_fn = nn.CrossEntropyLoss()\n num_steps = 6\n step_size=0.01\n eps=0.3\n clamp=(0,1)\n x_adv = x.clone().detach().requires_grad_(True).to(x.device)\n num_channels = x.shape[1]\n y_target = torch.tensor(y_target).unsqueeze(0).to(x.device)\n for i in range(num_steps):\n _x_adv = x_adv.clone().detach().requires_grad_(True)\n prediction = model(_x_adv)\n print(torch.argmax(prediction))\n loss = loss_fn(prediction, y_target)\n loss.backward()\n with torch.no_grad():\n gradients = _x_adv.grad.sign() * step_size\n x_adv -= gradients\n x_adv = torch.max(torch.min(x_adv, x + eps), x - eps) \n x_adv = x_adv.clamp(*clamp)\n return x_adv", "def attack(self, data, target):\n B, K = data.shape[:2]\n data = data.float().cuda().detach()\n data = data.transpose(1, 2).contiguous()\n ori_data = data.clone().detach()\n ori_data.requires_grad = False\n\n # points and normals\n if ori_data.shape[1] == 3:\n normal = None\n else:\n normal = ori_data[:, 3:, :]\n ori_data = ori_data[:, :3, :]\n target = target.long().cuda().detach()\n\n # init variables with small perturbation\n adv_data = ori_data.clone().detach() + \\\n torch.randn((B, 3, K)).cuda() * 1e-7\n adv_data.requires_grad_()\n opt = optim.Adam([adv_data], lr=self.attack_lr, weight_decay=0.)\n\n adv_loss = torch.tensor(0.).cuda()\n dist_loss = torch.tensor(0.).cuda()\n\n total_time = 0.\n forward_time = 0.\n backward_time = 0.\n clip_time = 0.\n\n # there is no binary search in this attack\n # just longer iterations of optimization\n for iteration in range(self.num_iter):\n t1 = time.time()\n\n # forward passing\n logits = self.model(adv_data) # [B, num_classes]\n if isinstance(logits, tuple): # PointNet\n logits = logits[0]\n\n t2 = time.time()\n forward_time += t2 - t1\n\n # print\n pred = torch.argmax(logits, dim=1) # [B]\n success_num = (pred == target).sum().item()\n if iteration % (self.num_iter // 5) == 0:\n print('Iteration {}/{}, success {}/{}\\n'\n 'adv_loss: {:.4f}, dist_loss: {:.4f}'.\n format(iteration, self.num_iter, success_num, B,\n adv_loss.item(), dist_loss.item()))\n\n # compute loss and backward\n adv_loss = self.adv_func(logits, target).mean()\n\n # in the official tensorflow code, they use sum instead of mean\n # so we multiply num_points as sum\n dist_loss = self.dist_func(\n adv_data.transpose(1, 2).contiguous(),\n ori_data.transpose(1, 2).contiguous()).mean() * K\n\n loss = adv_loss + dist_loss\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n t3 = time.time()\n backward_time += t3 - t2\n\n # clipping and projection!\n adv_data.data = self.clip_func(adv_data.clone().detach(),\n ori_data, normal)\n\n t4 = 
time.time()\n clip_time = t4 - t3\n total_time += t4 - t1\n\n if iteration % 100 == 0:\n print('total time: {:.2f}, for: {:.2f}, '\n 'back: {:.2f}, clip: {:.2f}'.\n format(total_time, forward_time,\n backward_time, clip_time))\n total_time = 0.\n forward_time = 0.\n backward_time = 0.\n clip_time = 0.\n torch.cuda.empty_cache()\n\n # end of CW attack\n with torch.no_grad():\n logits = self.model(adv_data) # [B, num_classes]\n if isinstance(logits, tuple): # PointNet\n logits = logits[0]\n pred = torch.argmax(logits, dim=-1) # [B]\n success_num = (pred == target).\\\n sum().detach().cpu().item()\n\n # return final results\n print('Successfully attack {}/{}'.format(success_num, B))\n\n # in their implementation, they estimate the normal of adv_pc\n # we don't do so here because it's useless in our task\n adv_data = adv_data.transpose(1, 2).contiguous() # [B, K, 3]\n adv_data = adv_data.detach().cpu().numpy() # [B, K, 3]\n return adv_data, success_num", "def adversarialTrainer(attack):\n\n model = attack.model_wrapper.model # important note: this is a fresh, untrained model!\n data = attack.getDataset().data\n\n patience_counter, best_val_accuracy = 0, 0\n adversarial_model_train_epochs = 200\n log_template = 'Adversarial Model - Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}, Attack: {:.4f}'\n\n model.attack = True\n # train in an adversarial way\n for epoch in range(0, adversarial_model_train_epochs):\n tmp_attack = copy.deepcopy(attack)\n tmp_attack.setIdx(epoch + 1)\n attacked_x, attacked_nodes, y_targets = \\\n getTheMostHarmfulInput(attack=tmp_attack, approach=NodeApproach.TOPOLOGY)\n\n train(model=attack.model_wrapper.model, optimizer=attack.model_wrapper.optimizer, data=data,\n attacked_nodes=attacked_nodes, attacked_x=attacked_x)\n train_results = test(data=data, model=attack.model_wrapper.model, targeted=attack.targeted,\n attacked_nodes=attacked_nodes, y_targets=y_targets)\n print(log_template.format(epoch + 1, *train_results))\n\n # patience\n val_acc = train_results[1]\n if val_acc > best_val_accuracy:\n best_val_accuracy = val_acc\n patience_counter = 0\n else:\n patience_counter += 1\n if patience_counter >= attack.patience:\n break\n\n attack.model_wrapper.model.attack = False\n print()\n model_log = 'Adversarial Model - Train: {:.4f}, Val: {:.4f}, Test: {:.4f}, Attack: {:.4f}'\\\n .format(*train_results)\n return attack.model_wrapper.model, model_log, train_results[2]", "def attack(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,\n targeted: bool = False) -> torch.Tensor:\n batch_size = inputs.shape[0]\n tinputs = self._arctanh((inputs - self.boxplus) / self.boxmul)\n\n # set the lower and upper bounds accordingly\n lower_bound = torch.zeros(batch_size, device=self.device)\n CONST = torch.full((batch_size,), self.initial_const, device=self.device)\n upper_bound = torch.full((batch_size,), 1e10, device=self.device)\n\n o_best_l2 = torch.full((batch_size,), 1e10, device=self.device)\n o_best_score = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)\n o_best_attack = inputs.clone()\n\n # setup the target variable, we need it to be in one-hot form for the loss function\n labels_onehot = torch.zeros(labels.size(0), self.num_classes, device=self.device)\n labels_onehot.scatter_(1, labels.unsqueeze(1), 1)\n labels_infhot = torch.zeros_like(labels_onehot).scatter_(1, labels.unsqueeze(1), float('inf'))\n\n for outer_step in range(self.binary_search_steps):\n\n # setup the modifier variable, this is the variable we are optimizing over\n 
modifier = torch.zeros_like(inputs, requires_grad=True)\n\n # setup the optimizer\n optimizer = optim.Adam([modifier], lr=self.learning_rate, betas=(0.9, 0.999), eps=1e-8)\n best_l2 = torch.full((batch_size,), 1e10, device=self.device)\n best_score = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)\n\n # The last iteration (if we run many steps) repeat the search once.\n if self.repeat and outer_step == (self.binary_search_steps - 1):\n CONST = upper_bound\n\n prev = float('inf')\n for iteration in range(self.max_iterations):\n # perform the attack\n adv, logits, l2, logit_dists, loss = self._step(model, optimizer, inputs, tinputs, modifier,\n labels, labels_infhot, targeted, CONST)\n\n if self.callback and (iteration + 1) % self.log_interval == 0:\n self.callback.scalar('logit_dist_{}'.format(outer_step), iteration + 1, logit_dists.mean().item())\n self.callback.scalar('l2_norm_{}'.format(outer_step), iteration + 1, l2.sqrt().mean().item())\n\n # check if we should abort search if we're getting nowhere.\n if self.abort_early and iteration % (self.max_iterations // 10) == 0:\n if loss > prev * 0.9999:\n break\n prev = loss\n\n # adjust the best result found so far\n predicted_classes = (logits - labels_onehot * self.confidence).argmax(1) if targeted else \\\n (logits + labels_onehot * self.confidence).argmax(1)\n\n is_adv = (predicted_classes == labels) if targeted else (predicted_classes != labels)\n is_smaller = l2 < best_l2\n o_is_smaller = l2 < o_best_l2\n is_both = is_adv * is_smaller\n o_is_both = is_adv * o_is_smaller\n\n best_l2[is_both] = l2[is_both]\n best_score[is_both] = predicted_classes[is_both]\n o_best_l2[o_is_both] = l2[o_is_both]\n o_best_score[o_is_both] = predicted_classes[o_is_both]\n o_best_attack[o_is_both] = adv[o_is_both]\n\n # adjust the constant as needed\n adv_found = (best_score == labels) if targeted else ((best_score != labels) * (best_score != -1))\n upper_bound[adv_found] = torch.min(upper_bound[adv_found], CONST[adv_found])\n adv_not_found = ~adv_found\n lower_bound[adv_not_found] = torch.max(lower_bound[adv_not_found], CONST[adv_not_found])\n is_smaller = upper_bound < 1e9\n CONST[is_smaller] = (lower_bound[is_smaller] + upper_bound[is_smaller]) / 2\n CONST[(~is_smaller) * adv_not_found] *= 10\n\n # return the best solution found\n return o_best_attack", "def _is_adversarial(self, x_adv: np.ndarray, y_true: np.ndarray) -> bool:\n y_prediction = self.estimator.predict(x=x_adv)\n\n if self.targeted:\n return np.argmax(y_prediction, axis=1)[0] == np.argmax(y_true, axis=1)[0]\n\n return np.argmax(y_prediction, axis=1)[0] != np.argmax(y_true, axis=1)[0]", "def _attack(\n self, image: np.ndarray, target_class: np.ndarray, limit: int, max_iter: int\n ) -> Tuple[bool, np.ndarray]:\n bounds, initial = self._get_bounds(image, limit)\n\n def predict_fn(x):\n predictions = self.estimator.predict(self._perturb_image(x, image))[:, target_class]\n return predictions if not self.targeted else 1 - predictions\n\n def callback_fn(x, convergence=None):\n if self.es == 0:\n if self._attack_success(x.result[0], image, target_class):\n raise Exception(\"Attack Completed :) Earlier than expected\")\n else:\n return self._attack_success(x, image, target_class)\n\n if self.es == 0:\n from cma import CMAOptions\n\n opts = CMAOptions()\n if not self.verbose:\n opts.set(\"verbose\", -9)\n opts.set(\"verb_disp\", 40000)\n opts.set(\"verb_log\", 40000)\n opts.set(\"verb_time\", False)\n\n opts.set(\"bounds\", bounds)\n\n if self.type_attack == 0:\n std = 63\n 
else:\n std = limit\n\n from cma import CMAEvolutionStrategy\n\n strategy = CMAEvolutionStrategy(initial, std / 4, opts)\n\n try:\n strategy.optimize(\n predict_fn,\n maxfun=max(1, 400 // len(bounds)) * len(bounds) * 100,\n callback=callback_fn,\n iterations=1,\n )\n except Exception as exception:\n if self.verbose:\n print(exception)\n\n adv_x = strategy.result[0]\n else:\n strategy = differential_evolution(\n predict_fn,\n bounds,\n disp=self.verbose,\n maxiter=max_iter,\n popsize=max(1, 400 // len(bounds)),\n recombination=1,\n atol=-1,\n callback=callback_fn,\n polish=False,\n )\n adv_x = strategy.x\n\n if self._attack_success(adv_x, image, target_class):\n return True, self._perturb_image(adv_x, image)[0]\n else:\n return False, image", "def gen_attack_result(self, target):\r\n\r\n hit_result = self.my_board.update_my_board(target)\r\n return hit_result", "def train(self, input, target):\r\n ret = self.bias + self.weights.dot(input)\r\n if ret > 0:\r\n a = target - 1\r\n else:\r\n a = target - 0\r\n\r\n if a != 0:\r\n self.bias += rate * a\r\n for i in range(self.numInputs):\r\n self.weights[i] += rate * a * input[i]\r\n return ret", "def distance_augmentation_attack(model, train_set, test_set, max_samples, attack_type='d', distance_attack='CW', augment_kwarg=1, batch=100, input_dim=[None, 32, 32, 3], n_classes=10):\n if attack_type == 'r':\n augments = create_rotates(augment_kwarg)\n elif attack_type == 'd':\n augments = create_translates(augment_kwarg)\n else:\n raise ValueError(f\"attack type_: {attack_type} is not valid.\")\n m = np.concatenate([np.ones(max_samples),\n np.zeros(max_samples)], axis=0)\n attack_in = np.zeros((max_samples, len(augments)))\n attack_out = np.zeros((max_samples, len(augments)))\n for i, augment in enumerate(augments):\n train_augment = apply_augment(train_set, augment, attack_type)\n test_augment = apply_augment(test_set, augment, attack_type)\n train_ds = tf.data.Dataset.from_tensor_slices(train_augment).batch(batch)\n test_ds = tf.data.Dataset.from_tensor_slices(test_augment).batch(batch)\n attack_in[:, i] = dists(model, train_ds, attack=distance_attack, max_samples=max_samples, input_dim=input_dim, n_classes=n_classes)\n attack_out[:, i] = dists(model, test_ds, attack=distance_attack, max_samples=max_samples, input_dim=input_dim, n_classes=n_classes)\n attack_set = (np.concatenate([attack_in, attack_out], 0),\n np.concatenate([train_set[1], test_set[1]], 0),\n m)\n return attack_set", "def _forward_alibaba_miil(self, input: Tensor, target: Tensor) -> Tensor:\n self.targets = target\n self.anti_targets = 1 - target\n\n # Calculating Probabilities\n self.xs_pos = torch.sigmoid(input)\n self.xs_neg = 1.0 - self.xs_pos\n\n # Asymmetric Clipping\n if self.prob_margin > 0:\n self.xs_neg.add_(self.prob_margin).clamp_(max=1)\n\n # Basic CE calculation\n # loss = y * log(p) + (1-y) * log(1-p)\n self.loss = self.targets * torch.log(self.xs_pos.clamp(min=self.eps))\n self.loss.add_(self.anti_targets * torch.log(self.xs_neg.clamp(min=self.eps)))\n\n # Asymmetric Focusing\n if self.gamma_neg > 0 or self.gamma_pos > 0:\n if self.disable_torch_grad_focal_loss:\n prev = torch.is_grad_enabled()\n torch.set_grad_enabled(False)\n self.xs_pos = self.xs_pos * self.targets # p * y\n self.xs_neg = self.xs_neg * self.anti_targets # (1-p) * (1-y)\n self.asymmetric_w = torch.pow(\n 1 - self.xs_pos - self.xs_neg,\n self.gamma_pos * self.targets + self.gamma_neg * self.anti_targets,\n )\n if self.disable_torch_grad_focal_loss:\n torch.set_grad_enabled(prev)\n self.loss *= 
self.asymmetric_w\n\n if self.reduction == \"mean\":\n self.loss = -self.loss.mean()\n elif self.reduction == \"sum\":\n self.loss = -self.loss.sum()\n else:\n self.loss = -self.loss\n return self.loss", "def cost(X, y, theta):\n return 1 / (2 * X.shape[0]) * sum((predict(X, theta) - y) ** 2)", "def approachTarget(self, amount):\n if amount == 0:\n # If amount is zero, do nothing.\n return\n \n if self.t.sub(self.p).mag()*(1 - amount) > 2.0*self.tolerance:\n # If 'self.approachTarget()' will not take the view within twice the\n # tolerance distance, approach the target by given amount:\n self.p = self.p.add(self.t.sub(self.p).scale(amount))", "def cross_entropy(y_tag, y):\n return -np.log(y_tag[y])", "def _forward(self, prediction: Tensor, target: Tensor) -> Tensor:\n return torch.sqrt(\n (prediction[:, 0] - target[:, 0]) ** 2\n + (prediction[:, 1] - target[:, 1]) ** 2\n + (prediction[:, 2] - target[:, 2]) ** 2\n )", "def attack(self, target: Health) -> None:\n if self.__cooldown_tick == 0:\n target.apply_damage(self.damage)\n if self.dot > 0: target.apply_dot(self.dot, self.dot_ticks)", "def _attack_success(self, adv_x, x, target_class):\n predicted_class = np.argmax(self.estimator.predict(self._perturb_image(adv_x, x))[0])\n return bool(\n (self.targeted and predicted_class == target_class)\n or (not self.targeted and predicted_class != target_class)\n )", "def predict_with_adversarial_prediction(self, X, adv_prediction_function):\n y_predict = self.predict(X)\n class_distances = self.get_predicted_class_decision_boundary_distances(X, y_predict)\n y_predict_is_adv = np.fromiter(map(adv_prediction_function, class_distances), dtype=np.bool)\n\n for i, x in enumerate(y_predict_is_adv):\n if x:\n y_predict[i] = -1\n \n return y_predict", "def attack(self):\n\n self.check_unresolved_actions()\n messages = self.conflict_check()\n if len(self.args) == 0:\n raise Exception('No target identified for your attack action')\n search = self.args[0]\n chars = []\n if self.engagement and self.engagement.characters:\n chars.extend(list(Character().filter(id__in=[c for c in self.engagement.characters]).all()))\n targets = [c for c in chars if search.lower() in c.name.lower()]\n if not targets and self.sc and self.sc.characters:\n chars.extend(list(Character().filter(id__in=[c for c in self.sc.characters]).all()))\n targets = [c for c in chars if search.lower() in c.name.lower()]\n if not targets:\n raise Exception(f'No target match for _{search}_ found in the ***{self.sc.name}*** scene.')\n if len(targets) > 1:\n names = '\\n '.join([f'***{m.name}***' for m in targets])\n raise Exception(f'Multiple targets matched _{search}_ in the ***{self.sc.name}*** scene. 
Please specify which:{names}')\n self.target = targets[0]\n self.target.active_target_by = str(self.char.id)\n self.save_char(self.target)\n self.char.active_action = 'Attack'\n self.char.active_target = str(self.target.id)\n self.save_char(self.char)\n messages.extend(self.add_chars_to_engagement())\n self.command = 'roll'\n # Allow for exact roll designation\n if self.args[1] == 'exact' and len(self.args) > 2:\n exact_roll = self.args[2]\n self.args = self.args[3:] if len(self.args) > 3 else tuple()\n self.invoke_index = [i for i in range(0, len(self.args)) if self.args[i] in ['invoke', 'i']]\n self.compel_index = [i for i in range(0, len(self.args)) if self.args[i] in ['compel', 'c']]\n roll_str = self.roll(exact_roll)\n else:\n self.args = self.args[1:]\n roll_str = self.roll()\n messages.extend(roll_str)\n return messages", "def adv_dga(x, model, discretize_fn, projection_fn, levels, phase,\n\t\t\tsteps, eps, thermometer=False, noisy_grads=True, y=None):\n # Add noise\n noise = 0\n\n if noisy_grads:\n noise = tf.random_uniform(\n shape=tf.shape(x), minval=-eps, maxval=eps, dtype=tf.float32)\n x_noisy = x + noise\n\n # Clip so that x_noisy is in [0, 1]\n x_noisy = tf.clip_by_value(x_noisy, 0., 1.)\n\n # Compute the mask over the bits that we are allowed to attack\n mask = discretize_range(\n discretize_fn, levels, x - eps, x + eps, thermometer=thermometer)\n cur_x_discretized = discretize_fn(x_noisy)\n\n for i in range(steps):\n # Compute one hot representation if input is in thermometer encoding.\n cur_x_one_hot = cur_x_discretized\n if thermometer:\n cur_x_one_hot = discretization_utils.thermometer_to_one_hot(\n cur_x_discretized, levels, flattened=True)\n\n logits_discretized = model(projection_fn(cur_x_discretized),\n is_training=phase)\n\n if i == 0 and y is None:\n # Get one hot version from predictions\n y = tf.one_hot(\n tf.argmax(logits_discretized, 1),\n tf.shape(logits_discretized)[1])\n\n loss = tf.nn.softmax_cross_entropy_with_logits(\n labels=y, logits=logits_discretized)\n\n # compute the gradients wrt to current input\n grad, = tf.gradients(loss, cur_x_discretized)\n\n # The harm done by choosing a particular bit to be active\n harm = grad * (1. + cur_x_one_hot - 2 * cur_x_discretized)\n\n # If we are using thermometer harm is the cumsum\n if thermometer:\n harm_r = discretization_utils.unflatten_last(harm, levels)\n harm_r = tf.cumsum(harm_r, axis=-1, reverse=True)\n harm = discretization_utils.flatten_last(harm_r)\n\n # Make sure values outside the global mask lose the max\n harm = harm * mask - (1. - mask) * 1000.0\n\n harm_r = discretization_utils.unflatten_last(harm, levels)\n\n bit_to_activate = tf.argmax(harm_r, axis=-1)\n\n one_hot = tf.one_hot(\n bit_to_activate,\n depth=levels,\n on_value=1.,\n off_value=0.,\n dtype=tf.float32,\n axis=-1)\n\n # Convert into thermometer if we are doing thermometer encodings\n inp = one_hot\n if thermometer:\n inp = discretization_utils.one_hot_to_thermometer(\n one_hot, levels, flattened=False)\n\n flattened_inp = discretization_utils.flatten_last(inp)\n flattened_inp.mask = mask\n flattened_inp = tf.stop_gradient(flattened_inp)\n\n cur_x_discretized = flattened_inp\n return flattened_inp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs a single gradient-ascent step to generate an image that maximizes the score of target_y under a pretrained model.
def class_visualization_step(img, target_y, model, **kwargs):
    l2_reg = kwargs.pop('l2_reg', 1e-3)
    learning_rate = kwargs.pop('learning_rate', 25)

    ########################################################################
    # TODO: Use the model to compute the gradient of the score for the     #
    # class target_y with respect to the pixels of the image, and make a   #
    # gradient step on the image using the learning rate. Don't forget the #
    # L2 regularization term!                                              #
    # Be very careful about the signs of elements in your code.            #
    # Hint: You have to perform inplace operations on img.data to update   #
    # the generated image using gradient ascent & reset img.grad to zero   #
    # after each step.                                                     #
    ########################################################################
    # Replace "pass" statement with your code

    # Forward pass, "scores" shape is (1, 1000)
    scores = model(img)

    # Get the "target_y" score.
    target_score = scores[:, target_y].squeeze()

    # Subtract the regularization term (note that the L2 norm is squared).
    target_score -= l2_reg * torch.square(torch.linalg.norm(img))

    # Apply the backward pass: compute the gradient of "target score" w.r.t.
    # the model's trainable parameters (among others, "img").
    target_score.backward()

    # Compute an update step: apply the gradient ascent.
    # Note that an addition is used (+=) instead of subtraction (-=), because
    # the goal is to maximize the "target_y" predicted score.
    img.data += learning_rate * img.grad.data

    # Re-initialize the gradient of "img" to zero.
    img.grad.data.zero_()

    ########################################################################
    #                          END OF YOUR CODE                            #
    ########################################################################
    return img
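A sketch of the driver loop this step function implies; the random initialization, image size, class index, and iteration count are assumptions rather than part of the original code.

import torch

target_y = 76                                        # hypothetical class index
img = (torch.randn(1, 3, 224, 224) * 0.01).requires_grad_()
for t in range(100):
    # Each call performs one regularized gradient-ascent step on img in place.
    img = class_visualization_step(img, target_y, model,
                                   l2_reg=1e-3, learning_rate=25)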
[ "def make_adversarial_attack(X, target_y, model, max_iter=100, verbose=True):\n # Initialize our adversarial attack to the input image, and make it require gradient\n X_adv = X.clone()\n X_adv = X_adv.requires_grad_()\n \n learning_rate = 1\n ##############################################################################\n # TODO: Generate an adversarial attack X_adv that the model will classify #\n # as the class target_y. You should perform gradient ascent on the score #\n # of the target class, stopping when the model is fooled. #\n # When computing an update step, first normalize the gradient: #\n # dX = learning_rate * g / ||g||_2 #\n # #\n # You should write a training loop. #\n # #\n # HINT: For most examples, you should be able to generate an adversarial #\n # attack in fewer than 100 iterations of gradient ascent. #\n # You can print your progress over iterations to check your algorithm. #\n ##############################################################################\n # Replace \"pass\" statement with your code\n\n # Training loop: Apply gradient ascent 100 times, in maximum.\n for epoch in range(100):\n # Forward pass, \"scores\" shape is (1, 1000)\n scores = model(X_adv)\n\n # Get the predicted class (pred) and its socre (pred_score).\n pred_score, pred = torch.max(scores, axis=1)\n pred_score, pred = pred_score.item(), pred.item()\n\n # Get the \"target_y\" score.\n target_score = scores[:, target_y].squeeze()\n\n # Display some information about the current epoch (iteration).\n print('Iteration %2d: target score %.3f, max score %.3f' \\\n % (epoch+1, target_score.item(), pred_score))\n\n # Check if the model is fooled, i.e. \"predicted class\" equals \"target_y\".\n if pred == target_y:\n print('\\nThe model is fooled.')\n break\n\n # Apply the backward pass: Compute the gradient of \"target score\" w.r.t.\n # model's trainable parameters (among others, \"X_adv\").\n target_score.backward()\n\n # Normalize the gradient (Note that \"L2 norm\" was used in the division).\n X_adv.grad *= learning_rate / torch.linalg.norm(X_adv.grad)\n\n # Compute an update step: Apply the gradient ascent.\n # Note that an addition is used (+=) insted of substraction (-=), because\n # the goal is to maximize \"target_y\" predicted score.\n X_adv.data += X_adv.grad.data\n\n # Re-initialize the gradient of \"X_adv\" to zero (for the next epoch).\n X_adv.grad.data.zero_()\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return X_adv", "def _gradient(self, _x, _y):\n ### YOUR CODE HERE\n dl_dwx = self.softmax(_x) - _y\n dl_dx = np.matmul(_x.reshape(self.n_features,1), dl_dwx.reshape(1,self.k))\n _g = dl_dx\n return _g\n ### END YOUR CODE", "def compute_gradient(self, model, x, y):\n\t\tpass", "def _gradient_update(self):\n # sample minibatch\n captions, image_features, urls = sample_coco_minibatch(self.data, self.batch_size, split='train')\n # compute loss and gradient\n loss, gradients = self.model.loss(image_features, captions)\n self.loss_history.append(loss)\n # parameter update\n for para_name, param in self.model.params.items():\n dparam = gradients[para_name]\n next_param, params = self.update_method(param, dparam, self.update_params_all[para_name])\n self.model.params[para_name] = next_param\n self.update_params_all[para_name] = params", "def make_adversarial_attack(x, y_target, model, max_iter=100, verbose=True):\n # Initialize our adversarial attack 
to the input image, and make it require gradient\n \n \n ##############################################################################\n # TODO: Generate an adversarial attack X_adv that the model will classify #\n # as the class target_y. You should perform gradient ascent on the score #\n # of the target class, stopping when the model is fooled. #\n # When computing an update step, first normalize the gradient: #\n # dX = learning_rate * g / ||g||_2 #\n # #\n # You should write a training loop. #\n # #\n # HINT: For most examples, you should be able to generate an adversarial #\n # attack in fewer than 100 iterations of gradient ascent. #\n # You can print your progress over iterations to check your algorithm. #\n ##############################################################################\n loss_fn = nn.CrossEntropyLoss()\n num_steps = 6\n step_size=0.01\n eps=0.3\n clamp=(0,1)\n x_adv = x.clone().detach().requires_grad_(True).to(x.device)\n num_channels = x.shape[1]\n y_target = torch.tensor(y_target).unsqueeze(0).to(x.device)\n for i in range(num_steps):\n _x_adv = x_adv.clone().detach().requires_grad_(True)\n prediction = model(_x_adv)\n print(torch.argmax(prediction))\n loss = loss_fn(prediction, y_target)\n loss.backward()\n with torch.no_grad():\n gradients = _x_adv.grad.sign() * step_size\n x_adv -= gradients\n x_adv = torch.max(torch.min(x_adv, x + eps), x - eps) \n x_adv = x_adv.clamp(*clamp)\n return x_adv", "def update(self,lr):\n self.sample_minibatch(lr)\n # Calculate gradients at current point\n dlogbeta = lr.dlogpost(self)\n lr.grad_sample[self.iter-1,:] = dlogbeta\n\n # Update parameters using SGD\n eta = np.random.normal( scale = self.epsilon )\n lr.beta += self.epsilon / 2 * dlogbeta + eta", "def target_model_update(self):\n self.set_weights(self.model, self.target_model)", "def grad_cam(input_model, img, target_size, layer_name=\"add_3\", scale_factor=3):\n\n # for steering output\n y_s = input_model.output[0][0]\n\n # for collision output\n y_c = input_model.output[1][0]\n\n # print y_s.shape, y_c.shape, img.shape\n\n # activation maps\n conv_output = input_model.get_layer(layer_name).output\n \n grads_s = K.gradients(y_s, conv_output)[0]\n grads_c = K.gradients(y_c, conv_output)[0]\n\n # print conv_output.shape, input_model.input.shape\n\n gradient_function = K.function([input_model.input], [conv_output, grads_s, grads_c])\n\n output, grad_s, grad_c = gradient_function([img])\n output, grad_s, grad_c = output[0, :], grad_s[0, :, :, :], grad_c[0, :, :, :]\n\n # print output.shape, grad_s.shape, grad_c.shape\n\n weights_s = np.mean(grad_s, axis=(0, 1))\n weights_c = np.mean(grad_c, axis=(0, 1))\n\n # print \"weights_s, weights_c\", weights_s.shape, weights_c.shape\n\n cam_s = np.dot(output, weights_s)\n cam_c = np.dot(output, weights_c)\n\n # print \"cam_c.max\", cam_c.max(), \"cam_s.max\", cam_s.max(), cam_c.shape, cam_s.shape\n\n # Process CAM\n cam_s = cv2.resize(cam_s, target_size, cv2.INTER_LINEAR)\n cam_s = np.maximum(cam_s, 0)\n cam_s = cam_s / (cam_s.max() + 1e-10)\n cam_s = cv2.applyColorMap(np.uint8(255 * cam_s), cv2.COLORMAP_JET)\n\n # print \"cam_s shape after resize:\", cam_s.shape\n\n cam_c = cv2.resize(cam_c, target_size, cv2.INTER_LINEAR)\n cam_c = np.maximum(cam_c, 0)\n cam_c = cam_c / (cam_c.max() + 1e-10)\n cam_c = cv2.applyColorMap(np.uint8(255 * cam_c), cv2.COLORMAP_JET)\n\n # print \"cam_c shape after resize:\", cam_c.shape\n\n final_size = (target_size[1]*scale_factor, target_size[0]*scale_factor)\n\n # print \"final_size\", final_size\n\n 
img = cv2.resize(img[0], final_size, cv2.INTER_LINEAR)\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n img = np.array(img*255, dtype=np.uint8)\n cam_s = cv2.resize(cam_s, final_size, cv2.INTER_LINEAR)\n cam_c = cv2.resize(cam_c, final_size, cv2.INTER_LINEAR)\n\n # print \"img, cams, cam_c shapes before:\", img.shape, cam_s.shape, cam_c.shape, type(img[0, 0, 1]), type(cam_s[0, 0, 1])\n\n cam_s = cv2.addWeighted(img, 0.7, cam_s, 0.3, 0)\n cam_c = cv2.addWeighted(img, 0.7, cam_c, 0.3, 0)\n\n # print \"img, cams, cam_c shapes\", img.shape, cam_s.shape, cam_c.shape\n\n return img, cam_s, cam_c", "def update_step(self, gradient, variable):\n if self._var_key(variable) not in self._index_dict:\n raise KeyError(f'Optimizer cannot recognize variable {variable.name}, '\n f'this usually means you are calling an optimizer '\n f'previously used on a different model. Please try '\n f'creating a new optimizer instance.')\n lr = tf.cast(self.learning_rate, variable.dtype)\n\n var_key = self._var_key(variable)\n velocity = self._velocities[self._index_dict[var_key]]\n momentum = None\n if self.momentum > 0:\n momentum = self._momentums[self._index_dict[var_key]]\n average_grad = None\n if self.centered:\n average_grad = self._average_gradients[self._index_dict[var_key]]\n\n rho = self.rho\n\n if isinstance(gradient, tf.IndexedSlices):\n # Sparse gradients.\n velocity.assign(rho * velocity)\n velocity.scatter_add(tf.IndexedSlices(\n tf.square(gradient.values) * (1 - rho), gradient.indices))\n if self.centered:\n average_grad.assign(rho * average_grad)\n average_grad.scatter_add(\n tf.IndexedSlices(\n tf.square(gradient.values) * (1 - rho), gradient.indices))\n velocity.assign_add(-tf.square(average_grad))\n velocity_value = tf.gather(velocity, gradient.indices)\n transformed_grad = tf.IndexedSlices(\n gradient.values / (tf.sqrt(velocity_value) + self.epsilon),\n gradient.indices)\n\n if self.momentum > 0:\n momentum.assign(self.momentum * momentum)\n momentum.scatter_add(transformed_grad)\n variable.assign_add(-lr * momentum)\n else:\n variable.scatter_add(\n tf.IndexedSlices(-lr * transformed_grad.values,\n transformed_grad.indices))\n else:\n # Dense gradients.\n velocity.assign(rho * velocity + (1 - rho) * tf.square(gradient))\n if self.centered:\n average_grad.assign(rho * average_grad +\n (1 - rho) * tf.square(gradient))\n velocity.assign_add(-tf.square(average_grad))\n transformed_grad = gradient / (tf.sqrt(velocity) + self.epsilon)\n if self.momentum > 0:\n momentum.assign(self.momentum * momentum + transformed_grad)\n variable.assign_add(-lr * momentum)\n else:\n variable.assign_add(-lr * transformed_grad)", "def compute_saliency_maps(X, y, model):\n # Make input tensor require gradient\n X.requires_grad_()\n \n saliency = None\n ##############################################################################\n # TODO: Implement this function. Perform a forward and backward pass through #\n # the model to compute the gradient of the correct class score with respect #\n # to each input image. You first want to compute the loss over the correct #\n # scores (we'll combine losses across a batch by summing), and then compute #\n # the gradients with a backward pass. 
#\n # Hint: X.grad.data stores the gradients #\n ##############################################################################\n # Replace \"pass\" statement with your code\n\n # Make a forward pass of X (which contains N images) through the model.\n # The output (scores) has shape (N, C): For each image, get its unnormalized\n # scores (for each class of the dataset), e.g. C=1000 for a model trained on ImageNet.\n scores = model(X)\n\n # Get the -unnormalized- score of the correct class for each image.\n # \"cscores\" has shape of (N,)\n cscores = scores.gather(1, y.view(-1, 1)).squeeze()\n\n # Compute the loss over the correct scores.\n # As mentioned above, the loss is the sum across batch correct class scores.\n loss = torch.sum(cscores)\n # Apply the backward pass, which computes the gradient of the loss\n # w.r.t. our model's parameters (among others, the input X).\n loss.backward()\n\n # Note that we can apply the backward pass directly from \"cscores\" by using:\n # >>> cscores.backward(gradient=torch.ones_like(y))\n # The reason: The sub-computational graph for the \"sum\" method is:\n # -----\n # Forward pass: cscores ---> [sum] ---> loss\n # Backward pass (gradiants): [1, ..., 1] <-------------- 1\n # -----\n # That is, we can directly start from \"cscores\" gradient, which is a tensor of\n # ones with the shape (N,). Actually: ones_like(y) == ones_like(cscores)\n\n # Compute the absolute value of the X gradients.\n # Saliency Maps requires nonnegative values (gradients).\n # For now, \"saliency\" has shape of: (N, 3, H, W)\n saliency = X.grad.abs()\n # Take the maximum value over the 3 input channels (for each of N images).\n # Now, \"saliency\" has shape of: (N, H, W)\n saliency = torch.max(saliency, dim=1).values\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return saliency", "def gradient_descent_update(x, gradx, learning_rate):\n return x - learning_rate * gradx", "def _compute_loss(self, model_output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n pass", "def optimize(self):\n if self.replay_buffer.length() < self.min_replay_size:\n return\n\n batch = getBatch(self.replay_buffer, self.batch_size)\n loss = self._getLoss(batch)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if self.soft_update:\n self.softUpdate()\n elif self.steps % self.target_update_steps == 0:\n self.updateTargetModel()", "def evalDataLossGrad(self,rawInputs,rawTarget):\n assert False, 'abstract method called'", "def gradient(self, x, y_actual, args):\n weights = args[0]\n self.update_weights(weights)\n # Update zeroth layer\n self.layer0 = x.tolist()\n\n # Begin backtracking\n y = self.predict(x)\n grad_cache = np.zeros((self.num_units_per_layer, self.num_units_per_layer, 4))\n grad_cache.fill(0.0)\n\n # Find 3rd layer of derivatives\n for i in range(0, self.num_units_per_layer):\n grad_cache[i, 1, 3] = (y - y_actual) * self.layer2[i]\n\n # Find 2nd layer of derivatives\n for i in range(0, self.num_units_per_layer):\n for j in range(1, self.num_units_per_layer):\n grad_cache[i, j, 2] = grad_cache[j, 1, 3] * self.weights[j, 1, 3] * (1.0 - self.layer2[j]) * self.layer1[i]\n\n # Find 3rd layer of derivatives\n for i in range(0, x.shape[0]):\n for j in range(1, self.num_units_per_layer):\n grad_cache[i, j, 1] = x[i] * (1.0 - self.layer1[j]) * np.sum(np.multiply(self.weights[j, :, 2], grad_cache[j, :, 2]))\n\n return grad_cache", 
"def update_gradient_hessian(self, X, y, sample_weight):", "def mse_gradient_step(X, y, learning_rate):\n w = sys.modules[temp_module_name].__dict__['w']\n\n # Calculate gradient\n err = y.reshape((len(y),1))-np.dot(X,w)\n grad = -2.*np.dot(np.transpose(X),err)/ X.shape[0]\n\n for index in np.where(abs(grad) > .01)[0]:\n w[index] -= learning_rate*grad[index,0]", "def compute_target(self, data):\n s,a,r,n,_ = data\n target =r + self.gamma*self.model_target.predict(n) \n target = target - np.mean(target)\n target = target/np.std(target)*20\n return target", "def on_train_batch_gradient_step_end(self, context: PhaseContext) -> None:\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an instance of a reviewer based on MODE.
def get_instance(*args):
    if MODE == 'list':
        return ListReviewer(*args)
    if MODE == 'quorum':
        return QuorumReviewer(*args)
    raise Exception('Invalid MODE')
[ "def get_instance(self, name, id):\n cls = self.get_class(name)\n if cls:\n if hasattr(cls, 'objects') and id:\n try:\n return cls.objects.get(id=id)\n except (cls.DoesNotExist, ValueError):\n return None\n return None\n from .models import Role\n try:\n return Role.objects.get(type=name, rid=id)\n except Role.DoesNotExist:\n return None", "def get_self_reviewer_class(self):\n for rc in self.get_reviewer_classes():\n if rc.is_self:\n return rc", "def vl_requisition_instance(self):\n if not self._vl_requisition_instance:\n try:\n self._vl_requisition_instance = self.models[self.timepoint_key].get(\n 'subject_requisition').objects.get(\n subject_visit=self.subject_visit, panel__name='Viral Load', is_drawn='Yes')\n except self.models[self.timepoint_key].get('subject_requisition').DoesNotExist:\n pass\n return self._vl_requisition_instance", "def get_object_for_this_type(self, **kwargs):\r\n return self.model_class().objects.get(**kwargs)", "def get_record(self):\n return get_record(self.id)", "def rbd_requisition_instance(self):\n if not self._rbd_requisition_instance:\n try:\n self._rbd_requisition_instance = self.models[self.timepoint_key].get(\n 'subject_requisition').objects.get(\n subject_visit=self.subject_visit, panel__name='Research Blood Draw', is_drawn='Yes')\n except self.models[self.timepoint_key].get('subject_requisition').DoesNotExist:\n pass\n return self._rbd_requisition_instance", "def _get_viewer(self):\n if self._viewer is None:\n self._viewer = mujoco_py.MjViewer(self.sim)\n self._viewer.cam.fixedcamid = self._camera_ids[0]\n self._viewer.cam.type = mujoco_py.generated.const.CAMERA_FIXED\n self._viewer_reset()\n return self._viewer", "def get_instance(self, model_class, pk):\r\n try:\r\n instance = model_class.objects.get(pk=pk)\r\n except ObjectDoesNotExist:\r\n self.log.error(\"Couldn't load model instance with pk #%s. Somehow it went missing?\" % pk)\r\n return None\r\n except MultipleObjectsReturned:\r\n self.log.error(\"More than one object with pk #%s. 
Oops?\" % pk)\r\n return None\r\n\r\n return instance", "def instance():\n return RestSvr", "def get_reddit_instance(username=AUTO_CROSSPOST_BOT_NAME):\n global praw_instances\n if not praw_instances:\n _decorate_praw()\n praw_instances = {}\n dotenv.load_dotenv()\n\n bot_names = [AUTO_CROSSPOST_BOT_NAME, SUB_DOESNT_EXIST_BOT_NAME, SAME_SUBREDDIT_BOT_NAME, SAME_POST_BOT_NAME]\n for name in bot_names:\n app_client_id=bot_details[name]['app_client_id']\n password=os.environ.get(bot_details[name]['env_password_key'])\n app_client_secret=os.environ.get(bot_details[name]['env_app_client_secret_key'])\n version=bot_details[name]['version']\n\n reddit_instance = _instantiate_praw(username=name,\n app_client_id=app_client_id,\n password=password, \n app_client_secret=app_client_secret,\n version=version)\n praw_instances[name] = reddit_instance\n\n return praw_instances[username]", "def get_if_readable_by(cls, ident, user_or_token, options=[]):\n obj = cls.query.options(options).get(ident)\n\n if obj is not None and not obj.is_readable_by(user_or_token):\n raise AccessError('Insufficient permissions.')\n\n return obj", "def get_candidate(request):\n candidate_id = request.query_params.get('candidate_id', None)\n return Candidate.objects.filter(pk=candidate_id).first()", "def get_review( app, id ):\n sa_session = app.model.context.current\n return sa_session.query( app.model.RepositoryReview ).get( app.security.decode_id( id ) )", "def hiv_test_review_instance(self):\n if not self._hiv_test_review_instance:\n try:\n self._hiv_test_review_instance = self.models[self.timepoint_key].get(\n 'hiv_test_review').objects.get(\n subject_visit=self.subject_visit, recorded_hiv_result__in=[POS, 'NEG', 'IND'])\n except self.models[self.timepoint_key].get('hiv_test_review').DoesNotExist:\n self._hiv_test_review_instance = None\n return self._hiv_test_review_instance", "def retrieve(self, request, pk=None):\n try:\n ride_review = RideReview.objects.get(pk=pk)\n serializer = RideReviewSerializer(ride_review, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseNotFound(ex)", "def reaktor(conf='default'):\n if REAKTOR_PER_THREAD:\n\n thread = threading.currentThread()\n try:\n reaktor_instance = thread.reaktor[conf]\n except AttributeError:\n logger.debug(\"holon: init reaktor for thread %s\" % thread)\n reaktor_instance = REAKTOR_CLASS(**settings.REAKTORS['default'])\n if not thread.reaktor:\n thread.reaktor = {}\n thread.reaktor[conf] = reaktor_instance\n\n return reaktor_instance\n\n return REAKTOR", "def get_instance(par=None):\n\n if Miniweb.__inst is None:\n Miniweb(par)\n return Miniweb.__inst", "def get_redditor(self, user_name, *args, **kwargs):\n return objects.Redditor(self, user_name, *args, **kwargs)", "def get_review(self, review_index):\n try:\n review = Review(self.reviews_dict[\"reviews\"][review_index])\n return review\n except KeyError:\n raise Exception(\"Invalid Review!\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update Github commit status as success.
def set_success_commit_status(self, desc):
    info = self.get_pull_request()
    sha = info['head']['sha']
    repo = info['head']['repo']['full_name']
    return self.set_commit_status('success', desc, repo, sha)
[ "def _update_github_status(report, url, key, threshold, details_link):\n title = key.capitalize()\n\n if report:\n value = int(re.sub(r\"\\D\", \"\", report[key]))\n if value >= threshold:\n pr_state = \"success\"\n description = f\"{title} diff is good!\"\n else:\n pr_state = \"failure\"\n description = (\n f\"{title} diff is below expected ({value}% out of {threshold}%)\"\n )\n else:\n pr_state = \"success\"\n description = \"No report provided for this commit\"\n details_link = \"\" # If not report, don't provide the link\n\n github.update_pr_status(url, pr_state, f\"FineTune {title}\", description, details_link)", "async def build_status(self, ctx: commands.Context, commit: Commit=Commit()):\n status = await commit.get_status()\n await ctx.send(status)", "async def set_status(event, gh, *args, **kwargs):\n issue_number_found = ISSUE_RE.search(event.data[\"pull_request\"][\"title\"])\n if not issue_number_found:\n issue = await util.issue_for_PR(gh, event.data[\"pull_request\"])\n status = SKIP_ISSUE_STATUS if util.skip(\"issue\", issue) else FAILURE_STATUS\n else:\n if \"body\" in event.data[\"pull_request\"]:\n body = event.data[\"pull_request\"][\"body\"] or \"\"\n if not body or CLOSING_TAG not in body:\n issue_number = issue_number_found.group(\"issue\")\n new_body = BODY.format(body=body, issue_number=issue_number)\n body_data = {\"body\": new_body, \"maintainer_can_modify\": True}\n await gh.patch(event.data[\"pull_request\"][\"url\"], data=body_data)\n status = create_success_status(issue_number_found)\n await util.post_status(gh, event, status)", "def perform_maapi_commit_status(self, usid):\n global maapisock_commit\n log.debug(\"usid=%d\", usid)\n stat = maapi.confirmed_commit_in_progress(maapisock_commit)\n log.debug(\"stat=%d\", stat)\n if stat != 0:\n maapi.cli_write(maapisock_commit, usid,\n \"Ongoing commit in progress!\\n\")\n maapi.cli_write(maapisock_commit, usid, \"Session id: %d\\n\" % stat)\n else:\n maapi.cli_write(maapisock_commit, usid,\n \"No ongoing commit in progress!\\n\")", "def test_change_status_success(self):\n self.login()\n\n created_todo = create_todo()\n todo_id = created_todo.id\n pristine_status = created_todo.mark_completed\n\n response = self.client.post(url_for('alaya_todo.todo_change_status', todo_id=todo_id))\n\n self.assert200(response)\n\n response_dict = json.loads(response.data)\n\n # Checking the expected values in the response\n self.assertTrue(response_dict['success'], 'The success key must be True')\n self.assertEqual(response_dict['status'], 200, 'The status key must be 200.')\n self.assertEqual(response_dict['message'], \"The task's status has been updated.\", 'The response messages '\n 'must math.')\n # Checking the database changes\n updated_todo = load_todo(todo_id)\n self.assertNotEqual(pristine_status, updated_todo.mark_completed,\n 'The mark_completed property must be updated.')\n\n delete_todo(todo_id)\n\n self.logout()", "def action_update(self):\n pr = self._get_pr()\n if self.related_type == 'github':\n if pr[0]:\n self.pull_request = pr[0].title\n self.pull_request_link = pr[0]._rawData['_links']['html']['href']\n commits = pr[0].get_commits()\n for commit in commits:\n commit_list = self.env['vcs.commit'].search([\n ('sha_string', '=', commit.sha),\n ('type', '=', 'github')\n ])\n if not commit_list:\n vcs_commit = self.env['vcs.commit'].create({\n 'sha_string': commit.sha,\n 'type': 'github',\n 'branch_ids': [(4, self.id)],\n 'author': commit.raw_data['commit']['author']['name'],\n 'name': 
commit.raw_data['commit']['message'],\n 'date': fields.Date.from_string(\n commit.raw_data['commit']['author']['date']),\n 'url': commit._html_url.value,\n })\n self.commit_ids = [(4, vcs_commit.id)]\n else:\n self.commit_ids = [(4, commit_list[0].id)]\n else:\n self.pull_request = \"No pull requests\"\n commit = self._get_branch()[0].commit\n commits = self.env['vcs.commit'].search([\n ('sha_string', '=', commit.sha),\n ('type', '=', 'github'),\n ])\n if commits and self.id not in commits[0].branch_ids.ids:\n commits[0].branch_ids = [(4, self.id)]\n if not commits:\n vcs_commit = self.env['vcs.commit'].create({\n 'sha_string': commit.sha,\n 'type': 'github',\n 'branch_ids': [(4, self.id)],\n 'author': commit.raw_data['commit']['author']['name'],\n 'name': commit.raw_data['commit']['message'],\n 'date': fields.Date.from_string(\n commit.raw_data['commit']['author']['date']),\n 'url': commit._html_url.value,\n })\n self.commit_id = vcs_commit.id\n else:\n self.commit_id = commits[0].id\n elif self.related_type == 'bitbucket':\n # TODO: implement for bitbucket\n if pr[0]:\n self.pull_request = pr[0].title\n self.pull_request_link = pr[0].links['html']['href']\n else:\n self.pull_request = \"No pull requests\"\n # Bitbucket does not require a PR to get branch commits\n # TODO: The list of commits is wrapped inside another list\n commits = self._get_commits()[0]\n for commit in commits:\n commit_list = self.env['vcs.commit'].search([\n ('sha_string', '=', commit.hash),\n ('type', '=', 'bitbucket')\n ])\n if not commit_list:\n vcs_commit = self.env['vcs.commit'].create({\n 'sha_string': commit.hash,\n 'branch_ids': [(4, self.id)],\n 'type': 'bitbucket',\n 'author': commit.author.display_name,\n 'name': commit.message,\n 'date': fields.Date.from_string(commit.date),\n 'url': commit.links['html']['href'],\n })\n self.commit_ids = [(4, vcs_commit.id)]\n else:\n self.commit_ids = [(4, commit_list[0].id)]\n self.commit_id = sorted(\n self.commit_ids, key=lambda x: x.date, reverse=True)[0]", "def report_status(self, fork):\n with io.open(status_file(fork), 'r') as f:\n status = json.load(f)['status']\n\n if status == 'success':\n print(f\"Completed build for paper {fork}.\")\n else:\n print(f\"Paper for {fork} did not build successfully.\")", "def create_status(token, repo, sha, state, **kwargs):\n client = _github_login(token)\n owner, repository = repo.split('/')\n repo = client.repository(owner, repository)\n repo.create_status(\n sha, state,\n context=settings.BUILDSERVICE_STATUS_CONTEXT,\n **kwargs\n )", "def ack(self):\n status = CommitStatus(Status.SUCCESS, 'This commit was acknowledged.',\n 'review/gitmate/manual', 'http://gitmate.io/')\n self.set_status(status)", "def _fail_gitlab_commit_build_status(\n user: User, git_repo: str, git_ref: str, description: str\n):\n state = \"failed\"\n system_name = \"reana\"\n git_repo = urlparse.quote_plus(git_repo)\n description = urlparse.quote_plus(description)\n\n secret_store = REANAUserSecretsStore(user.id_)\n gitlab_access_token = secret_store.get_secret_value(\"gitlab_access_token\")\n commit_status_url = (\n f\"{REANA_GITLAB_URL}/api/v4/projects/{git_repo}/statuses/\"\n f\"{git_ref}?access_token={gitlab_access_token}&state={state}\"\n f\"&description={description}&name={system_name}\"\n )\n requests.post(commit_status_url)", "def git_something_to_commit(path: str) -> bool:\n command = ['cd', path, '&&', 'git', 'status', '--porcelain']\n stdout = subprocess.check_output(' '.join(command), shell=True)\n\n something_to_commit = bool(stdout)\n\n 
LOG.debug('something to commit: %s', something_to_commit)\n\n return something_to_commit", "def commit(self, *args, **kwargs):\n\n if self.is_dirty():\n self.git.commit(*args, **kwargs)", "async def status_for_repo_new_commits(*, github_access_token, repo_info, release_pr):\n async with init_working_dir(github_access_token, repo_info.repo_url) as working_dir:\n last_version = await get_project_version(\n repo_info=repo_info, working_dir=working_dir\n )\n default_branch = await get_default_branch(working_dir)\n return await any_commits_between_branches(\n branch1=\"origin/release-candidate\"\n if release_pr and release_pr.open\n else f\"v{last_version}\",\n branch2=default_branch,\n root=working_dir,\n )", "def commit_update(self):\n commit_msg = 'Issue {} - Update {} to {}'.format(\n self.arguments.issue_number, self.arguments.dependency,\n ' / '.join((self.changes[0]['hg_hash'],\n self.changes[0]['git_hash'])))\n try:\n self._update_dependencies_file()\n self._update_copied_code()\n self.root_repo.commit_changes(commit_msg)\n\n return commit_msg\n except subprocess.CalledProcessError:\n self._main_vcs.undo_changes()\n logger.error('Could not safely commit the changes. Reverting.')", "def set_status(self, status: CommitStatus):\n raise NotImplementedError", "def commit(args):\n if len(args)==0:\n run( \"commit\", \"-a\", \"-m\", \"'Updated files'\" )\n else:\n run( \"commit\", *args )\n echo(click.style('all changes committed locally', fg=\"green\") + click.style(' (sync if you want them remote too)',fg=\"blue\"))", "def set_pending_commit_status(self, desc):\n info = self.get_pull_request()\n sha = info['head']['sha']\n repo = info['head']['repo']['full_name']\n return self.set_commit_status('pending', desc, repo, sha)", "def test_status_update_to_completed():\n PRM().Project(prm_module_project) \\\n .run_integration(skip=True) \\\n .goto_lk().Card(prm_module_project).create_child_card('C1') \\\n .goto_card('C1').set_lane('not_started').set_size(15).update() \\\n .run_integration() \\\n .goto_lk().Card('C1').set_lane('completed').update() \\\n .run_integration() \\\n .goto_prm().Project(prm_module_project).verify_lk_total_cards(15)\\\n .verify_lk_not_started_child_cards(0)\\\n .verify_lk_completed_child_cards(15)\\\n .verify_lk_percent_of_cards_completed(100.0)", "def _update_github_pr(summary_url, statuses_url, cov_report, quality_report, footers, report_links, quality_tool):\n # Summary\n github.write_quality_summary(\n summary_url, cov_report, quality_report, footers[\"coverage\"], footers[\"quality\"]\n )\n\n # PR checks\n cov_link = report_links.get(\"coverage\", {}).get(\"url\", \"\")\n qual_link = report_links.get(quality_tool, {}).get(\"url\", \"\")\n\n _update_github_status(cov_report, statuses_url, \"coverage\", COV_THRESHOLD, cov_link)\n _update_github_status(quality_report, statuses_url, \"quality\", QUALITY_THRESHOLD, qual_link)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update Github commit status as pending.
def set_pending_commit_status(self, desc):
    info = self.get_pull_request()
    sha = info['head']['sha']
    repo = info['head']['repo']['full_name']
    return self.set_commit_status('pending', desc, repo, sha)
[ "def pending(self):\n for status in self.get_statuses():\n if status.context == 'review/gitmate/manual':\n return\n\n status = CommitStatus(Status.PENDING, 'This commit needs review.',\n 'review/gitmate/manual', 'http://gitmate.io')\n self.set_status(status)", "def pending(self):\n self.status = 0", "async def set_status(event, gh, *args, **kwargs):\n issue_number_found = ISSUE_RE.search(event.data[\"pull_request\"][\"title\"])\n if not issue_number_found:\n issue = await util.issue_for_PR(gh, event.data[\"pull_request\"])\n status = SKIP_ISSUE_STATUS if util.skip(\"issue\", issue) else FAILURE_STATUS\n else:\n if \"body\" in event.data[\"pull_request\"]:\n body = event.data[\"pull_request\"][\"body\"] or \"\"\n if not body or CLOSING_TAG not in body:\n issue_number = issue_number_found.group(\"issue\")\n new_body = BODY.format(body=body, issue_number=issue_number)\n body_data = {\"body\": new_body, \"maintainer_can_modify\": True}\n await gh.patch(event.data[\"pull_request\"][\"url\"], data=body_data)\n status = create_success_status(issue_number_found)\n await util.post_status(gh, event, status)", "def set_success_commit_status(self, desc):\n info = self.get_pull_request()\n sha = info['head']['sha']\n repo = info['head']['repo']['full_name']\n return self.set_commit_status('success', desc, repo, sha)", "async def build_status(self, ctx: commands.Context, commit: Commit=Commit()):\n status = await commit.get_status()\n await ctx.send(status)", "def unack(self):\n status = CommitStatus(Status.FAILED, 'This commit needs work.',\n 'review/gitmate/manual', 'http://gitmate.io/')\n self.set_status(status)", "def pending(self):\n if self.state is not State.RUNNING:\n raise RadishError(\"Steps can only be marked as pending when they run\")\n self.state = State.PENDING", "def action_update(self):\n pr = self._get_pr()\n if self.related_type == 'github':\n if pr[0]:\n self.pull_request = pr[0].title\n self.pull_request_link = pr[0]._rawData['_links']['html']['href']\n commits = pr[0].get_commits()\n for commit in commits:\n commit_list = self.env['vcs.commit'].search([\n ('sha_string', '=', commit.sha),\n ('type', '=', 'github')\n ])\n if not commit_list:\n vcs_commit = self.env['vcs.commit'].create({\n 'sha_string': commit.sha,\n 'type': 'github',\n 'branch_ids': [(4, self.id)],\n 'author': commit.raw_data['commit']['author']['name'],\n 'name': commit.raw_data['commit']['message'],\n 'date': fields.Date.from_string(\n commit.raw_data['commit']['author']['date']),\n 'url': commit._html_url.value,\n })\n self.commit_ids = [(4, vcs_commit.id)]\n else:\n self.commit_ids = [(4, commit_list[0].id)]\n else:\n self.pull_request = \"No pull requests\"\n commit = self._get_branch()[0].commit\n commits = self.env['vcs.commit'].search([\n ('sha_string', '=', commit.sha),\n ('type', '=', 'github'),\n ])\n if commits and self.id not in commits[0].branch_ids.ids:\n commits[0].branch_ids = [(4, self.id)]\n if not commits:\n vcs_commit = self.env['vcs.commit'].create({\n 'sha_string': commit.sha,\n 'type': 'github',\n 'branch_ids': [(4, self.id)],\n 'author': commit.raw_data['commit']['author']['name'],\n 'name': commit.raw_data['commit']['message'],\n 'date': fields.Date.from_string(\n commit.raw_data['commit']['author']['date']),\n 'url': commit._html_url.value,\n })\n self.commit_id = vcs_commit.id\n else:\n self.commit_id = commits[0].id\n elif self.related_type == 'bitbucket':\n # TODO: implement for bitbucket\n if pr[0]:\n self.pull_request = pr[0].title\n self.pull_request_link = 
pr[0].links['html']['href']\n else:\n self.pull_request = \"No pull requests\"\n # Bitbucket does not require a PR to get branch commits\n # TODO: The list of commits is wrapped inside another list\n commits = self._get_commits()[0]\n for commit in commits:\n commit_list = self.env['vcs.commit'].search([\n ('sha_string', '=', commit.hash),\n ('type', '=', 'bitbucket')\n ])\n if not commit_list:\n vcs_commit = self.env['vcs.commit'].create({\n 'sha_string': commit.hash,\n 'branch_ids': [(4, self.id)],\n 'type': 'bitbucket',\n 'author': commit.author.display_name,\n 'name': commit.message,\n 'date': fields.Date.from_string(commit.date),\n 'url': commit.links['html']['href'],\n })\n self.commit_ids = [(4, vcs_commit.id)]\n else:\n self.commit_ids = [(4, commit_list[0].id)]\n self.commit_id = sorted(\n self.commit_ids, key=lambda x: x.date, reverse=True)[0]", "async def pending(self, ctx, *reports):\n if not ctx.message.author.id == constants.OWNER_ID:\n return\n not_found = 0\n for _id in reports:\n try:\n report = Report.from_id(_id)\n except ReportException:\n not_found += 1\n continue\n report.pend()\n await report.update(ctx)\n report.commit()\n if not not_found:\n await ctx.send(f\"Marked {len(reports)} reports as patch pending.\")\n else:\n await ctx.send(f\"Marked {len(reports)} reports as patch pending. {not_found} reports were not found.\")", "def set_status(self, status: CommitStatus):\n raise NotImplementedError", "def test_only_change_pending_status(self, logged_in_client, test_props, financial_aid_status):\n test_props.pending_fa.status = financial_aid_status\n test_props.pending_fa.save()\n resp = logged_in_client.patch(test_props.docs_sent_url, **test_props.docs_sent_request_params)\n assert resp.status_code == status.HTTP_400_BAD_REQUEST", "def _update_github_status(report, url, key, threshold, details_link):\n title = key.capitalize()\n\n if report:\n value = int(re.sub(r\"\\D\", \"\", report[key]))\n if value >= threshold:\n pr_state = \"success\"\n description = f\"{title} diff is good!\"\n else:\n pr_state = \"failure\"\n description = (\n f\"{title} diff is below expected ({value}% out of {threshold}%)\"\n )\n else:\n pr_state = \"success\"\n description = \"No report provided for this commit\"\n details_link = \"\" # If not report, don't provide the link\n\n github.update_pr_status(url, pr_state, f\"FineTune {title}\", description, details_link)", "def set_pending(self):\n if self.get_state() != 'new':\n raise InvalidState, \"cannot transition to pending from '%s' state.\" % self.get_state()\n \n self.set_state('pending')", "def change_status(self):\n self.completed = not self.completed", "def start_review(self):\n if self.set_status:\n self.github_repo.create_status(\n state=\"pending\",\n description=\"Static analysis in progress.\",\n context=\"inline-plz\",\n sha=self.last_sha,\n )", "def perform_maapi_commit_status(self, usid):\n global maapisock_commit\n log.debug(\"usid=%d\", usid)\n stat = maapi.confirmed_commit_in_progress(maapisock_commit)\n log.debug(\"stat=%d\", stat)\n if stat != 0:\n maapi.cli_write(maapisock_commit, usid,\n \"Ongoing commit in progress!\\n\")\n maapi.cli_write(maapisock_commit, usid, \"Session id: %d\\n\" % stat)\n else:\n maapi.cli_write(maapisock_commit, usid,\n \"No ongoing commit in progress!\\n\")", "def ack(self):\n status = CommitStatus(Status.SUCCESS, 'This commit was acknowledged.',\n 'review/gitmate/manual', 'http://gitmate.io/')\n self.set_status(status)", "async def new_label(event, gh, *args, **kwargs):\n if 
util.label_name(event.data) == SKIP_ISSUE_LABEL:\n issue_number_found = ISSUE_RE.search(\n event.data[\"pull_request\"][\"title\"])\n if issue_number_found:\n status = create_success_status(issue_number_found)\n else:\n status = SKIP_ISSUE_STATUS\n await util.post_status(gh, event, status)", "def is_pending(self) -> bool:\n return self.status == self.PENDING" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract a list of usernames from a reviewer list.
def get_reviewers(self):
    match = reviewer_regex.match(self.body)
    if not match:
        return []
    return [x.strip('@ ') for x in match.group(1).split(',')]
[ "def get_all_usernames():\n return list(map(lambda u: u.username, get_all_users()))", "def extract_username_lists(files, outfile):\n\n authors = []\n for filepath in files:\n with open(filepath, \"r\") as f:\n for line in f:\n try:\n comm = json.loads(line)\n except:\n print(filepath, line)\n continue\n author = comm[\"author\"]\n authors.append(author)\n with open(outfile, \"w\") as f:\n f.write(\" \".join(authors))", "def authors_fullnames_list(self):\n return [a.fullname for a in self.authors_list]", "def get_all_usernames(self):\n self._cursor.execute(\"SELECT username FROM users\")\n usernames = self._cursor.fetchall()\n if usernames is not None:\n return [*usernames]\n # TODO: remove the print and check if [*usernames] can work for\n # all cases\n print(\"None found\")\n return []", "def list_service_usernames(args):\n usernames = get_usernames_for_passwords()\n action_set({'usernames': usernames or []})", "def getAuthorNamesAndEmail(authorInitialsList):\n db = DiaryDatabaseWrapper.DiaryDatabaseWrapper() \n authorNameList = list()\n authorEmailList = list()\n for authorInitials in authorInitialsList:\n authorRows = db.selectFromTable('authors',('name','email'),\\\n 'WHERE initials=\\'' + authorInitials + '\\'')\n authorNameList.append(authorRows[0][0])\n authorEmailList.append(authorRows[0][1])\n db.close()\n return authorNameList, authorEmailList", "def query_usernames_from_phids(conduit, phids):\n usernames = [u.userName for u in query_users_from_phids(conduit, phids)]\n return usernames", "def _get_names(self, persons, name_displayer):\n return [name_displayer.display(person) for person in persons]", "def list_login_names():\n return [user['login_name'] for user in _users]", "def review_participants(self):\n user_ids = list(\n self.reviews\n .filter(public=True)\n .values_list('user_id', flat=True)\n )\n users = set()\n\n if user_ids:\n users.update(User.objects.filter(pk__in=user_ids))\n\n return users", "def get_substring_search_results(self, search_string):\n list_of_users_to_display = []\n print(\"starting ...\")\n users = self.client.Users.users.find()\n for user in users:\n del user[\"_id\"]\n\n if \"user_name\" in user.keys():\n if search_string in user[\"user_name\"]:\n list_of_users_to_display.append(user)\n\n return list_of_users_to_display", "def surname_list(self, data):\n\n\t\tsurname= []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:CustomerInfo/tir38:PersonName\"):\n\t\t\t\t\n\t\t\t\tsurname1 = node['tir38:Surname']\n\t\t\t\tsurname.append(str(surname1))\n\t\texcept:\n\t\t\tsurname = ['N/A']\n\t\treturn surname", "def get_admin_usernames(member_guid_arr):\n username_arr = []\n if not member_guid_arr:\n return username_arr\n for user_id in member_guid_arr:\n print(\"User_id being passed in request\", user_id)\n try:\n response = requests.get(f\"{URL_ROOT}/legacy-services/rest/users/{user_id}\",\n headers={\"Authorization\": access_token, \"client-id\": \"legacy_migration\", \"User-Agent\": \"legacy_migration\"},\n timeout=80)\n user_element = ET.fromstring(response.content)\n edl_username = user_element.find('username')\n logging.info('Retrieved edl-username for guid: %s', user_id)\n logging.info('The edl username: %s', edl_username.text)\n # So we don't get duplicate usernames if there are multiple 'Admin' groups in the provider\n if edl_username.text not in username_arr:\n username_arr.append(edl_username.text)\n except requests.exceptions.ConnectionError:\n print(\"Failed to Retrieve edl-username for guid\" + str(user_id))\n # 
return None\n return username_arr", "def get_users(subs, num_posts=1000):\n users = Counter()\n for sub in subs:\n print(\"Scraping %s\" % sub)\n user_list = [post.author for post in reddit.subreddit(sub).hot(limit=num_posts)]\n users.update([user.name for user in user_list if user is not None])\n\n return users", "def allergen_get_name_list_from_id_list(allergen_id_list):\n allergen_name_list = []\n for allergen in allergen_id_list:\n allergen_name = mongo.db.allergens.find_one(\n {\"_id\": allergen})[\"name\"]\n allergen_name_list.append(allergen_name)\n return allergen_name_list", "def mentioned_users(self):\n mentioned_users = []\n for login in set(re.findall(r' @([\\-\\w\\d_]+)', ' ' + self.body)):\n try:\n mentioned_users.append(self.Endpoint.client.get_user(login))\n except UnknownObjectException:\n pass\n return mentioned_users", "def list_revisions_user(self, file_id):\n try:\n userlist = []\n file_revs = self._get_revisions_by_id(file_id)\n for rev in file_revs:\n userlist.append(\n (rev['lastModifyingUser']['displayName'], rev['modifiedTime']))\n return userlist\n except KeyError:\n return []", "def donor_names():\n donor_list = []\n for donor in donor_db:\n donor_list.append(donor[0])\n return donor_list", "def _get_users(self, usernames: Iterable[str]) -> List[_User]:\n existing_users = []\n for name in usernames:\n try:\n existing_users.append(self._github.get_user(name))\n except github.GithubException as exc:\n if exc.status != 404:\n raise exception.APIError(\n \"Got unexpected response code from the GitHub API\",\n status=exc.status,\n )\n LOGGER.warning(\"User {} does not exist\".format(name))\n return existing_users" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract the reviewers from the pull request body and call the GitHub API to check which reviewers are still pending.
def pending_reviewers(self):
    pending = self.get_reviewers()
    comments = self.get_comments()
    for comment in comments:
        username = comment['user']['login']
        if username in pending and approve_regex.search(comment['body']):
            pending.remove(username)
    return pending
[ "def pass_pull_requests(data):\n\tmissing_params = missing_parameters(params=data, required=['pull_requests'])\n\tif missing_params:\n\t\treturn {\"data\": f\"Missing required parameters: {missing_params}\", \"status\": False}\n\n\tcode_cloud = CodeCloud()\n\tresponse = {'status': True, 'data': []}\n\n\tfor pull_request in data['pull_requests']:\n\t\tpass_response = code_cloud.pass_pull_request_review(\n\t\t\tusername=data['username'], \n\t\t\trepo_name=pull_request['repo'], \n\t\t\tpull_request_id=pull_request['requestId'], \n\t\t\tcred_hash=data['cred_hash']\n\t\t)\n\n\t\tif not pass_response['status']: response['status'] = False\n\t\tresponse['data'].append(pass_response) \n\n\treturn response", "def add_reviewer_all_pull_requests(data):\n\tmissing_params = missing_parameters(params=data, required=['username'])\n\tif missing_params:\n\t\treturn {\"data\": f\"Missing required parameters: {missing_params}\", \"status\": False}\n\n\tcode_cloud = CodeCloud()\n\tresponses = {'status': True, 'data': []}\n\n\tfor request in data.get('pull_requests', []):\n\t\tpull_response = code_cloud.add_reviewer_to_pull_request(\n\t\t\tusername=data['username'], \n\t\t\trepo_name=request['repo'], \n\t\t\tpull_request_id=request['requestId'], \n\t\t\tcred_hash=data['cred_hash']\n\t\t)\n\n\t\tif not pull_response['status']: responses['status'] = False\n\t\tresponses['data'].append(pull_response)\n\t\n\treturn responses", "def check_pr(num=None):\n token = CONFIG['github_auth_token']\n try:\n bot = chatter.bot.JenkinsBot(token, CONFIG['organization'],\n CONFIG['repository'])\n comment, _ = bot.pr_reviewed_by(num)\n except RuntimeError:\n logging.exception(\"Error fetching comments\")\n\n if comment is None:\n return \"Pull request has not been reviewed\"\n else:\n return \"Pull request reviewed by @{}\".format(comment['user']['login'])", "def fetch_pull_request(repo, pr_number):\n gh_inst = _get_github_instance()\n gh_repo = gh_inst.get_repo(repo)\n gh_pr = gh_repo.get_pull(pr_number)\n\n # This is the time that *anything* in the PR was last updated. We use this as a\n # conservative guess of when comments were last updated if we don't have any other\n # last-updated information for a given comment.\n pr_last_updated = gh_pr.updated_at.astimezone()\n\n comments = []\n for gh_comment in gh_pr.get_issue_comments():\n time_info = CommentTime(creation_time=gh_comment.created_at.astimezone(),\n last_updated_time=gh_comment.updated_at.astimezone())\n this_comment = ConversationComment(username=gh_comment.user.login,\n time_info=time_info,\n url=gh_comment.html_url,\n content=gh_comment.body)\n comments.append(this_comment)\n\n for gh_comment in gh_pr.get_comments():\n time_info = CommentTime(creation_time=gh_comment.created_at.astimezone(),\n last_updated_time=gh_comment.updated_at.astimezone())\n this_comment = PRLineComment(username=gh_comment.user.login,\n time_info=time_info,\n url=gh_comment.html_url,\n content=gh_comment.body,\n path=gh_comment.path)\n comments.append(this_comment)\n\n for gh_comment in gh_pr.get_reviews():\n if gh_comment.body:\n # GitHub creates a Pull Request Review for any PR line comments that have been\n # made - even individual line comments made outside a review, or when you make\n # a set of line comments in a review but don't leave an overall\n # comment. 
Exclude empty reviews that are created in these circumstances.\n\n # Pull Request Reviews don't appear to support a last-updated time, so we use\n # the last updated time of the PR as a whole as a conservative guess.\n time_info = CommentTime(creation_time=gh_comment.submitted_at.astimezone(),\n last_updated_time=pr_last_updated,\n updated_time_is_guess=True)\n this_comment = PRReviewComment(username=gh_comment.user.login,\n time_info=time_info,\n url=gh_comment.html_url,\n content=gh_comment.body)\n comments.append(this_comment)\n\n time_info = CommentTime(creation_time=gh_pr.created_at.astimezone(),\n last_updated_time=pr_last_updated)\n return PullRequest(pr_number=pr_number,\n title=gh_pr.title,\n username=gh_pr.user.login,\n time_info=time_info,\n url=gh_pr.html_url,\n body=gh_pr.body,\n comments=comments)", "def pending_reviews(self):\n pending = QUORUM\n comments = self.get_comments()\n for comment in comments:\n username = comment['user']['login']\n if (approve_regex.search(comment['body'])\n and (username in QUORUM_USERS or len(QUORUM_USERS) == 0)):\n pending = pending - 1\n return pending", "def payload_pull_request(self):\n print(\"PR\", self.payload['action'])\n print(\"No. Commits in PR:\", self.payload['pull_request']['commits'])\n return Response(\"success\")", "def collect_open_pull_request_data(self):\n gh = login(token=self.github_token)\n ftw = gh.organization(self.organisation)\n repos = ftw.repositories()\n for repo in repos:\n for pull in repo.pull_requests('all'):\n last_updated = pull.updated_at or pull.created_at\n\n self.pull_info.append({\n 'pull_state': pull.state,\n 'pull_title': pull.title,\n 'last_updated': last_updated.timestamp() if last_updated else None,\n 'url': pull.html_url,\n 'creator': pull.user.login,\n 'created': pull.created_at.timestamp() if pull.created_at else None,\n 'merged': pull.merged_at.timestamp() if pull.merged_at else None,\n 'assignees': [assignee.login for assignee in\n pull.assignees],\n 'reviewers': [reviewer.login for reviewer in\n pull.requested_reviewers],\n })", "def test_get_with_review_request(self):\n # Publicly-accessible published review request.\n review_request = self.create_review_request(publish=True,\n create_repository=True)\n\n # Comment from a published review on a publicly-accessible\n # review request.\n review1 = self.create_review(review_request, publish=True)\n comment1 = self._create_diff_comment(review_request, review1)\n\n # Comment from an unpublished review on a publicly-accessible\n # review request.\n review2 = self.create_review(review_request, publish=False)\n self._create_diff_comment(review_request, review2)\n\n # Comment from a published review owned by the requester on a\n # publicly-accessible review request.\n review3 = self.create_review(review_request,\n user=self.user,\n publish=True)\n comment3 = self._create_diff_comment(review_request, review3)\n\n # Comment from an unpublished review owned by the requester on a\n # publicly-accessible review request.\n review4 = self.create_review(review_request,\n user=self.user,\n publish=False)\n comment4 = self._create_diff_comment(review_request, review4)\n\n # Published review request from a private repository the requester\n # does not have access to.\n repo = self.create_repository(public=False)\n review_request_inaccessible = self.create_review_request(\n repository=repo,\n publish=True)\n\n # Comment from a published review on a private repository the requester\n # does not have access to.\n review5 = 
self.create_review(review_request_inaccessible, publish=True)\n self._create_diff_comment(review_request_inaccessible, review5)\n\n # Comment from an unpublished review on a private repository the\n # requester does not have access to.\n review6 = self.create_review(review_request_inaccessible,\n publish=False)\n self._create_diff_comment(review_request_inaccessible, review6)\n\n # Testing that only comments from the given review request\n # are returned.\n rsp = self.api_get(get_root_diff_comment_list_url(), {\n 'review-request-id': review_request.id,\n }, expected_mimetype=review_diff_comment_list_mimetype)\n rsp_items = rsp[self.resource.list_result_key]\n\n self.assertEqual(rsp['stat'], 'ok')\n self.assertEqual(rsp['total_results'], 3)\n self.compare_item(rsp_items[0], comment1)\n self.compare_item(rsp_items[1], comment3)\n self.compare_item(rsp_items[2], comment4)\n\n # Testing that no comments are returned when the requester does\n # not have access to the given review request.\n rsp = self.api_get(get_root_diff_comment_list_url(), {\n 'review-request-id': review_request_inaccessible.id,\n }, expected_mimetype=review_diff_comment_list_mimetype)\n rsp_items = rsp[self.resource.list_result_key]\n\n self.assertEqual(rsp['stat'], 'ok')\n self.assertEqual(rsp['total_results'], 0)", "def _pullreviewidentifiers(repo, identifiers):\n reviews = repo.reviews\n\n # In the ideal world, we'd use RBTools to talk directly to the ReviewBoard\n # API. Unfortunately, the Mercurial distribution on Windows doesn't ship\n # with the json module. So, we proxy through the Mercurial server and have\n # it do all the heavy lifting.\n # FUTURE Hook up RBTools directly.\n remote = hg.peer(repo, {}, reviews.remoteurl)\n caps = getreviewcaps(remote)\n if 'pullreviews' not in caps:\n raise util.Abort('cannot pull code review metadata; '\n 'server lacks necessary features')\n\n req = commonrequestdict(repo.ui)\n req['identifiers'] = [str(i) for i in identifiers]\n res = calljsoncommand(repo.ui, remote, 'pullreviews', data=req)\n\n for rid, data in sorted(res['reviewrequests'].iteritems()):\n reviews.savereviewrequest(rid, data)\n\n return res['reviewrequests']", "def review(self, commits, **kwargs):\n status, data = self.run_gerrit_command('review', commits, **kwargs)\n\n return status, data", "def get_pull_requests(self, repo = None):\n if repo is None:\n repo = self.get_repo()\n return repo.get_pulls()[0:self.number_of_latest_pull_requests_to_examine]", "def doreview(repo, ui, remote, nodes):\n assert nodes\n assert 'pushreview' in getreviewcaps(remote)\n\n # Ensure a color for ui.warning is defined.\n try:\n color = extensions.find('color')\n if 'ui.warning' not in color._styles:\n color._styles['ui.warning'] = 'red'\n except Exception:\n pass\n\n bzauth = getbugzillaauth(ui)\n if not bzauth:\n ui.warn(_('Bugzilla credentials not available. Not submitting review.\\n'))\n return\n\n identifier = None\n\n # The review identifier can come from a number of places. In order of\n # priority:\n # 1. --reviewid argument passed to push command\n # 2. The active bookmark\n # 3. The active branch (if it isn't default)\n # 4. 
A bug number extracted from commit messages\n\n if repo.reviewid:\n identifier = repo.reviewid\n\n # TODO The server currently requires a bug number for the identifier.\n # Pull bookmark and branch names in once allowed.\n #elif repo._bookmarkcurrent:\n # identifier = repo._bookmarkcurrent\n #elif repo.dirstate.branch() != 'default':\n # identifier = repo.dirstate.branch()\n\n if not identifier:\n identifiers = set()\n for node in nodes:\n ctx = repo[node]\n bugs = parse_bugs(ctx.description().split('\\n')[0])\n if bugs:\n identifier = 'bz://%s' % bugs[0]\n identifiers.add(identifier)\n\n if len(identifiers) > 1:\n raise util.Abort('cannot submit reviews referencing multiple '\n 'bugs', hint='limit reviewed changesets '\n 'with \"-c\" or \"-r\" arguments')\n\n identifier = ReviewID(identifier)\n\n if not identifier:\n ui.write(_('Unable to determine review identifier. Review '\n 'identifiers are extracted from commit messages automatically. '\n 'Try to begin one of your commit messages with \"Bug XXXXXX -\"\\n'))\n return\n\n # Append irc nick to review identifier.\n # This is an ugly workaround to a limitation in ReviewBoard. RB doesn't\n # really support changing the owner of a review. It is doable, but no\n # history is stored and this leads to faulty attribution. More details\n # in bug 1034188.\n if not identifier.user:\n ircnick = ui.config('mozilla', 'ircnick', None)\n identifier.user = ircnick\n\n if hasattr(repo, 'mq'):\n for patch in repo.mq.applied:\n if patch.node in nodes:\n ui.warn(_('(You are using mq to develop patches. For the best '\n 'code review experience, use bookmark-based development '\n 'with changeset evolution. Read more at '\n 'https://mozilla-version-control-tools.readthedocs.io/en/latest/mozreview-user.html)\\n'))\n break\n\n req = commonrequestdict(ui, bzauth)\n req['identifier'] = identifier.full\n req['changesets'] = []\n req['obsolescence'] = obsolete.isenabled(repo, obsolete.createmarkersopt)\n req['deduce-reviewers'] = ui.configbool('reviewboard', 'deduce-reviewers', True)\n\n reviews = repo.reviews\n oldparentid = reviews.findparentreview(identifier=identifier.full)\n\n # Include obsolescence data so server can make intelligent decisions.\n obsstore = repo.obsstore\n for node in nodes:\n precursors = [hex(n) for n in obsolete.allprecursors(obsstore, [node])]\n req['changesets'].append({\n 'node': hex(node),\n 'precursors': precursors,\n })\n\n ui.write(_('submitting %d changesets for review\\n') % len(nodes))\n\n res = calljsoncommand(ui, remote, 'pushreview', data=req, httpcap='submithttp',\n httpcommand='mozreviewsubmitseries')\n\n # Re-encode all items in res from u'' to utf-8 byte str to avoid\n # exceptions during str operations.\n reencoderesponseinplace(res)\n\n if 'error' in res:\n raise error.Abort(res['error'])\n\n for w in res['display']:\n ui.write('%s\\n' % w)\n\n reviews.baseurl = res['rburl']\n newparentid = res['parentrrid']\n reviews.addparentreview(identifier.full, newparentid)\n\n nodereviews = {}\n reviewdata = {}\n\n for rid, info in sorted(res['reviewrequests'].iteritems()):\n if 'node' in info:\n node = bin(info['node'])\n nodereviews[node] = rid\n\n reviewdata[rid] = {\n 'status': info['status'],\n 'public': info['public'],\n }\n\n if 'reviewers' in info:\n reviewdata[rid]['reviewers'] = info['reviewers']\n\n reviews.remoteurl = remote.url()\n\n for node, rid in nodereviews.items():\n reviews.addnodereview(node, rid, newparentid)\n\n reviews.write()\n for rid, data in reviewdata.iteritems():\n reviews.savereviewrequest(rid, 
data)\n\n havedraft = False\n\n ui.write('\\n')\n for node in nodes:\n rid = nodereviews[node]\n ctx = repo[node]\n # Bug 1065024 use cmdutil.show_changeset() here.\n ui.write('changeset: %s:%s\\n' % (ctx.rev(), ctx.hex()[0:12]))\n ui.write('summary: %s\\n' % ctx.description().splitlines()[0])\n ui.write('review: %s' % reviews.reviewurl(rid))\n if not reviewdata[rid].get('public'):\n havedraft = True\n ui.write(' (draft)')\n ui.write('\\n\\n')\n\n ui.write(_('review id: %s\\n') % identifier.full)\n ui.write(_('review url: %s') % reviews.parentreviewurl(identifier.full))\n if not reviewdata[newparentid].get('public'):\n havedraft = True\n ui.write(' (draft)')\n ui.write('\\n')\n\n # Warn people that they have not assigned reviewers for at least some\n # of their commits.\n for node in nodes:\n rd = reviewdata[nodereviews[node]]\n if not rd.get('reviewers', None):\n ui.write('\\n')\n ui.warn(_('(review requests lack reviewers; visit review url '\n 'to assign reviewers)\\n'))\n break\n\n # Make it clear to the user that they need to take action in order for\n # others to see this review series.\n if havedraft:\n # If there is no configuration value specified for\n # reviewboard.autopublish, prompt the user. Otherwise, publish\n # automatically or not based on this value.\n if ui.config('reviewboard', 'autopublish', None) is None:\n ui.write('\\n')\n publish = ui.promptchoice(_('publish these review '\n 'requests now (Yn)? '\n '$$ &Yes $$ &No')) == 0\n else:\n publish = ui.configbool('reviewboard', 'autopublish')\n\n if publish:\n publishreviewrequests(ui, remote, bzauth, [newparentid])\n else:\n ui.status(_('(visit review url to publish these review '\n 'requests so others can see them)\\n'))", "def github_list_pull_requests(urls, numbers_only=False):\n pulls = github_get_pull_request_all(urls)\n formatted_pulls = []\n print \"Total pull count\", len(pulls)\n sys.stdout.write(\"Processing pulls...\")\n for pull in pulls:\n n = pull[\"number\"]\n sys.stdout.write(\" %d\" % n)\n sys.stdout.flush()\n pull_info = github_get_pull_request(urls, n)\n if not pull_info:\n # Pull request is an issue\n continue\n mergeable = pull_info[\"mergeable\"]\n if pull[\"head\"][\"repo\"]:\n repo = pull[\"head\"][\"repo\"][\"html_url\"]\n else:\n repo = None\n branch = pull[\"head\"][\"ref\"]\n created_at = pull[\"created_at\"]\n created_at = time.strptime(created_at, \"%Y-%m-%dT%H:%M:%SZ\")\n created_at = time.mktime(created_at)\n username = pull[\"user\"][\"login\"]\n user_info = github_get_user_info(urls, username)\n author = \"\\\"%s\\\" <%s>\" % (user_info.get(\"name\", \"unknown\"),\n user_info.get(\"email\", \"\"))\n branch_against = pull[\"base\"][\"ref\"]\n formatted_pulls.append({\n 'created_at': created_at,\n 'n': n,\n 'repo': repo,\n 'branch': branch,\n 'author': author,\n 'mergeable': mergeable,\n 'branch_against': branch_against,\n })\n formatted_pulls.sort(key=lambda x: x['created_at'])\n print \"\\nPatches that cannot be merged without conflicts:\"\n nonmergeable = []\n for pull in formatted_pulls:\n if pull['mergeable']:\n continue\n nonmergeable.append(int(pull['n']))\n if numbers_only:\n print pull['n'],\n else:\n print \"#%03d: %s %s (against %s)\" % (pull['n'], pull['repo'], pull['branch'], pull['branch_against'])\n print unicode(\" Author : %s\" % pull['author']).encode('utf8')\n print \" Date : %s\" % time.ctime(pull['created_at'])\n if numbers_only:\n print\n print\n print \"-\"*80\n print \"Patches that can be merged without conflicts:\"\n mergeable_list = []\n for pull in 
formatted_pulls:\n if not pull['mergeable']:\n continue\n mergeable_list.append(int(pull['n']))\n if numbers_only:\n print pull['n'],\n else:\n print \"#%03d: %s %s (against %s)\" % (pull['n'], pull['repo'], pull['branch'], pull['branch_against'])\n print unicode(\" Author : %s\" % pull['author']).encode('utf8')\n print \" Date : %s\" % time.ctime(pull['created_at'])\n if numbers_only:\n print\n print\n return nonmergeable, mergeable_list", "def _get_pull_requests(self):\n try:\n pull_requests = self.call_api(\n '/pulls?state=closed&base={}'.format(self.master_branch)\n )\n except GithubApiNoResultsError:\n pull_requests = []\n\n for pull_request in pull_requests:\n if self._include_pull_request(pull_request):\n yield pull_request", "def pull_request_by_number(self, context, params):\n\n access_token = context[\"headers\"][\"token\"]\n response = util.rest(\n \"GET\", f\"repos/{params.get('repo_name')}/pulls/{params.get('search_value')}\", access_token)\n response = json.loads(response.text)\n data = GithubPullRequest(\n repo_name=params.get('repo_name'),\n title=response[\"title\"],\n body=response[\"body\"],\n head=response[\"head\"][\"ref\"],\n base=response[\"base\"][\"ref\"],\n instant_merge=response[\"auto_merge\"],\n pull_request_id=params.get('search_value'),\n state=response[\"state\"]\n )\n return data.__dict__", "def get_pullrequest_infos(api, milestone):\n\n class NotesRenderer(mistune.Renderer):\n \"\"\"Renderer for the release notes\"\"\"\n def __init__(self) -> None:\n super().__init__()\n self.in_notes = False\n\n def block_code(self, code, _lang): # pylint: disable=signature-differs\n if self.in_notes:\n self.in_notes = False\n return code\n return \"\"\n\n def paragraph(self, text):\n self.in_notes = \"Release Notes\" in text\n return \"\"\n\n summaries = []\n i = 0\n\n renderer = NotesRenderer()\n markdown = mistune.Markdown(renderer=renderer)\n\n for i, pull_request in enumerate(list_prs_for_milestone(api, milestone)):\n msg = markdown(pull_request.body)\n print(f\" * {pull_request.url}\")\n if not msg:\n msg = f\" * {pull_request.title}: {pull_request.body}\"\n summaries.append(msg)\n\n msg_ok(f\"Collected summaries from {i+1} pull requests.\")\n return \"\\n\\n\".join(summaries)", "def test_get_by_repo(self):\n # Comment from a public repository.\n repo1 = self.create_repository(name='repo1', public=True)\n review_request1 = self.create_review_request(publish=True,\n repository=repo1)\n review1 = self.create_review(review_request1, publish=True)\n comment1 = self._create_diff_comment(review_request1, review1)\n\n # Comment from a private repository that the requester has\n # access to from being listed in the repository's users list.\n repo2 = self.create_repository(name='repo2', public=False)\n repo2.users.add(self.user)\n review_request2 = self.create_review_request(publish=True,\n repository=repo2)\n review2 = self.create_review(review_request2, publish=True)\n comment2 = self._create_diff_comment(review_request2, review2)\n\n # An invite-only review group that the requester has access to.\n group_accessible = self.create_review_group(invite_only=True)\n group_accessible.users.add(self.user)\n\n # Comment from a private repository that the requester has\n # access to through being a member of a targeted review group.\n repo3 = self.create_repository(name='repo3', public=False)\n repo3.review_groups.add(group_accessible)\n review_request3 = self.create_review_request(publish=True,\n repository=repo3)\n review3 = self.create_review(review_request3, publish=True)\n 
comment3 = self._create_diff_comment(review_request3, review3)\n\n # Comment from a private repository that the requester does\n # not have access to.\n repo4 = self.create_repository(name='repo4', public=False)\n review_request4 = self.create_review_request(publish=True,\n repository=repo4)\n review4 = self.create_review(review_request4, publish=True)\n self._create_diff_comment(review_request4, review4)\n\n # Comment from a private repository that the requester has access\n # to through being a member of a targeted review group and\n # being listed on the repository's users list.\n repo5 = self.create_repository(name='repo5', public=False)\n repo5.review_groups.add(group_accessible)\n repo5.users.add(self.user)\n review_request5 = self.create_review_request(publish=True,\n repository=repo5)\n review5 = self.create_review(review_request5, publish=True)\n comment5 = self._create_diff_comment(review_request5, review5)\n\n # An invite-only review group that the requester does not have\n # access to.\n group_inaccessible = self.create_review_group(invite_only=True)\n\n # Comment from a private repository that targets an invite-only review\n # group, but that the requester has access to from being listed in the\n # repository's users list.\n repo6 = self.create_repository(name='repo6', public=False)\n repo6.review_groups.add(group_inaccessible)\n repo6.users.add(self.user)\n review_request6 = self.create_review_request(publish=True,\n repository=repo6)\n review6 = self.create_review(review_request6, publish=True)\n comment6 = self._create_diff_comment(review_request6, review6)\n\n # Comment from a private repository that targets an invite-only review\n # group and that the requester does not have access to.\n repo7 = self.create_repository(name='repo7', public=False)\n repo7.review_groups.add(group_inaccessible)\n review_request7 = self.create_review_request(publish=True,\n repository=repo7)\n review7 = self.create_review(review_request7, publish=True)\n self._create_diff_comment(review_request7, review7)\n\n rsp = self.api_get(get_root_diff_comment_list_url(), {},\n expected_mimetype=review_diff_comment_list_mimetype)\n rsp_items = rsp[self.resource.list_result_key]\n\n self.assertEqual(rsp['stat'], 'ok')\n self.assertEqual(rsp['total_results'], 5)\n self.compare_item(rsp_items[0], comment1)\n self.compare_item(rsp_items[1], comment2)\n self.compare_item(rsp_items[2], comment3)\n self.compare_item(rsp_items[3], comment5)\n self.compare_item(rsp_items[4], comment6)", "def github_list_pull_requests(urls, numbers_only=False):\n pulls = github_get_pull_request_all(urls)\n formated_pulls = []\n print \"Total pull count\", len(pulls)\n sys.stdout.write(\"Processing pulls...\")\n for pull in pulls:\n n = pull[\"number\"]\n sys.stdout.write(\" %d\" % n)\n sys.stdout.flush()\n pull_info = github_get_pull_request(urls, n)\n mergeable = pull_info[\"mergeable\"]\n if pull[\"head\"][\"repo\"]:\n repo = pull[\"head\"][\"repo\"][\"html_url\"]\n else:\n repo = None\n branch = pull[\"head\"][\"ref\"]\n created_at = pull[\"created_at\"]\n created_at = time.strptime(created_at, \"%Y-%m-%dT%H:%M:%SZ\")\n created_at = time.mktime(created_at)\n username = pull[\"head\"][\"user\"][\"login\"]\n user_info = github_get_user_info(urls, username)\n author = \"\\\"%s\\\" <%s>\" % (user_info.get(\"name\", \"unknown\"),\n user_info.get(\"email\", \"\"))\n formated_pulls.append((created_at, n, repo, branch, author, mergeable))\n formated_pulls.sort(key=lambda x: x[0])\n print \"\\nPatches that cannot be merged without 
conflicts:\"\n nonmergeable = []\n for created_at, n, repo, branch, author, mergeable in formated_pulls:\n if mergeable: continue\n nonmergeable.append(int(n))\n if numbers_only:\n print n,\n else:\n print \"#%03d: %s %s\" % (n, repo, branch)\n print unicode(\" Author : %s\" % author).encode('utf8')\n print \" Date : %s\" % time.ctime(created_at)\n if numbers_only:\n print\n print\n print \"-\"*80\n print \"Patches that can be merged without conflicts:\"\n mergeable_list = []\n for last_change, n, repo, branch, author, mergeable in formated_pulls:\n if not mergeable: continue\n mergeable_list.append(int(n))\n if numbers_only:\n print n,\n else:\n print \"#%03d: %s %s\" % (n, repo, branch)\n print unicode(\" Author : %s\" % author).encode('utf8')\n print \" Date : %s\" % time.ctime(last_change)\n if numbers_only:\n print\n return nonmergeable, mergeable_list", "def get_wrong_commits(pull):\n pr_author_email = (pull.user.email or \"\").lower()\n print(\"GitHub PR author email:\", pr_author_email)\n print(\"Check commits:\")\n wrong_commits = set()\n for commit in pull.get_commits():\n # import pprint; pprint.pprint(commit.raw_data)\n print(\"Commit SHA:\", commit.sha)\n # Use raw data because commit author can be non GitHub user\n commit_author_email = (commit.raw_data[\"commit\"][\"author\"][\"email\"] or \"\").lower()\n commit_committer_email = (commit.raw_data[\"commit\"][\"committer\"][\"email\"] or \"\").lower()\n print(\" Commit author email:\", commit_author_email)\n print(\" Commit committer email:\", commit_committer_email)\n if not github_api.is_valid_user(commit.author):\n print(\n \" ERROR: User with the commit author email is absent in GitHub:\",\n commit.raw_data[\"commit\"][\"author\"][\"name\"],\n )\n wrong_commits.add(commit.sha)\n if not github_api.is_valid_user(commit.committer):\n print(\n \" ERROR: User with the commit committer email is absent in GitHub:\",\n commit.raw_data[\"commit\"][\"committer\"][\"name\"],\n )\n wrong_commits.add(commit.sha)\n if not commit.raw_data[\"commit\"][\"verification\"][\"verified\"]:\n print(\n \" WARNING: The commit is not verified. Reason:\",\n commit.raw_data[\"commit\"][\"verification\"][\"reason\"],\n )\n if pr_author_email != commit_author_email or pr_author_email != commit_committer_email:\n print(\" WARNING: Commit emails and GitHub PR author public email are differnt\")\n return wrong_commits" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get number of pending reviews from comments.
def pending_reviews(self):
    pending = QUORUM
    comments = self.get_comments()
    for comment in comments:
        username = comment['user']['login']
        if (approve_regex.search(comment['body']) and
                (username in QUORUM_USERS or len(QUORUM_USERS) == 0)):
            pending = pending - 1
    return pending
[ "def disapproved_comments_count(self, post):\r\n count = self.disapproved_comments(post).count()\r\n return count", "def get_reviews_count(self):\n\n return len(self.pull_requests)", "def get_num_reviews(self, submission_pk, submission_id):\n return len(list(self.execute(\"SELECT * FROM reviews WHERE submission_id = ? AND submission_pk = ?\", (submission_id, database_blob(submission_pk)))))", "def approved_comments_count(self, post):\r\n count = self.approved_comments(post).count()\r\n return count", "def count_review(self):\n reviews = ReviewRating.objects.filter(product=self, status=True).aggregate(count=Count('id'))\n count = 0\n if reviews['count'] is not None:\n count = int(reviews['count'])\n return count", "def get_comments_count(self,obj): \n return obj.comments.filter(is_approved=True).count()", "def comments_count(self, post):\r\n count = self.comments(post).count()\r\n return count", "def GetCommentCount(self):\n return self.comments_count", "def get_post_comments_count_request(post_id):\n post = get_post_by_id(post_id)\n if not post:\n abort(404)\n return jsonify({'count': get_post_comments_count(post_id)})", "def text_reviews_count(self):\n return int(self._book_dict[\"text_reviews_count\"])", "def get_product_total_reviews(self, driver):\n try:\n frame = driver.find_element_by_class_name(\"BVRRRatingSummary\")\n number = frame.find_element_by_class_name(\"BVRRNumber\").text\n return number\n except NoSuchElementException:\n return \"0\"", "def get_review_count(self, review_uid: int) -> Tuple[int, int]:\n db_reviews = DBDiscussionSession.query(LastReviewerEdit).filter_by(review_uid=review_uid)\n count_of_okay = db_reviews.filter_by(is_okay=True).count()\n count_of_not_okay = db_reviews.filter_by(is_okay=False).count()\n\n return count_of_okay, count_of_not_okay", "def comment_count(self) -> int:\n block = self.soup.find(\"div\", class_=\"submission-artist-stats\").text.split('|')\n return int(block[2])", "def test_portals_id_designs_nk_comments_count_get(self):\n pass", "def getCommentCountFor(obj):", "def get_comment_karma(self):\n comment_ids = [c.id for c in self.comments]\n select = CommentUpvote.select(db.and_(\n CommentUpvote.comment_id.in_(comment_ids),\n CommentUpvote.user_id != self.id\n )\n )\n rs = db.engine.execute(select)\n return rs.rowcount", "def test_portals_id_designs_nk_commenters_count_get(self):\n pass", "def fetch_issue_counts(review_request, extra_query=None):\n issue_counts = {\n BaseComment.OPEN: 0,\n BaseComment.RESOLVED: 0,\n BaseComment.DROPPED: 0,\n BaseComment.VERIFYING_RESOLVED: 0,\n BaseComment.VERIFYING_DROPPED: 0,\n }\n\n q = Q(public=True) & Q(base_reply_to__isnull=True)\n\n if extra_query:\n q = q & extra_query\n\n issue_statuses = review_request.reviews.filter(q).values(\n 'comments__pk',\n 'comments__issue_opened',\n 'comments__issue_status',\n 'file_attachment_comments__pk',\n 'file_attachment_comments__issue_opened',\n 'file_attachment_comments__issue_status',\n 'general_comments__pk',\n 'general_comments__issue_opened',\n 'general_comments__issue_status',\n 'screenshot_comments__pk',\n 'screenshot_comments__issue_opened',\n 'screenshot_comments__issue_status')\n\n if issue_statuses:\n comment_fields = {\n 'comments': set(),\n 'file_attachment_comments': set(),\n 'general_comments': set(),\n 'screenshot_comments': set(),\n }\n\n for issue_fields in issue_statuses:\n for key, comments in comment_fields.items():\n issue_opened = issue_fields[key + '__issue_opened']\n comment_pk = issue_fields[key + '__pk']\n\n if issue_opened and 
comment_pk not in comments:\n comments.add(comment_pk)\n issue_status = issue_fields[key + '__issue_status']\n\n if issue_status:\n issue_counts[issue_status] += 1\n\n logger.debug('Calculated issue counts for review request ID %s '\n 'across %s review(s): Resulting counts = %r; '\n 'DB values = %r; Field IDs = %r',\n review_request.pk, len(issue_statuses), issue_counts,\n issue_statuses, comment_fields)\n\n return issue_counts", "def count_completed_reviews(cls, review_steps):\n count = 0\n for review_step in review_steps:\n if review_step.state == domain.REVIEW_STATE_COMPLETED:\n count += 1\n return count" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get account balance for the given currency. Calls the `GET /accounts/{account_id}/balances` endpoint and returns only the balance of the given currency. Returns 0 if the given currency does not exist in the returned balances.
async def balance(self, currency: str) -> int:
    return (await self.balances()).get(currency, 0)
[ "def getAccountBalance(self, currency={}):\n data = self.getInfo()\n\n if currency.__contains__(\"BTC\"):\n return Decimal(data['return']['funds']['btc'])\n elif currency.__contains__(\"USD\"):\n return Decimal(data['return']['funds']['usd'])\n else:\n return {'BTC': Decimal(data['return']['funds']['btc']), 'USD': Decimal(data['return']['funds']['usd'])}", "def get_account_balance(self):\n return self.execute_private_api(\"/api/accounts/balance\", \"GET\")", "def balances():\n return _make_request('balances', private=True)['balances']", "def balance(self):\n assert self._id, \"Account must be created first.\"\n\n if hasattr(opentxs, 'OTAPI_Wrap_getAccountData'): # new api name\n res = opentxs.OTAPI_Wrap_getAccountData(self.server_id, self.nym._id, self._id)\n else: # todo: old api name, remove in due time\n res = opentxs.OTAPI_Wrap_getAccountFiles(self.server_id, self.nym._id, self._id)\n if res < 0:\n raise ReturnValueError(res)\n return opentxs.OTAPI_Wrap_GetAccountWallet_Balance(self._id)", "def get_balances(self):\n self.inventory = []\n for bal in self.account['balances']:\n symbol = bal['asset']\n amount = float(bal['free']) + float(bal['locked'])\n \n if (amount > 0 or symbol in TRADE_CURRENCIES) and (symbol in self.currencies):\n coin = deepcopy(self.currencies[self.currencies.index(symbol)])\n coin.amount = amount\n self.inventory.append(coin)\n\n if (symbol not in TRADE_CURRENCIES):\n print('Non-zero balance for ' + symbol + ' not included in trade currencies!')", "def balance(ctx, address):\n if address == '':\n address = config.PUBLIC_KEY.to_bytes().hex()\n id__address, address = normalize_address(address, asHex=True)\n if VERBOSE:\n app_log.info(f\"Get balance for address {address}\")\n assert(len(address) == 64) # TODO: better user warning\n\n url = \"{}/balance?walletId={}&action=run\".format(ctx.obj['client'], address)\n if VERBOSE:\n app_log.info(f\"Calling {url}\")\n res = get(url)\n if VERBOSE:\n app_log.info(res)\n if path.isdir(\"tmp\"):\n # Store for debug purposes\n with open(\"tmp/answer.txt\", \"w\") as fp:\n fp.write(res.text)\n # print(json.dumps(fake_table_to_list(res.text)))\n try:\n data = list(fake_table_to_list(res.text))[0]\n balance = data[\"balance\"].replace(\"\\u2229\", \"\")\n if ctx.obj['json']:\n print(json.dumps({\"block\": data[\"block height\"], \"balance\": balance,\n \"address\": address,\n \"id__\": id__address}))\n else:\n print(f\"At block: {data['block height']}\")\n print(f\"Your Balance is: {balance}\")\n return float(balance)\n except:\n data = {}\n if ctx.obj['json']:\n print(json.dumps({\"block\": \"N/A\", \"balance\": 0,\n \"address\": address, \"id__\": id__address}))\n else:\n print(f\"At block: N/A\")\n print(f\"Your Balance is: N/A\")\n return 0", "async def futures_account_balance(self, **params):\r\n return await self.client_helper(\"futures_account_balance\", **params)", "def get_balance(self, address, chain_id=None, token_id=None, issuer_id=None):\n params = FATd.check_id_params(chain_id, token_id, issuer_id)\n params[\"address\"] = address\n return self._request(\"get-balance\", params)", "def calculate_account_balance(account):\n return models.Transaction.filter(\n account=account,\n ).order_by(\n 'executed_on',\n ).aggregate(\n Sum('amount'),\n )", "def get_balance(self, address=''):\n params = {'address': address}\n r = self._make_request('getBalance', **params)\n return r", "def get_balance(self):\n returnList = []\n for account in self.accounts:\n balance = self.f.get_balance(account).amount.amount + 42\n 
returnList.append(BalanceItem(account.iban, balance ))\n return returnList", "def balances_by_address(account):\n return wallet['obj'].balances_by_address(account)", "def balance(self) -> float:\n return self._get_account_info_double(AccountInfoDouble.ACCOUNT_BALANCE)", "def get_balance_by_cid(self, cid):\n #remove LIMIT 1 FOR UPDATE SKIP LOCKED\n query=sql.SQL(\"SELECT (banking.balance, cur.currency_name) FROM banking INNER JOIN currency AS cur ON (cur.id=banking.currency_id) WHERE banking.client_id={cid} ;\").\\\n format(cid=sql.Literal(cid))\n self.db_log.debug(query)\n self.cur.execute(query)\n fet=eval(self.cur.fetchone()[0])\n balance=fet[0]\n base=fet[1]\n return {'balance':balance, 'base': base}\n #return pd.read_sql(query, self.conn).ix[0]", "def get_balance(self, cr, uid, ids, context=None):\n total = 0.0\n if not ids:\n return total\n for line in self.read(\n cr, uid, ids, ['debit', 'credit'], context=context):\n total += (line['debit'] or 0.0) - (line['credit'] or 0.0)\n return total", "def get_balance(self):\n if self.available:\n return self.total_amount\n else:\n raise ValueError('This bank account is closed')", "def balanceOf(acct, tokenId):\n return Get(GetContext(), _concatkey(_concatkey(BALANCE_PREFIX, tokenId), acct))", "def balance(self):\n return Amount(self._balance, \"usd\")", "def get_current_account_balance(account_id, access_token):\n response = requests.get(\n f\"https://api.monzo.com/balance?account_id={account_id}\",\n headers={\"Authorization\": f\"Bearer {access_token}\"},\n )\n return response.json()[\"balance\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send an amount of currency to the payee. Calls the `POST /accounts/{account_id}/payments` endpoint and returns the payment details.
async def send_payment(self, currency: str, amount: int, payee: str) -> Payment:
    p = await self.client.create(self._resources("payment"),
                                 payee=payee, currency=currency, amount=amount)
    return Payment(id=p["id"], account_id=self.id,
                   payee=payee, currency=currency, amount=amount)
[ "def send_fee(self, address, amount, currency):\n req = {\n 'address': address,\n 'amount': amount,\n 'currency': currency,\n }\n return self.do('GET', '/api/1/send_fee', req=req, auth=True)", "def pay_money(self, amount, receiver=None):\n currency = self.currency\n amount = round(amount, 2)\n if amount > currency:\n from server.utils.exceptions import PayError\n raise PayError(\"pay_money called without checking sufficient funds in character. Not enough.\")\n self.currency -= amount\n if receiver:\n receiver.currency += amount\n return True", "def withdraw_currency(self, coin, amount, wallet):\r\n\r\n url = self.url_base + 'id=' + self.user_id + '&email=' + self.email + '&password=' + self.password + \\\r\n '&manualwithdraw=' + coin + '&amount=' + str(amount) + '&wallet=' + wallet\r\n\r\n if self.debug == 1:\r\n print url\r\n\r\n try:\r\n result = requests.get(url, timeout=self.timeout)\r\n except requests.exceptions.RequestException as exception:\r\n print exception\r\n return \"ERROR\"\r\n\r\n return result.text", "def send_btc(amount, address):\n # Don't debug with httpbin while the headers are enabled in\n # execute_coinbase_http.\n #url = \"http://httpbin.org/post\"\n\n url = \"https://coinbase.com/api/v1/transactions/send_money\"\n\n body = json.dumps({\n \"transaction\": {\n \"to\": address,\n \"amount\": amount,\n },\n })\n\n response = execute_coinbase_http(url, body=body)\n content = json.loads(response.read())\n return content", "def payment_amount(self) -> Decimal:\n raise NotImplementedError", "def send_money(self):\n pass", "def transfer(request):\n amount = int(request.POST['amount'])\n currency = Currency.objects.get(id=int(request.POST['currency']))\n recipient = User.objects.get(id=int(request.POST['player']))\n\n from_account, created = Account.objects.get_or_create(user=request.user, currency=currency)\n to_account, created = Account.objects.get_or_create(user=recipient, currency=currency)\n\n if amount < 0:\n raise ValidationError('Attempted to transfer a negative amount of currency')\n else:\n with django.db.transaction.atomic():\n from_account.credit_or_debit(-amount)\n to_account.credit_or_debit(amount)\n\n return HttpResponseRedirect('/playerinfo/dashboard/')", "def transfer_wallet(self, currency, amount, wallet_from, wallet_to):\n body = {\n 'currency': currency,\n 'amount': str(amount),\n 'walletfrom': wallet_from,\n 'walletto': wallet_to,\n }\n return self.auth_req('v1/transfer', body)", "def test_post_same_currency(self):\n bob_acc = Account(owner='Bob', currency='USD', balance=1000)\n bob_acc.save()\n alice_acc = Account(owner='Alice', currency='USD', balance=800)\n alice_acc.save()\n\n response = self.client.post(reverse('payment-list'),\n data={'from_account': bob_acc.id,\n 'to_account': alice_acc.id,\n 'amount': 500}).data\n\n self.assertTrue(isinstance(response, dict))\n # check all fields of created model are present\n self.assertEqual(response['id'], 1)\n self.assertEqual(response['from_account'], bob_acc.id)\n self.assertEqual(response['to_account'], alice_acc.id)\n self.assertEqual(response['amount'], 500)\n # check that Payment model was created\n payments = Payment.objects.all()\n self.assertEqual(len(payments), 1)\n self.assertEqual(payments[0].id, 1)\n self.assertEqual(payments[0].from_account.id, bob_acc.id)\n self.assertEqual(payments[0].to_account.id, alice_acc.id)\n self.assertEqual(payments[0].amount, 500)\n # finally check that accounts were charged appropriately\n bob_acc.refresh_from_db()\n self.assertEqual(bob_acc.balance, 500)\n 
alice_acc.refresh_from_db()\n self.assertEqual(alice_acc.balance, 1300)", "def convert(self, amount):\n return self.compute(\n request.nereid_website.company.currency.id,\n amount,\n request.nereid_currency.id)", "def eth_to_usd(self, amount: Decimal) -> Decimal:\n return self.eth_rate * amount", "def pay(amount=None, tender_type=\"cash\", cash_card=\"GiftCard\"):\n def verify_journal(timeout=10):\n if refund:\n nonlocal tender_type\n tender_type = \"Refund \" + tender_type + \" \"\n start_time = time.time()\n while time.time() - start_time <= timeout:\n journal = read_transaction_journal()\n\n if [\"Cash Card Activation\"] in journal:\n if any(['Approved' in item[0] for item in journal]):\n return True\n else:\n logger.warning(\"Cash card activation was unsuccessful.\")\n return False\n\n for item in journal: # Find tender in journal\n if (tender_type.lower() == item[0].lower() and\n amount == item[1]):\n return True\n else:\n logger.warning(\"Didn't find payment in transaction journal.\")\n return False\n\n # Find out how much we need to pay\n refund = False\n balance = read_balance()\n try:\n total = balance['Balance']\n except KeyError:\n try:\n total = balance['Refund Due']\n refund = True\n except KeyError:\n logger.warning(\"There is no balance to pay out.\")\n return False\n\n if amount is None:\n amount = total\n \n # Get to tender screen\n logger.debug(\"Attempting to click the Tender/Pay button\")\n if not tender_keys.exists() and not click_function_key(\"pay\", timeout=30, verify=False):\n return False\n # Handle loyalty\n msg = read_message_box(timeout=5)\n if msg is not None and \"ID\" in msg:\n if not click_message_box_key('NO', verify=False):\n click_keypad(\"CANCEL\")\n return False\n # Select tender type\n logger.debug(\"Selecting the Tender Type\")\n while not click_tender_key(tender_type, verify=False):\n if not click_tender_key('more', verify=False):\n logger.warning(\"The tender type %s does not exist.\"\n %(str(tender_type)))\n return False\n\n # Transaction ends immediately for Imprinter (maybe other tenders?), check for this\n temp_level = logger.getEffectiveLevel()\n logger.setLevel(999)\n if verify_idle(timeout=1) and verify_journal(timeout=1):\n logger.setLevel(temp_level)\n return True\n\n # Select or enter tender amount\n if not click_tender_key(amount, timeout=1, verify=False):\n amount_to_enter = amount.replace(\"$\", \"\").replace(\".\", \"\")\n logger.setLevel(temp_level)\n for num in amount_to_enter:\n click_keypad(num, verify=False)\n click_keypad(\"ENTER\", verify=False) \n\n # Make sure we went back to idle (if not split tendering) and journal contains the paid tender\n logger.setLevel(temp_level)\n if amount < total: # Split tender case\n # TODO: Add status line verification for split tender case\n if not verify_journal():\n return False\n click_keypad(\"CANCEL\")\n return verify_idle()\n \n # Handle cash card swipe if needed\n status = read_status_line().lower()\n if \"activation\" in status or \"recharge\" in status:\n try: \n pinpad.swipe_card(brand=system.get_brand(), card_name=cash_card)\n except Exception as e:\n logger.warning(f\"Cash card swipe in pay failed. Exception: {e}\")\n click_keypad(\"CANCEL\", verify=False)\n click_message_box_key(\"YES\", verify=False)\n return False\n logger.info(\"Journal verification for cash card transactions is not implemented in pay(). Please verify success in your script.\")\n return verify_idle() # TODO: Success message varies by network. 
Find a way to verify?\n\n return (verify_idle() and verify_journal())", "def submitTransaction(self, senderAccount: CryptoAccount, coinTransfers: list,\n transactionType: str=SmartContractTransactionTypes.TYPE_P2PKH):\n senderAddress = senderAccount.getAddress()\n senderPrivateKey = senderAccount.getPrivateKey()\n senderPublicKey = senderAccount.getPublicKey()\n senderPublicKeySignature = senderAccount.getPublicKeySignature()\n\n # check that the available account balance of the sender is enough to make the transaction.\n # First calculate the total value of coin tranfers. Then get the amount available from the utxo set,\n # for the sender. Compare them.\n\n coinTransferTotalValue = 0 # the total value of the coin tranfers\n for cTransfer in coinTransfers:\n coinTransferTotalValue += cTransfer.getValue()\n\n # the available acc. balance of the senderin the utxo set\n availableAmount = self.getAccountAvailableTotal(senderAccount)\n\n if availableAmount > coinTransferTotalValue: # if the acc. balance is greater than the amount needed\n # do all the necessary actions to submit the transaction\n\n # first add some transaction inputs, from the utxo set. Go through the records of the utxo set\n # and get the unspent outputs you need to to make the transaction. Create the corresponding\n # tx inputs\n\n txInputList = list() # the transaction input list\n totalInputValue = 0 # the total value of the tx inputs\n for utxSetKey, utxoElement in self.__UTXOSet.items(): # for each unspent tx output in the utxo set\n\n # check if the tx output is spendable\n isSpendable = self.isTxOutputSpendable(utxSetKey, utxoElement, senderPrivateKey, senderPublicKey)\n\n # if the tx output is related to the specific recipient address\n # and if it can be spent (script result true)\n if utxoElement.getRecipient() == senderAddress and isSpendable:\n # add the value to the total input value\n totalInputValue += utxoElement.getValue()\n # create a tx input from the specific output and add it to the tx input list\n txInput = TransactionInput(utxoElement.getValue(), utxoElement.getRecipient(),\n self.getTransactionHashFromUTXOSetKey(utxSetKey),\n self.getTxOutputIndexFromUTXOSetKey(utxSetKey))\n # set the script for the tx input\n txInput.setScript(SmartContractScripts.getScriptSig(\n TLCUtilities.getHashSignature(\n txInput.getPreviousTransactionHash(), senderPrivateKey\n ), senderPublicKey\n ))\n txInputList.append(txInput)\n\n # when the total input value is enough, stop collecting more tx inputs from the utxo set\n if totalInputValue > coinTransferTotalValue:\n break\n\n # create the transaction\n t = Transaction(senderAddress) # initiate a new transaction\n\n # add the transaction inputs to the transaction\n t.extendTransactionInputList(txInputList)\n\n # add the transaction outputs to the transaction\n for coinTransfer in coinTransfers:\n # get the public key hash of the recipient to create the tx output script\n recipientPubKeyHash = TLCUtilities.getSHA256RIPEMDHash(coinTransfer.getRecipient().getAddress())\n\n # create the script\n script = '' # empty script, before the type is decided\n if transactionType == SmartContractTransactionTypes.TYPE_P2PKH: # standard tx output\n # select script for P2PKH transactions\n script = SmartContractScripts.getPayToPubKeyHashScript(recipientPubKeyHash)\n\n # create a new transaction output and add the script to it\n txOutput = TransactionOutput(coinTransfer.getValue(), senderAddress,\n coinTransfer.getRecipient().getAddress())\n txOutput.setScript(script)\n\n # add the output to 
the transaction\n t.addTransactionOutput(txOutput)\n\n # sign the transaction\n t.sign(senderPrivateKey)\n\n # add the transaction to the pending transaction list\n self.__pendingTransactionList.append(t)\n\n # add the sender to the accounts dictionary\n self.__addSenderAccount(senderAddress, senderPublicKey)", "def payment_input(self, account_str, payment, kind, description, meta):\n return TransferMessage(C_transfer_OK, money = payment)", "def payment_amount(self) -> Decimal:\n return self.__payment_amount", "def create_money_transfer(request_info: Dict) -> None:\n\n amount = Decimal(request_info.get(\"amount\"))\n\n with transaction.atomic():\n payer_name = request_info.get(\"payer\")\n recipient_name = request_info.get(\"recipient\")\n accounts = Account.objects.select_for_update().filter(\n Q(account=payer_name) | Q(account=recipient_name)\n )\n\n # len(accounts) < 2 when payer or recipient doesn't exist\n if len(accounts) < 2:\n raise AccountDoesNotExistError\n\n # acc_ordered_dict - creating dictionary from query to get info about account\n # with the key, where key - account_name\n acc_ordered_dict = {account.account: account for account in accounts}\n payer = acc_ordered_dict.get(payer_name)\n recipient = acc_ordered_dict.get(recipient_name)\n\n if payer.balance < amount:\n raise MoneyIsNotEnoughError\n\n payer.balance -= amount\n payer.save()\n recipient.balance += amount\n recipient.save()\n\n # creating data for making historical information about transaction\n request_info = request_info | {\n \"payer_id\": payer.id,\n \"recipient_id\": recipient.id,\n \"income_outcome\": False,\n }\n create_transfer_info(request_info)", "def request_payment(bitcoin_wallet):\n try:\n alert = util.alert(\"Your personal files have been encrypted. The service fee to decrypt your files is $100 USD worth of bitcoin (try www.coinbase.com or Google 'how to buy bitcoin'). The service fee must be tranferred to the following bitcoin wallet address: %s. The service fee must be paid within 12 hours or your files will remain encrypted permanently. Deadline: %s\" % (bitcoin_wallet, time.localtime(time.time() + 60 * 60 * 12)))\n return \"Launched a Windows Message Box with ransom payment information\"\n except Exception as e:\n return \"{} error: {}\".format(request_payment.__name__, str(e))", "def currency_exchange(currency, amount):\n if currency == 'EUR':\n amount *= 0.88\n elif currency == 'USD':\n amount *= 0.80\n elif currency == 'GBP':\n amount *= 1\n return round(amount, 2)", "def add_balance(self, org_id, amount):\n url = urljoin(self.client.base_url, 'organizations/{}/balance'.format(org_id))\n url = furl(url).add({'apikey': self.client.api_key}).url\n params = {\n 'addamount': amount\n }\n resp = requests.post(url, json=params)\n return resp.json()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate an account identifier. Calls the `POST /accounts/{account_id}/account_identifiers` endpoint and returns the generated identifier.
async def generate_account_identifier(self) -> str:
    ret = await self.client.create(self._resources("account_identifier"))
    return ret["account_identifier"]
[ "def _get_account_id(self) -> str:\n return self._post(\n DEXCOM_AUTHENTICATE_ENDPOINT,\n json={\n \"accountName\": self._username,\n \"password\": self._password,\n \"applicationId\": DEXCOM_APPLICATION_ID,\n },\n )", "def account_id(org_client, mock_event):\n org = boto3.client(\"organizations\")\n account_status_id = mock_event[\"detail\"][\"responseElements\"][\"createAccountStatus\"][\n \"id\"\n ]\n account_status = org.describe_create_account_status(\n CreateAccountRequestId=account_status_id\n )\n return account_status[\"CreateAccountStatus\"][\"AccountId\"]", "def account_id(self):\n\n return self._account_id.value", "def generate_attendee_id(self):\n n = random.randint(1, 12)\n identifier = \"\".join(random.choice(string.ascii_letters) for i in range(n))\n return identifier", "def account_id(self): # DG: renamed\n pass", "def activate_customer_accounts(customer_id, institution_id, account_id, account_num, account_name, account_type):\n\n token = partner_auth()\n\n response = requests.put(\"https://api.finicity.com/aggregation/v2/customers/\" + customer_id +\n \"/institutions/\" + institution_id + \"/accounts\",\n json={\n \"accounts\": [\n {\n \"id\": account_id,\n \"number\": account_num,\n \"name\": account_name,\n \"type\": account_type\n }]\n },\n headers={\n \"Finicity-App-Key\" : os.environ['FINICITY_APP_KEY'],\n \"Finicity-App-Token\" : token,\n \"Accept\" : \"application/json\"\n })\n # print(response.decode('utf-8'))\n # print(response.content)\n # print(response.json())\n return response.json()", "def create_account():\n return models.Account.objects.create()", "def generation_account_number():\n return random.randrange(1111111111, 9999999999)", "def create_accounts():\n app.logger.info(\"Request to create an Account\")\n check_content_type(\"application/json\")\n\n # Create the account\n account = Account()\n account.deserialize(request.get_json())\n account.create()\n\n # Create a message to return\n message = account.serialize()\n location_url = url_for(\"get_accounts\", account_id=account.id, _external=True)\n\n return make_response(\n jsonify(message), status.HTTP_201_CREATED, {\"Location\": location_url}\n )", "def generate_account(self):\r\n card_number = self.card_number_generator()\r\n pin_number = self.pin_generation()\r\n insert = \"INSERT INTO card (number, pin) VALUES (?,?)\"\r\n generated_numbers = card_number, pin_number\r\n cur.execute(insert, generated_numbers)\r\n conn.commit()\r\n\r\n print(dedent(f\"\"\"\r\n Your card has been created\r\n Your card number:\r\n {card_number}\r\n Your card PIN:\r\n {pin_number}\"\"\"))", "def get_user_account_id(self):\n return self.response_json[\"account\"][\"id\"]", "def create_unique_id():\n from uuid import uuid4\n\n return str(uuid4())", "def get_account_id(event):\n return event['account']", "def paymentcard_digit_gen():\n return uuid.uuid4().hex[:10]", "def gen_tx_id():\n fake = Faker()\n return fake.bothify(text='TXID??????????????????????????????')", "def gen_bank_account(numeric_only = False):\n num_len = random.randrange(7, 12)\n upper_range = int(math.pow(10, num_len)-1)\n account_number = random.randrange(1, upper_range)\n if numeric_only:\n first_letter_seed = 22 #the percentage of account numbers with 1-2 initial letters.\n account_number_seed = random.randrange(0, 99)\n if account_number_seed <= first_letter_seed:\n account_number = 'AB' + str(account_number)\n return str(account_number)", "async def jsonrpc_account_create(self, account_name, single_key=False, wallet_id=None):\n wallet = 
self.wallet_manager.get_wallet_or_default(wallet_id)\n account = Account.generate(\n self.ledger, wallet, account_name, {\n 'name': SingleKey.name if single_key else HierarchicalDeterministic.name\n }\n )\n wallet.save()\n if self.ledger.network.is_connected:\n await self.ledger.subscribe_account(account)\n return account", "def _createIdentifier(bits=160, _urandom=urandom):\n return urandom(bits // 8).encode(\"hex\")", "def get_account_id(self):\n return self.wepay_account_id" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }