Dataset columns:
  query      - string   (length 9 to 9.05k)
  document   - string   (length 10 to 222k)
  negatives  - sequence (19 to 20 items)
  metadata   - dict
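The rows below follow this schema. As a minimal sketch of loading and inspecting such a dataset with the Hugging Face datasets library (the repository id used here is a placeholder, not the real dataset name):

# Assumption: "org/code-retrieval-triplets" is a hypothetical repo id.
from datasets import load_dataset

ds = load_dataset("org/code-retrieval-triplets", split="train")
row = ds[0]
print(row["query"][:80])        # natural-language query / docstring
print(row["document"][:80])     # matching code snippet (the positive)
print(len(row["negatives"]))    # 19-20 non-matching code snippets (hard negatives)
print(row["metadata"])          # per-row training-objective configuration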
Reads feature vectors and labels from a file and prints information about their clustering properties. Here, we think of the space of feature vectors, and consider a vector v_i to be in cluster j if j is one of the labels for example i.
def analyze_feature_vector_clusters(features_file_path, distance=utils.L2_distance):
    feature_vectors, label_vectors = utils.read_feature_and_label_vectors(features_file_path)
    logging.info('Building clusters...')
    # Map from (integer j) --> (list of indices i such that feature_vectors[i] is in cluster j)
    # Cluster 0 indicates no disease
    indices_for_label = map_labels_to_example_indices(label_vectors)
    logging.info('...done.')
    logging.info('Computing global and within-cluster average distances')
    # Compute average distance between vectors overall
    global_average_distance = average_distance_between_vectors(feature_vectors, distance)
    logging.info('Global average ' + distance.__name__ + ' between vectors: ' + str(global_average_distance))
    # Compute average distance within each cluster
    for j, vector_indices in indices_for_label.items():
        vectors_in_cluster = [feature_vectors[index] for index in vector_indices]
        average_cluster_distance = average_distance_between_vectors(vectors_in_cluster, distance)
        logging.info('Average ' + distance.__name__ + ' between vectors in cluster ' + str(j) + ': ' + str(average_cluster_distance))
[ "def load_vectors (file_extension = None):\n \n feat_file_name = 'output/' + file_extension + '.feature'\n label_file_name = 'output/' + file_extension + '.label'\n \n prettyPrint( \"Loading feature vectors and labels from disk ... \", color.CYAN)\n if not os.path.isfile(feat_file_name) or not os.path.isfile(label_file_name):\n prettyPrint(\"Feature vector files {0} could not be found. Generating from scratch instead ...\".format(feat_file_name), color.CYAN)\n return None, None\n with open(feat_file_name, 'r') as f:\n feat_vec = pickle.load(f)\n with open(label_file_name, 'r') as f:\n labels = pickle.load(f)\n\n prettyPrint (\"Done loading feature vectors.\", color.CYAN)\n return feat_vec, labels", "def v_feature_vectors(): # v as verification\n basedir = sys_config.read('DataExpRoot')\n ifile = 'fset_kmeans_1000_labeled.csv'\n\n # [params]\n n_clusters = 1000\n n_subset = 10000 \n cluster_method = 'kmeans'\n\n fpath = os.path.join(basedir, ifile)\n df = pd.read_csv(fpath, sep=',', header=None, index_col=0, error_bad_lines=True)\n assert df.shape[1] == n_clusters # k-means repr given in t_cluster()\n\n # [output]\n df_subset = df.sample(min(n_subset, df.shape[0]))\n fpath = os.path.join(basedir, 'fset_%s_%s_labeled_subset.csv' % (cluster_method, n_clusters)) \n df_subset.to_csv(fpath, sep=',', index=True, header=False) \n print('byproduct> saving a SUBSET (size=%d) of (%s-) clustered feature set (labeled) to %s' % (df_subset.shape[0], cluster_method, fpath))\n\n return", "def cluster_miner():\n \n import os.path\n \n print(\"this is cluster_miner\")\n \n inpF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"t2t_5979A7E9CBA41503507470597.txt\"),\"rU\")\n outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"david_clusters_t2t_5979A7E9CBA41503507470597_members.txt\"),\"w\")\n \n # with open(\"bob/processed/DAVID_FAC_24hbobprots_down_18012016_clusters.txt\", \"w\") as outF:\n # with open(\"bob/processed/DAVID_FAC_24hbobprots_down_18012016.txt\", \"r\") as inpF:\n clusterFlag = False\n entryCount = 1\n headerFlag = True\n startFlag = True\n protL = []\n outF.write(\"Enrichment score\\t1st term\\t2nd term\\t3rd term\\tnumber of proteins\\n\")\n for inpLine in inpF:\n inpList = inpLine.split(\"\\t\")\n if inpList[0] == \"\\n\": \n clusterFlag = False\n continue # skip empty lines\n \n if len(inpList) > 4:\n currProtL = inpList[5].rstrip(\"\\t\").split(\",\") # count member genes here\n for currProtI in currProtL:\n if currProtI not in protL:\n protL.append(currProtI)\n \n if clusterFlag: # take out first 3 terms from each cluster\n if entryCount < 4:\n if headerFlag:\n headerFlag = False\n continue\n if \"~\" in inpList[1]: # remove go term ID, just display the name\n outF.write(inpList[1][inpList[1].index(\"~\")+1:] + \"\\t\")\n # print repr(inpList[1][inpList[1].index(\"~\")+1:])\n else:\n outF.write(inpList[1] + \"\\t\")\n # print inpList[1]\n entryCount += 1\n else:\n clusterFlag = False\n\n \n \n \n if inpList[0][:18] == \"Annotation Cluster\": # start a new cluster here\n if not startFlag: outF.write(str(len(protL)) + \"\\t\") \n protL = []\n EnrScore = inpList[1][17:].strip()\n if float(EnrScore) < 2: break\n if startFlag:\n outF.write(str(EnrScore) + \"\\t\")\n startFlag = False\n else:\n outF.write(\"\\n\" + str(EnrScore) + \"\\t\")\n # print \"\"\n # print EnrScore\n headerFlag = True\n clusterFlag = True\n entryCount = 1\n outF.write(\"\\n\") \n inpF.close()\n outF.close()\n 
print(\"cluster_miner completed\")", "def read_data(feature_file, label_file):", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def main(datafile, k, verbose, max_iter, epochs, bisecting):\n points = np.loadtxt(datafile)\n algorithm = kmeans\n if bisecting:\n algorithm = bisecting_kmeans\n clusters = algorithm(\n points=points, k=k, verbose=verbose, max_iter=max_iter, epochs=epochs)\n visualize_clusters(clusters)", "def _print_ftr_to_svm_format(self, feat_file, letter_label, ft_list):\n\t\tfeat_index = 1 # dimension index\n\t\tft_line = letter_label\n\t\tfor h in ft_list:\n\t\t\tft_line = ft_line + \" \" + str(feat_index) + \":\" + str(h)\n\t\t\tfeat_index += 1\n\t\tfeat_file.write(ft_line + \"\\n\")", "def read_svm_file(filename, num_features):\n\n data = load_svmlight_file(filename, n_features = num_features)\n data0_dense = data[0].todense()\n\n return np.array(data0_dense), data[1]", "def read_vectors_file(file):\n words_and_vectors = {}\n all_vectors = []\n\n with open(file) as v_file:\n for line in v_file:\n line = line.strip()\n\n split_result = line.split(':')\n _word = split_result[0]\n temp_vectors = split_result[1]\n\n all_vectors = temp_vectors.split(' ')\n all_vectors = [float(f) for f in all_vectors]\n\n # add current word with its vector to the dictionary\n words_and_vectors[_word] = all_vectors\n\n return words_and_vectors, len(all_vectors)", "def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list", "def svm_read_feature(data_file_name):\n\tprob_y = []\n\tprob_x = []\n\tfor line in open(data_file_name):\n\t\t#print line\n\t\tline = line.rsplit(None, 1)\n\t\t#print line\n\t\t# In case an instance with all zero features\n\t\t#if len(line) == 1: line += ['']\n\t\tfeatures, label = line \n\t\t#print features\n\t\t#print label\n\t\t#parse prob_x\n\t\txi = []\n\t\tind = 1\n\t\tfor e in 
features.split():\n\t\t\txi.append(float(e))\n\t\t\tind += 1\n\t\t#parse prob_y\n\t\tprob_y += [float(label)]\n\t\tprob_x += [xi]\t\n\treturn (prob_y, prob_x)", "def read_labeled_data(filename, class_vocab, word_vocab):\n\n labels = []\n row, col, value = [], [], []\n num_examples = 0\n num_cols = len(word_vocab)\n\n with open(filename, \"r\") as f:\n for i, line in enumerate(f):\n cols = line.strip().split('\\t')\n label, text = cols[0], cols[1]\n if label in class_vocab:\n num_examples += 1\n labels.append(class_vocab[label])\n words = re.sub(\"\\W+\", \" \", text.lower()).split()\n for w in words:\n if w in word_vocab:\n w_id = word_vocab[w]\n row.append(i)\n col.append(w_id)\n # Duplicate values at the same position\n # ij are summed\n value.append(1.0)\n\n X = csr_matrix((value, (row, col)), shape=(num_examples, num_cols))\n y = np.array(labels)\n # print(X.todense())\n # print(\"\\n \" , y)\n # print(\"\\n \", class_vocab)\n # print(\"\\n \", word_vocab)\n # print(\"\\n \", labels)\n return X, y", "def __saveToTxt(self, filename, vector):\n\n print(\"Saving file: \", filename)\n with open(filename, \"w\") as f:\n # labels = list(map(' '.join, np.eye(10, dtype=np.uint).astype(str)))\n for row in vector:\n label_str = ' '.join(row.astype(str)[0:5])\n feature_str = ' '.join(row.astype(str)[5:])\n f.write(\"|labels {} |features {}\\n\".format(label_str, feature_str))", "def print_cluster_attributes(self, objects):\n print(\"\\n\")\n print((\"ClusterName\".ljust(35),\":\",objects.ClusterName.value()))\n print((\"Repository Disk\".ljust(35),\":\", \\\n objects.RepositoryDisk.PhysicalVolume[0].VolumeName.value()))\n print(\"\\nNodes in the cluster :\\n-----------------------\")\n for Node in objects.Node.Node :\n print((\"HostName\".ljust(35),\":\",\\\n Node.HostName.value()))\n print((\"PartitionID\".ljust(35),\":\", \\\n Node.PartitionID.value()))\n print()", "def get_label_vectors():\n print(\"Retrieving label vectors...\")\n label_dict = {} # instantiate dict for labels:vectors\n categories = sorted([c for c in os.listdir('images/') if c[0] != '.']) # ignore hidden files\n x = np.zeros(len(categories)) # zero vector of number of categories\n for i, c in enumerate(categories): # get index and category for images\n y = x.copy() # use copy of x\n y[i] = 1 # set label index to true\n label_dict[c] = y.copy() # create label:vector\n\n return label_dict", "def cluster_text(list_of_text):\n print(\"Clustering text info saved the clustering.txt\")\n vectorizer = TfidfVectorizer(stop_words=\"english\")\n transform = vectorizer.fit_transform(list_of_text)\n\n true_k = 70\n\n model = MiniBatchKMeans(n_clusters=true_k, init=\"k-means++\", max_iter=100, n_init=1)\n model.fit(transform)\n clusters = {}\n for i in model.labels_:\n if not i in clusters:\n clusters[i] = 1\n else:\n clusters[i] += 1\n\n order_centroids = model.cluster_centers_.argsort()[:, ::-1]\n terms = vectorizer.get_feature_names()\n with open(\"clustering.txt\", \"w+\") as f:\n f.write(\"Top terms per cluster:\\n\")\n for i in range(true_k):\n with open(\"clustering.txt\", \"a\") as f:\n f.write(f\"Cluster {i}\\n\")\n f.write(f\"Number of tweets in this cluster: {clusters[i]}\\n\")\n term_list = []\n for ind in order_centroids[i, :10]:\n with open(\"clustering.txt\", \"a\") as f:\n f.write(terms[ind] + \"\\n\")\n term_list.append(terms[ind] + \"\\n\")\n return model.labels_", "def load_glove_vec(fname, vocab):\n word_vecs = {}\n with open(fname, \"rb\") as f:\n for i,line in enumerate(f):\n L = line.split()\n word = L[0].lower()\n 
if word in vocab:\n word_vecs[word] = np.array(L[1:], dtype='float32')\n return word_vecs", "def clusterCoexistenceMatrix(CLM, filename = None):\n pass;", "def load_glove_vectors():\n\n glove_home = '../assets/glove/'\n src_filename = os.path.join(glove_home, 'glove.6B.50d.txt')\n reader = csv.reader(open(src_filename), delimiter=' ', quoting=csv.QUOTE_NONE) \n \n word_to_index = {}\n embedding_mat = []\n\n counter = 0\n for line in reader:\n word_to_index[line[0]] = counter\n vec = np.array(list(map(float, line[1: ])))\n embedding_mat.append(vec)\n counter += 1\n\n return word_to_index, embedding_mat" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if a user's favourite restaurant is added to the DB
def test_add_to_fav_(self):
    result = self.client.post("/add_to_fav", data={"yelp_biz_id": "JA_V9TqDCrkgknqrcUndIQ",
                                                   "yelp_rest_name": "Siam",
                                                   "yelp_rating": "4",
                                                   "yelp_category": "Thai",
                                                   "yelp_price": "$$",
                                                   "yelp_image_url": "https://s3-media2.fl.yelpcdn.com/bphoto/1SkZwZrRZkQSzRMn_Trs3w/o.jpg"})
    DB_result = Restaurant_details.query.filter_by(biz_id="JA_V9TqDCrkgknqrcUndIQ").first()
    self.assertIsNotNone(DB_result)  # testing that the returned result is not NONE
    self.assertEqual(DB_result.restaurant_name, 'Siam')  # testing restaurant name is what it should be
    self.assertIn(b"Your Favourite has been saved", result.data)
[ "def test_add_favorites_add_favorite_to_favorites_of_user(self):\n p3 = Product.objects.create(barcode=\"123456\",\n product_name=\"Lait3\",\n brand=\"gandia +\",\n url_page=\"www.test.com\",\n image_url=\"www.image-test.com\",\n image_nutrition_url=\"www.nut-image.com\",\n nutrition_grade=\"A\",\n nutrition_score=1,\n category=self.cat)\n self.client.get('/favorite/1234/123456')\n fav_of_user = Favorite.objects.get_favorites_from_user(self.user1)\n expected = [\"Lait1 remplacé par Lait2\", \"Lait1 remplacé par Lait3\"]\n self.assertTrue(\n all(str(a) == b for a, b in zip(fav_of_user, expected)))", "def test_Favourite(self):\n self.assertEquals(self.fav_1.pk, 1)\n self.assertEquals(self.fav_1.date_added, '2019-12-20 09:00:00')\n self.assertEquals(self.fav_1.user.pk, 1)\n self.assertEquals(self.fav_1.product.pk, 1)", "def post(self, request):\r\n user = request.user\r\n check_user_status(user)\r\n\r\n user_id = user.id\r\n validate(instance=request.data, schema=schemas.user_fav_schema)\r\n body = request.data\r\n body['user_id'] = user_id\r\n rest_id = body['restaurant']\r\n UserFavRestrs.field_validate(body)\r\n response = UserFavRestrs.insert(user_id, rest_id)\r\n return JsonResponse(response, safe=False)", "def test_if_authenticated_user_can_add_book_to_favourite(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.get_token())\n response = self.client.post(\n reverse('user-favourites-list', kwargs={'users_username': self.user_cristy.username}),\n {'book_id': self.book.id},\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n data = json.loads(response.content)\n self.assertEqual(data['book']['id'], f'{self.book.id}')", "def test_add_favorites_redirect_user_if_favorites_already_in_dtb(self):\n response = self.client.get('/favorite/1234/12345')\n self.assertEqual(response.status_code, 302)", "def add_favorite(fav: Favorite, device_key: Optional[str] = None) -> bool:\n favorite_existence = Favorite.query(\n Favorite.model_key == fav.model_key,\n Favorite.model_type == fav.model_type,\n ancestor=fav.key.parent(),\n ).count()\n if favorite_existence == 0:\n # Favorite doesn't exist, add it\n fav.put()\n # Send updates to user's other devices\n TBANSHelper.update_favorites(fav.user_id, device_key)\n return True\n else:\n # Favorite already exists. 
Don't add it again\n return False", "def add_to_favorites(request):\n try:\n serializer = FavoriteSerializer(request.data)\n hotel = Hotel.objects.get(pk=serializer.data['hotel_id'])\n is_favorite = serializer.data['is_favorite']\n\n if is_favorite:\n hotel.user.add(request.user)\n content = {'Message': 'Hotel added to favorites'}\n else:\n hotel.user.remove(request.user)\n content = {'Message': 'Hotel removed from favorites'}\n\n return Response(content, status=status.HTTP_200_OK)\n except Exception as e:\n content = {'Error': str(e)}\n return Response(content, status=status.HTTP_404_NOT_FOUND)", "def add_favorite(user_id, type, id, date, **kwargs):", "def test_favourites_listing_for_existing_user(self):\n Favourite.objects.create(book=self.book, user=self.user_cristy)\n\n response = self.client.get(reverse('user-favourites-list',\n kwargs={'users_username': self.user_cristy.username}))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n data = json.loads(response.content)\n self.assertTrue(len(data['results']) == 1)\n self.assertEqual(data['results'][0]['book']['id'], f'{self.book.id}')", "def test_update_favor(self):\n\n with self.client as c:\n resp = c.post('/login', data={\n \"username\": \"test1\",\n \"password\": \"123456\",\n }, follow_redirects=True)\n search = c.post(\"/search\", json={\"term\": \"brandy\", \"media\": \"music\",\n \"entity\": \"song\", \"country\": \"US\", \"limit\": 10, \"lang\": \"en\"})\n data = search.json[0]\n data.pop(\"isFavor\", None)\n\n fav_item = c.post(\"/update-fav\", json=data)\n user = User.query.filter_by(username=\"test1\").first()\n self.assertEqual(fav_item.get_json(), {\n 'message': 'successfully added'})\n self.assertEqual(user.favors[0].user_id, 100)\n\n delete_fav_item = c.post(\"/update-fav\", json=data)\n user = User.query.filter_by(username=\"test1\").first()\n self.assertEqual(delete_fav_item.get_json(), {\n 'message': 'successfully deleted'})\n self.assertEqual(user.favors, [])", "def test_if_error_raised_when_user_add_to_favourite_book_that_already_is_added(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.get_token())\n self.client.post(\n reverse('user-favourites-list', kwargs={'users_username': self.user_cristy.username}),\n {'book_id': self.book.id},\n )\n response = self.client.post(\n reverse('user-favourites-list', kwargs={'users_username': self.user_cristy.username}),\n {'book_id': self.book.id},\n )\n data = json.loads(response.content)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(data['message'][0], 'You have already added this book to your favourites.')", "def add_favorite(item_id):\n # Add post ID to favorites list\n User.objects(id = session['user']['id']).update_one(add_to_set__favorites = item_id)\n flash(\"Added as favorite.\")\n return redirect(url_for('home.home'))", "async def create(self, favorite: Favorite) -> Favorite:", "def favorite(user, wine):\n\n favorite = Favorite(user=user, wine=wine)\n\n db.session.add(favorite)\n db.session.commit()\n\n # return favorite", "def test_create_restaurant(self, restaurant):\n assert restaurant is not None", "def in_favorites(user, recipe):\n return recipe.favourites.filter(user=user).exists()", "def update_favorites():\n\n check_favorite = Favorite.query.filter(Favorite.favorited_item==session[\"athlete_id\"]).first()\n route = f'/athletes/{session[\"athlete_id\"]}'\n\n if check_favorite is None:\n new_update = Favorite(id=current_user.id, favorited_item=session[\"athlete_id\"])\n 
db.session.add(new_update) \n \n else:\n db.session.delete(check_favorite)\n \n db.session.commit()\n \n return redirect(route)", "def add_favourite(recipe_id):\r\n if \"user\" in session:\r\n user = coll_users.find_one(\r\n {\"username_lower\": session[\"user\"]})[\"_id\"]\r\n coll_users.update_one(\r\n {\"_id\": ObjectId(user)},\r\n {\"$push\": {\"user_favs\": ObjectId(recipe_id)}})\r\n coll_recipes.update(\r\n {\"_id\": ObjectId(recipe_id)}, {\"$inc\": {\"favourites\": 1}})\r\n return redirect(url_for(\r\n \"recipes.recipe_detail\",\r\n recipe_id=recipe_id))\r\n else:\r\n flash(\"You must be logged in to perform that action!\")\r\n return redirect(url_for(\"users.login\"))", "def test_add_favorites_redirect_user_if_not_logged(self):\n logout(self.client)\n response = self.client.get('/favorite/1234/123456')\n self.assertEqual(response.status_code, 302)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the eol mode map
def EOLModeMap():
    # Maintenance Note: ints must be kept in sync with EDSTC_EOL_* in edstc
    return {EOL_MODE_CR: _("Old Machintosh (\\r)"),
            EOL_MODE_LF: _("Unix (\\n)"),
            EOL_MODE_CRLF: _("Windows (\\r\\n)")}
[ "def get_eol_for_open(self) -> str:\n map = {\n EOLTypes.CRLF: WINDOWS_EOL,\n EOLTypes.LF: UNIX_EOL,\n EOLTypes.NATIVE: linesep,\n }\n\n return map[self]", "def get_modes(self):\n return [i for i, j in enumerate(self._modemap._map) if j is not None]", "def getModeLookupTable(self):\n mode_table = []\n header = None\n for line in open(PublicTransit.MODE_LOOKUP_FILE_NAME):\n line = line.strip()\n if len(line) == 0: \n continue\n line = map(str.strip,line.split(\",\"))\n if header is None:\n header = line\n #CPT_AGENCYID\tAGENCYNAME\tCPT_MODE\tSCH_ROUTEDESIGNATOR\tMODECODE\tMODEGROUP\n continue\n data = {}\n for i in range(len(line)):\n data[header[i]] = line[i]\n mode_table.append(data)\n return mode_table", "def get_modes(self, code_block):\r\n # FUCK YOU INDEX ERRORS, LIST COMPS, AND EVEN YOU LAMBDAS I DON'T NEED PRETTY\r\n # 0 = pos mode\r\n # 1 = imm mode\r\n modes, mode_codes = [0, 0], list(reversed(str(code_block[0])))[2:]\r\n x = 0\r\n for mode in mode_codes:\r\n modes[x] = int(mode)\r\n x += 1\r\n print('Get modes: ')\r\n print(modes)\r\n return modes", "def __mode_modesetid(self, mode):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tix = val.index(mode)\n\t\t\tif ix is not None:\n\t\t\t\treturn key, ix", "def open_newline_mode(cls, mode):\n\n if mode == cls.N or mode == cls.Unknown:\n # universal, mapped to '\\n':\n return None\n\n if mode == cls.R:\n return '\\r'\n\n if mode == cls.RN:\n return '\\r\\n'\n\n # mixed, i.e. leave as is:\n return ''", "def levels(self):\n return {mode.name: mode.levels for mode in self.modes}", "def all_modes(self):\n\n # Find \"post-proj all modes\"\n # Jump to first value, ignoring text.\n # Move through data, adding it to a list\n # continue onto next line.\n # Repeat until the following line is known to be empty.\n\n # output.dat is the psi4 output file.\n with open('output.dat', 'r') as file:\n lines = file.readlines()\n for count, line in enumerate(lines):\n if \"post-proj all modes\" in line:\n start_of_vals = count\n break\n else:\n raise EOFError('Cannot locate modes in output.dat file.')\n\n # Barring the first (and sometimes last) line, dat file has 6 values per row.\n end_of_vals = start_of_vals + (3 * len(self.molecule.molecule['input'])) // 6\n\n structures = lines[start_of_vals][24:].replace(\"'\", \"\").split()\n structures = structures[6:]\n\n for row in range(1, end_of_vals - start_of_vals):\n # Remove double strings and weird formatting.\n structures += lines[start_of_vals + row].replace(\"'\", \"\").replace(\"]\", \"\").split()\n\n all_modes = [float(val) for val in structures]\n\n return array(all_modes)", "def _get_applicable_modes(command):\n mode_dict = {}\n _add_applicable_modes(command, mode_dict)\n return mode_dict.keys()", "def parse_session_mode_and_map(log_data):\n try:\n match = search(\n r\"<\\d{2}:\\d{2}> [^d]* Loading level \\w+\\/(\\w+), \\w+ (\\w+)\",\n log_data)\n line_map, line_mode = match.groups()\n return (line_mode, line_map)\n except Exception:\n print(\"Something is wrong with the log file!\")", "def modes(self):\n return np.hstack(tuple(self.operator.modes))", "def GetCommandMap(self):\r\n\r\n return self.__fCmdMap", "def all_modes(self):\n\n # Find \"post-proj all modes\"\n # Jump to first value, ignoring text.\n # Move through data, adding it to a list\n # continue onto next line.\n # Repeat until the following line is known to be empty.\n\n # output.dat is the psi4 output file.\n with open(\"output.dat\", \"r\") as file:\n lines = file.readlines()\n for count, line in enumerate(lines):\n if 
\"post-proj all modes\" in line:\n start_of_vals = count\n break\n else:\n raise EOFError(\"Cannot locate modes in output.dat file.\")\n\n # Barring the first (and sometimes last) line, dat file has 6 values per row.\n end_of_vals = start_of_vals + (3 * len(self.molecule.atoms)) // 6\n\n structures = lines[start_of_vals][24:].replace(\"'\", \"\").split()\n structures = structures[6:]\n\n for row in range(1, end_of_vals - start_of_vals):\n # Remove double strings and weird formatting.\n structures += (\n lines[start_of_vals + row].replace(\"'\", \"\").replace(\"]\", \"\").split()\n )\n\n all_modes = [float(val) for val in structures]\n\n return np.array(all_modes)", "def availablemodes(self):\n # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11\n self._check_npcap_requirement()\n return POWERSHELL_PROCESS.query([_WlanHelper, self.guid[1:-1], \"modes\"], crp=False, rst_t=True)[0].split(\",\")", "def __convertEOL(self):\n aw = self.activeWindow()\n aw.convertEols(aw.eolMode())", "def get_modes(self):\n modes = set()\n for er in self.exercise_recordings:\n if er.mode not in modes:\n modes.add(er.mode)\n return list(modes)", "def get_valid_modes():\n\n modes = obd.commands.modes\n non_empty_modes = [ mode for mode in modes if len( mode )]\n\n return [ [ command for command in mode if command ] for mode in non_empty_modes ]", "def available_modes_with_ids(self):\n modes = {}\n for key, mode_id in self._load_matching([MODE_NAME_TO_ID_KEY, \"*\"]):\n modes[key.split(\"/\")[-1]] = mode_id\n if not modes:\n modes = DEFAULT_MODES\n return modes", "def mode(self) -> str:\r\n return {value: key for key, value in self._PIGPIO_MODES.items()}[self.gpio.get_mode(self.pin)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loops through each page within a single PDB and sums up the stats of each page to arrive at the overall total
def analyze(directory, pdf_file, doc_type):
    total_redaction_count = 0
    total_redacted_text_area = 0
    total_estimated_text_area = 0
    total_estimated_num_words_redacted = 0
    # Split the pdb (which is a pdf file) into individual jpgs.
    redaction_module.pdf_to_jpg(directory, pdf_file)
    os.chdir(directory)
    for jpg_file in os.listdir(directory):
        # Iterating through each page of the PDB
        if jpg_file.endswith(".jpg"):
            [redaction_count, redacted_text_area, estimated_text_area, estimated_num_words_redacted,
             potential, text_potential, type1, type2, type3] = redaction_module.image_processing(jpg_file, doc_type)
            total_redaction_count += redaction_count
            total_redacted_text_area += redacted_text_area
            total_estimated_text_area += estimated_text_area
            total_estimated_num_words_redacted += estimated_num_words_redacted
            # Crucial clean-up of jpg files (Note: If files are not removed, code will NOT work properly).
            os.remove(jpg_file)
    # Now that we've gone through each page, we need to calculate the stats for the document.
    if total_estimated_text_area != 0:
        total_percent_text_redacted = float(total_redacted_text_area / total_estimated_text_area)
    else:
        total_percent_text_redacted = 0
    data = []
    # open csv file and write the stats in a single row representing the document.
    with open('output.csv', mode='a+') as output:
        output_writer = csv.writer(output, delimiter=',')
        row = [pdf_file, total_redaction_count, total_percent_text_redacted, total_estimated_num_words_redacted]
        data.append(row)
        print(tabulate(data, headers=[" ", " ", " ", " ", " "]))
        output_writer.writerow(row)
        output.close()
[ "def total_db_hits(profile):\r\n nb = 0\r\n for child in profile.children:\r\n nb += total_db_hits(child)\r\n nb += profile.db_hits\r\n return nb", "def __get_totals(self, soup):\n pnum_span = soup.select_one('.pagenum')\n if pnum_span.text != 'no results':\n range_1 = int(pnum_span.span.select_one('.rangeFrom').text)\n range_2 = int(pnum_span.span.select_one('.rangeTo').text)\n total = int(pnum_span.select_one('.totalcount').text)\n self.__total = (range_1, range_2, total)\n self.__params['s'] = range_2\n # check for limit\n self.__limit = self.__check_limit(soup)\n return self.__total", "def loop_example():\n\n totals = []\n\n for row in poke_stats:\n totals.append(sum(row))\n \n return(totals)", "def totalpages():\n counter = 0\n url = 'https://www.economicos.cl/rm/departamento?operacion=Arriendo&dormitoriosDesde=2&pagina=%d#results' % (counter)\n html = requests.get(url).text\n soup = BeautifulSoup(html, 'lxml')\n\n for each_div in soup.findAll('div', {'class': 'cont_right_ecn_pag'}):\n counter += int(each_div.text.split()[3])\n\n return counter", "def total_shares(portfolio): #8 points\n #initialize total to 0\n total = 0\n\n #iterate over portfolio and add the value in the location to total\n for i in portfolio:\n total += i[1]\n return total", "def pages_sum(self, pages_sum):\n\n self._pages_sum = pages_sum", "def IncrementStatsFrom(self, accounting):\r\n self.num_photos += accounting.num_photos\r\n self.tn_size += accounting.tn_size\r\n self.med_size += accounting.med_size\r\n self.full_size += accounting.full_size\r\n self.orig_size += accounting.orig_size", "def scrape_central(page):\n soup = BeautifulSoup(page, 'html.parser')\n table = soup.find(\"table\", {\"class\" : \"ez1\"})\n rows = table.findAll('tr')\n page = int(table.find('tr', {'class': 'black'}).span.text)\n\n data_page = []\n for row in rows[1:]:\n item = {}\n cols = row.findAll('td')\n\n if len(cols) == 38:\n item['page'] = page\n item['state'] = cols[14].text.strip()\n item['district'] = cols[17].text.strip()\n item['village'] = cols[20].text.strip()\n item['proponent'] = cols[35].text.strip()\n item['proposal_no'] = cols[4].text.strip()\n item['file_no'] = cols[7].text.strip()\n item['proposal_name'] = cols[10].text.strip()\n item['sector'] = cols[34].text.strip()\n item['date_tor_apply'] = cols[24].text.strip()\n item['date_tor_granted'] = cols[27].text.strip()\n item['date_ec_receipt'] = cols[24].text.strip()\n item['date_ec_granted'] = cols[33].text.strip()\n clearance = cols[37].findAll('img', {'src': 'images/ec.png'})\n tor = cols[37].findAll('img', {'src': 'images/tor.png'})\n pfr = cols[37].findAll('img', {'src': 'images/pfr.png'})\n forms = cols[37].findAll('img', {'src': 'images/forms.png'})\n com = cols[37].findAll('img', {'src': 'images/com.png'})\n mon = cols[37].findAll('img', {'src': 'images/mon.png'})\n add = cols[37].findAll('img', {'src': 'images/add.png'})\n item['clearance_report'] = len(clearance)\n item['tor_report'] = len(tor)\n item['pf_report'] = len(pfr)\n item['form1'] = len(forms)\n item['compliance_report'] = len(com)\n item['monitor_report'] = len(mon)\n item['additional_report'] = len(add)\n data_page.append(item)\n \n\n if len(cols) == 29:\n item['page'] = page\n item['state'] = cols[14].text.strip()\n item['district'] = cols[17].text.strip()\n item['village'] = cols[20].text.strip()\n item['proponent'] = cols[26].text.strip()\n item['proposal_no'] = cols[4].text.strip()\n item['file_no'] = cols[7].text.strip()\n item['proposal_name'] = cols[10].text.strip()\n item['sector'] = 
cols[25].text.strip()\n item['date_tor_apply'] = None\n item['date_tor_granted'] = None\n item['date_ec_receipt'] = None\n item['date_ec_granted'] = cols[24].text.strip()\n clearance = cols[28].findAll('img', {'src': 'images/ec.png'})\n tor = cols[28].findAll('img', {'src': 'images/tor.png'})\n pfr = cols[28].findAll('img', {'src': 'images/pfr.png'})\n forms = cols[28].findAll('img', {'src': 'images/forms.png'})\n com = cols[28].findAll('img', {'src': 'images/com.png'})\n mon = cols[28].findAll('img', {'src': 'images/mon.png'})\n add = cols[28].findAll('img', {'src': 'images/add.png'})\n item['clearance_report'] = len(clearance)\n item['tor_report'] = len(tor)\n item['pf_report'] = len(pfr)\n item['form1'] = len(forms)\n item['compliance_report'] = len(com)\n item['monitor_report'] = len(mon)\n item['additional_report'] = len(add)\n data_page.append(item)\n \n return data_page", "def computeDStats(genoCounts, blockSize):\n pass", "def getAllPageNumbers(self):\n\t\tfor subpage in self.subpages:\n\t\t\thtmlcontent = self.HttpHandler.getHtmlContentFromLink(subpage.link)\n\t\t\tsoupPage = BeautifulSoup(htmlcontent, \"html.parser\")\n\t\t\tsubpage.setNbrPages( self.getNbrPages(soupPage) )", "def ExamineAllEvents(self, do_print):\n total = 0.0\n for purno in self.data:\n event = self.data[purno]\n randomcountry = event.keys()[0]\n randomrow = event[randomcountry]\n total += self.GetTotal(randomrow)\n if do_print:\n print purno, randomrow[0], randomrow[2], randomrow[6]\n for country in event:\n print \" %s: %.2f%%\" % (\n country, self.GetCountryPercentage(event[country], country) * 100)\n return total", "def yield_stats(go_analysis):\n for i in xrange(go_analysis.nrow()):\n yield go_analysis[0][i], go_analysis[1][i], go_analysis[2][i], go_analysis[3][i], p_value_from_r(go_analysis[4][i]), p_value_from_r(go_analysis[5][i])", "def page_walk(self):\n try:\n f=open(\"/proc/%s/pagemap\"%self.pid)\n except:\n print \"Error reading page map for proc pid=%d\"%(self.pid)\n sys.exit(-1)\n\n for x in self.maprange:\n vma_page_len=Pte.get_pte_offset(x[1]-x[0]) #no of page entries to read\n offset=Pte.get_pte_offset(x[0]) #idx to PTE\n f.seek(offset,os.SEEK_SET)\n entries=f.read(vma_page_len)\n if len(entries) > 0:\n vals=struct.unpack('<'+'Q'*(vma_page_len/8),entries)\n vm_entries=(x[0],x[1],vals)\n self.update_page_counters(vm_entries)", "def print_numa_stats(numafiles):\n for numafile in numafiles:\n numafile.seek(0)\n node_id = int(numafile.name[numafile.name.find(\"/node/node\")+10:-9])\n ts = int(time.time())\n stats = dict(line.split() for line in numafile.read().splitlines())\n for stat, tag in (# hit: process wanted memory from this node and got it\n (\"numa_hit\", \"hit\"),\n # miss: process wanted another node and got it from\n # this one instead.\n (\"numa_miss\", \"miss\")):\n print (\"sys.numa.zoneallocs %d %s node=%d type=%s\"\n % (ts, stats[stat], node_id, tag))\n # Count this one as a separate metric because we can't sum up hit +\n # miss + foreign, this would result in double-counting of all misses.\n # See `zone_statistics' in the code of the kernel.\n # foreign: process wanted memory from this node but got it from\n # another node. 
So maybe this node is out of free pages.\n print (\"sys.numa.foreign_allocs %d %s node=%d\"\n % (ts, stats[\"numa_foreign\"], node_id))\n # When is memory allocated to a node that's local or remote to where\n # the process is running.\n for stat, tag in ((\"local_node\", \"local\"),\n (\"other_node\", \"remote\")):\n print (\"sys.numa.allocation %d %s node=%d type=%s\"\n % (ts, stats[stat], node_id, tag))\n # Pages successfully allocated with the interleave policy.\n print (\"sys.numa.interleave %d %s node=%d type=hit\"\n % (ts, stats[\"interleave_hit\"], node_id))", "def eliminating_loop_example():\n\n totals_comp = [sum(row) for row in poke_stats]\n\n return(totals_comp)", "def find_stats(self) -> None:\n for animal in self.animal_list:\n self.animal_list[animal].find_stats()\n\n self.total_emissions = sum([self.animal_list[animal].weekly_emissions \\\n for animal in self.animal_list])\n self.total_country_emissions = sum([self.animal_list[animal].country_emissions \\\n for animal in self.animal_list])\n\n self.total_emissions_comparison = self.total_emissions - \\\n self.total_country_emissions\n self.total_emissions_percentage = 100 * self.total_emissions_comparison / \\\n self.total_country_emissions", "def collectStat(self, thread):\n\t\t# update average page load time\n\t\tif self.updated_count == 0:\n\t\t\tself.average_time = thread.load_time\n\t\telse:\n\t\t\tself.average_time = (self.average_time * self.updated_count + thread.load_time) / (self.updated_count + 1)\n\t\t# update stitistics by HTTP code\n\t\tif thread.code not in self.code_statistics:\n\t\t\tself.code_statistics[thread.code] = 1 \n\t\telse:\n\t\t\tself.code_statistics[thread.code] += 1\n\t\t# update count of processed pages\n\t\tself.updated_count += 1", "def total(self):\n return self._evaluate()['hits']['total']", "def summarize_process(mongo_string_search):\n result = getPublications(mongo_string_search)\n name_entities = []\n info_extr = []\n ts = TextSummarize()\n paragraphs_summarize = []\n chapter_summarize = []\n full_text_summarize = []\n dblkey = \"\"\n papers_summary = []\n mydict = {\n \"dblpkey\" : \"\",\n \"abstract_sum\":[],\n \"fulltext_sum\":[],\n \"chapter_sum\":[],\n \"paragraph_sum\":[]\n }\n for r in result:\n mydict['dblpkey'] = r['dblpkey']\n if \"content\" in r and \"abstract\" in r['content'] and \"abstract\" in to_sum:\n mydict['abstract_sum']= ts.summarize(r['content']['abstract'],10)\n if \"content\" in r and \"fulltext\" in r['content'] and \"fulltext\" in to_sum:\n full_text_summarize= ts.summarize(r['content']['fulltext'],50)\n mydict['fulltext_sum'] = full_text_summarize\n if \"content\" in r and \"chapters\" in r['content'] and \"chapters\" in to_sum:\n for chapter in r['content']['chapters']:\n str_chapter=\"\"\n for paragraph in chapter['paragraphs']:\n str_chapter += paragraph\n if \"paragraphs\" in to_sum:\n if len(word_tokenize(paragraph)) > 1:\n paragraphs_summarize.append(ts.summarize(str(paragraph),5))\n chapter_summarize.append(ts.summarize(str_chapter,10))\n mydict[\"chapter_sum\"] = chapter_summarize\n mydict[\"paragraph_sum\"] = paragraphs_summarize\n # here i use the copy() because otherwise we just copy the reference\n papers_summary.append(mydict.copy())\n paragraphs_summarize=[]\n chapter_summarize=[]\n full_text_summarize=[]\n\n return papers_summary" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new text input instance. colorNames is a sequence of strings (each color must start with a different letter).
def __init__(self, colorNames):
    self._lengthOfPattern = 0   # will later be queried from the user
    self._palette = ''          # initials for color choices, e.g., R for red
    for color in colorNames:
        self._palette += color[0].upper()
[ "def mkColor(self, name):\n known_attrs = [ 'font-family', 'font-style', 'font-weight', 'font-size', 'text-decoration', 'color', 'background-color' ]\n stack = []\n color = Color(name)\n for token in self.tokenizer:\n if token.text == \";\":\n stack[0].assert_symbol_name\n if stack[0].text not in known_attrs: raise Exception(\"%d:%d: Unknown color attribute %s\" % (stack[0].line, stack[0].col, stack[0].text))\n stack[1].must_be(\":\")\n stack[2].must_match(\"^\\w\", \"%d:%d: Expected a color attribute value instead of %s\" % (stack[2].line, stack[2].col, stack[2].text))\n color.attrs[stack[0].text] = stack[2].text\n stack = []\n elif token.text == \"}\":\n return color\n else:\n stack += [token]\n raise Exception(\"%d:%d: End-of-file reached while scanning color %s defined here.\" % (name.line, name.col, name.text))", "def from_name(cls, colorname):\n return cls(*cls.color_names[colorname])", "def test_color__name_str_arg(self):\n for name in (\"aquamarine3\", \"AQUAMARINE3\", \"AqUAmArIne3\"):\n color = pygame.Color(name)\n\n self.assertEqual(color.r, 102)\n self.assertEqual(color.g, 205)\n self.assertEqual(color.b, 170)\n self.assertEqual(color.a, 255)", "def _createNamedColors( self ):\n self._colors = vtkNamedColors()\n self._colors.SetColor( \"Background\", (0.1000, 0.1000, 0.2000, 1.0000) )", "def input_text_color(self, input_text_color):\n if input_text_color is not None and not re.search(r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', input_text_color): # noqa: E501\n raise ValueError(r\"Invalid value for `input_text_color`, must be a follow pattern or equal to `/^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$/`\") # noqa: E501\n\n self._input_text_color = input_text_color", "def edit_colors_names_group(colors, names):\r\n colors[76] = np.array([6, 230, 230], dtype = np.uint8)\r\n names[77] = 'objects'\r\n for i in [1,9,15,19,33,43,44,145,11,9,101]:\r\n names[i] = 'wall'\r\n for i in [4,7,14,30,53,55,12, 54]:\r\n names[i] = 'floor'\r\n for i in [5,18]:\r\n names[i] = 'plant'\r\n for i in [8,14,16,20,25,34,32,76]:\r\n names[i] = 'furniture'\r\n return colors, names", "def from_name(cls, name):\n colors = {\n \"WHITE\": (255, 255, 255),\n \"BLACK\": (0, 0, 0),\n \"GRAY\": (128, 128, 128),\n \"RED\": (255, 0, 0),\n \"GREEN\": (0, 255, 0),\n \"BLUE\": (0, 0, 255),\n \"FUCHSIA\": (255, 0, 255),\n \"YELLOW\": (255, 255, 0),\n \"CYAN\": (0, 255, 255),\n \"LIME\": (0, 128, 0),\n \"BROWN\": (128, 0, 0),\n \"NAVY_BLUE\": (0, 0, 128),\n \"OLIVE\": (128, 128, 0),\n \"PURPLE\": (128, 0, 128),\n \"TEAL\": (0, 128, 128),\n \"SILVER\": (192, 192, 192),\n \"ORANGE\": (255, 128, 0)\n }\n return Color.from_rgb(*colors[name.upper()])", "def test_is_valid_color_name(self):\n self.assertTrue(is_valid_color_name('black'))\n self.assertTrue(is_valid_color_name('red'))\n self.assertFalse(is_valid_color_name('#aabb11'))\n self.assertFalse(is_valid_color_name('bl(ack'))", "def get_text_color_input(self):\n return \"red\"", "def from_name (name_str):\n if name_str in colour_names:\n return Colour(*colour_names[name_str])\n raise KeyError(\"'%s' is not a recognized colour name\"%name_str)", "def create(data):\n \n # init color\n color = Color(\n color_id = data.get('id'),\n name = data['name'],\n rgb = data['rgb'],\n is_trans = data['is_trans'])\n \n # get external names and IDs\n if 'external_ids' in data:\n for name, value in data['external_ids'].items():\n color.external_names[name] = [n for l in value['ext_descrs'] for n in l]\n color.external_ids[name] = value['ext_ids']\n \n return color", "def create_color(cls, text_color: 
int, background_color: int) -> int:\n global _COLOR_COUNTER\n unicurses.init_pair(_COLOR_COUNTER, text_color, background_color)\n color = unicurses.color_pair(_COLOR_COUNTER)\n _COLOR_COUNTER += 1\n return color", "def create_colored_text(text: str, color: Colors) -> str:\n return create_styled_text(text, color)", "def create_player():\n\n choose_colors_text = \"\"\"\n 1. red\n 2. green\n 3. yellow\\n\"\"\"\n character_colors = {'1': '\\033[31m', '2': '\\033[32m', '3': '\\033[33m'}\n os.system('clear')\n print('Character creation screen.')\n character_name = input(\"Choose your character's name: \")\n chosen_character_color = ''\n while chosen_character_color not in ['1', '2', '3']:\n print(\"Choose your character's color [1, 2 or 3].\")\n chosen_character_color = input(choose_colors_text)\n character_color = character_colors[chosen_character_color]\n return character_name, character_color", "def _rgb_txt_line(string):\n regexp = re.compile(\n r\"([ 0-9][ 0-9][ 0-9])\\s+([ 0-9][ 0-9][ 0-9])\\s+([ 0-9][ 0-9][ 0-9])\"\n r\"\\s+([a-zA-Z0-9 ]+)\\s*\"\n )\n match = regexp.match(string)\n if not match:\n return \"\", (-1, -1, -1)\n red, green, blue, name = match.groups()\n return name.strip(), (int(red), int(green), int(blue))", "def color(self, text, **kwargs):\n return text", "def make_text_objs(text, font, color):\n surf = font.render(text, True, color)\n return surf, surf.get_rect()", "def colorString(rgbColor: unicode, text: unicode) -> unicode:\n ...", "def _makeColor(self, renderer, name, space, color):\n # assemble the arguments\n args = (renderer.literal(value) for value in color)\n # build and return the expression\n return renderer.set(name=name, value=renderer.call(func=space, args=args))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Robustly prompt the user for an integer from small to large.
def _readInt(self, prompt, small, large):
    prompt = prompt + ' (from ' + str(small) + ' to ' + str(large) + ')? '
    answer = small - 1          # intentionally invalid
    while not small <= answer <= large:
        try:
            answer = int(raw_input(prompt))
            if not small <= answer <= large:
                print 'Integer must be from ' + str(small) + ' to ' + str(large) + '.'
        except ValueError:
            print 'That is not a valid integer.'
    return answer
[ "def enforceInt(prompt, minValue = None, maxValue = None):\n testInput = input(prompt)\n try:\n testInput = int(testInput)\n return sizeCheck(testInput, minValue, maxValue)\n except:\n return f'Input \"{testInput}\" cannot be converted into an integer'", "def prompt_int(prompt):\n while True:\n try:\n return int(input(prompt))\n except ValueError as e:\n print('Provide an integer')", "def integer_input( min_value=0, max_value=999, default=0, \n prompt=\"please type number and press ENTER\"):\n while True:\n raw = input(prompt)\n if not raw.isdigit():\n print(\"please enter a number\")\n continue\n raw = int(raw)\n if min_value <= raw <= max_value:\n return raw\n print(\"please enter value between {} and {}\".format(min_value,\n max_value))", "def get_integer(prompt: str, error_prompt: str, limits_prompt: str, min_num: int = -float('inf'),\n max_num: int = float('inf')) -> int:\n while True:\n try:\n integer = int(input(prompt))\n if max_num >= integer >= min_num:\n return integer\n print(limits_prompt)\n except ValueError:\n print(error_prompt)", "def get_int(prompt, min):\n choice = input(prompt)\n while not choice.isnumeric() or int(choice) < min:\n print('Please enter an integer greater than or equal to', min)\n choice = input(prompt)\n return int(choice)", "def prompt() -> int:\n msg: str = \"How many ranges to generate - (default={}) \\n\".format(\n str(DEFAULT_RANGES)\n )\n while True:\n try:\n return int(input(msg) or DEFAULT_RANGES)\n except ValueError:\n raise ValueError(\"I only accept decimal numbers\")", "def ask_number(question, min, max):\n answer = None\n while answer not in range(min, max):\n answer = int(input(question))\n return answer", "def force_number(user_input):\n while True:\n try:\n # Checks if value is an interger.\n number = int(input(user_input))\n break\n except ValueError:\n print(\"Please enter a valid number\")\n return number", "def get_int(question):\n while True:\n try:\n answer = raw_input(question + '\\n>>>')\n except NameError:\n answer = input(question + '\\n>>>')\n if answer == 'q':\n raise SystemExit\n try:\n output = int(answer)\n return output\n except ValueError:\n print('must be int OR \"q\" to quit')\n continue", "def get_int_input():\n in_ = raw_input()\n return int(float(in_))", "def ask_number (question,low,high):\n response = None\n while response not in range(low,high):\n response = int(input(question))\n return response", "def get_integer_entry(prompt=\"0\", text=\"Input integer value\"):\n while True:\n data = input(\"{} [{}]:\".format(text, prompt))\n if data == \"\":\n data = prompt\n try:\n return abs(int(data))\n except ValueError as e:\n if debug: print(\"Value Error: {}\".format(e))\n continue", "def ask_num(message, min, max):\n while True:\n try:\n number = int(input(message))\n except:\n print(\"that was not a number\")\n continue\n if max >= number >= min:\n break\n return number", "def get_integer(prompt):\n while True:\n temp = input(prompt)\n if temp.isnumeric():\n return int(temp)\n #else:\n print(\"{} is not a valid number\".format(temp))", "def ask_number(question, low, high):\n response = None\n while response not in range(low, high):\n response = int(input(question))\n return response", "def get_int_input_constrained(prompt, value_min, value_max, value_default):\n\n input_value = 0\n while input_value < 1:\n txt = input(prompt)\n try:\n input_value = min(max(int(txt), value_min), value_max)\n except ValueError:\n input_value = value_default\n\n return (True, input_value)", "def get_integer_value(prompt, low, 
high):\n\n while True:\n try:\n value = int(input(prompt))\n except ValueError:\n print(\"Input was not a valid integer value.\")\n continue\n if value < low or value > high:\n print(\"Input was not inside the bounds (value <= {0} or value >= {1}).\".format(low, high))\n else:\n break\n return value", "def _int_input_in_range(self, print_out, range_):\n try:\n i = int(input(print_out))\n assert range_[0] <= i <= range_[1]\n return i\n except AssertionError:\n print('Please, enter a vaild number')\n return None\n except ValueError:\n print('Please, enter a number not a string')\n return None", "def specify_int_in_range(min, max, message=\"Your choice \", error=\"\"):\n temp_message = message\n temp_message += \"\\nYour choice\"\n if len(error) > 0:\n temp_message += \" ('\" + str(error) + \"' to exit)\"\n\n temp_message += Fore.WHITE + \": \" + Fore.WHITE\n\n while True:\n search_res = input(temp_message)\n if search_res == error:\n return -1\n temp_message = message + \"\\n\"\n if RepresentsInt(search_res):\n if int(search_res) >= min and int(search_res) <= max:\n return int(search_res)\n temp_message += Fore.LIGHTRED_EX + \"Value out of range. Must be between \" + str(min) + \" and \" + str(max) + \". Try again.\\n\" + Fore.WHITE\n else:\n temp_message += Fore.LIGHTRED_EX + \"Must be integer. Try again.\\n\" + Fore.WHITE\n\n temp_message += \"\\nYour choice\"\n if len(error) > 0:\n temp_message += \" ('\" + str(error) + \"' to exit)\"\n\n temp_message += \": \"\n clear_terminal()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ask the user how many pegs are in the secret pattern.
def queryLengthOfPattern(self):
    self._lengthOfPattern = \
        self._readInt('How many pegs are in the secret', 1, 10)
    return self._lengthOfPattern
[ "def guessing(guess, count):", "def numplayers_ask(context):\n response = 0\n while response == 0:\n response = int(raw_input(\"How many players? (2-6):\"))\n if response in [2,3,4,5,6]:\n return response\n else: \n print \"Bad Input\"\n response = 0", "def guessTheSecret():\n\tguess = int(input('Guess the number > '))\n\tglobal attempts\n\tcheck = False\n\twhile guess != secret_num:\n\t\tif guess < secret_num:\n\t\t\tprint('Your guess is too low')\n\t\telif guess > secret_num:\n\t\t\tprint('You guess to too high')\n\t\tguess = int(input('Guess again > '))\n\t\tattempts += 1\n\t\tif attempts >= 4:\n\t\t\tbreak\n\tif guess == secret_num:\n\t\treturn True", "def password_hint(self):\n user_forgot = input(\"Forgot password\\nEnter 1 to access your password hint question.\\nEnter 2 to try again\")\n attempts= 0\n if \"1\" in user_forgot:\n while attempts < 3:\n hint = input(\"hintAccessAns\")\n #return self.hints\n if hint in self.hints:\n print (\"hint matches\") \n break\n else:\n attempts += 1\n print (f\"try again, {3 - attempts} attempts remaining\")\n print(\"Attempt exceeded\")", "def user_avoid_count():\n\tforbidden = input('Enter a string of forbidden letters.\\n> ')\n\tprint(len({w for w in word_set if avoids(w, forbidden)}))", "def userguess(secret) :\n guess = int(input(\"Your guess\"))\n while guess != secret :\n guess = int(input(\"Your guess?\"))", "def nuggets(number):\r\n try:\r\n limit = int(number)\r\n assert type(limit) == int, \"input value is not an integer\"\r\n assert limit>=0, \"input value is negative\"\r\n\r\n for c in range(int(limit/20)+1):\r\n for b in range(int(limit/9)+1):\r\n for a in range(int(limit/6)+1):\r\n n=6*a+9*b+20*c\r\n if n==limit:\r\n return \"6 packs = %s, 9 packs = %s, 20 packs = %s\" %\\\r\n (str(a),str(b),str(c))\r\n return \"No soltuion for %s nuggets.\" % str(limit)\r\n except:\r\n return \"An error occured, make sure you enter an Integer.\"", "def ask_user():\r\n password_lenght = 0\r\n while password_lenght == 0:\r\n try:\r\n password_lenght = int(input(\"How long password you want? Enter the number... \"))\r\n if password_lenght <= 0:\r\n print(\"Try to enter any number greater than 0...\")\r\n continue\r\n return password_lenght\r\n except Exception:\r\n continue", "def totalpossibilities(disks, pegs):\n bruteforce(disks, pegs)\n return len(success_instances)", "def pwd_len():\r\n while True:\r\n password_length = input('How much length for password u want ? Minimum length is 6 and Maximum length is 25 : ')\r\n try:\r\n password_length = int(password_length)\r\n if 6 <= password_length <= 25:\r\n break\r\n else:\r\n print('{} is not in range'.format(password_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(password_length))\r\n return password_length", "def symbol_len(password_length):\r\n while True:\r\n symb_length = input('How much symbols you want in password? At least 1 : ')\r\n try:\r\n symb_length = int(symb_length)\r\n if 1 <= symb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(symb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(symb_length))\r\n return symb_length", "def determine_attempts():\r\n #Inputs: # of attempts requested by user\r\n #Outputs: game gives number of attempts user selected before ending \r\n how_many_tries = int(raw_input(\"How many attempts do you want to answer a blank correctly before the answer is provided to you? 
Please provide a number, such as 2.\\n\"))\r\n attempts = how_many_tries\r\n number_of_tries = 5\r\n while how_many_tries < 1:\r\n print \"Please try again.\"\r\n determine_attempts\r\n attempts = attempts + 1\r\n if attempts == number_of_tries:\r\n break \r\n else:\r\n print \"Please read the paragraph below and provide the answers to fill in the numbered blanks.\\nYou will be given \" + str(attempts) + \" chances to enter the correct answer before it is provided to you.\\n\"\r\n return how_many_tries", "def countGuesses( hidden ):\n guess = random.choice( range(0,100) ) # 0 to 99, inclusive \n numguesses = 1 # we just made one guess, above\n while guess != hidden:\n guess = random.choice( range(0,100) ) # guess again!\n numguesses += 1 # add one to our number of guesses\n return numguesses", "def prompt() -> int:\n msg: str = \"How many ranges to generate - (default={}) \\n\".format(\n str(DEFAULT_RANGES)\n )\n while True:\n try:\n return int(input(msg) or DEFAULT_RANGES)\n except ValueError:\n raise ValueError(\"I only accept decimal numbers\")", "def guesses():\n tries = 3\n print (\" You may choose your maximum number of tries per question.\"\n \"The default is 3.\")\n player_prompt = \" Please type in your preferred number: \"\n while tries > 0:\n user_choice = raw_input(player_prompt)\n if user_choice.isdigit():\n print \"\\n OK, {} {} allowed per blank. Here we go!\\n\".format(user_choice, how_many(user_choice))\n return int(user_choice)\n tries -= 1\n player_prompt = (\" Silly, that's not a valid number of guesses! {} more {}. \\n\"\n \" Try again: \").format(tries, how_many(tries))\n if tries == 0:\n print \" You defaulted your number of guesses, so 3 it is!\"\n return 3", "def handleUserInputWordLength():\n length = int(input(\"How many letters in the word you'll guess: \"))\n return length", "def countGuesses(hidden):\r\n guess = random.choice(range(0, 100)) # 0 to 99, inclusive\r\n numguesses = 1 # we just made one guess, above\r\n while guess != hidden:\r\n guess = random.choice(range(0, 100)) # guess again!\r\n numguesses += 1 # add one to our number of guesses\r\n return numguesses", "async def numguess(self, ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.numguess', extra={'invoker': ctx.message.author.name})\r\n guess = None\r\n limDn = 0\r\n limUp = 100\r\n tries = 7\r\n secret = random.randint(1, 100)\r\n await ctx.send(\"\"\"Arr! I'm the Dread Pirate Roberts, and I have a secret!\r\nIt's a number from {} to {}. I'll give you {} tries.\r\nSend a number to guess it.\"\"\".format(limDn, limUp, tries))\r\n while guess != secret and tries > 0:\r\n await ctx.send(\"What's yer guess, matey?\")\r\n result = ''\r\n guess = await ctx.bot.wait_for('message',\r\n check=lambda m: m.channel == ctx.channel and re.match('[0-9]+', m.content))\r\n guess = int(guess.content)\r\n if guess == secret:\r\n break\r\n elif guess < limDn or guess > limUp:\r\n result += \"Out of range, ye swab!\\n\"\r\n elif guess < secret:\r\n result += \"Too low, ye scurvy dog!\\n\"\r\n limDn = guess\r\n elif guess > secret:\r\n result += \"Too high, landlubber!\\n\"\r\n limUp = guess\r\n tries -= 1\r\n result += \"Yer range is {} to {}; ye have {} tries left.\".format(limDn, limUp, tries)\r\n await ctx.send(result)\r\n if guess == secret:\r\n await ctx.send(\"Avast! Ye got it! Found my secret, ye did! With {} tries left!\".format(tries))\r\n else:\r\n await ctx.send(\"No more tries, matey! Better luck next time! 
The secret number was {}.\".format(secret))", "def user_pick(self):\n player_taking = True\n while player_taking:\n play_take = int(input(\"How many dots would you like to remove?(1-4)\"))\n if not 1 <= play_take <= 4:\n print(\"You may only take between 1 and 4 balls\")\n else:\n player_taking = False\n return play_take" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a guess from the user and return it as a Pattern instance.
def enterGuess(self):
    validPattern = False
    while not validPattern:
        print   # intentional blank line
        prompt = 'Enter a guess (colors are '
        prompt += self._palette[:self._numColorsInUse] + '): '
        patternString = raw_input(prompt)
        validPattern = True
        if len(patternString) != self._lengthOfPattern:
            print 'The pattern must have', self._lengthOfPattern, 'pegs'
            validPattern = False
        else:
            for i in range(self._lengthOfPattern):
                if patternString[i].upper() not in self._palette[:self._numColorsInUse]:
                    validPattern = False
            if not validPattern:
                print 'The color options are', self._palette[:self._numColorsInUse]
        if validPattern:
            pattern = Pattern(self._lengthOfPattern)
            for i in range(self._lengthOfPattern):
                pattern.setPegColor(i, self._palette.index(patternString[i].upper()))
    return pattern
[ "def get_guess(self):\n return self._guess", "def get_guess(self):\n new_guess = \"\"\n try:\n new_guess = input(\"Enter a letter: \").lower()\n if len(new_guess) > 1:\n new_guess = \"INVALID\"\n raise ValueError(\"The guess you entered was too long. Make sure that it is only one character\")\n elif len(new_guess) < 1:\n new_guess = \"INVALID\"\n raise ValueError(\"The guess you entered was too short. Make sure that it is only one character\")\n elif ord(new_guess) < 97 or ord(new_guess) > 122:\n new_guess = \"INVALID\"\n raise ValueError(\"Your input was deemed invalid! Please make sure input is a character a-z\")\n elif new_guess in self.guesses:\n print(f\"You already guessed the letter {new_guess}, try again\")\n new_guess = \"INVALID\"\n except ValueError as err:\n print(err)\n return new_guess", "def pattern_factory(self):\n\t\treturn self.args[1]", "def guess(self):\n\t\t\n\t\tpeg_guess_color_list = []\n\t\tguess_input = self.view.input_guess()\n\n\t\t# Convert guess_input into a list- each color being a string\n\t\tguess_color_list = re.split(\",\", guess_input)\n\t\t\n\n\t\tfor each_color in guess_color_list:\n\n\t\t\t#associate each string with a peg object\n\t\t\tpeg_guess = ColorPeg(each_color)\n\t\t\t\n\t\t\t# Append the peg_guess color list to make a list of peg guess objects\n\t\t\tpeg_guess_color_list.append(peg_guess)\n\n\t\t\t# Plug our peg objects into our guess object\n\t\t\tuser_guess = Guess(peg_guess_color_list)\n\n\t\t\t# Store guess object in our MasterModel\n\t\t\tself.model.guesses[self.model.status] = user_guess\n\n\t\t\t# Make a variable that\n\n\n\t\t# ### TESTS ###\n\t\t# print (\"This is each color: \", each_color)\n\t\t# print (\"print guess input again: \", guess_input)\n\t\t# print(\"prints each peg color for guess: \", peg_guess)\n\t\t# print(\"Prints the list of color guesses: \", peg_guess_color_list)\n\t\t# for peg_guess in peg_guess_color_list:\n\t\t# \tprint(\"Prints the list of guess pegs: \", peg_guess.peg_color)\n\n\t\t# print(\"Prints out the first list of guesses. Key = Guess 1\", self.model.guesses[\"Guess 1\"])", "def get_guess():\n print('Choose a letter:')\n return input()", "def get_pattern(self) -> \"Pattern\":\n return self.__pattern", "def guessing(guess, count):", "def get_guess(self):\n\n guess_distribution = self.get_distribution('guess')\n return guess_distribution.get_value()", "def get_atom_guess(self):\r\n return self._player.get_atom_guesses()", "def get_inputs(self):\n self.board.guy()\n self.board.parachute()\n choice = input(\"Guess a letter a-z \")\n self.board.check_guess(choice)", "def getPattern(self, text, pattern, default=None):\n \n matches = self.getPatterns(text, pattern)\n if matches:\n return matches[0]\n else:\n return default", "def parse_pattern(pattern, fallback=FnmatchPattern, recurse_dir=True):\n if len(pattern) > 2 and pattern[2] == \":\" and pattern[:2].isalnum():\n (style, pattern) = (pattern[:2], pattern[3:])\n cls = get_pattern_class(style)\n else:\n cls = fallback\n return cls(pattern, recurse_dir)", "def simple_guess(title, abstract):\n\n ## importation\n import re\n\n ## parameters\n match_item = False\n\n ## preprocess text\n text = str(title)+\". \"+str(abstract)\n text = text.replace(\".. \", \". 
\")\n text = text.lower()\n\n ## hunt random\n if(re.search('random', text)):\n match_item = True\n\n ## return match status\n return match_item", "def checkUsrGuess():\r\n if pattern == guesses:\r\n pass\r\n else:\r\n tooBad()", "def guess_word(self):\r\n guess = input(\"# Guess the Word :\")\r\n if not guess:\r\n print(\"Please enter a valid word.\")\r\n else:\r\n if game_instance.check_word(guess):\r\n print(\"Correct! You did it Champ!\")\r\n game_instance.calculate_score(self.frequency)\r\n self.instances.append(game_instance)\r\n obj.create_new_game()\r\n else:\r\n print(\"Wrong Guess. Try Again!\")", "def get_guess_char(guess, turns):\n while True:\n print('=============================')\n print(f'The word looks like: {guess}')\n print(f'You have {turns} guesses left')\n guess_char = str(input('Your guess:')).upper()\n if guess_char.encode().isalpha() and len(guess_char) == 1:\n return guess_char\n else:\n print('!! Invalid format !!')", "def user_guess():\n return list(input(\"What is your guess?\"))", "def get_user_input(self):\r\n try:\r\n user_input = input('Guess a letter: ')\r\n print('\\n')\r\n if user_input.lower() in self.already_guessed:\r\n raise ValueError(YELLOW + 'You already guessed '\r\n f'{user_input.lower()}.\\n' + END)\r\n if len(user_input) == 0:\r\n raise ValueError(YELLOW + 'You didn\\'t enter a letter. '\r\n 'Please enter a letter between A-Z\\n' + END)\r\n if not user_input.isalpha():\r\n raise ValueError(YELLOW + 'You entered a number. '\r\n 'Please enter a letter between A-Z.\\n' + END)\r\n if len(user_input) > 1:\r\n raise ValueError(YELLOW + 'Please enter one letter.\\n' + END)\r\n except ValueError as error:\r\n print(error)\r\n self.get_user_input()\r\n else:\r\n if len(self.already_guessed) > 0: # prints previous guesses\r\n self.print_previous_guesses()\r\n if user_input.lower() in [letter.original.lower() for letter in\r\n self.active_phrase if letter != ' ']:\r\n for letter in self.active_phrase:\r\n if letter != ' ':\r\n letter.compare_guess(user_input) # checks guess\r\n self.active_phrase.print_phrase()\r\n else:\r\n self.lives -= 1\r\n print(f'You have {self.lives} out of 5 lives remaining!\\n')\r\n if user_input.lower() not in self.already_guessed:\r\n self.already_guessed.append(user_input.lower())\r\n self.active_phrase.print_phrase()", "def show(rndCounter):\r\n guesses.clear()\r\n pattern.clear()\r\n for i in range(rndCounter):\r\n rndmchce = random.choice(shapes1)\r\n pattern.append(rndmchce)\r\n for f in pattern:\r\n f()\r\n usrInput()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Restarts the timer and closes any existing progress bar.
def restart(self):
    self.done()
    self.counter = 0
    self.start_time = time.time()
[ "def restart_timer(self):\n self.log.info(\"{} timer restarted ({} seconds)\".format(self.name, self.interval))\n self.count = self.interval / self.sleep_chunk\n if not self.defer and self.interval > 0:\n self._callback()\n if self.start_event.is_set():\n self.reset_event.set()\n else:\n self.start_event.set()", "def stop_timer(self):\r\n self.countdownTimer.stop()", "def restart(self):\n self.kill()\n self.start()", "def close_progress(self):\n\t\tsys.stdout.write(\"|\\n\")\t\t# Ending the progression bar\n\t\tself.i_progress = 0\t\t\t# Resets the progress advancement", "def stop_timer(self):\r\n self.countdown_timer_time = 0\r\n self.end_time = 0\r\n self.total_lbl.set(\"0\")", "def restart(self):\r\n self._safe_close()\r\n self._stopped.clear()\r\n self.reconnect()", "def reset_timer(self):\r\n \r\n self.time = 0 \r\n self.clock = pg.time.Clock()", "def restart():\n pass", "def restart(self):\n # time recorders\n self._t_start = time.time()\n self._t_last = self._t_start\n self._t_current = self._t_start\n # progress recorders\n self._p_start = 0\n self._p_last = self._p_start\n self._p_current = self._p_start", "def stop_timer(self):\n self.timer.stop()", "def close(self) -> None:\n\n if not self.simple_tui:\n self.rich_progress_bar.stop()\n\n logging.shutdown()", "def reset_timer(self):\n self.d_time_count.fill(0)", "def reset_timer():\r\n window.after_cancel(timer)\r\n canvas.itemconfig(timer_text, text=f\"00:00\")\r\n pomodoro_title.config(text=\"Timer\", fg=GREEN)\r\n check_marks.config(text=\"\")", "def reset_stop_timer(self) -> None: \r\n self.stop_timer = 0", "def untie(self):\n self.timer_label = None", "def restart_process(self):\n self.deactivate_process()\n self.restart = True\n self.progress.emit(0)\n LOG.info('resume\\'s process restart activate')", "def reset():\n global counter, total_attempts, successful_stops\n timer.stop()\n counter = 0\n total_attempts = 0\n successful_stops = 0", "def stop_timer(self):\n self.end_time = datetime.now()", "def restart(self, delay=None):\n if self._timer:\n self._timer.cancel()\n if not delay:\n delay = self.delay\n self._timer = Timer(delay, self.callback)\n self._timer.daemon = True\n self._timer.start()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Advances the progress bar. If visible, shows progress, otherwise updates in the background. If the time threshold has passed and the progress bar should appear, this method creates it.
def next(self):
    if self.skip:
        return
    self.counter += 1
    if self.pbar is None and (time.time() - self.start_time) > self.threshold:
        self.pbar = tqdm(total=self.n, desc=self.title, initial=self.counter)
    elif self.pbar is not None:
        self.pbar.update(1)
[ "def updateProgressBar(self):\n while not self.abortProgressBar:\n time.sleep(0.05)\n cur = self.progressTracker\n if cur == None or cur.startTime == None:\n continue\n remaining = self.getEstimatedTime(cur)-(datetime.now()-cur.startTime)\n percent = int(min(max(10000-10000*remaining/self.totalTime,0),10000))\n remaining = timedelta(seconds = int(remaining.seconds))\n self.progressCallback.emit(f\"{remaining} remaining, {cur.wavelength}nm: {cur.integrationtime}s {self.currentAverage+1}/{cur.average}\",percent)\n else:\n self.progressCallback.emit(f\"Done!\",10000)", "def _update_progress(self):\n with self.progress_meter.get_lock():\n self.progress_meter.value += 1.0", "def start_progress_bar(self):\r\n self.progress[\"value\"] = self.progress_step", "def _show_time_updates(p_bar):\n while p_bar.total > p_bar.n:\n time.sleep(1)\n if p_bar.total > p_bar.n:\n p_bar.refresh()", "def update_gesture_bar(self, value):\n if value > self.record_time:\n value = self.record_time\n self.progress_gesture[\"value\"] = value\n introduction_window.update()", "def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)", "def incProgress(self, val):\n\n if val is not None:\n self._progressBar.show()\n self._progressBar.setTextVisible(True)\n self.progress = self.progress + val\n try:\n self._progressBar.setValue(self.progress)\n qApp.processEvents()\n except:\n pass\n else:\n self._progressBar.setTextVisible(False)\n self._progressBar.hide()\n self._progressBar.reset()\n\n if self.isHidden is True:\n self.isHidden = False\n self.show_()", "def pb_start(self):\n if not self.thread.isAlive():\n VALUE = self.progressbar[\"value\"]\n self.progressbar.configure(mode=\"indeterminate\",\n maximum=self.maximum,\n value=VALUE)\n self.progressbar.start(self.interval)", "def set_progressbar(self, _) -> None:\r\n current: int = self.progressbar.value()\r\n self.progressbar.setValue(current + 1)\r\n if current == self.progressbar.maximum():\r\n self.progressbar.hide()\r\n self.progressbar.setValue(0)", "def increase_progress(self, value):\r\n\r\n pass", "def activate_progress_bars():\n threeML_config[\"interface\"][\"progress_bars\"] = 'on'", "def update_progress_bars(self, value):\n self.progress_session[\"value\"] += value\n self.progress_total[\"value\"] += value\n introduction_window.update()", "def update_progressbar(self, count, value):\n self.status(\"Progress %s/%s\" % (value, count))", "def progressTimer(self):\n current_time = int(time.time() % 30)\n self.progress.setValue(current_time)\n if current_time == 0:\n if self.isVisible():\n self.copy_auth_code()", "def loading_bar_update():\n with _loading_bar_lock:\n global _loading_bar_width, _loading_bar_start_time, _loading_bar_count, _loading_bar_total, _loading_bar_fmt_str\n _loading_bar_count += 1\n progress = float(_loading_bar_count) / _loading_bar_total\n bars = int(_loading_bar_width * progress)\n curr_time = time.time()\n time_elapsed = curr_time - _loading_bar_start_time\n time_remaining = time_elapsed / progress - time_elapsed\n sys.stdout.write(_loading_bar_fmt_str.format('=' * bars, progress, _loading_bar_count, _loading_bar_total, _format_time(time_elapsed), _format_time(time_remaining)))\n # sys.stdout.flush()", "def done(self):\n self.progress_bar.place(x=-275, y=640)\n self.progress = 0\n self.progress_step = 0\n self.progress_bar.update()", "def render_progressbar(surface, x, y, w, h, progress):\n surface.strokerect(x, y, w, h)\n bar_width = int((w - 4) * commons.clamp(progress, 0, 1))\n 
surface.fillrect(x + 2, y + 2, bar_width, h - 4)", "def _create_bar(self, iterator):\n self.progress_bar = progress_bar(iterator, parent=self.master_bar, leave=False)\n self.progress_bar.update(0)", "def set_progress(self, progress: float):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The worker function, invoked in a thread. 'nums' is a list of numbers to factor. The results are placed in outdict.
def worker(nums, outdict):
    print(threading.current_thread().name)
    print ("pid:", os.getpid())
    for n in nums:
        outdict[n] = factorize_naive(n)
[ "def worker(nums, out_q):\n outdict = {}\n print(threading.current_thread().name)\n print (\"pid:\", os.getpid())\n print (\"data size:\", nums)\n for n in nums:\n outdict[n] = factorize_naive(n)\n out_q.put(outdict)", "def find_num_factors(number, pause=0, num_procs=1, num_threads=1):\n def find_factors(number, queue, pause=0, interval=1, start=1):\n factors = 0\n for i in range(start, number, interval):\n if number % i == 0:\n factors += 1\n # This sleep is meant to simulate possible input / output, such as reading data from a disk.\n # The point is that inactivity causes a thread to give up the GIL, allowing another thread to do some work.\n time.sleep(pause)\n queue.put(factors)\n return factors\n\n\n # Turns out getting return values from threads isn't straight forward.\n # A queue is 'thread safe'. In fact, multiprocessing has an implementation\n # good for both threads and processes.\n if num_procs > 1:\n q = mp.Queue() # Special multithreaded implementation of queues.\n # Doesn't seem to be safe with threads from threading module.\n else:\n q = queue.Queue()\n factors = 0\n # If we aren't using threads or multi processes ...\n if num_procs == 1 and num_threads == 1:\n find_factors(number, q, pause=pause)\n\n # Using multiple threads ...\n elif num_threads > 1:\n threads = []\n for i in range(num_threads):\n threads.append(threading.Thread(target=find_factors,\n args=(number, q),\n kwargs={'pause':pause, 'start':i + 1, 'interval':num_threads},\n ))\n [i.start() for i in threads]\n [i.join() for i in threads]\n\n elif num_threads == 1 and num_procs > 1:\n # Code something with the multiprocessing module\n # Turns out the multiprocessing module has an extremely similar api to the threading module\n procs = []\n for i in range(num_procs):\n procs.append(mp.Process(target=find_factors,\n args=(number, q),\n kwargs={'pause':pause, 'start':i + 1, 'interval':num_procs},\n ))\n [i.start() for i in procs]\n [i.join() for i in procs]\n\n\n else:\n print('Threads > 1 and processes > 1 at the same time is not supported')\n return\n\n while not q.empty():\n factors += q.get()\n\n return factors", "def __call__(self, inputs, num_workers, verbose=False):\n print(\"Map is running...\")\n map_responses = self.map_parallel(inputs, num_workers, verbose)\n if verbose:\n print(\"Map response\")\n print(\"{\" + \"\\n\".join(\"{}: {}\".format(k, v) for k, v in map_responses.items()) + \"}\")\n print(\"Map is finished.\")\n print(\"Reduce is running...\")\n reduced_values = self.reduce_parallel(map_responses, num_workers, verbose)\n print(\"Reduce is finished.\")\n return reduced_values", "def compute(args, fun, max_workers=6):\n print(\"\\nProcessing symbols in parallel\")\n ex = futures.ThreadPoolExecutor(max_workers=max_workers)\n ex.map(fun, args)", "def calc_pi_workers(total, workers=None): #3\n if not workers:\n workers = cpu_count() #4\n min_n = total // workers\n counters = [min_n] * workers\n reminder = total % workers\n for count in range(reminder):\n counters[count] += 1\n pool = Pool(processes=workers) #5\n results = [pool.apply_async(count_inside, (counter,))\n for counter in counters] #6\n inside_count = sum(result.get() for result in results) #7\n return 4 * inside_count / float(total) #8", "def compute_metrics(self, results: list) -> dict:", "def multiprocess(inputs: list, worker_class: Any, num_threads: int = 40):\n\n input_queue = Queue() # type: ignore\n output_queue = Queue() # type: ignore\n\n for input_elm in inputs:\n input_queue.put(input_elm)\n\n threads = 
[worker_class(input_queue, output_queue)\n for _ in range(num_threads)]\n \n for thread in threads:\n thread.start()\n \n for thread in threads:\n thread.join()\n\n return get_all_nowait(output_queue)", "def worker(num_loops, cnt):\t\n\n\tglobal mutex\n\n\tfor i in range(num_loops):\n\t\ttotal = 0\n\t\tinside =0\n\n\t\tfor j in range(1000):\n\t\t\tx = random.random()\n\t\t\ty = random.random()\n\n\t\t\tif (x*x + y*y) <= 1:\n\t\t\t\t\tinside += 1\n\n\t\t\ttotal += 1\n\n\t\tmutex.acquire()\n\t\tcnt.add(total, inside)\n\t\tmutex.release()", "def parallel(\n fn,\n workers=10,\n return_results=True,\n identifiers=None,\n args=None,\n kwargs=None,\n):\n # Check user input\n if args is not None and kwargs is not None:\n err = 'Amount of args must match those of kwargs'\n assert len(args) == len(kwargs), err\n\n if (args is not None or kwargs is not None) and identifiers is not None:\n err = 'Amount of identifier must match those of kw/args'\n n_args = len(args) if args is not None else len(kwargs)\n assert n_args == len(identifiers), err\n\n # Preprocessing for arguments lists\n identifiers = [] if identifiers is None else identifiers\n args = [] if args is None else args\n kwargs = [] if kwargs is None else kwargs\n\n if len(args) == 0 and len(kwargs) == 0:\n args = [None]\n kwargs = [None]\n else:\n if len(args) == 0:\n args = [[] for _ in range(len(kwargs))]\n if len(kwargs) == 0:\n kwargs = [dict() for _ in range(len(args))]\n\n # Initialize all the futures\n executor = futures.ThreadPoolExecutor(max_workers=workers)\n _futures = [\n executor.submit(fn, *args[i], **kwargs[i])\n for i in range(len(args))\n ]\n\n # Return only futures when requested\n if not return_results:\n return _futures\n\n # Block until we received all results\n if len(identifiers) > 0:\n results = {}\n else:\n results = []\n\n for i, future in enumerate(_futures):\n result = future.result()\n\n if len(identifiers) > 0:\n results[identifiers[i]] = result\n else:\n results.append(result)\n\n return results", "def evaluate(self, tick, task, inputs, nosend_ports=None, fail_on_unexpected_nosend=False):\n\n logger.debug(\"Transfers for job %s\" % tick)\n\n ports = []\n transfers = []\n transfer_results = {}\n for port, (valueid, worker) in inputs.iteritems():\n \n \n d = self.fetch_from(worker, valueid)\n \n def transfer_completed(transfer_result, valueid, port):\n if transfer_result: # `None` if the value was already present\n transfer_results[port] = transfer_result\n return self.get_value(valueid)\n \n\n d.addCallback(transfer_completed, valueid, port)\n ports.append(port)\n transfers.append(d)\n \n d = defer.DeferredList(transfers)\n \n def run(inputs):\n \"\"\"\n Runs in separate thread.\n \"\"\"\n logger.debug(\"Running job %s\" % tick)\n \n #start = time.clock()\n start = datetime.datetime.now()\n try:\n result = task.evaluate(inputs)\n except:\n result = failure.Failure()\n finally:\n #end = time.clock()\n end = datetime.datetime.now()\n \n logger.debug(\"Running job %s finished\" % tick)\n \n #duration = end - start\n duration = (end - start).total_seconds()\n return traverser.EvalResult(result, duration)\n \n @twistit.yieldefer\n def got_all(results):\n \n logger.debug(\"Transfers for job %s finished\" % tick)\n \n values = []\n for success, result in results:\n if not success:\n if result.check(pickle.PickleError):\n raise pickle.PickleError(\"Failed to unpickle input of %r.%r: %s\" %(tick, port, result))\n else:\n result.raiseException()\n else:\n values.append(result)\n\n inputs = dict(zip(ports, values))\n \n 
evalresult = yield threads.deferToThread(run, inputs)\n \n if not isinstance(evalresult.result, dict) and not isinstance(evalresult.result, failure.Failure):\n raise ValueError(\"Evaluation of task %r did not produce a dict or a failure. Got %r.\" % (task, evalresult.result))\n \n defer.returnValue(evalresult)\n \n def task_completed(evalresult):\n if isinstance(evalresult.result, dict):\n \n # Injest values into our store and replace the eval results with ValueIds.\n outputs = evalresult.result\n outs = {}\n datasizes = {}\n for port, value in outputs.iteritems():\n valueid = ValueId(graph.Endpoint(tick, port))\n \n pickle_supported = True\n if nosend_ports and port in nosend_ports:\n pickle_supported = False\n \n try:\n size = self.set_value(valueid, \n value, \n pickle_supported, \n pickle_supported and fail_on_unexpected_nosend)\n except NoPickleError as e:\n e = NoPickleError(\"Value of output port %r cannot be pickled.\" % port,\n cause=e.cause)\n # TODO: memory leak. We should remove the values we've set in\n # previous loop iterations.\n raise e\n \n outs[port] = valueid\n if size is not None:\n datasizes[port] = size \n \n evalresult.result = outs\n evalresult.datasizes = datasizes\n evalresult.transfer_results = transfer_results\n return evalresult\n \n d.addCallback(got_all)\n d.addCallback(task_completed)\n return d", "def manager(num_thrds, num_loops):\n\n\tmutex.acquire()\n\tcnt.reset()\n\tmutex.release()\n\n\t# initialize the thread pool\n\tthread_pool = []\n\n\tfor i in range(num_thrds):\n\t\tthrd = threading.Thread(target=worker, args=(num_loops, cnt))\n\t\tthread_pool.append(thrd)\n\n\t# start threads\n\tfor i in range(len(thread_pool)):\n\t\tthread_pool[i].start()\n\n\tfor i in range(len(thread_pool)):\n\t\tthreading.Thread.join(thread_pool[i])\n\n\t#cnt.display()", "def getResults(workers):\n results = []\n for worker in workers:\n results += worker.getResults()\n \n return results", "def foo_job():\n add_one(multiply_two(add_nums(return_one(), return_two())))", "def _compute(histoparam, worker_id, worker_type, taskQueue, resultQueue, r_max, n_bins, t0):\n\n worker_str = \"%s worker %02d\" % (worker_type, worker_id)\n\n if (worker_type == \"cpu\"):\n if (histoparam['cpu']['module'] == 'pydh'):\n from .kernel import pydh\n if not pydh.have_c_pydh:\n from .kernel import common\n raise RuntimeError(common.import_pydh_error_msg)\n elif (histoparam['cpu']['module'] == 'dist'):\n from .kernel import dist\n else:\n raise RuntimeError(\"unsupported CPU histogram kernel requested: \" + str(histoparam['cpu']['module']))\n # handle NUMA pinning for CPU kernels and special cases only\n if (histoparam['general']['numa_aware']):\n numa_topology = util.get_numa_domains()\n n_numa_domains = len(numa_topology)\n if (n_numa_domains > 0) and (histoparam['cpu']['workers'] % n_numa_domains == 0):\n numa_id = worker_id % n_numa_domains\n stat = util.set_numa_domain(numa_id, numa_topology)\n if (stat and histoparam['general']['verbose']):\n print(\" %s %s: pinned to NUMA domain %d\" % (util.timeStamp(t0=t0), worker_str, numa_id))\n elif (worker_type == \"gpu\"):\n from .kernel import cudh\n if not cudh.have_c_cudh:\n from .kernel import common\n raise RuntimeError(common.import_cudh_error_msg)\n if (cudh.get_num_cuda_devices() == 0):\n raise RuntimeError(\"no usable CUDA-enabled GPU detected\")\n else:\n raise RuntimeError(\"unsupported worker type requested: \" + str(worker_type))\n\n if (histoparam['general']['verbose']):\n print(util.SEP)\n if (worker_type == \"cpu\"):\n threads_str = 
\"(%d threads)\" % histoparam['cpu']['threads']\n else:\n threads_str = \"\"\n print(\" %s %s: worker started %s\" % (util.timeStamp(dateAndTime=True), worker_str, threads_str))\n print(util.SEP)\n notify_master = False\n icount = 0\n termination_msg = \"\"\n t1 = time.time()\n\n try:\n while True:\n work_item = taskQueue.get()\n if work_item in [\"done\", \"stop\"]:\n resultQueue.put(work_item)\n taskQueue.task_done()\n termination_msg = \"(\" + work_item + \")\"\n break\n else:\n t2 = time.time()\n wait_time = t2 - t1\n #\n frm = work_item[0]\n species_Crds = []\n for el in frm.get_keys(base.loc_coordinates):\n coord_set = frm.get_data(base.loc_coordinates + '/' + el)\n species_Crds.append(coord_set)\n bap = work_item[1]\n #\n histogram_scale_factors = frm.get_data(base.loc_histogram_scale_factors)\n histogram_mask = frm.get_data(base.loc_histogram_mask)\n periodic_box = frm.get_data(base.loc_dimensions)\n #\n if (histoparam['general']['verbose']):\n print(\" %s %s: processing frame %d ...\" % (util.timeStamp(t0=t0), worker_str, frm.i))\n #\n try:\n if (worker_type == \"cpu\"):\n if (histoparam['cpu']['module'] == 'pydh'):\n histograms = pydh.histograms(species_Crds, r_max, n_bins,\n histoparam['cpu']['precision'],\n histoparam['cpu']['threads'],\n scale_factors=histogram_scale_factors,\n mask_array=histogram_mask,\n check_input=histoparam['cpu']['check_input'],\n box=periodic_box)\n elif (histoparam['cpu']['module'] == 'dist'):\n histograms = dist.histograms(species_Crds, r_max, n_bins)\n else:\n raise RuntimeError(\"unsupported CPU histogram kernel requested: \" +\n str(histoparam['cpu']['module']))\n elif (worker_type == \"gpu\"):\n histograms = cudh.histograms(species_Crds, r_max, n_bins,\n histoparam['gpu']['precision'],\n gpu_id=worker_id,\n scale_factors=histogram_scale_factors,\n mask_array=histogram_mask,\n check_input=histoparam['gpu']['check_input'],\n box=periodic_box)\n else:\n raise RuntimeError(\"unsupported worker type requested: \" + str(worker_type))\n except ValueError as error:\n print(\" %s %s: value error: pair distance > r_max\" % (util.timeStamp(t0=t0), worker_str))\n notify_master = True\n break\n except RuntimeError as error:\n print(\" %s %s: runtime error: %s\" % (util.timeStamp(t0=t0), worker_str, error.message))\n notify_master = True\n break\n except Exception as error:\n print(\" %s %s: general error: %s\" % (util.timeStamp(t0=t0), worker_str, error.message))\n notify_master = True\n break\n #\n t1 = time.time()\n comp_time = t1 - t2\n #\n # --- temporarily pack the 2D histograms array into the Container instance\n frm.put_data('tmp/histograms', histograms)\n # --- delete the coordinate data\n frm.del_data(base.loc_coordinates)\n #\n result_item = (frm, comp_time, wait_time, worker_type, bap)\n #\n resultQueue.put(result_item)\n #\n icount += 1\n if (icount % histoparam['output']['flush_interval'] == 0):\n sys.stdout.flush()\n taskQueue.task_done()\n except Exception as error:\n print(\" %s %s: general error: %s\" % (util.timeStamp(t0=t0), worker_str, error.message))\n notify_master = True\n if notify_master:\n print(\" %s %s: sending shutdown signal to master process\" % (util.timeStamp(t0=t0), worker_str))\n os.kill(os.getppid(), signal.SIGUSR1)\n if (histoparam['general']['verbose']):\n print(util.SEP)\n print(\" %s %s: shutting down %s\" % (util.timeStamp(t0=t0), worker_str, termination_msg))\n print(util.SEP)\n sys.stdout.flush()\n sys.exit(0)", "def run(self, func, args):\n num_of_args_start = len(args)\n if len(args) == 0:\n return []\n args = 
self.check_and_format_args(args)\n\n size = self.COMM.Get_size()\n\n # broadcast function to worker processors\n self.COMM.bcast(func, root=0)\n\n # chunkify the argument list\n args_chunk = self._split(args, size)\n\n # scatter argument chunks to workers\n args_chunk = self.COMM.scatter(args_chunk, root=0)\n\n if type(args_chunk) != list:\n raise Exception(\"args_chunk needs to be a list\")\n\n # perform the calculation and get results\n result_chunk = [func(*arg) for arg in args_chunk]\n sys.stdout.flush()\n\n result_chunk = self.COMM.gather(result_chunk, root=0)\n\n if type(result_chunk) != list:\n raise Exception(\"result_chunk needs to be a list\")\n\n # group results\n results = self._join(result_chunk)\n\n if len(results) != num_of_args_start:\n results = [x for x in results if type(x) != type(self.Empty_object)]\n results = flatten_list(results)\n\n if type(results) != list:\n raise Exception(\"results needs to be a list\")\n\n results = [x for x in results if type(x) != type(self.Empty_object)]\n sys.stdout.flush()\n return results", "def run_parallel(dict,mpdclass,fieldname,fielderr,fieldlim,Nk,NLstar,kvalall,Lstarall,selfctval,errdist='normal',\n verbose=True,dummyval=False):\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n def worker(mpdclass,fieldname,fielderr,fieldlim,Nk,NLstar,kvalall,Lstarall,selfctval,dummyval,return_dict):\n \"\"\"\n Multiprocessing worker function\n \"\"\"\n fieldtable = singlefield(mpdclass,fieldname,fielderr,fieldlim,Nk,NLstar,kvalall,Lstarall,\n selfctval,errdist=errdist,dummyval=dummyval,verbose=verbose)\n return_dict[fieldname+'lookup'] = fieldtable\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n if verbose: print ' ---- Starting multiprocess run of field look-up tables: ---- '\n tstart = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n Nfields = len(fieldname)\n\n mngr = multiprocessing.Manager() # initialize Manager too kepp track of worker function output\n return_dict = mngr.dict() # define Manager dictionar to store output from Worker function in\n jobs = []\n\n for ff in xrange(Nfields):\n job = multiprocessing.Process(target=worker,\n args=(mpdclass,fieldname[ff],fielderr[ff],fieldlim[ff],Nk,NLstar,\n kvalall,Lstarall,selfctval,dummyval,return_dict),name=fieldname[ff])\n\n jobs.append(job)\n job.start()\n #job.join() # wait until job has finished\n\n for job in jobs:\n job.join()\n\n tend = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n if verbose:\n print '\\n ---- The parallel_run finished running the jobs for all fields ----'\n print ' Start : '+tstart\n print ' End : '+tend\n print ' Exitcode = 0 : job produced no error '\n print ' Exitcode > 0 : job had an error, and exited with that code (signal.SIGTERM)'\n print ' exitcode < 0 : job was killed with a signal of -1 * exitcode (signal.SIGTERM)'\n\n for job in jobs:\n print ' - The job running field ',job.name,' exited with exitcode: ',job.exitcode\n\n\n if verbose: print ' - Adding output from parallelized run to dictionary'\n\n for key in return_dict.keys():\n dict[key] = return_dict[key] # filling dictionary\n\n return dict", "def _reducer(self, input, output, input_keys, worker_number, chunksize):\n print(\"Thread {0} is working\".format(worker_number))\n id_start = worker_number * chunksize\n id_end = min((worker_number + 1) * chunksize, len(input_keys))\n print(\"It processes keys: \", input_keys[id_start: id_end])\n for key in 
input_keys[id_start:id_end]:\n if key in output:\n self.the_reducer(key, input[key], output)\n else:\n output[key] = 0\n self.the_reducer(key, input[key], output)", "def pooling(fct,lst,nb_pool=10):\n p = Pool(nb_pool)\n infos = p.map(fct,lst)\n p.terminate()\n p.join()\n return infos", "def run_numbers():\n if run_nos:\n # Get task names\n tasks = []\n for rn in dcm_dict.keys():\n tasks.append(dcm_dict[rn]['task_name'])\n # Assign run numbers\n for tsk in set(tasks):\n n_runs = sum(i == tsk for i in tasks)\n if n_runs == 1:\n for rn in dcm_dict.keys():\n if dcm_dict[rn]['task_name'] == tsk:\n # Add in the 'task' prefix required by BIDS format if missing from name\n if not tsk[0:4] == 'task':\n dcm_dict[rn]['out_name'] = 'task-'+tsk+'_run-01'\n else:\n dcm_dict[rn]['out_name'] = tsk+'_run-01'\n elif n_runs > 1:\n task_runs = []\n run_times = []\n for rn in dcm_dict.keys():\n if dcm_dict[rn]['task_name'] == tsk:\n task_runs.append(rn)\n run_times.append(dcm_dict[rn]['start_time'].timestamp())\n idx_order = sorted(range(len(run_times)), key=lambda k: run_times[k])\n for i in idx_order:\n if not tsk[0:4] == 'task':\n dcm_dict[task_runs[i]]['out_name'] = 'task-'+tsk+'_run-0'+str(i+1)\n else:\n dcm_dict[task_runs[i]]['out_name'] = tsk+'_run-0'+str(i+1)\n else:\n for rn in dcm_dict.keys():\n dcm_dict[rn]['out_name'] = dcm_dict[rn]['task_name']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The worker function, invoked in a process. 'nums' is a list of numbers to factor. The results are placed in a dictionary that's pushed to a queue.
def worker(nums, out_q):
    outdict = {}
    print(threading.current_thread().name)
    print ("pid:", os.getpid())
    print ("data size:", nums)
    for n in nums:
        outdict[n] = factorize_naive(n)
    out_q.put(outdict)
[ "def worker(nums, outdict):\n print(threading.current_thread().name)\n print (\"pid:\", os.getpid())\n for n in nums:\n outdict[n] = factorize_naive(n)", "def find_num_factors(number, pause=0, num_procs=1, num_threads=1):\n def find_factors(number, queue, pause=0, interval=1, start=1):\n factors = 0\n for i in range(start, number, interval):\n if number % i == 0:\n factors += 1\n # This sleep is meant to simulate possible input / output, such as reading data from a disk.\n # The point is that inactivity causes a thread to give up the GIL, allowing another thread to do some work.\n time.sleep(pause)\n queue.put(factors)\n return factors\n\n\n # Turns out getting return values from threads isn't straight forward.\n # A queue is 'thread safe'. In fact, multiprocessing has an implementation\n # good for both threads and processes.\n if num_procs > 1:\n q = mp.Queue() # Special multithreaded implementation of queues.\n # Doesn't seem to be safe with threads from threading module.\n else:\n q = queue.Queue()\n factors = 0\n # If we aren't using threads or multi processes ...\n if num_procs == 1 and num_threads == 1:\n find_factors(number, q, pause=pause)\n\n # Using multiple threads ...\n elif num_threads > 1:\n threads = []\n for i in range(num_threads):\n threads.append(threading.Thread(target=find_factors,\n args=(number, q),\n kwargs={'pause':pause, 'start':i + 1, 'interval':num_threads},\n ))\n [i.start() for i in threads]\n [i.join() for i in threads]\n\n elif num_threads == 1 and num_procs > 1:\n # Code something with the multiprocessing module\n # Turns out the multiprocessing module has an extremely similar api to the threading module\n procs = []\n for i in range(num_procs):\n procs.append(mp.Process(target=find_factors,\n args=(number, q),\n kwargs={'pause':pause, 'start':i + 1, 'interval':num_procs},\n ))\n [i.start() for i in procs]\n [i.join() for i in procs]\n\n\n else:\n print('Threads > 1 and processes > 1 at the same time is not supported')\n return\n\n while not q.empty():\n factors += q.get()\n\n return factors", "def __init__ (self, *funcs_workers):\n self.numpools = len(funcs_workers)\n self.numworkerslist = []\n self.queues = [Queue() for _ in xrange(self.numpools+1)]\n for i, (func, numworkers) in enumerate(funcs_workers):\n self.numworkerslist.append(numworkers)\n for _ in xrange(numworkers):\n Process(target=worker, args=(\n func, self.queues[i], self.queues[i+1]\n )).start()", "def _compute(histoparam, worker_id, worker_type, taskQueue, resultQueue, r_max, n_bins, t0):\n\n worker_str = \"%s worker %02d\" % (worker_type, worker_id)\n\n if (worker_type == \"cpu\"):\n if (histoparam['cpu']['module'] == 'pydh'):\n from .kernel import pydh\n if not pydh.have_c_pydh:\n from .kernel import common\n raise RuntimeError(common.import_pydh_error_msg)\n elif (histoparam['cpu']['module'] == 'dist'):\n from .kernel import dist\n else:\n raise RuntimeError(\"unsupported CPU histogram kernel requested: \" + str(histoparam['cpu']['module']))\n # handle NUMA pinning for CPU kernels and special cases only\n if (histoparam['general']['numa_aware']):\n numa_topology = util.get_numa_domains()\n n_numa_domains = len(numa_topology)\n if (n_numa_domains > 0) and (histoparam['cpu']['workers'] % n_numa_domains == 0):\n numa_id = worker_id % n_numa_domains\n stat = util.set_numa_domain(numa_id, numa_topology)\n if (stat and histoparam['general']['verbose']):\n print(\" %s %s: pinned to NUMA domain %d\" % (util.timeStamp(t0=t0), worker_str, numa_id))\n elif (worker_type == \"gpu\"):\n from 
.kernel import cudh\n if not cudh.have_c_cudh:\n from .kernel import common\n raise RuntimeError(common.import_cudh_error_msg)\n if (cudh.get_num_cuda_devices() == 0):\n raise RuntimeError(\"no usable CUDA-enabled GPU detected\")\n else:\n raise RuntimeError(\"unsupported worker type requested: \" + str(worker_type))\n\n if (histoparam['general']['verbose']):\n print(util.SEP)\n if (worker_type == \"cpu\"):\n threads_str = \"(%d threads)\" % histoparam['cpu']['threads']\n else:\n threads_str = \"\"\n print(\" %s %s: worker started %s\" % (util.timeStamp(dateAndTime=True), worker_str, threads_str))\n print(util.SEP)\n notify_master = False\n icount = 0\n termination_msg = \"\"\n t1 = time.time()\n\n try:\n while True:\n work_item = taskQueue.get()\n if work_item in [\"done\", \"stop\"]:\n resultQueue.put(work_item)\n taskQueue.task_done()\n termination_msg = \"(\" + work_item + \")\"\n break\n else:\n t2 = time.time()\n wait_time = t2 - t1\n #\n frm = work_item[0]\n species_Crds = []\n for el in frm.get_keys(base.loc_coordinates):\n coord_set = frm.get_data(base.loc_coordinates + '/' + el)\n species_Crds.append(coord_set)\n bap = work_item[1]\n #\n histogram_scale_factors = frm.get_data(base.loc_histogram_scale_factors)\n histogram_mask = frm.get_data(base.loc_histogram_mask)\n periodic_box = frm.get_data(base.loc_dimensions)\n #\n if (histoparam['general']['verbose']):\n print(\" %s %s: processing frame %d ...\" % (util.timeStamp(t0=t0), worker_str, frm.i))\n #\n try:\n if (worker_type == \"cpu\"):\n if (histoparam['cpu']['module'] == 'pydh'):\n histograms = pydh.histograms(species_Crds, r_max, n_bins,\n histoparam['cpu']['precision'],\n histoparam['cpu']['threads'],\n scale_factors=histogram_scale_factors,\n mask_array=histogram_mask,\n check_input=histoparam['cpu']['check_input'],\n box=periodic_box)\n elif (histoparam['cpu']['module'] == 'dist'):\n histograms = dist.histograms(species_Crds, r_max, n_bins)\n else:\n raise RuntimeError(\"unsupported CPU histogram kernel requested: \" +\n str(histoparam['cpu']['module']))\n elif (worker_type == \"gpu\"):\n histograms = cudh.histograms(species_Crds, r_max, n_bins,\n histoparam['gpu']['precision'],\n gpu_id=worker_id,\n scale_factors=histogram_scale_factors,\n mask_array=histogram_mask,\n check_input=histoparam['gpu']['check_input'],\n box=periodic_box)\n else:\n raise RuntimeError(\"unsupported worker type requested: \" + str(worker_type))\n except ValueError as error:\n print(\" %s %s: value error: pair distance > r_max\" % (util.timeStamp(t0=t0), worker_str))\n notify_master = True\n break\n except RuntimeError as error:\n print(\" %s %s: runtime error: %s\" % (util.timeStamp(t0=t0), worker_str, error.message))\n notify_master = True\n break\n except Exception as error:\n print(\" %s %s: general error: %s\" % (util.timeStamp(t0=t0), worker_str, error.message))\n notify_master = True\n break\n #\n t1 = time.time()\n comp_time = t1 - t2\n #\n # --- temporarily pack the 2D histograms array into the Container instance\n frm.put_data('tmp/histograms', histograms)\n # --- delete the coordinate data\n frm.del_data(base.loc_coordinates)\n #\n result_item = (frm, comp_time, wait_time, worker_type, bap)\n #\n resultQueue.put(result_item)\n #\n icount += 1\n if (icount % histoparam['output']['flush_interval'] == 0):\n sys.stdout.flush()\n taskQueue.task_done()\n except Exception as error:\n print(\" %s %s: general error: %s\" % (util.timeStamp(t0=t0), worker_str, error.message))\n notify_master = True\n if notify_master:\n print(\" %s %s: sending 
shutdown signal to master process\" % (util.timeStamp(t0=t0), worker_str))\n os.kill(os.getppid(), signal.SIGUSR1)\n if (histoparam['general']['verbose']):\n print(util.SEP)\n print(\" %s %s: shutting down %s\" % (util.timeStamp(t0=t0), worker_str, termination_msg))\n print(util.SEP)\n sys.stdout.flush()\n sys.exit(0)", "def worker(self, q, return_dict):\n pid = os.getpid()\n while True:\n qqq = q.get()\n if qqq == 'DONE':\n # print('proc =', os.getpid())\n break\n\n (idx, d) = qqq\n mol_id = d[0]\n smi = d[1]\n # print screening processing in every pout step\n if self.pout != 0:\n if idx % self.pout == self.pout-1:\n print(\"processing: \", idx+1, flush=True)\n result_dict = self.simulation_process(idx, mol_id, smi, pid)\n return_dict[idx] = result_dict", "def compute(args, fun, max_workers=6):\n print(\"\\nProcessing symbols in parallel\")\n ex = futures.ThreadPoolExecutor(max_workers=max_workers)\n ex.map(fun, args)", "def multiprocess(inputs: list, worker_class: Any, num_threads: int = 40):\n\n input_queue = Queue() # type: ignore\n output_queue = Queue() # type: ignore\n\n for input_elm in inputs:\n input_queue.put(input_elm)\n\n threads = [worker_class(input_queue, output_queue)\n for _ in range(num_threads)]\n \n for thread in threads:\n thread.start()\n \n for thread in threads:\n thread.join()\n\n return get_all_nowait(output_queue)", "def calc_pi_workers(total, workers=None): #3\n if not workers:\n workers = cpu_count() #4\n min_n = total // workers\n counters = [min_n] * workers\n reminder = total % workers\n for count in range(reminder):\n counters[count] += 1\n pool = Pool(processes=workers) #5\n results = [pool.apply_async(count_inside, (counter,))\n for counter in counters] #6\n inside_count = sum(result.get() for result in results) #7\n return 4 * inside_count / float(total) #8", "def foo_job():\n add_one(multiply_two(add_nums(return_one(), return_two())))", "def _process_worker(call_queue, result_queue):\n while True:\n call_item = call_queue.get(block=True)\n if call_item is None:\n # Wake up queue management thread\n result_queue.put(os.getpid())\n return\n try:\n r = call_item.fn(*call_item.args, **call_item.kwargs)\n except BaseException as e:\n exc = _ExceptionWithTraceback(e, e.__traceback__)\n result_queue.put(_ResultItem(call_item.work_id, exception=exc))\n logger.exception(e) # 主要是直接显示错误。\n else:\n result_queue.put(_ResultItem(call_item.work_id,\n result=r))", "def _internal_worker_process(args: typing.Tuple[Queue, Queue, Queue, typing.Callable]) -> None:\n arguments_queue: Queue = args[0]\n result_queue: Queue = args[1]\n target: typing.Callable = args[2]\n log_queue: Queue = args[3]\n log_configurer: typing.Callable = args[4]\n\n # Configure logging\n log_configurer(log_queue)\n\n # _logger.debug(\"worker process started {}\".format(current_process().name))\n # print(f\"[INFO] {current_process().name} > worker process started\")\n _logger.info(\"worker process started\")\n _current_work_pids.append(os.getpid())\n counter: int = 0\n error_count: int = 0\n while True:\n if error_count > 3:\n break\n try:\n # wait until a new job is available\n # print(f\"[INFO] {current_process().name} > waiting on new jobs\")\n _logger.info(\"waiting on new jobs\")\n kwargs = arguments_queue.get(block=True)\n _logger.info(\"copying\")\n kwargs_copy = copy.copy(kwargs)\n # execute the job\n try:\n # TODO add timelimit to single work in the worker\n # print(f\"[INFO] {current_process().name} > executing job\")\n result = target(**kwargs)\n # assert 
hasattr(result['fitted_pipeline'], 'runtime'), \\\n # '[DJM] Eval does not have runtime'\n except:\n _logger.exception(\n f'Target evaluation failed {hash(str(kwargs))}', exc_info=True)\n # print(f'[INFO] {current_process().name} > Target evaluation failed {hash(str(kwargs))}')\n # traceback.print_exc()\n # _logger.error(traceback.format_exc())\n result = None\n\n # push the results\n if result is not None:\n result_simplified = result.copy()\n if \"ensemble_tunning_result\" in result:\n result_simplified.pop(\"ensemble_tunning_result\")\n\n _logger.info(f\"Pushing Results: {result['id'] if result and 'id' in result else 'NONE'}\")\n _logger.debug(f\"Pushing Results={result} kwargs={kwargs}\")\n\n try:\n result_queue.put((kwargs, result))\n except BrokenPipeError:\n _logger.exception(f\"Result queue put failed. Broken Pipe.\")\n exit(1)\n except:\n # traceback.print_exc()\n _logger.exception(f\"Result queue put failed.\", exc_info=True)\n _logger.info(f\"Result queue is full: {result_queue.full()}\")\n\n try:\n _logger.info(\"Pushing result None. Maybe Result failed to pickle.\")\n result_queue.put((kwargs_copy, None))\n except:\n # traceback.print_exc()\n # _logger.exception(f\"{current_process().name} > {traceback.format_exc()}\")\n # print(f\"[INFO] {current_process().name} > cannot even push None\")\n _logger.exception(f\"Result queue put failed with empty Result.\", exc_info=True)\n _logger.info(\"Cannot even push None\")\n exit(1)\n\n # exit(1)\n counter += 1\n # print(f\"[INFO] {current_process().name} > is Idle, done {counter} jobs\")\n _logger.info(f\"is Idle, done {counter} jobs\")\n except BrokenPipeError:\n error_count += 1\n print(f\"{current_process().name:17} > Broken Pipe. Error count={error_count}\")\n _logger.exception(f\"Broken Pipe. Error count={error_count}\")\n except Exception:\n error_count += 1\n print(f\"{current_process().name:17} > Unexpected Exception. Error count={error_count}\")\n _logger.exception(f\"Unexpected Exception. 
Error count={error_count}\", exc_info=True)\n print(f\"{current_process().name:17} > Worker EXITING\")\n _logger.warning('Worker EXITING')", "def _process_worker(call_queue, result_queue, shutdown):\n while True:\n try:\n call_item = call_queue.get(block=True, timeout=0.1)\n except queue.Empty:\n if shutdown.is_set():\n return\n else:\n try:\n r = call_item.fn(*call_item.args, **call_item.kwargs)\n except BaseException:\n e = sys.exc_info()[1]\n result_queue.put(_ResultItem(call_item.work_id,\n exception=e))\n else:\n result_queue.put(_ResultItem(call_item.work_id,\n result=r))", "def worker_function(taskQ, resultQ):\n \n while True:\n try: ivel = taskQ.get(block=True, timeout=10)# try to get the next task, allow some time for process clash (ivel number)\n except queue.Empty: break# kill process if no more tasks left\n example = generate_example(ivel)\n resultQ.put(example)# push the example to the results queue", "def __call__(self, inputs, num_workers, verbose=False):\n print(\"Map is running...\")\n map_responses = self.map_parallel(inputs, num_workers, verbose)\n if verbose:\n print(\"Map response\")\n print(\"{\" + \"\\n\".join(\"{}: {}\".format(k, v) for k, v in map_responses.items()) + \"}\")\n print(\"Map is finished.\")\n print(\"Reduce is running...\")\n reduced_values = self.reduce_parallel(map_responses, num_workers, verbose)\n print(\"Reduce is finished.\")\n return reduced_values", "def _process_data(f, work_queue, results_queue):\n for element in iter(work_queue.get, FINISHED):\n try:\n results_queue.put(f(element))\n except Exception, work_error:\n LOG.critical('parallel_pc Error: {0}\\n\\n\\tconfig settings {1}\\n'.format(work_error, element))\n results_queue.put(FINISHED)", "def worker(input, output):\n for args in iter(input.get, 'STOP'):\n result = generate_list(*args)\n output.put( (result, current_process().name, args) )", "def main(config):\n all_procs = []\n result_q = mp.Queue()\n for seed in config[\"seeds\"]:\n config[\"seed\"] = seed\n p = mp.Process(target=run, args=(config, result_q))\n p.start()\n all_procs.append(p)\n\n for p in all_procs:\n p.join()\n\n all_returns = [result_q.get() for p in all_procs]\n mean_per_restart = np.mean(all_returns, axis=1)\n mean, std = np.mean(mean_per_restart), np.std(mean_per_restart)\n\n # Return the negative since we're minimizing the function\n # .. the metric minimized is suggested from Duan et al. (2016)\n return -(mean - std)", "def _process_worker(call_queue, result_queue):\n while True:\n call_item = call_queue.get(block=True)\n if call_item is None:\n # Wake up queue management thread\n result_queue.put(os.getpid())\n return\n try:\n r = call_item.fn(*call_item.args, **call_item.kwargs)\n except BaseException as e:\n exc = _ExceptionWithTraceback(e, e.__traceback__)\n result_queue.put(_ResultItem(call_item.work_id, exception=exc))\n else:\n result_queue.put(_ResultItem(call_item.work_id,\n result=r))", "def worker1() -> None:\n x = 10\n while x > 0:\n logging.info('Info from Process1 {0}'.format(x))\n time.sleep(0.25)\n x -= 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge a sequence of operations into a crossproduct tree.
def merge(from_args):
    assert len(from_args) > 0

    def cross(x, y):
        return algebra.CrossProduct(x, y)

    from_ops = from_args.values()
    op = reduce(cross, from_ops)
    return (op, __calculate_offsets(from_args))
[ "def cross(self, *args):\n tmp = self\n for arg in args:\n tmp = _SetProduct(tmp, arg)\n return tmp", "def build_operations(self):\n pipeline_steps = []\n\n del_operation = self.build_delete_step(self.configs_.get('delete', []))\n pipeline_steps.append(('delete', del_operation))\n\n add_configs = self.configs_.get('add', [])\n for i in range(0, len(add_configs)):\n add_operation = self.build_add_step(add_configs[i])\n pipeline_steps.append(('add_'+str(i), add_operation))\n\n self.pipeline_ = FeatureUnion(pipeline_steps)", "def all_ops(ops=None, isolate=False):\n if ops is None:\n ops = []\n try:\n all_ops = Op.get_all_ops()\n all_ops.append(ops)\n yield (ops)\n finally:\n all_ops.pop()\n parent = all_ops[-1]\n if not isolate and parent is not None:\n parent.extend(ops)", "def reduce_operations(expression, format):\n\n # Get formats\n add = format[\"add\"]([\"\", \"\"])\n mult = format[\"multiply\"]([\"\", \"\"])\n group = format[\"grouping\"](\"\")\n\n # Be sure that we have an expanded expression\n expression = expand_operations(expression, format)\n\n # Group variables to possibly reduce complexity\n expression = group_vars(expression, format)\n\n # Get variables and products\n prods, variables = get_simple_variables(expression, format)\n\n # Get the variables for which we can reduce the expression\n max_vars = reduction_possible(variables)\n new_prods = []\n no_mult = []\n max_vars.sort()\n\n # If we have variables that can be moved outside \n if max_vars:\n for p in prods:\n # Get the list of variables in current product\n li = split_expression(p, format, mult)\n li.sort()\n\n # If the list of products is the same as what we intend of moving\n # outside the parenthesis, leave it\n # (because x + x*x + x*y should be x + (x + y)*x NOT (1.0 + x + y)*x)\n if li == max_vars:\n no_mult.append(p)\n continue\n else:\n # Get list of all variables from max_vars that are in li\n indices = [i for i in max_vars if i in li]\n # If not all were present add to list of terms that shouldn't be\n # multiplied with variables and continue\n if indices != max_vars:\n no_mult.append(p)\n continue\n\n # Remove variables that we are moving outside\n for v in max_vars:\n li.remove(v)\n\n # Add to list of products\n p = mult.join(li)\n new_prods.append(p)\n\n # Sort lists\n no_mult.sort()\n new_prods.sort()\n else:\n # No reduction possible\n return expression\n\n # Recursively reduce sums with and without reduced variable\n new_prods = add.join(new_prods)\n if new_prods:\n new_prods = reduce_operations(new_prods, format)\n if no_mult:\n no_mult = [reduce_operations(add.join(no_mult), format)]\n\n # Group new products if we have a sum\n g = new_prods\n len_new_prods = len(split_expression(new_prods, format, add))\n if len_new_prods > 1:\n g = format[\"grouping\"](new_prods)\n\n # The new expression is the sum of terms that couldn't be reduced and terms\n # that could be reduced multiplied by the reduction e.g.,\n # expr = z + (x + y)*x\n new_expression = add.join(no_mult + [mult.join([g, mult.join(max_vars)])])\n\n return new_expression", "def add_operations(self, operations):\n for op in operations:\n self.add_operation(op)", "def cross_product(*inputs):\n return (list(itertools.product(*inputs)))", "def _combine_operations(\n self, operations: typing.Sequence[qop.QuantumOperation]\n ) -> qop.QuantumOperation:\n op1, op2, op3 = operations[0], operations[1], operations[2]\n\n new_operation = copy.copy(numpy.random.choice(operations, p=self._r))\n control_number = len(new_operation.controls)\n 
new_operation.controls = []\n new_operation.target = numpy.random.choice(\n [op1.target, op2.target, op3.target], p=self._r\n )\n\n while len(new_operation.controls) < control_number:\n ctrl = numpy.random.randint(0, self._qubit_number)\n if ctrl != new_operation.target and ctrl not in new_operation.controls:\n new_operation.controls.append(ctrl)\n\n if new_operation.is_parametrised():\n raise NotImplementedError(\n \"Parametrised operations are not supported for the moment.\"\n )\n return new_operation", "def _operation_traverse(self, op, op_f, aggregate_f, combine_f): # noqa\n # apply op_f for each operation\n op_res = op_f(op)\n if len(op.children) == 0:\n return op_res # no children return\n else:\n # apply _operation_traverse recursively\n children = [\n self._operation_traverse(child, op_f, aggregate_f, combine_f)\n for child in op.children\n ]\n # combine the operation result with the children aggregated result\n return combine_f(op_res, aggregate_f(children))", "def extract_operator_products(e, independent=False):\n ops = []\n\n if isinstance(e, Operator):\n ops.append(e)\n\n elif isinstance(e, Add):\n for arg in e.args:\n ops += extract_operator_products(arg, independent=independent)\n\n elif isinstance(e, Mul):\n c, o = split_coeff_operator(e)\n if o != 1:\n ops.append(o)\n else:\n if debug:\n print(\"Unrecongized type: %s: %s\" % (type(e), str(e)))\n\n no_ops = []\n for op in ops:\n no_op = normal_ordered_form(op.expand(), independent=independent)\n if isinstance(no_op, (Mul, Operator, Pow)):\n no_ops.append(no_op)\n elif isinstance(no_op, Add):\n for sub_no_op in extract_operator_products(no_op, independent=independent):\n no_ops.append(sub_no_op)\n else:\n raise ValueError(\"Unsupported type in loop over ops: %s: %s\" %\n (type(no_op), no_op))\n\n return list(set(no_ops))", "def _combine_expr(left_set: list, right_set: list):\n for left_expr, right_expr in itertools.product(left_set, right_set):\n yield Node(Node.NODE_TYPE_OPERATOR, '+', left_expr, right_expr)\n\n yield Node(Node.NODE_TYPE_OPERATOR, '*', left_expr, right_expr)\n\n if left_expr.value >= right_expr.value:\n yield Node(Node.NODE_TYPE_OPERATOR, '-', left_expr, right_expr)\n\n if right_expr.value != 0:\n yield Node(Node.NODE_TYPE_OPERATOR, '/', left_expr, right_expr)", "def compose_nodes(nodes, retrieve=False):\n l = list(nodes)\n tree = []\n\n if not isinstance(l[0], Iteration):\n # Nothing to compose\n body = flatten(l)\n body = List(body=body) if len(body) > 1 else body[0]\n else:\n body = l.pop(-1)\n while l:\n handle = l.pop(-1)\n body = handle._rebuild(body, **handle.args_frozen)\n tree.append(body)\n\n if retrieve is True:\n tree = list(reversed(tree))\n return body, tree\n else:\n return body", "def build_mutations(ops, to_ops):\n # note: mutations to self are excluded.\n # None is a special mutation meaning to delete the operator!\n # 1) when to_op is None isinstance(from_op, None) will blow up because\n # the second parameter needs to be a class\n # 2) when to_op != None we do the isinstance() check to figure out\n # whether or not to include the operator in the list of possible\n # mutations\n #\n # The `if to_op is None or isinstance(from_op, to_op)` expression handles\n # both scenarios very elegantly. First we handle 1) and if this is True\n # the rest of the expression is not evaluated and None is returned. Else\n # we're in scenario 2) where the left part of the expression is False so\n # the right part is evaluated. 
Since the left part of the expression has\n # confirmed that to_op != None then we're confident that the isinstance()\n # method will always work.\n return [(idx, to_op)\n for idx, from_op in enumerate(ops)\n for to_op in to_ops(from_op)\n if to_op is None or not isinstance(from_op, to_op)]", "def as_ops(xs):\n return tuple(as_op(x) for x in xs)", "def compose_children(self):\n for l_symbol, l_info in self.matrix[self.i][self.k].items():\n l_rhs = Nonterminal(l_symbol)\n for r_symbol, r_info in self.matrix[self.k][self.j].items():\n r_rhs = Nonterminal(r_symbol)\n\n # check the subtrees in [i][k] and [k][j] to see if you can make a valid rhs\n potential_rules = [p for p in self.grammar.productions(rhs=l_rhs) if p.rhs()[1] == r_rhs]\n for potential_rule in sorted(potential_rules, key=lambda x: x.prob()):\n new_lhs = potential_rule.lhs().symbol()\n new_tree = Tree(new_lhs, [l_info[1], r_info[1]])\n new_prob = log(potential_rule.prob()) + l_info[0] + r_info[0]\n if new_lhs not in self.matrix[self.i][self.j] or new_prob > self.matrix[self.i][self.j][new_lhs][0]:\n self.matrix[self.i][self.j][new_lhs] = (new_prob, new_tree)", "def arithmetics(tree):\n\n def basic_function(func_name, args):\n expr = pmml.Apply(function=func_name)\n for a in args:\n expr.append(a)\n return expr\n\n def mod_function(args):\n expr = pmml.Apply(function='-')\n expr.append(args[0])\n mul = pmml.Apply(function='*')\n mul.append(args[1])\n floor = pmml.Apply(function='floor')\n mul.append(floor)\n div = pmml.Apply(function='/')\n floor.append(div)\n div.append(args[0])\n div.append(args[1])\n return expr\n\n # TODO: test me\n def greedy_evaluation(node):\n if isinstance(node, str):\n # field reference\n return (lambda df: df[node]), pmml.FieldRef(field=node)\n elif isinstance(node, (tuple, list)):\n # eval arguments\n args = map(greedy_evaluation, node[1:])\n functions = {\n '*': lambda df: np.multiply(*[_[0](df) for _ in args]),\n '-': lambda df: np.subtract(*[_[0](df) for _ in args]),\n '+': lambda df: np.add(*[_[0](df) for _ in args]),\n '/': lambda df: np.divide(*[_[0](df) for _ in args]),\n '%': lambda df: np.mod(*[_[0](df) for _ in args]),\n }\n assert isinstance(node[0], str), 'First element should be a code of operation'\n assert node[0] in functions, 'Unknown function code {}. 
Supported codes: {}'.format(node[0], functions.keys())\n expr = {\n '*': partial(basic_function, '*'),\n '-': partial(basic_function, '-'),\n '+': partial(basic_function, '+'),\n '/': partial(basic_function, '/'),\n '%': mod_function\n }.get(node[0])([a[1] for a in args])\n func = functions[node[0]]\n return func, expr\n else:\n # numeric terminal\n return lambda df: node, pmml.Constant(node, dataType='double')\n\n function, transformation = greedy_evaluation(tree)\n\n return {\n DerivedFeatureTransformations.TRANSFORMATION: transformation,\n DerivedFeatureTransformations.FUNCTION: function\n }", "def flatten(self, operands):\n if not self.is_associative(): return operands\n o = []\n for a in operands:\n if not isinstance(a, Expr):\n o.append(a)\n continue\n if a.function != self.function:\n o.append(a)\n continue\n else:\n o.extend(a.operands)\n return o", "def cartesian_product(G, H):\n GH = _init_product_graph(G, H)\n GH.add_nodes_from(_node_product(G, H))\n GH.add_edges_from(_edges_cross_nodes(G, H))\n GH.add_edges_from(_nodes_cross_edges(G, H))\n return GH", "def expand(expr, *args, **kwargs):\n def pop(s):\n if s:\n return [s.pop()]\n return s\n reduction_rules = [\n distributivity_distribute_multiplication,\n distributivity_distribute_division,\n ]\n return transform(\n expr, reduction_rules=reduction_rules, reduce_plugin=pop,\n multiprocessing=False).pop()", "def run_all(operations=ops):\n for operation in operations:\n run(operation)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test getting the agent name.
def test_get_agent_name(self): result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "config", "get", "agent.agent_name"], standalone_mode=False, catch_exceptions=False, ) assert result.exit_code == 0 assert result.output == "Agent0\n"
[ "def get_name(self):\n return self.agent_name", "def test_get_agents_names(self):\n pass", "def agent_name(self) -> str:\n return self.identity.name", "def test_get_agent(self):\n pass", "def agentName(self):\n return self.__class__.__name__", "def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")", "def getAgentName(platform, name):\n if not name:\n name = \"osprov_ogfs_agent\"\n platform = getShortName(platform)\n return \"%s-%s\" % (name, platform)", "def job_agent_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"job_agent_name\")", "def test_job_story_case_name(self):\n inquiry = Interface()\n inquiry.ask(\"What is this computer\\'s hostname?\")\n inquiry.teach(host_name())\n result = inquiry.ask(\"What is this computer\\'s hostname?\")\n self.assertEqual(result, socket.gethostname())", "def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])", "def get_agent_prop(agent, propname):\n return udcli(\"getAgentProperty -agent '%s' -name '%s'\" % (agent, propname))", "def GetTesterName(self):\n callResult = self._Call(\"GetTesterName\", )\n\n if callResult is None:\n return None\n\n return callResult", "def get_name(self):\n return self.bot_name", "def test_get_similar_agent(self):\n pass", "def robot_name(self):\n return self.__robot_name", "def test_get_full_name(self):\n\t\tself.assertEqual(self.resume.get_full_name(), \"Sharvil Jani\")", "def test_register_agent(self):\n pass", "def GetModernizedTestName(self, arg):\n return arg", "def testbed_name(self): \n return \"C-Lab\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test getting the 'dummy' skill name.
def test_get_skill_name(self): result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "config", "get", "skills.dummy.name"], standalone_mode=False, ) assert result.exit_code == 0 assert result.output == "dummy\n"
[ "def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1", "def sample_name(data):\n return get_sample_attr(data, \"name\")", "def fixture_microbial_sample_name():\n return \"microbial_name_test\"", "def test_get_dummy_sample_name():\n # GIVEN a raw sample name from the index file\n raw_sample_name = \"D10 - D710-D504 (TCCGCGAA-GGCTCTGA)\"\n\n # WHEN converting it to a dummy sample name\n dummy_sample_name: str = get_dummy_sample_name(sample_name=raw_sample_name)\n\n # THEN the name had spaces and parentheses replaced by dashes\n assert dummy_sample_name == \"D10---D710-D504--TCCGCGAA-GGCTCTGA-\"", "def test_badge_should_have_name(self):\n\n badge = self.get_sample_badge()\n self.assertIsInstance(badge.name, str)", "def test_get_full_name(self):\n\t\tself.assertEqual(self.resume.get_full_name(), \"Sharvil Jani\")", "def test_get_name_string_id_not_present(filled_names):\n assert filled_names.get_name_string(3) is None", "def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")", "def random_noun():\n return petname.Name()", "def test_empty_name(self):\n self.data[\"name\"] = None\n self.analyze()\n self.assert_failed(with_errors=True)", "def test_with_only_names(self, do_student_launch, student_payload):\n del student_payload[\"email\"]\n\n response = do_student_launch()\n\n assert_launched_as_student(response)", "def test_verify_profile_name_normal(get_msg, get_cfg):\n msg = get_msg\n cfg = get_cfg\n cfg.data['myProfile'] = {}\n verify_profile_name(msg, cfg)", "def get_skill_name(self, skill_id):\n return self.skill_names.get(skill_id, skill_id)", "def test_get_item_none(self):\n samp = model.Sample(\"s1\", \"desc\", 1, None, 1)\n self.assertIsNone(samp.__getitem__(\"not a key\"))", "def test_getting_animal_name(self):\n self.assertEqual(self.bob.get_name(), \"Bob\")\n self.assertNotEqual(self.bob.get_name(), \"bob\")\n self.assertNotEqual(self.bob.get_name(), \"bill\")", "def test_teams_name_name_exists_get(self):\n pass", "def test_lookup_first_name(self):\n result = self.env.run('phonebook ' + \\\n ('lookup Mary ') + \\\n ('-b %s/phonebook_fixture.pb' % self.prefix))\n expected_output = (\"Mary Anderson 572 932 1921\")\n nose.tools.assert_in(expected_output, result.stdout)", "def mock_username(self, sid):\n return \"aUsername\"", "def test_get_name_string_id_present(filled_names, name, name_id):\n assert filled_names.get_name_string(name_id) == name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the 'get' fails because the path is too short but the root is correct.
def test_too_short_path_but_root_correct(self): result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "config", "get", "agent"], standalone_mode=False ) assert result.exit_code == 1 assert ( result.exception.message == "The path is too short. Please specify a path up to an attribute name." ) result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "config", "get", "skills.dummy"], standalone_mode=False, ) assert result.exit_code == 1 assert ( result.exception.message == "The path is too short. Please specify a path up to an attribute name." )
[ "def test_get_invalid_path(self):\n yield self.start_server()\n url = \"%s/%s\" % (self.url, 'bad_path',)\n resp = yield http_request_full(method='GET', url=url)\n\n self.assertEqual(resp.code, http.NOT_FOUND)", "def test_root_get(self):\n pass", "def testInvalidPath(self):\n status, _ = self._http_get(\"invalid_path\")\n self.assertEqual(status, 404)", "def test_unit_get_by_path(self):\n pass", "def test_get_page_from_path(self):\n # Clearing cache\n get_page_from_path('/', clear_cache=True)\n\n # Check error handling\n self.assertRaisesMessage(\n TypeError,\n \"Path must be string\",\n get_page_from_path,\n [\"/\", \"/hem\"]\n )\n\n # Check root page lookups\n self.assertEqual(self.root, get_page_from_path('/'))\n self.assertEqual(self.root, get_page_from_path(''))\n\n # Check correct lookup for sub-page page\n self.assertEqual(self.child_1, get_page_from_path('/' + self.child_1.slug))\n\n # Check that cache works as intended (only one lookup when cached, multiple else).\n with self.assertRaises(self.failureException):\n self.assertNumQueries(1, get_page_from_path, '', {'clear_cache': True})\n self.assertNumQueries(1, get_page_from_path, '')", "def test_adapter_get_bad_path(self, test_sysstatus_adapter):\n bad_path = '/bad/path'\n expected_response = {'error': 'Invalid path: {}'.format(bad_path)}\n\n response = test_sysstatus_adapter.adapter.get(bad_path, test_sysstatus_adapter.request)\n\n assert response.data == expected_response\n assert response.status_code == 400", "def testCheckAbsolutePath(self):\n p=\"/tmp\"\n self.assertRaises(ValidationError, checkrelpath, p)", "def testCheckRelativePathWithNonObviousRefToEnclosingDir(self):\n p=\"some/../../relative/./path\"\n self.assertRaises(ValidationError, checkrelpath, p)", "def test_validate_value_fail_acces_denied_to_root(self):\n path_parm_serializer = PathParameterSerializer(user=self.user)\n with self.assertRaises(serializers.ValidationError):\n path_parm_serializer.validate_value(\"./\")\n with self.assertRaises(serializers.ValidationError):\n path_parm_serializer.validate_value(\".\")\n with self.assertRaises(serializers.ValidationError):\n path_parm_serializer.validate_value(\"/\")", "def test_path_too_long(self):\n page1 = self.new_page(content={'slug': 'page1'})\n page2 = self.new_page(content={'slug': 'page2'})\n from basic_cms import urlconf_registry as reg\n reg.register_urlconf('test', 'basic_cms.testproj.documents.urls',\n label='test')\n page2.delegate_to = 'test'\n page1.delegate_to = 'test'\n page1.save()\n page2.save()\n page2.parent = page1\n page2.save()\n\n from basic_cms.testproj.documents.models import Document\n doc = Document(title='doc title 1', text='text', page=page1)\n doc.save()\n\n req = get_request_mock()\n self.set_setting(\"PAGE_HIDE_ROOT_SLUG\", False)\n page1.invalidate()\n page2.invalidate()\n\n def _get_context_page(path):\n return details(req, path, 'en-us')\n self.assertEqual(_get_context_page('/').status_code, 200)\n self.assertEqual(_get_context_page('/page1/').status_code, 200)\n self.assertEqual(_get_context_page('/page1/').status_code, 200)\n self.assertEqual(_get_context_page('/page1/page2').status_code, 301)\n self.assertEqual(_get_context_page('/page1/page2/').status_code, 301)\n self.assertEqual(_get_context_page('/page1/page2/doc-%d' % doc.id\n ).status_code, 301)\n self.assertRaises(Http404, _get_context_page,\n '/page1/page-wrong/doc-%d' % doc.id)\n\n reg.registry = []", "def validate_short_path(short_path):", "def test_validate_value_fail_acces_denied_to_root(self):\n 
path_parm_serializer = UnextpathParameterSerializer(user=self.user)\n with self.assertRaises(serializers.ValidationError):\n path_parm_serializer.validate_value(\"./\")\n with self.assertRaises(serializers.ValidationError):\n path_parm_serializer.validate_value(\".\")\n with self.assertRaises(serializers.ValidationError):\n path_parm_serializer.validate_value(\"/\")", "def test_read_value_from_path_not_exist(tmp_path: Path) -> None:\n with pytest.raises(ValueError):\n read_value_from_path(f\"file://{(tmp_path / 'something.txt').absolute()}\")", "def testNonExistentRootPath(self):\n\n file_defs = [\n {'name': 'file_1_byte.txt', 'path': '', 'size': 1, 'mod_inc': 1},\n\n # Empty directories\n {'name': 'empty_dir1', 'path': '', 'size': -1},\n {'name': 'empty_dir2', 'path': 'empty_dir1', 'size': -1},\n {'name': 'empty_dir3', 'path': 'empty_dir1/empty_dir2', 'size': -1},\n ]\n\n # All new files\n self._setup_test_store(file_defs)\n self._sync_drives()\n\n drive = self.drive_class(self.account_id, self.config_file_dir, self.config_pw)\n\n with self.assertRaises(ValueError):\n for res in drive.get_root_file_tree('empty_dir1/empty_dir45'):\n pass", "def test_get_not_exist(self):\n attempt_id = 9999\n _, err = self.resource.get(attempt_id)\n self.assertEqual(404, err)", "def test_get_root_paths(self):\n pass", "def test_two_legged_get(self):\n resp, content = self._two_legged(\"GET\")\n self.assertEqual(int(resp['status']), 200)", "def test_geturl_purpose(self):\n self.fs.create('foo')\n with self.assertRaises(errors.NoURL):\n self.fs.geturl('foo', '__nosuchpurpose__')", "def testBadPath(self):\n task = ReadTextCatalogTask()\n badPath = \"does/not/exists.garbage\"\n with self.assertRaises(IOError):\n task.run(badPath)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that getting a nested object in the 'dummy' skill fails because the path is not valid.
def test_get_fails_when_getting_nested_object(self): with pytest.raises( ClickException, match=r"Attribute `.* for .* config does not exist" ): self.runner.invoke( cli, [ *CLI_LOG_OPTION, "config", "get", "skills.dummy.non_existing_attribute.dummy", ], standalone_mode=False, catch_exceptions=False, )
[ "def test_get_object_nested_dotted(basic_object, basic_object_value):\n acc = Accessor(getter=\"value.key.key\")\n assert acc.get(basic_object) == \"value\"", "def test_contextpath_getattr_readable():\n assert ContextPath() == ContextPath(\"$$\")\n assert ContextPath().Execution == ContextPath(\"$$.Execution\")\n assert ContextPath().Map.Item.Index == ContextPath(\"$$.Map.Item.Index\")\n assert ContextPath().Execution.Input.foo.bar.baz == ContextPath(\"$$.Execution.Input.foo.bar.baz\")", "def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_access_nested_map_exception(self, nested_map, path, expected):\n self.assertRaises(KeyError)", "def test_nested_objf(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"bc\":2}}')\n self.assertFalse(check_json_object(jdic, jobj))", "def test_unit_get_by_path(self):\n pass", "def test_nested_obj(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"b\":2}}')\n self.assertTrue(check_json_object(jdic, jobj))", "def test_resolve_obj_nested_dict():\n r = resolve('#/a/b/c', t_obj)\n assert r == [1,2,3]", "def test_too_short_path_but_root_correct(self):\n result = self.runner.invoke(\n cli, [*CLI_LOG_OPTION, \"config\", \"get\", \"agent\"], standalone_mode=False\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. 
Please specify a path up to an attribute name.\"\n )", "def test_deep_path(self):\n eq_(linked_pathname('hey/thankyou', 'code'),\n [('/code/source', 'code'),\n ('/code/source/hey', 'hey'),\n ('/code/source/hey/thankyou', 'thankyou')])", "def test_pod_invalid_parent(self):\n session = self.login_to_apic()\n parent = Node('1','101','Switch')\n self.assertRaises(TypeError, Pod.get, session, parent)", "def _get_object(self, path):\n if path == \"/\":\n return self.target\n\n parts = path[1:].split(\"/\")\n last = self.target\n for part in parts:\n if type(last) == dict:\n last = last[part]\n else:\n last = getattr(last, \"get_\" + part)()\n return last", "def test_diystage_path_none(self):\n with pytest.raises(ValueError):\n DIYStage(None)", "def get_node(obj, path):\n subobj = obj\n indices = []\n for item in path:\n try:\n subobj = subobj[item]\n except Exception as e:\n indices.append(item)\n subobj, indices = _select(subobj, indices)\n if isinstance(subobj, dict) or (isinstance(subobj, list) and \n subobj and \n isinstance(subobj[0], dict)):\n return Wrapped(obj, path)\n else:\n assert not indices, \"This path does not exist.\"\n return subobj", "def test_load_level_first(self):\n\n self.assertRaises(SerializerReadException, self.s.load_level)", "def test_custom_paths(self):\n hpo_pipeline_result = self._help_test_hpo(\n study_name=\"HPO with custom triples paths\",\n training=NATIONS_TRAIN_PATH, # mock \"custom\" paths\n testing=NATIONS_TEST_PATH,\n validation=NATIONS_VALIDATE_PATH,\n )\n self.assertNotIn(\"dataset\", hpo_pipeline_result.study.user_attrs)\n # Since paths were passed for training, testing, and validation,\n # they should be stored as study-level attributes\n self.assertIn(\"training\", hpo_pipeline_result.study.user_attrs)\n self.assertEqual(str(NATIONS_TRAIN_PATH), hpo_pipeline_result.study.user_attrs[\"training\"])\n self.assertIn(\"testing\", hpo_pipeline_result.study.user_attrs)\n self.assertEqual(str(NATIONS_TEST_PATH), hpo_pipeline_result.study.user_attrs[\"testing\"])\n self.assertIn(\"validation\", hpo_pipeline_result.study.user_attrs)\n self.assertEqual(str(NATIONS_VALIDATE_PATH), hpo_pipeline_result.study.user_attrs[\"validation\"])", "def test_add_path(self):\n path = 'C:\\\\test\\\\'\n info = self.api.add_path(path, tags=['asd'])\n self.assertEqual(info['value'], path)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])", "def test_anatomicalstructures_get(self):\n pass", "def testNoPathsRemain(self):\n for directory in self.skinstool.objectValues():\n self.failIf(directory.getDirPath().startswith(\"Products.NuPlone:\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that getting a vendor component with a wrong component type raises an error.
def test_get_fails_when_getting_vendor_dependency_with_wrong_component_type(self): result = self.runner.invoke( cli, [ *CLI_LOG_OPTION, "config", "get", "vendor.fetchai.component_type_not_correct.error.non_existing_attribute", ], standalone_mode=False, ) assert result.exit_code == 1 s = "'component_type_not_correct' is not a valid component type. Please use one of ['protocols', 'connections', 'skills', 'contracts']." assert result.exception.message == s
[ "def test_register_component_with_invalid_type():\n\n with pytest.raises(InvalidComponentTypeError):\n component = CoreObject()\n application_services.register_component(component)", "def test_get_component_doesnt_exist(composite):\n ct = composite()\n with pytest.raises(SarasvatiException) as ex:\n ct.get_component(\"no\")\n assert ex.value.message == \"Component 'no' not found for 'MyComposite'\"", "def test_wrong_component(self):\n msg = self._create_message(self.adt_a01)\n unkn_component = Component(version='2.6')\n msg.msh.msh_9.add(unkn_component)\n self.assertRaises(ValidationError, msg.validate, report_file=self.report_file)\n self._test_report_file('ERROR')", "def test_remove_component_invalid():\n\n with pytest.raises(ComponentAttributeError):\n application_services.get_component('missing_component_to_remove')", "def test_register_component_with_invalid_name():\n\n with pytest.raises(InvalidComponentNameError):\n component = ComponentWithInvalidNameMock('')\n application_services.register_component(component)", "def test_get_asset_component():\n assetpack = get_test_assetpack()\n asset = ddl.asset_exploration.get_asset(assetpack, \"Component: test.a\")\n assert isinstance(asset, ComponentAsset)", "def test_get_component_with_invalid_custom_key():\n\n component = ComponentWithInvalidCustomKeyMock('component_with_invalid_key')\n custom_component = DuplicateComponentWithInvalidCustomKeyMock('component_with_invalid_key',\n component_custom_key=3000)\n application_services.register_component(component)\n application_services.register_component(custom_component)\n assert application_services.get_component('component_with_invalid_key',\n component_custom_key=999) == component\n\n application_services.remove_component(component.get_id())\n application_services.remove_component(custom_component.get_id())", "def test_register_component_with_invalid_type_only_manager():\n\n with pytest.raises(InvalidComponentTypeError):\n component = OnlyManagerMock()\n application_services.register_component(component)", "def test_invalid_package_type():\n\n err = ErrorBundle()\n err.detected_type = PACKAGE_ANY\n assert conduit.test_conduittoolbar(err) is None\n err.detected_type = PACKAGE_THEME\n assert conduit.test_conduittoolbar(err) is None\n err.detected_type = PACKAGE_SEARCHPROV\n assert conduit.test_conduittoolbar(err) is None", "def test_component_loading(self):\n # we only have two component\n self.assertEqual(1, len(self.component_manager.components))\n self.assertTrue(TestDriver in self.component_manager)\n \n # make sure it is the right one!\n self.assertEqual(self.component, self.component_manager[TestDriver])\n \n def test_not_component_exception():\n \"\"\"\n Raises exception when called\n \"\"\"\n self.component_manager[TestNotLoadableComponant]\n self.assertRaises(core.TicError, test_not_component_exception)\n \n def test_exception_on_init():\n \"\"\"\n TODOC\n \"\"\"\n self.component_manager[TestErrorOnInitComponant]\n self.assertRaises(core.TicError, test_exception_on_init)", "def test_wrong_component(self):\n msg = self._create_message(self.rsp_k21)\n unkn_component = Component()\n msg.msh.msh_9.add(unkn_component)\n self.assertRaises(ValidationError, msg.validate, report_file=self.report_file)\n self._test_report_file('ERROR')", "def test_component_loading_component_exception(component_configuration):\n\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=AEAComponentLoadException(\"Generic exception\"),\n ):\n with pytest.raises(\n AEAPackageLoadingError,\n 
match=\"Package loading error: An error occurred while loading protocol an_author/a_protocol:0.1.0: Generic exception\",\n ):\n load_component_from_config(component_configuration)", "def test_component_without_owner_is_trac_error(self):\n # We create an instance of the panel so we can check existing values\n panel = ComponentAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_component_list(), self.default['component'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'component',\n ','.join(self.new['component']))\n\n # we purposely forget to add component_owner to config\n # and run the plugin expecting a TracError\n admin_command = TicketFieldConfigCommand(self.env)\n self.assertRaises(TracError,admin_command.set_fields_from_config)", "def test_vendor_http_error(response_with_error):\n\n logging.debug('First run')\n http_error = HttpError('Foo bar', response_with_error)\n assert isinstance(http_error, HttpError)\n assert str(http_error) == 'Foo bar'\n\n logging.debug('Second run')\n HttpError.VendorType = SAP.BusinessGatewayError\n sap_error = HttpError('Another foo bar', response_with_error)\n assert isinstance(sap_error, SAP.BusinessGatewayError)\n assert str(sap_error) == 'Gateway Error'", "def test_component_loading_module_not_found_error_non_framework_package(\n component_configuration,\n):\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\"No module named 'generic.package'\"),\n ):\n with pytest.raises(ModuleNotFoundError):\n load_component_from_config(component_configuration)", "def test_invalid_device_type():\n _aws_device(wires=2, device_type=\"foo\", shots=None)", "def test_upgrade_non_vendor(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:100.0.0\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_component_loading_module_not_found_error_framework_package_with_wrong_author(\n component_configuration,\n):\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\"No module named 'packages.some_author'\"),\n ):\n with pytest.raises(\n AEAPackageLoadingError,\n match=\"No module named packages.some_author; No AEA package found with author name 'some_author'\",\n ):\n load_component_from_config(component_configuration)", "def test_component_loading_generic_module_not_found_error(component_configuration):\n\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\n \"Package loading error: An error occurred while loading .*: Generic error\"\n ),\n ):\n with pytest.raises(ModuleNotFoundError, match=\"Generic error\"):\n load_component_from_config(component_configuration)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test setting the agent name.
def test_set_agent_incorrect_value(self): with pytest.raises( ClickException, match="Attribute `not_agent_name` is not allowed to be updated!", ): self.runner.invoke( cli, [*CLI_LOG_OPTION, "config", "set", "agent.not_agent_name", "new_name"], standalone_mode=False, catch_exceptions=False, )
[ "def test_get_agent_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.agent_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"Agent0\\n\"", "def test_get_agents_names(self):\n pass", "def agent_name(self) -> str:\n return self.identity.name", "def get_name(self):\n return self.agent_name", "def change_name(self):\n if self.agent and self.agent.name != \"Agent\":\n for action in self._actions:\n split = action.description.split()\n if split[0] == \"The\" and split[1] == \"agent\":\n action.description.replace(\"The agent\", self.agent.name)", "def test_get_agent(self):\n pass", "def test_register_agent(self):\n pass", "def agentName(self):\n return self.__class__.__name__", "def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")", "def test_set_name(self):\n test_instance = RosCCClass(\"tIdentifier\")\n\n test_name = \"tName\"\n test_instance.set_name(test_name)\n\n self.assertEqual(test_name, test_instance.name)", "def test_change_name_of_the_devicetrue():", "def test_set_name_method(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.set_name(given)\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)", "def configure_isystemname(self, context, systemname):\n\n LOG.debug(\"configure_isystemname: sending systemname to agent(s)\")\n rpcapi = agent_rpcapi.AgentAPI()\n rpcapi.configure_isystemname(context, systemname=systemname)\n\n return", "def test_job_story_case_name(self):\n inquiry = Interface()\n inquiry.ask(\"What is this computer\\'s hostname?\")\n inquiry.teach(host_name())\n result = inquiry.ask(\"What is this computer\\'s hostname?\")\n self.assertEqual(result, socket.gethostname())", "def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])", "async def setname(self, ctx, new: str):\n await self.bot.user.edit(username=new)\n logger.info('Bot name set to: {}'.format(new))", "def getAgentName(platform, name):\n if not name:\n name = \"osprov_ogfs_agent\"\n platform = getShortName(platform)\n return \"%s-%s\" % (name, platform)", "def test_change_name_of_the_devicefalse():", "def set_agent_prop(agent, propname, propvalue, secure=False):\n command = \"setAgentProperty -agent '%s' -name '%s' -value '%s'\" % (agent, propname, propvalue)\n if secure:\n command += ' -isSecure true'\n udcli(command)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test setting the 'dummy' skill name.
def test_set_skill_name_should_fail(self): result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "config", "set", "skills.dummy.name", "new_dummy_name"], standalone_mode=False, ) assert result.exit_code == 1
[ "def test_get_skill_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy.name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"dummy\\n\"", "def test_empty_name(self):\n self.data[\"name\"] = None\n self.analyze()\n self.assert_failed(with_errors=True)", "def test_name_empty_string(self):\r\n self.name = \"\"", "def fixture_microbial_sample_name():\n return \"microbial_name_test\"", "def test_name_false(self):\r\n self.name = False", "def is_dummy_name(self):\n return ida_bytes.has_dummy_name(self.flags)", "def sample_name(data):\n return get_sample_attr(data, \"name\")", "def BossPhaseExcelAddNormalAttackSkillUniqueName(builder, NormalAttackSkillUniqueName):\n return AddNormalAttackSkillUniqueName(builder, NormalAttackSkillUniqueName)", "def test_change_name_of_the_devicefalse():", "def test_change_name_of_the_devicetrue():", "def test_with_only_names(self, do_student_launch, student_payload):\n del student_payload[\"email\"]\n\n response = do_student_launch()\n\n assert_launched_as_student(response)", "def test_get_dummy_sample_name():\n # GIVEN a raw sample name from the index file\n raw_sample_name = \"D10 - D710-D504 (TCCGCGAA-GGCTCTGA)\"\n\n # WHEN converting it to a dummy sample name\n dummy_sample_name: str = get_dummy_sample_name(sample_name=raw_sample_name)\n\n # THEN the name had spaces and parentheses replaced by dashes\n assert dummy_sample_name == \"D10---D710-D504--TCCGCGAA-GGCTCTGA-\"", "def test_verify_profile_name_normal(get_msg, get_cfg):\n msg = get_msg\n cfg = get_cfg\n cfg.data['myProfile'] = {}\n verify_profile_name(msg, cfg)", "def test_create_skill_without_auth(self):\n url = '/api/v1/skills/'\n data = {\n 'category': 1,\n 'description': 'test category'\n }\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_selection_name(self):\n skill = create_skill()\n skill.speak = mock.Mock()\n skill.get_response = mock.Mock()\n\n skill.get_response.return_value = 'octopus'\n\n options = ['a balloon', 'an octopus', 'a piano']\n response = skill.ask_selection(options, 'which is better')\n self.assertEqual(options[1], response)\n\n # Assert that the spoken sentence contains all options.\n spoken_sentence = skill.speak.call_args[0][0]\n for opt in options:\n self.assertTrue(opt in spoken_sentence)", "def test_name(self):\n inst = Amenity()\n self.assertTrue(hasattr(inst, \"name\"))\n self.assertEqual(inst.name, \"\")", "def set_testname(val):\n global testname\n testname = val\n print()\n print('* Test:', testname)\n print()", "def test_first_name(self, unromanized, romanized, expected):\n with mute_signals(post_save):\n profile = ExamProfileFactory(\n profile__first_name=unromanized,\n profile__romanized_first_name=romanized,\n )\n assert CDDWriter.first_name(profile) == expected", "def test_set_name_attribute(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.name = given\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test setting a nested attribute.
def test_set_nested_attribute(self): path = "skills.dummy.behaviours.dummy.args.behaviour_arg_1" new_value = "10" # cause old value is int result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "config", "set", path, new_value], standalone_mode=False, catch_exceptions=False, ) assert result.exit_code == 0 result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "config", "get", path], standalone_mode=False, catch_exceptions=False, ) assert result.exit_code == 0 assert new_value in result.output
[ "def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_setattr_property(self) -> None:\n obj = SampleConfigComponentDefinition.parse_obj({\"name\": \"test\"})\n with pytest.raises(AttributeError):\n obj.no_set = \"new value\" # type: ignore\n assert obj.can_set == \"test.can_set\"\n obj.can_set = \"new\"\n assert obj.can_set == \"new.can_set\"", "def test_update_attribute_data(self):\n pass", "def test_set_attribute(self):\n measurement = self.set_entity_attribute()\n entity = measurement[\"sources\"][0][\"entity_user_data\"][\"entity_key\"]\n self.assertEqual({\"attribute\": \"value\"}, entity)\n self.assertEqual(\n {\n \"description\": \"John Doe changed the attribute of 'entity title/foo/None' from '' to 'value'.\",\n \"email\": JOHN[\"email\"],\n \"uuids\": [REPORT_ID, SUBJECT_ID, METRIC_ID, SOURCE_ID],\n },\n measurement[\"delta\"],\n )", "def test_documented_attributes_writable(hlwm, clsname, object_path, json_doc):\n object_path = object_path(hlwm)\n for _, attr in json_doc['objects'][clsname]['attributes'].items():\n print(\"checking attribute {}::{}\".format(clsname, attr['cpp_name']))\n full_attr_path = '{}.{}'.format(object_path, attr['name']).lstrip('.')\n value = hlwm.get_attr(full_attr_path)\n if value == 'default':\n continue\n if attr['writable']:\n hlwm.call(['set_attr', full_attr_path, value])\n else:\n hlwm.call_xfail(['set_attr', full_attr_path, value]) \\\n .expect_stderr('attribute is read-only')", "def testSetAttributeAction(self):\n\t action = SetAttributeAction('x', 'y', ('key',), 'z')\n\t self.failUnless(action.field == 'y')\n\t self.failUnless(action.value == 'z')", "def setattr_nested(obj, attributes, value):\n pre, _, post = attributes.rpartition(\".\")\n setattr(getattr_nested(obj, pre) if pre else obj, post, value)", "def test_setAttribute():\n\n assert not _do_test_raw(\"\"\"\n var x = \"foo\";\n x.setAttribute();\n x.setAttribute(\"foo\");\n x.setAttribute(\"foo\", \"bar\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.setAttribute(\"onfoo\", \"bar\");\n \"\"\").failed()", "def test_setter(self):\n setter = self.analyzer.get_object(['settable'])\n assert isinstance(setter, Attribute)\n assert setter.type == 'string'", "def test_rw_callable_nested_param_set(self, test_rw_tree):\n new_float_value = test_rw_tree.nested_rw_param + 2.3456\n test_rw_tree.rw_callable_tree.set('branch/nestedRwParam', new_float_value)\n assert test_rw_tree.nested_rw_param == new_float_value", "def set_nested_attr(__obj: object, __name: str, __value: Any):\n pre, _, post = __name.rpartition('.')\n return setattr(get_nested_attr(__obj, pre) if pre else __obj, post, __value)", "def test_set_attribute(self, attr1, value1):\n self.assertTrue(self.client.set_attribute(self.app_name, attr1, value1))\n self.assertTrue(self.client.delete_attribute(self.app_name, attr1))", "def test_relativistic_body_setters(attr_to_set, set_value, attr_to_test, expected):\r\n relativistic_body = RelativisticBody(proton, v_over_c=0.1)\r\n setattr(relativistic_body, attr_to_set, set_value)\r\n\r\n actual = getattr(relativistic_body, attr_to_test)\r\n\r\n assert u.isclose(expected, actual, rtol=1e-8), (\r\n f\"When setting {attr_to_set} to 
{set_value!r} in a \"\r\n f\"RelativisticBody instance, the value of {attr_to_test} was \"\r\n f\"expected to be {expected!r}, but was instead {actual!r}.\"\r\n )", "def test_set_key_attr(self):\n ld = LabelledDict(label='all_objects')\n with self.assertRaises(AttributeError):\n ld.key_attr = 'another_name'", "def set(self, attr_name, value):\n return self.parent._set_spec_attr(spec_constants[attr_name], value, grating_number = self.grating_number)", "def traverse_setter(obj, attribute, value):\n obj.traverse(lambda x: setattr(x, attribute, value))", "def testSetParent(self):\n def setParent():\n self.node.parent = 'banana'\n\n self.assertRaises(\n AttributeError,\n setParent\n )", "def test_set_field(self):\n pass", "def test_attribute_access(self):\n cd = ConfigDict()\n\n cd['x'] = 1\n self.assertEquals(cd.x, 1)\n\n cd.y = 2\n self.assertEquals(cd['y'], 2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that setting the 'dummy' skill behaviours fails because it is not a primitive type.
def test_set_fails_when_setting_non_primitive_type(self): with pytest.raises( ClickException, match="Attribute `behaviours` is not allowed to be updated!" ): self.runner.invoke( cli, [*CLI_LOG_OPTION, "config", "set", "skills.dummy.behaviours", "value"], standalone_mode=False, catch_exceptions=False, )
[ "def _dummy(self):\n pass", "def test_categorical_disallow_special_values() -> None:\n json_config = \"\"\"\n {\n \"type\": \"categorical\",\n \"values\": [\"foo\", \"bar\", \"foo\"],\n \"special\": [\"baz\"],\n \"default\": \"foo\"\n }\n \"\"\"\n config = json.loads(json_config)\n with pytest.raises(ValueError):\n Tunable(name='test', config=config)", "def testTypeSingle(self):\n prop = make_prop(kind=bool)\n with self.assertRaises(TypeError):\n prop.interpret(1, {})\n\n self.assertEqual(True, prop.interpret(True, {}))", "def test_numerical_tunable_disallow_null_default(tunable_type: str) -> None:\n with pytest.raises(ValueError):\n Tunable(name=f'test_{tunable_type}', config={\n \"type\": tunable_type,\n \"range\": [0, 10],\n \"default\": None,\n })", "def test_bad_input(self):\n # setting a non-boolean input should throw an AssertionError.\n w1_name = \"w1\"\n w1 = sw.StealthInputWire(w1_name, None)\n self.assertRaises(AssertionError, w1.evaluate)\n w1 = sw.StealthInputWire(w1_name, \"this_is_a_string\")\n self.assertRaises(AssertionError, w1.evaluate)", "def test__generate_wealth_positive(self, *mocks):\n ctx = ContextMock()\n _try_generate_wealth(ctx, \"type\", True)", "def test_nondefault_handtype(self):\n player = Player(HandWithMelds)\n self.assertIsInstance(player, Player)", "def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1", "def test_pluggable_type(self):\n assert True", "def test_default_product_stealability(self):\n prod = Product('Test Product')\n self.assertEqual(prod.stealability(), \"Kinda stealable.\")", "def test_badyvaluewithbools(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, True, 3)\n self.assertEqual(str(e.exception), 'y must be an integer')", "def test_init_unset(self):\n boolean = primitives.Boolean()\n self.assertEqual(True, boolean.value)", "def test_no_wires(self):\n\n class DummyOp(qml.operation.Operator):\n num_wires = 1\n num_params = 1\n\n with pytest.raises(ValueError, match=\"Must specify the wires\"):\n DummyOp(1.234)", "def test_categorical_tunable_disallow_repeats() -> None:\n with pytest.raises(ValueError):\n Tunable(name='test', config={\n \"type\": \"categorical\",\n \"values\": [\"foo\", \"bar\", \"foo\"],\n \"default\": \"foo\",\n })", "def testNoSpecialties(self):\n self.failUnlessEqual(self.person.getSpecialties(), [])", "def test_default_sound_system(self):\n\n self.assertFalse(self.mc.machine_config['sound_system']['enabled'])\n self.assertIsNone(self.mc.sound_system)", "def test_018(self):\n page = Page()\n with pytest.raises(TypeError):\n page.classification = 10", "def test_num_wires_default_any_wires(self):\n\n class DummyOp(qml.operation.Operator):\n r\"\"\"Dummy custom operator\"\"\"\n\n assert DummyOp.num_wires == qml.operation.AnyWires\n assert Operator.num_wires == qml.operation.AnyWires", "def test_unknown_action(self):\n self.assertFalse(self.animal.do_something(action=\"play\"))\n self.assertFalse(self.animal.do_something(action=\"jump\"))\n self.assertFalse(self.animal.do_something(action=\"think\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that setting a nested object in the 'dummy' skill fails because the path is not valid.
def test_get_fails_when_setting_nested_object(self): with pytest.raises( ClickException, match=r"Attribute `non_existing_attribute.dummy` is not allowed to be updated!", ): self.runner.invoke( cli, [ *CLI_LOG_OPTION, "config", "set", "skills.dummy.non_existing_attribute.dummy", "new_value", ], standalone_mode=False, catch_exceptions=False, )
[ "def test_utils_set_dict_value_from_path_creating_new_fields():\n dictionary = {}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}", "def test_utils_set_dict_value_from_path_updating_fields():\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}", "def test_complex_tree_set_invalid_path(self, test_param_tree):\n invalid_path = 'invalidPath/toNothing'\n\n with pytest.raises(ParameterTreeError) as excinfo:\n test_param_tree.complex_tree.set(invalid_path, 0)\n\n assert 'Invalid path: {}'.format(invalid_path) in str(excinfo.value)", "def test_set_nested_attribute(self):\n path = \"skills.dummy.behaviours.dummy.args.behaviour_arg_1\"\n new_value = \"10\" # cause old value is int\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", path],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert new_value in result.output", "def test_set_with_shallow_path():\n shallow_key_path = 'shallow_key_path'\n test_value = 'shallow key path value'\n\n config.set(shallow_key_path, test_value)\n assert config.get(shallow_key_path) == test_value", "def test_access_nested_map_exception(self, nested_map, path, expected):\n self.assertRaises(KeyError)", "def test_get_fails_when_getting_nested_object(self):\n with pytest.raises(\n ClickException, match=r\"Attribute `.* for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.non_existing_attribute.dummy\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_set_with_extra_branch_paths(self, test_param_tree):\n branch_data = deepcopy(test_param_tree.nested_dict['branch'])\n branch_data['extraParam'] = 'oops'\n\n with pytest.raises(ParameterTreeError) as excinfo:\n test_param_tree.complex_tree.set('branch', branch_data)\n\n assert 'Invalid path' in str(excinfo.value)", "def test_set_with_deep_key_path_with_string():\n deep_key_path = 'deep.key.path'\n test_value = 'deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('deep'), dict)\n assert config.get(deep_key_path) == test_value", "def test_nested_objf(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"bc\":2}}')\n self.assertFalse(check_json_object(jdic, jobj))", "def set_recursively(structure, path, value):\n\n path = path.split('.')\n lastkey = path.pop()\n\n for key in path:\n if not key in structure or not isinstance(structure[key], dict):\n structure[key] = {}\n structure = structure[key]\n\n structure[lastkey] = value", "def test_nested_obj(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"b\":2}}')\n self.assertTrue(check_json_object(jdic, jobj))", "def test_get_object_nested_dotted(basic_object, basic_object_value):\n acc = Accessor(getter=\"value.key.key\")\n assert acc.get(basic_object) == \"value\"", "def test_add_path(self):\n path = 
'C:\\\\test\\\\'\n info = self.api.add_path(path, tags=['asd'])\n self.assertEqual(info['value'], path)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])", "def testInitialize(self):\n path_spec = tsk_path_spec.TSKPathSpec(\n location=u'/test', parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = tsk_path_spec.TSKPathSpec(\n data_stream=u'test', location=u'/test', parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = tsk_path_spec.TSKPathSpec(\n inode=1, parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = tsk_path_spec.TSKPathSpec(\n location=u'/test', inode=1, parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(location=u'/test', parent=None)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(location=None, parent=self._path_spec)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(inode=None, parent=self._path_spec)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(\n location=u'/test', parent=self._path_spec, bogus=u'BOGUS')", "def test_too_short_path_but_root_correct(self):\n result = self.runner.invoke(\n cli, [*CLI_LOG_OPTION, \"config\", \"get\", \"agent\"], standalone_mode=False\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )", "def test_set_with_deep_key_path_with_list():\n deep_key_path = ('second', 'deep', 'key', 'path')\n test_value = 'second deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('second'), dict)\n assert config.get(deep_key_path) == test_value", "def test_011(self):\n page = Page()\n with pytest.raises(FileNotFoundError):\n page.path = 'nonexist.txt'", "def test_set_without_path_sets_the_root(self):\n mock_config = {'foo': 'bar'}\n root_config = Config()\n root_config.set(value=mock_config)\n self.assertDictEqual(root_config.get(), mock_config)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a component value is updated in the agent config, not in the component config.
def test_set_get_correct_path(self): agent_config = self.load_agent_config() assert not agent_config.component_configurations config_value = self.get_component_config_value() assert config_value == self.INITIAL_VALUE result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "config", "get", self.PATH], standalone_mode=False, catch_exceptions=False, ) assert result.exit_code == 0 assert str(self.INITIAL_VALUE) in result.output result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "config", "set", self.PATH, str(self.NEW_VALUE)], standalone_mode=False, catch_exceptions=False, ) assert result.exit_code == 0 config_value = self.get_component_config_value() assert config_value == self.INITIAL_VALUE result = self.runner.invoke( cli, [*CLI_LOG_OPTION, "config", "get", self.PATH], standalone_mode=False, catch_exceptions=False, ) assert result.exit_code == 0 assert str(self.NEW_VALUE) in result.output agent_config = self.load_agent_config() assert agent_config.component_configurations
[ "def test_component_configuration_removed_from_agent_config(self):\n with cd(self._get_cwd()):\n self.run_cli_command(\n \"add\", \"--local\", self.ITEM_TYPE, str(self.ITEM_PUBLIC_ID)\n )\n self.run_cli_command(\"add\", \"--local\", \"connection\", \"fetchai/http_server\")\n\n self.runner.invoke(\n cli,\n [\n \"config\",\n \"set\",\n \"vendor.fetchai.connections.soef.config.api_key\",\n \"some_api_key\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )\n self.runner.invoke(\n cli,\n [\n \"config\",\n \"set\",\n \"vendor.fetchai.connections.http_server.config.port\",\n \"9000\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )\n config = self.load_config()\n assert config.component_configurations\n assert (\n PackageId(self.ITEM_TYPE, self.ITEM_PUBLIC_ID)\n in config.component_configurations\n )\n\n self.run_cli_command(\"remove\", self.ITEM_TYPE, str(self.ITEM_PUBLIC_ID))\n\n config = self.load_config()\n assert (\n PackageId(self.ITEM_TYPE, self.ITEM_PUBLIC_ID)\n not in config.component_configurations\n )\n assert config.component_configurations", "def test_config_update(get_config):\n cfg = get_config(Config, {'test': 'main'})\n update_from = {\"name\": \"new_name\"}\n cfg.update(update_from)\n\n assert cfg.data.get('name') == \"new_name\", \"config was not updated\"", "def test_update_room_configuration_attribute(self):\n pass", "def test_agent_config_updated(self):\n loader = ConfigLoader.from_configuration_type(PackageType.AGENT)\n with Path(self._get_cwd(), DEFAULT_AEA_CONFIG_FILE).open() as fp:\n agent_config = loader.load(fp)\n assert DefaultMessage.protocol_id in agent_config.protocols\n assert ERROR_SKILL_PUBLIC_ID in agent_config.skills", "def test_update_setting(self):\n pass", "def test_change_configuration_property(self) -> None:\n\n self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True)\n csrf_token = self.get_new_csrf_token()\n new_config_value = False\n\n response_dict = self.get_json('/adminhandler')\n response_config_properties = response_dict['config_properties']\n self.assertDictContainsSubset({\n 'value': False,\n }, response_config_properties[\n config_domain.\n ENABLE_ADMIN_NOTIFICATIONS_FOR_REVIEWER_SHORTAGE.name])\n\n payload = {\n 'action': 'save_config_properties',\n 'new_config_property_values': {\n config_domain.\n ENABLE_ADMIN_NOTIFICATIONS_FOR_REVIEWER_SHORTAGE.name: (\n new_config_value),\n }\n }\n self.post_json('/adminhandler', payload, csrf_token=csrf_token)\n\n response_dict = self.get_json('/adminhandler')\n response_config_properties = response_dict['config_properties']\n self.assertDictContainsSubset({\n 'value': new_config_value,\n }, response_config_properties[\n config_domain.\n ENABLE_ADMIN_NOTIFICATIONS_FOR_REVIEWER_SHORTAGE.name])\n\n self.logout()", "def test_config_change():\n clean_tables()\n config = set_configuration()\n assert config['age']['value'] == \"72\"\n assert config['retainUnsent']['value'] == \"False\" \n\n config = update_configuration(age=0, retain_unsent=True) \n assert config['age']['value'] == \"0\" \n assert config['retainUnsent']['value'] == \"True\"\n\n clean_tables()", "def test_update_dashboard_panel_setting(self):\n pass", "def test_update_portal_setting(self):\n pass", "def test_update_node_driveconfig(self):\n pass", "def test_update_settings_flow(self):\n pass", "def test_update_deployment_state(self):\n pass", "def test_update_report_setting(self):\n pass", "def test_set_nested_attribute(self):\n path = \"skills.dummy.behaviours.dummy.args.behaviour_arg_1\"\n new_value = \"10\" # 
cause old value is int\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", path],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert new_value in result.output", "def test_update_program_custom_field(self):\n pass", "def test_update_antivirus_settings(self):\n pass", "def test_projects_agents_partial_update(self):\n pass", "def test_change_default():\n old_default = ExampleComponent.param.default_value\n ExampleComponent.param.default_value = 199.0\n comp = ExampleComponent()\n assert comp.param == 199.0\n ExampleComponent.param.default_value = old_default", "def test_update_room_configuration(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test agent config manager get_overridables.
def test_AgentConfigManager_get_overridables(): path = Path(CUR_PATH, "data", "dummy_aea") agent_config = AEABuilder.try_to_load_agent_configuration_file(path) config_manager = AgentConfigManager(agent_config, path) agent_overridables, component_overridables = config_manager.get_overridables() assert "description" in agent_overridables assert "is_abstract" in list(component_overridables.values())[0]
[ "def get_overridables(self) -> Tuple[Dict, List[Dict]]:\n (\n agent_overridables,\n components_overridables,\n ) = self.agent_config_manager.get_overridables()\n components_configurations = []\n for component_id, obj in components_overridables.items():\n if not obj: # pragma: nocover\n continue\n obj.update(component_id.json)\n components_configurations.append(obj)\n\n return agent_overridables, components_configurations", "def test_get_addresses(self, *args) -> None:\n self.test_add_agent()\n agent_alias = self.manager.get_agent_alias(self.agent_name)\n keys = {\n name: agent_alias._create_private_key(\n ledger=name, replace=True, is_connection=False\n )\n for name in crypto_registry.supported_ids\n }\n\n connection_keys = {\n name: agent_alias._create_private_key(\n ledger=name, replace=True, is_connection=True\n )\n for name in crypto_registry.supported_ids\n }\n agent_alias.set_overrides(\n {\"private_key_paths\": keys, \"connection_private_key_paths\": connection_keys}\n )\n\n assert len(agent_alias.get_addresses()) == len(crypto_registry.supported_ids)\n assert len(agent_alias.get_connections_addresses()) == len(\n crypto_registry.supported_ids\n )", "def netapi32_NetConfigGetAll(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"server\", \"component\", \"bufptr\", \"totalavailable\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def test_get_canary_configs_using_get(self):\n pass", "def test_list_planner_overrides(self):\n\n r = self.client.list_planner_overrides()", "def antenny_list_configs(self):\n return self.antenny_config.list_configs()", "def getConfigAll(self):\n return self.configAll(False)", "def test_list_external_admission_hook_configuration(self):\n pass", "def test_list_config_nodes(self):\n with self.override_role():\n self.config_client.list_config_nodes()", "def test_find_agent_ips(self):\n\n with patch(\n \"salt.cloud.clouds.proxmox.query\",\n return_value={\n \"result\": [\n {\n \"name\": \"eth0\",\n \"ip-addresses\": [\n {\"ip-address\": \"1.2.3.4\", \"ip-address-type\": \"ipv4\"},\n {\"ip-address\": \"2001::1:2\", \"ip-address-type\": \"ipv6\"},\n ],\n },\n {\n \"name\": \"eth1\",\n \"ip-addresses\": [\n {\"ip-address\": \"2.3.4.5\", \"ip-address-type\": \"ipv4\"},\n ],\n },\n {\n \"name\": \"dummy\",\n },\n ]\n },\n ) as mock_query:\n vm_ = {\n \"technology\": \"qemu\",\n \"host\": \"myhost\",\n \"driver\": \"proxmox\",\n \"ignore_cidr\": \"1.0.0.0/8\",\n }\n\n # CASE 1: Test ipv4 and ignore_cidr\n result = proxmox._find_agent_ip(vm_, ANY)\n mock_query.assert_any_call(\n \"get\", \"nodes/myhost/qemu/{}/agent/network-get-interfaces\".format(ANY)\n )\n\n assert result == \"2.3.4.5\"\n\n # CASE 2: Test ipv6\n\n vm_[\"protocol\"] = \"ipv6\"\n result = proxmox._find_agent_ip(vm_, ANY)\n mock_query.assert_any_call(\n \"get\", \"nodes/myhost/qemu/{}/agent/network-get-interfaces\".format(ANY)\n )\n\n assert result == \"2001::1:2\"", "def test_list_global_system_configs(self):\n with self.override_role():\n self.config_client.list_global_system_configs()", "def test_get_canary_config_using_get(self):\n pass", "def test_getorgs(self):\n pass", "def _get_interfaces(self):\n return self.__interfaces", "def update_excluded_networks():\n if CONF['exclude_ipv4_bogons']:\n urls = [\n \"http://www.team-cymru.org/Services/Bogons/fullbogons-ipv4.txt\",\n ]\n for url in urls:\n try:\n response = requests.get(url, timeout=15)\n except requests.exceptions.RequestException as err:\n logging.warning(err)\n else:\n if 
response.status_code == 200:\n CONF['exclude_ipv4_networks'] = list_excluded_networks(\n response.content,\n networks=CONF['exclude_ipv4_networks'])\n logging.info(\"IPv4: %d\",\n len(CONF['exclude_ipv4_networks']))\n\n if CONF['exclude_ipv6_bogons']:\n urls = [\n \"http://www.team-cymru.org/Services/Bogons/fullbogons-ipv6.txt\",\n ]\n for url in urls:\n try:\n response = requests.get(url, timeout=15)\n except requests.exceptions.RequestException as err:\n logging.warning(err)\n else:\n if response.status_code == 200:\n CONF['exclude_ipv6_networks'] = list_excluded_networks(\n response.content,\n networks=CONF['exclude_ipv6_networks'])\n logging.info(\"IPv6: %d\",\n len(CONF['exclude_ipv6_networks']))", "def get_control_net_server_interfaces(self):\n d0 = self.options.get_config(\"controlnetif0\")\n if d0:\n logging.error(\"controlnet0 cannot be assigned with a host interface\")\n d1 = self.options.get_config(\"controlnetif1\")\n d2 = self.options.get_config(\"controlnetif2\")\n d3 = self.options.get_config(\"controlnetif3\")\n return [None, d1, d2, d3]", "def test_override_ledger_configurations_positive():\n new_chain_id = \"some_chain\"\n agent_config = MagicMock()\n agent_config.component_configurations = {\n ComponentId(ComponentType.CONNECTION, PublicId.from_str(LEDGER_CONNECTION)): {\n \"config\": {\"ledger_apis\": {DEFAULT_LEDGER: {\"chain_id\": new_chain_id}}}\n }\n }\n old_configurations = deepcopy(LedgerApis.ledger_api_configs)\n\n expected_configurations = deepcopy(old_configurations[DEFAULT_LEDGER])\n expected_configurations[\"chain_id\"] = new_chain_id\n try:\n _override_ledger_configurations(agent_config)\n actual_configurations = LedgerApis.ledger_api_configs.get(\"fetchai\")\n assert expected_configurations == actual_configurations\n finally:\n # this is important - _ovveride_ledger_configurations does\n # side-effect to LedgerApis.ledger_api_configs\n LedgerApis.ledger_api_configs = old_configurations\n assert (\n LedgerApis.ledger_api_configs[DEFAULT_LEDGER][\"chain_id\"]\n == FETCHAI_DEFAULT_CHAIN_ID\n )", "def test_client_addresses_list(self):\n pass", "def test_agent_config_updated(self):\n loader = ConfigLoader.from_configuration_type(PackageType.AGENT)\n with Path(self._get_cwd(), DEFAULT_AEA_CONFIG_FILE).open() as fp:\n agent_config = loader.load(fp)\n assert DefaultMessage.protocol_id in agent_config.protocols\n assert ERROR_SKILL_PUBLIC_ID in agent_config.skills" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate the prior to the posterior distribution using the input data.
def iterate(self, data):
    # Append data to self.data
    self.data = np.append(self.data, data)
    for i, d in enumerate(data):
        update = self.current*self.likelihood(d)
        self.current = self._normalize(update)
        self.posterior = np.concatenate((self.posterior,[self.current]))
    print(str(len(data)) + " iterations completed!")
    return None
[ "def posterior_sample(self):\n pass", "def sample_from_prior(self, *args, **kwargs):\n pass", "def priorLikelihood(self, step):\n # grab the portion of the sample that's mine\n θ = self.restrict(theta=step.theta)\n # and the storage for the prior likelihoods\n likelihood = step.prior\n # go through each parameter set\n for pset in self.psets.values():\n # and ask each one to {prep} the sample\n pset.priorLikelihood(theta=θ, priorLLK=likelihood)\n # all done\n return self", "def _compute_prior(self,hist_data=None):\n def compute_prior_conversion(hist_data):\n \"\"\"Summary\n\n Args:\n hist_data (TYPE): Description\n\n Returns:\n TYPE: Description\n \"\"\"\n #Conversion is modelled as ~Bernoulli(lambda) with prior lambda ~Beta(alpha,beta)\n\n #non-informative prior\n alpha = 1\n beta = 1\n\n if hist_data is not None:\n if 'conversion' in hist_data.keys():\n print(\"Warning!! Using std as .1 @compute_prior_conversion\")\n alpha,beta = self._estimate_beta_dist(\n hist_data['conversion']['mean'],\n .1\n )\n\n return {'lambda':{'alpha':alpha,'beta':beta}}\n\n def compute_prior_aov(hist_data):\n \"\"\"Summary\n\n Args:\n hist_data (TYPE): Description\n\n Returns:\n TYPE: Description\n \"\"\"\n #AOV (== ticket medio) is modelled as ~Exp(theta) with prior theta ~Gamma(k,omega)\n\n #non-informative prior\n k = 1\n omega = 1\n\n if hist_data is not None:\n if 'aov' in hist_data.keys():\n print(\"Warning!! Using std as .1 @ compute_prior_aov\")\n k,omega = self._estimate_gamma_dist(\n 1/hist_data['aov']['mean'],\n #1 #std of gamma(1,1)\n 3e-03\n )\n\n return {'theta':{'k':k,'omega':omega}}\n\n def compute_prior_arpu(hist_data):\n \"\"\"Summary\n\n Args:\n hist_data (TYPE): Description\n\n Returns:\n TYPE: Description\n \"\"\"\n #ARPU is modelled as ~Bernoulli(lambda)*Exp(theta) with priors as in 'aov' and 'conversion'\n prior = {}\n prior.update(compute_prior_conversion(hist_data))\n prior.update(compute_prior_aov(hist_data))\n return prior\n\n func_map = {\n 'conversion':compute_prior_conversion,\n 'aov':compute_prior_aov,\n 'arpu':compute_prior_arpu\n }\n\n return func_map[self.test_type](hist_data)", "def recreate_posterior(prior, counts, obs_prob):\n post = prior\n for i in range(counts.size):\n ct = counts[i].astype(np.int)\n for j in range(ct):\n vec = obs_prob[i]\n post = vec*post\n post = post / post.sum()\n return post", "def computePosterior(self):\n # in their log form, posterior = prior + beta * datalikelihood\n # make a copy of prior at first\n self.posterior.copy(self.prior)\n # add the data likelihood\n altar.blas.daxpy(self.beta, self.data, self.posterior)\n # all done\n return self", "def delayed_rejection(xi, pxi,xnext,pxnext,sigma,bins, fun):\n #make step\n for i in xrange(50):\n #generate new point\n zdr = {bins:fun.proposal(xi[bins],sigma[bins])}\n #check if in prior\n if not nu.isfinite(fun.prior(zdr,bins)):\n break\n else:\n #after 50 trials give up if not in priors\n return \n #get next proposal\n propphi_zdr = self._prop_phi([zdr])\n #calc lik\n zdrprob, zdrlik = self._get_post_prob([zdr],propphi_zdr)\n #acceptance prob for new param a(zdrprob[0],zprob)\n alpha2 = min(zdrprob[0]*(1-self._alpha1(self,zdrprob[0],zprob))/(pxi*(1-self._alpha1(self, pxi, zprob))), 1)\n acc = 0; lik = 0; pr = 0; prop = 0\n if random()< alpha2:\n xi = zdr\n acc = 1\n liks = zdrlik\n pr = zdrprob[0]\n prop = propphi_zdr\n return xi, acc, lik, pr, prop", "def posterior(self, samples):\n unique_samps = set(samples)\n denominator = 0\n posteriors = []\n n_samps = len(samples)\n for concept in 
self.concepts:\n num = 0\n if unique_samps.issubset(set(concept.extension)):\n num = concept.prior*concept.likelihood(n_samps)\n denominator += num\n posteriors.append(num)\n return np.divide(posteriors, denominator)", "def posterior_distribution(x, t, M, noise_precision, prior_mu, prior_precision):\n A = np.array([x ** i for i in range(M)]).reshape((1, M)) # (M, 1)\n\n new_precision = prior_precision + noise_precision * np.dot(np.transpose(A), A)\n new_mu = np.dot(np.linalg.inv(new_precision), noise_precision * t * np.transpose(A) + np.dot(prior_precision, prior_mu))\n\n return new_mu, new_precision", "def p_prior(self):\n p0 = [self.from_prior() for i in range(self.__nwalkers)]\n return p0", "def posterior_predictive_check(self):\n self.ppc = {}\n for gene in self.initial_genes:\n self.ppc[gene] = self._gene_ppc(gene)", "def _preprocess(self, data):\n\n # pipeline: first call the previous statistics:\n if self.previous_statistics is not None:\n data = self.previous_statistics.statistics(data)\n # the first of the statistics need to take list as input, in order to match the API. Then actually the\n # transformations work on np.arrays. In fact the first statistic transforms the list to array. Therefore, the\n # following code needs to be called only if the self statistic is the first, i.e. it does not have a\n # previous_statistic element.\n else:\n data = self._check_and_transform_input(data)\n\n return data", "def sample_from_prior(self):\n raise NotImplementedError", "def priorLikelihood(self, theta, prior):", "def _updateInitialProbabilities(self): \n N = self.N\n K = self.K\n\n for i in range(1,self.K+1):\n s = 0\n updated_prob = 0\n for n in range(1,self.N+1):\n s = s+1\n updated_prob = updated_prob + self.posterior_state_trellis[n][(1,i)]\n self.state_initial_prob[i] = (updated_prob/s)", "def run_prior(config, ndata):\n np.random.seed(1)\n # Parse parameters\n m_prior, fwd_args = parse_config(config, ndata)\n m_prior = reshape_prior(m_prior)\n\n # Run simulation\n results = np.zeros((len(m_prior), 13, len(fwd_args['time'])))\n for i in tqdm(range(len(m_prior)), desc='Running prior ensemble'):\n param = m_prior[i]\n results[i] = coronaSEIR.base_seir_model(param,fwd_args)\n\n tnew = fwd_args['time'] - fwd_args['time_delay']\n\n return results, tnew, m_prior, fwd_args", "def _iter_distributions(self) -> Iterator[\"BaseDistribution\"]:\n raise NotImplementedError()", "def sample_z_from_prior(self):\n pass", "def prob_given(self, posterior, prior):\n\t # print \"posterior, prior\", posterior, prior\n\t return self.prob(merge(prior, posterior)) / self.prob(prior) if self.prob(prior) else 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates credible interval for any probability distribution given input interval for cdf.
def credible_interval(self, distType='current', interval=(0.025, 0.975)):
    # Calculate cdf to use for credible interval
    distCred = self.cumulative_distribution(dist=distType)
    # Prior and Current credible intervals
    if (distType=='current' or distType=='prior'):
        minCred = self.hypotheses[np.where((distCred-interval[0])>0)[0].min()]
        maxCred = self.hypotheses[np.where((distCred-interval[1])>0)[0].min()]
        ci = [(minCred, maxCred)]
    # Posterior: all iterations credible intervals
    else:
        ci = []
        for i, row in enumerate(distCred):
            minCred = self.hypotheses[np.where((distCred[i]-interval[0])>0)[0].min()]
            maxCred = self.hypotheses[np.where((distCred[i]-interval[1])>0)[0].min()]
            ci.append((minCred, maxCred))
    return ci
[ "def get_probabilty_in_closed_interval(h2_values, cdf, interval):\n p = get_approx_cdf(h2_values, cdf, interval[1]) - get_approx_cdf(h2_values, cdf, interval[0])\n if interval[0] == 0.0:\n p += cdf[0]\n return p", "def credible_interval(samples, ci=.9):\n # number of intervals to compute\n nci = int((1 - ci)*samples.size)\n\n # find highest posterior density (HPD) credible interval\n # i.e. the one with minimum width\n argp = np.argpartition(samples, [nci, samples.size - nci])\n cil = np.sort(samples[argp[:nci]]) # interval lows\n cih = np.sort(samples[argp[-nci:]]) # interval highs\n ihpd = np.argmin(cih - cil)\n\n return cil[ihpd], cih[ihpd]", "def credibleInterval(self, percentage=90):\n cdf = self.makeCdf()\n return cdf.credibleInterval(percentage)", "def compute_credible_interval(vals, weights, confidence: float = 0.95):\n if confidence <= 0.0 or confidence >= 1.0:\n raise ValueError(\n f\"Confidence {confidence} must be in the interval (0.0, 1.0).\"\n )\n alpha_lb = 0.5 * (1.0 - confidence)\n alpha_ub = confidence + alpha_lb\n lb = compute_quantile(vals, weights, alpha_lb)\n ub = compute_quantile(vals, weights, alpha_ub)\n return lb, ub", "def calculate_ci(ci_value, data):\n df = len(data) - 1 # degrees of freedom\n ci = stats.t.interval(ci_value, df, loc=np.mean(data),\n scale=stats.sem(data))\n return ci", "def credible_intervals(x, ci, weights=None):\n from numpy import asarray, vstack, sort, cumsum, searchsorted, round, clip\n ci = asarray(ci, 'd')\n target = (1 + vstack((-ci, +ci))).T/2\n\n if weights is None:\n idx = clip(round(target*(x.size-1)), 0, x.size-1).astype('i')\n return sort(x)[idx]\n else:\n idx = numpy.argsort(x)\n x, weights = x[idx], weights[idx]\n # convert weights to cdf\n w = cumsum(weights/sum(weights))\n return x[searchsorted(w, target)]", "def credible_interval(self, parameter, interval=[0.05, 0.95]):\n\n if parameter not in self.parameters:\n raise ValueError(f\"Parameter '{parameter}' is not available\")\n\n intervals = {}\n for key, value in self.results.items():\n if isinstance(value, Grid):\n intervals[key] = Plot._credible_interval_grid(\n value, parameter, interval\n )\n else:\n credint = value.posterior[parameter].quantile(interval).to_list()\n intervals[key] = credint[0] if len(interval) == 1 else credint\n\n return list(intervals.values())[0] if len(self.results) == 1 else intervals", "def cdf_function(self, param):\n # Solve P'(x) == p(x), with P[lower_boun] == 0\n def dP_cdf(p, x):\n if x > self.rlim[param] or x < self.llim[param]:\n return 0\n return self.pdf[param](x)\n x_i = numpy.linspace(self.llim[param], self.rlim[param], 1000)\n # Integrator needs to have a step size which doesn't step over the\n # probability mass\n # TODO: Determine h_max.\n cdf = integrate.odeint(dP_cdf, [0], x_i, hmax=0.01*(self.rlim[param]-self.llim[param])).T[0]\n if cdf[-1] != 1.0: # original pdf wasn't normalized\n self._pdf_norm[param] = cdf[-1]\n cdf /= cdf[-1]\n # Interpolate the inverse\n return interpolate.interp1d( x_i,cdf)", "def calculateConfidenceInterval(accuracies):\n # calculate the sample mean of the accuracies\n sampleMean = np.mean(accuracies)\n\n # calculate the sample standard deviation of the accuracies\n sumOfDiffs = np.sum((accuracies-sampleMean)**2)\n\n # get the bounds of the confidence interval (ci)\n ciBound1 = chi2.ppf(0.95, len(accuracies)-1) \n ciBound2 = chi2.ppf(0.05, len(accuracies)-1)\n\n # calculate the confidence interval itself\n ci = [sumOfDiffs/ciBound1, sumOfDiffs/ciBound2]\n\n print(\"Mean:\", sampleMean)\n print(ci)\n # 
print(\"StdDev:\", sampleStdDev)\n\n return sampleMean, ci", "def interval_for_cdf(cdf: np.ndarray, mass: float) -> Interval:\n lower_alpha = (1 - mass) / 2\n upper_alpha = (1 + mass) / 2\n wide_lower_idx, _ = find_index_in_sorted(cdf, lower_alpha)\n _, wide_upper_idx = find_index_in_sorted(cdf, upper_alpha)\n return Interval(\n # wide_lower_idx is the maximum point with probability sum <=\n # lower_alpha. Therefore it and every point below do not need\n # to be in the interval, so add one to get the first point in\n # the interval. If cdf[0] > lower_alpha, then wide_lower_idx =\n # -1, and we start at index 0.\n lower=(wide_lower_idx + 1),\n # wide_upper_idx is the minimum point with probability sum >=\n # upper_alpha, and therefore represents the last point of the\n # interval. In the highly unlikely scenario cdf[-1] <\n # upper_alpha, wide_upper_idx will be len(cdf), so we cap it\n # to the max index.\n upper=np.minimum(wide_upper_idx, (len(cdf) - 1)),\n )", "def cdf(values, bins):\n if hasattr(bins, \"__getitem__\"):\n range = (np.nanmin(bins), np.nanmax(bins))\n else:\n range = None\n\n h, bins = np.histogram(values, bins=bins, range=range, density=False) # returns int\n\n # cumulative fraction below bin_k\n c = np.cumsum(h / np.sum(h, dtype=float))\n # append 0 to beginning because P(X < min(x)) = 0\n return np.append(0, c), bins", "def c_func(c_df, COH, age):\n COH = np.where(COH < UPPER_BOUND_COH, COH, UPPER_BOUND_COH) # prevent extrapolation\n COH = np.where(COH > 1, COH, 1) # prevent extrapolation\n spline = CubicSpline(c_df[str(END_AGE)], c_df[str(age)], bc_type='natural')\n C = spline(COH)\n if any(C < 0):\n # set coeffs of 2nd and 3rd order term to 0, 1st order term to the slope between the first two points\n spline.c[:2, 0] = 0\n spline.c[2, 0] = (c_df.loc[1, str(age)] - c_df.loc[0, str(age)]) / (c_df.loc[1, str(END_AGE)] - c_df.loc[0, str(END_AGE)])\n C = spline(COH) # redo the interpolation\n return C", "def test_conf_interval_ecdf_method(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n\n # ``quantile_estimation_method = \"ecdf\"``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"ecdf\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n pred_df[ERR_STD_COL] = round(pred_df[ERR_STD_COL], 2)\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.32, 289.38, 291.3, 291.34), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.63, -5.56, -4.13, -4.08), (\n \"quantiles are incorrect\")\n expected_stds = [0.29, 0.42, 0.42, 0.42, 0.42, 0.58, 0.58, 0.58, 0.58, 0.58,\n 0.58, 0.42]\n assert list(pred_df[ERR_STD_COL].values) == expected_stds", "def get_interval_from_confidence_file(self, interval_dict):\n for arc in self.arc_info.keys():\n weight = self.arc_info[arc][\"weight\"]\n if weight == 0:\n interval = [0, 0]\n else:\n interval = interval_dict[weight]\n ub = interval[1]\n lb = interval[0]\n self.arc_info[arc][\"upper_bound\"] = ub\n self.arc_info[arc][\"lower_bound\"] = lb", "def 
confidence_interval(acc, n, conf = .95):\n\n alpha = 1 - conf \n z = np.absolute(st.norm.ppf(alpha/2)) # Two tails\n\n sqrt =(z**2 + 4*n*acc - 4*n*acc**2)**.5\n rho_max = (2*n*acc + z**2 + z*sqrt) / (2*(n + z**2))\n rho_min = (2*n*acc + z**2 - z*sqrt) / (2*(n + z**2))\n\n return [rho_min, rho_max]", "def cdf(self, alpha): #Plot empirical cfd with confidence interval\n x = self.x\n n = len(x)\n y = np.arange(1, n+1)/n\n \n #Computing confidence interval with the Dvoretzky–Kiefer–Wolfowitz method based on the empirical points\n F1 = []\n F2 = []\n for i in range(0, n):\n e = (((mt.log(2/alpha))/(2*n))**0.5) \n F1.append(y[i] - e)\n F2.append(y[i] + e) \n plt.plot(sorted(x), y, label='Empirical CDF')\n plt.plot(sorted(x), F1, linestyle='--', color='red', alpha = 0.8, lw = 0.9, label = 'Dvoretzky–Kiefer–Wolfowitz Confidence Bands')\n plt.plot(sorted(x), F2, linestyle='--', color='red', alpha = 0.8, lw = 0.9)\n plt.ylabel('Cumulative Distribution Function')\n plt.xlabel('Observed Data')\n plt.legend()\n plt.show()\n \n return(y)", "def compute_interval_limits(bias, acceleration, n_boots, ci=95):\n from scipy.stats import norm\n from numpy import isnan, nan\n\n alpha = _compute_alpha_from_ci(ci)\n\n alpha_low = alpha / 2\n alpha_high = 1 - (alpha / 2)\n\n z_low = norm.ppf(alpha_low)\n z_high = norm.ppf(alpha_high)\n\n kws = {'bias': bias, 'acceleration': acceleration}\n low = _compute_quantile(z_low, **kws)\n high = _compute_quantile(z_high, **kws)\n\n if isnan(low) or isnan(high):\n return low, high\n\n else:\n low = int(norm.cdf(low) * n_boots)\n high = int(norm.cdf(high) * n_boots)\n return low, high", "def _credible_interval_grid(grid, parameter, interval):\n\n from pesummary.utils.array import Array\n\n margpost = grid.marginalize_posterior(not_parameters=parameter)\n intervals = Array.percentile(\n grid.sample_points[parameter],\n weights=margpost,\n percentile=[100 * val for val in interval],\n )\n\n return intervals if len(interval) > 1 else intervals[0]", "def cchalf(dataframe, function, bins):\n dist = dataframe.set_index(['H', 'K', 'L'])['D'].drop_duplicates()\n dmin = dist.min()\n dmax = dist.max()\n binedges = np.linspace(dmin**-2, dmax**-2, bins+1)**-0.5\n binedges = list(zip(binedges[:-1], binedges[1:]))\n a,b = split(dataframe)\n xval_a, xval_b = function(a), function(b)\n#TODO: Fix this awful hack\n key = [i for i in xval_a if i!='D'][0]\n xval_a, xval_b = xval_a.join(dist),xval_b.join(dist)\n idx = xval_a.index.intersection(xval_b.index)\n xval_a,xval_b = xval_a.loc[idx],xval_b.loc[idx]\n cchalf = []\n for dmin,dmax in binedges:\n idx = (xval_a['D'] > dmin) & (xval_a['D'] < dmax)\n a = np.array(xval_a[idx][key]).flatten()\n b = np.array(xval_b[idx][key]).flatten()\n cchalf.append(np.corrcoef(a,b)[0, 1])\n return cchalf, binedges" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Normalize the product of likelihood and prior.
def _normalize(self, inp):
    return inp/inp.sum()
[ "def _normalize(self, probs):\n probs = np.array(probs)\n probs = probs / probs.sum()\n return probs", "def normalize(self):\n var = self.a_variable()\n return self.scale(1 / self.coefficient(var))", "def _normalize(self, distribution):\n #print distribution\n normalizer = distribution.sum()\n if normalizer == 0:\n return log2(distribution)\n else:\n return log2(distribution) - log2(normalizer)", "def __normalize_weights(self):\n result = norm(self.weights, 1)\n self.weights = self.weights / result if not result == 0.0 else self.weights", "def normalize(self):\n norm_val = self.sum2/self.sum1\n self.sum1=0\n\n for sentence in self.data_set:\n sentence.weight *= norm_val\n self.sum1 += sentence.weight", "def stdProbabilityNorm(self):\n return 1./factorial(self.alpha-1)", "def normalized(self, predictions):\n return self.w * predictions + self.b", "def normalizeValues(valoresPrueba, mu, sigma):\n return (valoresPrueba - mu)/sigma", "def normalize(self):\n total = self.total()\n for x in self.d:\n self.d[x] /= total\n return total", "def normalize(self):\n\ttry:\n\t self.as_matrix /= self.get_norm()\n\texcept:\n\t raise DMRGException(\"Wavefunction norm is zero\")", "def normalize(self, external=None):\n if external is None:\n return np.max(self.y)\n else:\n return self.y/external", "def _orthonormalize(self):\n\n def get_normalization(v):\n return np.sqrt(np.dot(v, v))\n\n self.THETA[0] /= get_normalization(self.THETA[0])\n for i in range(1, self.K):\n for j in range(0, i):\n v = np.dot(self.THETA[i], self.THETA[j])\n self.THETA[i] -= v * self.THETA[j]\n \n self.THETA[i] /= get_normalization(self.THETA[i])", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def normalize_l2(x):\n return x / (npla.norm(x))", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)\n self.normal_vectors.data = normalize(self.normal_vectors, p=2, dim=1)", "def normalize_probability(p_unnormalized):\n p_normalized=p_unnormalized/p_unnormalized.sum(axis=0)\n return p_normalized", "def _normalize_probs(self):\n\n cols = [c for c in self.dfX.columns if c.startswith('prob_') and c.find('pseudo') < 0]\n\n for c in cols:\n self.dfX[c] = self.dfX[c] / self.dfX.groupby('race_id')[c].transform(sum)", "def normalize(u):\n return u/math.sqrt(numpy.dot(u, u))", "def normalize_log_likelihoods(X):\n h, w = np.shape(X)\n return X - np.matlib.repmat(logsumexp(X, axis=0), h, 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Longest run testcases with more than one target
def test_longest_run_mult(self):
    self.assertTrue(geneutil.longestRun('QQQQN','QN')==5)
    self.assertTrue(geneutil.longestRun('QQANNQ','QN',1)==6)
    self.assertTrue(geneutil.longestRun('QQNPPQ','QN',1)==3)
    self.assertTrue(geneutil.longestRun('QQQAANN','QN',2)==7)
    self.assertTrue(geneutil.longestRun('ANQNQAN','QN',1)==6)
    self.assertTrue(geneutil.longestRun('ANQNQANP','QN',1)==6)
[ "def test_returns_number_of_ways_to_reach_target(self):\n result = find_target_sum_ways([1,1,1,1,1], 3)\n self.assertEqual(result, 5)", "def num_targets(self) -> int:", "def test_max_run_start():\n state = np.array([1, 1, 1, 1, 1, 1, 0, 2, 1, 1, 1, 1, 1, 4, 6, 1, 1])\n assert max_run(1, state) == 6", "def getResult(targets, i=None):", "def test_which_targets():\n num_multi_targets = 0\n for which_targets_day in which_targets:\n # All inputs have a label\n assert np.all(which_targets_day.sum(axis=1) > 0)\n # No inputs have more than 3 targets\n assert np.all(which_targets_day.sum(axis=1) < 4)\n\n num_multi_targets += np.sum(which_targets_day.sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0", "def test_max_run_end():\n state = np.array([1, 1, 1, 1, 0, 1, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n assert max_run(1, state) == 9", "def longest_ORF_unit_tests():\n\n # YOUR IMPLEMENTATION HERE", "def select_max_test_case(self, leftover, test_cases, stmt):\n max_tc_pos = 0\n max_intersection = 0\n for pos, tc in enumerate(test_cases):\n if stmt:\n if len(leftover.intersection(tc[1])) > max_intersection:\n max_tc_pos = pos\n max_intersection = len(leftover.intersection(tc[1]))\n else:\n if len(leftover.intersection(tc[2])) > max_intersection:\n max_tc_pos = pos\n max_intersection = len(leftover.intersection(tc[2]))\n return test_cases.pop(max_tc_pos)", "def reduce_targets(targets): \n if len(targets)<1: return []\n if targets[0]=='objective' or targets[0]=='subjective':\n binaries = [0 if target=='objective' else 1 for target in targets]\n else:\n binaries = [0 if target=='negative' else 1 for target in targets]\n return binaries", "def run_automatic_tester():\n number_of_target_maps = len(os.listdir(TargetDetectionTesterSettings.TARGET_DETECTION_REPORT_JSON_FILE_SAVE_PATH))\n overall_true_positive_count = 0\n overall_false_positive_count = 0\n overall_target_count = 0\n\n for index_0 in range(number_of_target_maps):\n\n answer_sheet = json.load(open(os.path.join(TargetDetectionTesterSettings.TARGET_MAP_ANSWER_SHEET_PATH, str(index_0 + 1) + \".json\")))\n answer_list = []\n\n for index_1 in range(len(answer_sheet[\"targets\"])):\n answer_list.append((answer_sheet[\"targets\"][index_1][\"target_center_coordinates\"][0], answer_sheet[\"targets\"][index_1][\"target_center_coordinates\"][1]))\n overall_target_count += len(answer_list)\n\n target_detection_result = json.load(open(os.path.join(TargetDetectionTesterSettings.TARGET_DETECTION_REPORT_JSON_FILE_SAVE_PATH, str(index_0 + 1) + \".json\")))\n result_list = []\n\n for index_2 in range(len(target_detection_result[\"image_processing_results\"])):\n result_list.append((target_detection_result[\"image_processing_results\"][index_2][\"target_location\"][0] + (target_detection_result[\"image_processing_results\"][index_2][\"target_location\"][2] / 2), target_detection_result[\"image_processing_results\"][index_2][\"target_location\"][1] + (target_detection_result[\"image_processing_results\"][index_2][\"target_location\"][3] / 2)))\n\n current_true_positive_count = 0\n current_false_positive_count = 0\n banned_index_list = []\n\n for index_3 in range(len(answer_list)):\n true_positive_found = False\n\n for index_4 in range(len(result_list)):\n is_index_4_banned = False\n\n for index_5 in range(len(banned_index_list)):\n if (index_4 == banned_index_list[index_5]):\n is_index_4_banned = True\n\n if (is_index_4_banned == True):\n continue\n\n correct_target_center_x = answer_list[index_3][0]\n correct_target_center_y = 
answer_list[index_3][1]\n\n detected_target_center_x = result_list[index_4][0]\n detected_target_center_y = result_list[index_4][1]\n\n if ((abs(correct_target_center_x - detected_target_center_x) <= 20) and (abs(correct_target_center_y - detected_target_center_y) <= 20)):\n current_true_positive_count += 1\n banned_index_list.append(index_4)\n true_positive_found = True\n continue\n\n current_false_positive_count = len(result_list) - current_true_positive_count\n\n overall_true_positive_count += current_true_positive_count\n overall_false_positive_count += current_false_positive_count\n\n percentage = 100 * float(overall_true_positive_count) / (overall_target_count)\n\n TargetDetectionTesterLogger.log(\"--------------------------------------------------\")\n TargetDetectionTesterLogger.log(\"Total True Positive Count: \" + str(overall_true_positive_count))\n TargetDetectionTesterLogger.log(\"Total False Positive Count: \" + str(overall_false_positive_count))\n TargetDetectionTesterLogger.log(\"Percentage of Successfully Detected Targets: \" + str(percentage) + \"%\")\n TargetDetectionTesterLogger.log(\"--------------------------------------------------\")", "def test_target_number_less_than_alp(self):\n alp = list(range(10))\n targets = generate_targets(alp, 5)\n self.assertEqual(len(targets), 5)\n self.assertEqual(len(targets), len(set(targets)))", "def exhaustive_search(data_set, target):\n shortest=2**53\n listy=[]\n for item in data_set:\n compare=metric(target, item)\n if compare<shortest:\n shortest=compare\n listy=item\n return listy, shortest", "def get_min_max_num_targets(self):\n return 1, 1", "def test_target_greater_than_alp(self):\n alp = list(range(5))\n targets = generate_targets(alp, 10)\n self.assertEqual(len(targets), 10)\n\n counts = Counter(targets)\n\n for item in alp:\n self.assertEqual(counts[item], 2)", "def solve(self):\n\n self.remove_impossible_targets()\n random.shuffle(self.targets)\n best_move = list(self.targets)\n best_perf = self.compute_performance()\n for i in range(settings.MAX_RANDOM_PLANNER_ITERATION):\n random.shuffle(self.state)\n perf = self.compute_performance()\n if perf < best_perf:\n best_move = list(self.state)\n\n self.state = best_move", "def best_tests():\n return [\n LvqParams(sigma=.2, prototypes_per_class=8, batch_size=256, epochs=4),\n LvqParams(sigma=6, prototypes_per_class=12, batch_size=128, epochs=4),\n LvqParams(sigma=6, prototypes_per_class=12, batch_size=16, epochs=12),\n LvqParams(sigma=1, prototypes_per_class=3, batch_size=8, epochs=4),\n LvqParams(sigma=5, prototypes_per_class=10, batch_size=32, epochs=8),\n LvqParams(sigma=.2, prototypes_per_class=10, batch_size=32, epochs=8),\n LvqParams(sigma=6, prototypes_per_class=10, batch_size=128, epochs=2),\n LvqParams(sigma=3, prototypes_per_class=10, batch_size=8, epochs=4),\n LvqParams(sigma=1, prototypes_per_class=12, batch_size=128, epochs=12),\n LvqParams(sigma=.2, prototypes_per_class=8, batch_size=256, epochs=1)\n ]", "def get_test_suite():\n # max for a and p\n MAX = 2**31 - 1 # INT32_MAX, max value for a and p\n sqrt_MAX = floor(sqrt(MAX)) # max for n\n \n # first test suite\n a_list = [0, 0, 0, 1, 1, 2, 7, 2, 1, 0, 0, 3, 1, 0, 0, 0, 1]\n p_list = [5, 3, 3, 0, 0, 0, 8, 1, 1, 0, 0, 0, 0, 1, 2, 0, 1]\n n_list = [7, 2, 2, 7, 3, 3, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 1]\n\n suite = get_one_suite(a_list, p_list, n_list, MAX, sqrt_MAX)\n yield suite\n \n # second test suite\n a_list = [3, 5, 23, 25, 100, 200, MAX, MAX-1, MAX]\n p_list = [10, 5, 23, 25, 100, 200, 1000, 100, 500]\n n_list 
= [23, 1, 0, 7, 1, 100, sqrt_MAX, 3, 23]\n \n suite = get_one_suite(a_list, p_list, n_list, MAX, sqrt_MAX)\n yield suite\n\n # third test suite\n a_list = []\n p_list = []\n n_list = []\n\n # keep a = 0\n for _ in range(10):\n a_list.append(0)\n p_list.append(random.randint(0, 5000))\n n_list.append(random.randint(0, sqrt_MAX))\n # keep p = 0\n for _ in range(10):\n a_list.append(random.randint(0, MAX))\n p_list.append(0)\n n_list.append(random.randint(0, sqrt_MAX))\n # keep n = 0\n for _ in range(10):\n a_list.append(random.randint(0, MAX))\n p_list.append(random.randint(0, 5000))\n n_list.append(0)\n # keep a = 0 and p = 0\n for _ in range(10):\n a_list.append(0)\n p_list.append(0)\n n_list.append(random.randint(0, sqrt_MAX))\n # keep all non-zero\n for _ in range(30):\n a_list.append(random.randint(0, MAX))\n p_list.append(random.randint(0, 5000))\n n_list.append(random.randint(0, sqrt_MAX))\n\n suite = get_one_suite(a_list, p_list, n_list, MAX, sqrt_MAX)\n yield suite", "def longest_run(l):\n maxed = 0\n ct = 1\n for i in range(1, len(l)):\n if l[i] != l[i-1]:\n if ct > maxed:\n maxed = ct\n ct = 1\n else:\n ct += 1\n if len(l) > 0 and ct > maxed:\n maxed = ct\n return maxed", "def result(self, test_data):\n \n test_results = [int(np.argmax(self.learning_model(x)) == y)\n for (x, y) in test_data]\n return sum(test_results)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Max Sliding Count testcases
def test_max_sliding_count(self):
    self.assertTrue(geneutil.maxSlidingCount('AAAAA','A')==5)
    self.assertTrue(geneutil.maxSlidingCount('AAAAA','Q')==0)
    self.assertTrue(geneutil.maxSlidingCount('AAATAA','A')==4)
    self.assertTrue(geneutil.maxSlidingCount('AAATTAA','A')==3)
    self.assertTrue(geneutil.maxSlidingCount('MMMMMMMMMMABCABCABCDM','M',10)==10)
    self.assertTrue(geneutil.maxSlidingCount('MMMMMMMMMMABCABCABCDM','C',10)==3)
[ "def test_max_run_end():\n state = np.array([1, 1, 1, 1, 0, 1, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n assert max_run(1, state) == 9", "def test_max_run_start():\n state = np.array([1, 1, 1, 1, 1, 1, 0, 2, 1, 1, 1, 1, 1, 4, 6, 1, 1])\n assert max_run(1, state) == 6", "def get_highest(self, test):\n return", "def max_repeats(seq):\n # item_count = [seq.count(item) for item in seq]\n # print seq\n # print \"item_count:\", item_count\n # print \"max(item_count):\", max(item_count)\n # return max(item_count)\n outcomes = get_outcomes(6)\n\n max_times = [seq.count(value) for value in outcomes]\n # print \"max_times:\", max_times\n # print \"max value:\", max_times.index(max(max_times)) + 1\n return max_times", "def max_of(callback):\n return max((callback(job) for job in state.run_list), default=0)", "def getMaxTaskCount():", "def test_max_at_the_end(self):\n tlist = [0, 0, 1]\n self.assertEqual(max_integer(tlist), 1)", "def max(self) -> int:", "def give_max_score(results):\n\n final_score = 0\n\n # uses a for loop to determine if final_score is the maximum value\n for counter in results:\n if counter > final_score:\n final_score = counter\n\n return final_score", "def test_max_begin(self):\n self.assertEqual(max_integer([5, 3, 4, 1]), 5)", "def select_max_test_case(self, leftover, test_cases, stmt):\n max_tc_pos = 0\n max_intersection = 0\n for pos, tc in enumerate(test_cases):\n if stmt:\n if len(leftover.intersection(tc[1])) > max_intersection:\n max_tc_pos = pos\n max_intersection = len(leftover.intersection(tc[1]))\n else:\n if len(leftover.intersection(tc[2])) > max_intersection:\n max_tc_pos = pos\n max_intersection = len(leftover.intersection(tc[2]))\n return test_cases.pop(max_tc_pos)", "def _sample_max(values):\n return math_ops.reduce_max(values, reduction_indices=[0])", "def test_max_middle(self):\n test_list = [1, 2, 10, 4, 5]\n self.assertEqual(max_integer(test_list), 10)", "def get_max_control_iterations():\n\treturn dsslib.SolutionI(ctypes.c_int32(24), ctypes.c_int32(0))", "def max_n(self, state, player):\n if state.cutoff_test():\n end = self.utility(state)\n ##print(end)\n return (end, None)\n\n v_max = np.full(3, float(-10000))\n best_a = None\n\n for action in available_actions(state):\n (v, irrelevant) = self.max_n(state.result(player, action), next_turn(player))\n if v[_PLAYERS[player]] > v_max[_PLAYERS[player]]:\n v_max = v\n best_a = action\n\n ##if best_a is not None and best_a[0] == EXIT:\n ##print(v_max)\n #print(best_a)\n return (v_max, best_a)", "def max_repeats(seq):\r\n item_count = [seq.count(item) for item in seq]\r\n print item_count\r\n return max(item_count)", "def test_max_end(self):\n self.assertEqual(max_integer([5, 3, 4, 8]), 8)", "def max_of(callback):\n return max((callback(job) for job in job_list), default=0)", "def _calculate_backtest_count(parameters: List[OptimizationParameter]) -> int:\n steps_per_parameter = [round((p.max - p.min) / p.step) + 1 for p in parameters]\n return int(functools.reduce(operator.mul, steps_per_parameter, 1))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine what moves are safe for a player to make. Returns a list of valid actions that player p can make in the given state.
def safe_moves(p, state):
    x, y = state['players'][p]['x'], state['players'][p]['y']
    moves = []
    actions = [(1, 0, 'east'), (-1, 0, 'west'), (0, -1, 'north'), (0, 1, 'south')]
    for dx, dy, move in actions:
        tx, ty = str(x + dx), str(y + dy)
        if tx not in state['cells'] or ty not in state['cells'][tx]:
            moves.append(move)
    return moves
[ "def _get_valid_actions(self, player, state):\n # create an empty list to hold all the available actions for a give player\n available_actions = []\n # 0 is sys_player and 1 is env player\n if player == 0:\n t = 1\n else:\n t = 0\n for k, v in self.transition_dict[(state[0], state[1], t)].items():\n if v is not None:\n available_actions.append(k)\n return available_actions", "def actions(self, state):\n return [boardUtil.boardUtil.getLegalCaptureMoves(state[0], state[1], state[2]),\n boardUtil.boardUtil.getLegalMoves(state[0], state[1], state[2], state[3])]", "def get_valid_actions(self, state, player):\n raise NotImplementedError", "def legal_moves():\n\tlegal_moves = (\"r\", \"p\", \"s\")\n\treturn legal_moves", "def getPossibleActions(self, state):\n if self.weHaveBall(state):\n return [('hold',)] + [('pass', i) for i in range(1, self.keeperNum)]\n else:\n return [None]", "def get_possible_actions(self, state):\n return [LEFT, DOWN, RIGHT, UP]", "def legal_moves(state, color):\n legal = []\n for r, row in enumerate(state):\n for c, tile in enumerate(row):\n if flips_something(state, r, c, color) and tile is '.':\n legal.append((r, c))\n if not legal:\n return ['pass']\n else:\n return legal", "def actions(self, state) :\n return getValidActions(state)", "def get_legal_actions(self, state):\n return self.action_fn(state)", "def legal_moves(state, color):\n # TODO You have to write this", "def legal_moves(state, color):\n moves = []\n for r in range(0, 8):\n for c in range(0, 8):\n if state[r][c] == '.':\n if flips_something(state, r, c, color):\n moves.append((r, c))\n if len(moves) == 0:\n return ['pass']\n return moves", "def get_pawn_moves(self, state):\n pawn_moves = []\n\n if self.color == cc.WHITE_ACTIVE:\n forward_1 = add_vectors(self.coord, cc.V_UP)\n forward_2 = add_vectors(self.coord, cc.V_UP_2)\n attacks = get_crawler_moves(self.coord, cc.W_PAWN_CAPTURE_VECTORS)\n starting_rank = cc.RANK_2\n promo_rank = cc.RANK_8\n promo_pieces = cc.WHITE_PROMO\n enemy_set = cc.BLACK_PIECES\n elif self.color == cc.BLACK_ACTIVE:\n forward_1 = add_vectors(self.coord, cc.V_DOWN)\n forward_2 = add_vectors(self.coord, cc.V_DOWN_2)\n attacks = get_crawler_moves(self.coord, cc.B_PAWN_CAPTURE_VECTORS)\n starting_rank = cc.RANK_7\n promo_rank = cc.RANK_1\n promo_pieces = cc.BLACK_PROMO\n enemy_set = cc.WHITE_PIECES\n else:\n raise Exception(\"get_pawn_moves: Invalid Piece Color\")\n\n if validate_move(forward_1) and state.board[forward_1] == cc.NO_PIECE:\n if forward_1[0] == promo_rank:\n for p in promo_pieces:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_1, promo=p))\n else:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_1))\n if self.coord[0] == starting_rank and validate_move(forward_2) and state.board[forward_2] == cc.NO_PIECE:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_2, en_p=forward_1))\n\n for attack in attacks:\n if state.board[attack] in enemy_set:\n if attack[0] == promo_rank:\n for p in promo_pieces:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True, promo=p))\n else:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True))\n # Make sure Pawns can attack en_passant squares\n elif attack == state.en_passant:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True))\n\n return pawn_moves", "def get_possible_actions(self, state):\n return tuple(self._transition_probs.get(state, {}).keys())", "def get_player_legal_moves(self, player):\n valid_moves = []\n\n for row in 
range(len(self.board)):\n for col in range(8):\n move = Move(row, col, player)\n if self.move_is_legal(move):\n valid_moves.append(move)\n\n return valid_moves", "def get_possible_moves(self) -> List[int]:\n return [s for s in range(1, int(self.current_state_number + 1)) if\n s ** (1/2) % 1 == 0]", "def get_legal_moves(self, player):\r\n move_list = []\r\n if self._phase == GamePhase.SETUP:\r\n return self._setup_legal_moves(player)\r\n elif self._phase == GamePhase.MOVE:\r\n return self._move_legal_moves(player)\r\n elif self._phase == GamePhase.BUILD:\r\n return self._build_legal_moves(player)\r\n return move_list", "def getLegalMovingActions(state,agent):\n actions = state.getLegalActions(agent)\n # Removing 'Stop'\n if Directions.STOP in actions:\n actions.remove(Directions.STOP)\n return actions", "def policy_f(state):\n actions = state.get_allowed_actions()\n winners = [[0, 1, 2],\n [3, 4, 5],\n [6, 7, 8],\n [0, 4, 8],\n [6, 4, 2],\n [0, 3, 6],\n [1, 4, 7],\n [2, 5, 8]]\n if any([all([state.board[pos] == -state.playerTurn for pos in winner]) for winner in winners]):\n value = -1\n elif any([all([state.board[pos] == state.playerTurn for pos in winner]) for winner in winners]):\n value = 1\n else:\n value = 0\n if len(actions) == 0:\n return [], value\n else:\n prob = 1/len(actions)\n return [(action, prob) for action in actions], value", "def getLegalMoves(self, state: GameState) -> np.ndarray:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start the client listening to the game. Pass in a function that accepts the available actions and the current state of the game, and returns the action to take. The SDK will handle the rest. If any command-line arguments are passed when running, they are assumed to be client keys that are sent to the server for connecting.
def start(turn_handler):

    if os.environ.get('BOTBOX_SECRET'):
        print('Using env secret:', os.environ['BOTBOX_SECRET'])
        headers = {'Authorization': os.environ['BOTBOX_SECRET']}
    elif len(sys.argv) > 1:
        print('Using cli secret:', sys.argv[1])
        headers = {'Authorization': sys.argv[1]}
    else:
        print('Using no authentication')
        headers = []

    # get the URL for the server from an environment variable if it is set,
    # otherwise use the default localhost
    if os.environ.get('BOTBOX_SERVER'):
        url = (WS_SERVER_SCHEME + '://'
               + os.environ['BOTBOX_SERVER'] + ':' + WS_SERVER_PORT)
    else:
        url = WS_SERVER_SCHEME + '://' + WS_SERVER_URL + ':' + WS_SERVER_PORT

    print("Connecting to:", url)

    ws = websocket.WebSocketApp(
        url,
        on_open = _on_open,
        on_message = lambda ws, msg: _on_message(ws, msg, turn_handler),
        on_error = _on_error,
        on_close = _on_close,
        header = headers
    )

    ws.run_forever()
[ "def start(self):\n if self._callable:\n self._is_running = True\n self._run_client()", "def run_chat_client():\r\n while must_run:\r\n print_menu()\r\n action = select_user_action()\r\n perform_user_action(action)\r\n print(\"Thanks for watching. Like and subscribe! 👍\")", "def start():\n if not cfg.irc:\n logging.warning(\"Skipping IRC module: no configuration provided\")\n return\n\n server = cfg.irc.server\n port = cfg.irc.port\n ssl = cfg.irc.ssl\n nick = cfg.irc.nick\n channels = cfg.irc.channels\n\n logging.info(\n \"Starting IRC client: server=%r port=%d ssl=%s nick=%r \" \"channels=%r\",\n server,\n port,\n ssl,\n nick,\n channels,\n )\n\n bot = Bot(cfg.irc)\n utils.DaemonThread(target=bot.start).start()\n\n evt_target = EventTarget(bot)\n events.dispatcher.register_target(evt_target)\n utils.DaemonThread(target=evt_target.run).start()", "def start(self, args = []):\n pass", "def start():\n server = current_server()\n logger.info('Starting Flexx event loop.')\n server.start()", "def game_start(self):\r\n\t\tself._comm_server.broadcast_message(\"game-start\")\r\n\t\tself._is_game_started = True\r\n\t\tself._handlers[\"game-start\"].invoke()\r\n\t\t_logger.info(\"Game is started.\")", "def main():\n if \"cli\" in sys.argv:\n run_cli_game()\n else:\n run_gui_game()", "def start(self):\n self._out.put(self._message(message.MSG_START, self._client_qname))\n response = self._getresponse()\n self._gameid = response.game\n self._state = ON", "def CallClient(self, action_name, request=None, next_state=None,\n client_id=None, request_data=None, start_time=None, **kwargs):\n if client_id is None:\n client_id = self.args.client_id\n\n if client_id is None:\n raise FlowRunnerError(\"CallClient() is used on a flow which was not \"\n \"started with a client.\")\n\n if not isinstance(client_id, rdfvalue.ClientURN):\n # Try turning it into a ClientURN\n client_id = rdfvalue.ClientURN(client_id)\n\n # Retrieve the correct rdfvalue to use for this client action.\n try:\n action = actions.ActionPlugin.classes[action_name]\n except KeyError:\n raise RuntimeError(\"Client action %s not found.\" % action_name)\n\n if action.in_rdfvalue is None:\n if request:\n raise RuntimeError(\"Client action %s does not expect args.\" %\n action_name)\n else:\n if request is None:\n # Create a new rdf request.\n request = action.in_rdfvalue(**kwargs)\n else:\n # Verify that the request type matches the client action requirements.\n if not isinstance(request, action.in_rdfvalue):\n raise RuntimeError(\"Client action expected %s but got %s\" % (\n action.in_rdfvalue, type(request)))\n\n outbound_id = self.GetNextOutboundId()\n\n # Create a new request state\n state = rdfvalue.RequestState(\n id=outbound_id,\n session_id=self.session_id,\n next_state=next_state,\n client_id=client_id)\n\n if request_data is not None:\n state.data = rdfvalue.Dict(request_data)\n\n # Send the message with the request state\n msg = rdfvalue.GrrMessage(\n session_id=utils.SmartUnicode(self.session_id), name=action_name,\n request_id=outbound_id, priority=self.args.priority,\n require_fastpoll=self.args.require_fastpoll,\n queue=client_id.Queue(), payload=request)\n\n if self.context.remaining_cpu_quota:\n msg.cpu_limit = int(self.context.remaining_cpu_quota)\n\n cpu_usage = self.context.client_resources.cpu_usage\n if self.context.args.cpu_limit:\n msg.cpu_limit = max(\n self.context.args.cpu_limit - cpu_usage.user_cpu_time -\n cpu_usage.system_cpu_time, 0)\n\n if msg.cpu_limit == 0:\n raise FlowRunnerError(\"CPU limit exceeded.\")\n\n if 
self.context.args.network_bytes_limit:\n msg.network_bytes_limit = max(self.context.args.network_bytes_limit -\n self.context.network_bytes_sent, 0)\n if msg.network_bytes_limit == 0:\n raise FlowRunnerError(\"Network limit exceeded.\")\n\n state.request = msg\n\n self.QueueRequest(state, timestamp=start_time)", "def run():\n import argparse\n\n parser = argparse.ArgumentParser(description='Phovea Server')\n parser.add_argument('--use_reloader', action='store_true', help='whether to automatically reload the server')\n parser.add_argument('--env', default=cc.get('env'), help='environment mode (dev or prod)')\n\n # parse before to enable correct plugin discovery\n args = parser.parse_known_args()[0]\n if args.env.startswith('dev'):\n enable_dev_mode()\n else:\n enable_prod_mode()\n\n # resolve the default command to decide which application to launch\n default_command = _resolve_commands(parser)\n if default_command is not None:\n # set a default subparse to extract the defined arguments from the instance to the main arguments (?)\n set_default_subparser(parser, default_command)\n\n args = parser.parse_args()\n\n _set_runtime_infos(args)\n\n main = args.launcher(args) # execute the launcher function, which returns another function\n\n if args.use_reloader:\n _log.info('start application using reloader...')\n run_with_reloader(main, extra_files=_config_files())\n else:\n _log.info('start application...')\n main()", "def start(self):\n\t\tself.init_trajectory_gripper()\n\t\tself.gripperserver.start()\n\t\tprint(\"The action server for this driver has been started\")", "def main():\n # Parse arguments for configuration and light type\n parser = argparse.ArgumentParser()\n parser.add_argument(\"light_type\", help=\"lifx or hue\", choices=['lifx', 'hue'], type = str.lower)\n parser.add_argument(\"-c\", \"--config_mode\", action='store_true', help=\"runs the client in config mode which prints out the light data\")\n \n args = parser.parse_args()\n \n config_mode = args.config_mode\n light_type = args.light_type\n \n # Get light information \n # *Note*\n # Only LIFX is supported at this point in time\n light_service = None\n if light_type == 'lifx':\n light_service = lightservice.LIFXLightService(\"https://api.lifx.com/v1/\")\n \n data = light_service.refresh_light_data(config_mode)\n \n button_handler = None\n if config_mode:\n button_handler = buttonhandler.ConfigButtonHandler()\n button_handler.start()\n else:\n button_handler = buttonhandler.ButtonHandler(data)\n button_handler.start(light_service)", "def main():\n lets_Have_a_game=Game_Controller()\n lets_Have_a_game.start_game()", "def run(self):\r\n self.client.connect()\r\n self.client.run()", "def start(self) -> None:\n\n while not self.stop_listening:\n if self.world_rank == 0:\n command = MDI_Recv_Command(self.comm)\n else:\n command = None\n if self.world_rank == 0:\n print(\"MDI command received: \" + str(command))\n\n # Search for this command in self.commands\n found_command = False\n for supported_command in self.commands:\n if not found_command and command == supported_command:\n # Run the function corresponding to this command\n self.commands[supported_command]()\n found_command = True\n if not found_command:\n raise Exception(\"Unrecognized command: \" + str(command))", "def start(self, args):\n self.validateInput(args)\n\n self.thread = threading.Thread(target=self.main)\n self.thread.start()", "def cmd_start(update: Update, context: CallbackContext):\n logger.info(\"/start is issued\")\n update.message.reply_text('Hi!')", "def 
main():\n return run_server(**parse_server_args())", "def start_server() -> None:\n filename = (\n sys.argv[1] if len(sys.argv) == 2 else\n pkg_resources.resource_filename(__name__, \"config.yml\")\n )\n config = load_config(filename)\n\n initialise_logging(config.get('logging'))\n app = create_application(config)\n start_http_server(app, config['app'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is a private method that handles incoming messages from the websocket, passes the turn information to an agent's turn handler, and then passes the result back to the server.
def _on_message(ws, msg, turn_handler):
    def x():
        parsed = json.loads(msg)
        player = parsed['player']
        actions = parsed['actions']
        state = parsed['state']
        action = turn_handler(player, actions, state)
        response = {"action":action}
        ws.send(json.dumps(response))
    _thread.start_new_thread(x, ())
[ "def on_message(self, wsobj, message):\n\n message = json.loads(message)\n\n # If needed, complete the websocket handshake\n if message[\"op\"] == \"C\":\n self.on_open(wsobj, message=message)\n\n # The next few lines ensure only gameplay related event for the\n # specified game are provided. Otherwise, ESPN's websockets include\n # noisy league-wide information.\n elif \"pl\" in message:\n if message[\"pl\"] != \"0\" and message[\"tc\"] == self.channel:\n decoded = self.decode_message(message)\n self.write_message(wsobj, decoded)", "async def _onMessage(self, msg, websocket):\n\n if msg == 'stop':\n logger.info('Websocket connection stopped')\n websocket.close()\n return\n\n if msg.startswith('GET schema.json'):\n logger.info('Got websocket schema request ({})'.format('GET schema.json'))\n # immediately respond\n data = schema_list(self.server.params)\n msg = 'POST schema.json?schema={}'.format(json.dumps(data))\n logger.debug('Websocket schema request response: ({})'.format(msg))\n await websocket.send(msg)\n return\n\n # POST <param-path>?value=<value>\n if msg.startswith('POST /') and '?value=' in msg:\n no_prefix = msg[len('POST '):] # assume no query in the url\n path, val = no_prefix.split('?value=')\n logger.info('Value received via websocket: {} = {}'.format(path, val))\n self.remote.incoming.valueEvent(path, val)\n return\n\n logger.warning('Received unknown websocket message: {}'.format(msg))", "def start(turn_handler):\n\n if os.environ.get('BOTBOX_SECRET'):\n print('Using env secret:', os.environ['BOTBOX_SECRET'])\n headers = {'Authorization': os.environ['BOTBOX_SECRET']}\n elif len(sys.argv) > 1:\n print('Using cli secret:', sys.argv[1])\n headers = {'Authorization': sys.argv[1]}\n else:\n print('Using no authentication')\n headers = []\n\n # get the URL for the server from an environment variable if it is set,\n # otherwise use the default localhost\n if os.environ.get('BOTBOX_SERVER'):\n url = (WS_SERVER_SCHEME + '://'\n + os.environ['BOTBOX_SERVER'] + ':' + WS_SERVER_PORT)\n else:\n url = WS_SERVER_SCHEME + '://' + WS_SERVER_URL + ':' + WS_SERVER_PORT\n\n print(\"Connecting to:\", url)\n\n ws = websocket.WebSocketApp(\n url,\n on_open = _on_open,\n on_message = lambda ws, msg: _on_message(ws, msg, turn_handler),\n on_error = _on_error,\n on_close = _on_close,\n header = headers\n )\n\n ws.run_forever()", "async def receiver(self):\n socket_input = await self.websocket.recv()\n logger.debug(\"<<< Received:\\n{}\".format(socket_input))\n\n # Showdown sends this response on initial connection\n if socket_input == \"o\":\n logger.info(\"Connected on {}\".format(self.websocket_url))\n self.connected = True\n self.add_task(self.on_connect())\n return\n\n inputs = utils.parse_socket_input(socket_input)\n for room_id, inp in inputs:\n room_id = room_id or \"lobby\"\n logger.debug(\"||| Parsing:\\n{}\".format(inp))\n inp_type, params = utils.parse_text_input(inp)\n\n # Set challstr attributes and autologin\n if inp_type == \"challstr\":\n self.challengekeyid, self.challstr = params\n if self.name and self.password and self.autologin:\n await self.login()\n elif self.autologin:\n msg = (\n \"Cannot login without username and password. 
If \"\n \"you don't want your client to be logged in, \"\n \"you can use Client.start(autologin=False).\"\n )\n raise Exception(msg)\n\n # Process query response\n elif inp_type == \"queryresponse\":\n response_type, data = params[0], \"|\".join(params[1:])\n data = json.loads(data)\n self.add_task(\n self.on_query_response(response_type, data), transient=True\n )\n if response_type == \"savereplay\":\n self.add_task(\n self.server.save_replay_async(data), transient=True\n )\n\n # Challenge updates\n elif inp_type == \"updatechallenges\":\n self.challenges = json.loads(params[0])\n self.add_task(\n self.on_challenge_update(self.challenges), transient=True\n )\n\n # Messages\n elif inp_type == \"c:\" or inp_type == \"c\":\n timestamp = None\n if inp_type == \"c:\":\n timestamp, params = int(params[0]), params[1:]\n author_str, *content = params\n content = \"|\".join(content)\n chat_message = message.ChatMessage(\n room_id, timestamp, author_str, content, client=self\n )\n self.add_task(\n self.on_chat_message(chat_message), transient=True\n )\n elif inp_type == \"pm\":\n author_str, recipient_str, *content = params\n content = \"|\".join(content)\n private_message = message.PrivateMessage(\n author_str, recipient_str, content, client=self\n )\n self.add_task(\n self.on_private_message(private_message), transient=True\n )\n\n # Rooms\n elif inp_type == \"init\":\n room_type = params[0]\n room_obj = room.class_map.get(room_type, room.Room)(\n room_id, client=self, max_logs=self.max_room_logs\n )\n self.rooms[room_id] = room_obj\n self.add_task(self.on_room_init(room_obj), transient=True)\n elif inp_type == \"deinit\":\n if room_id in self.rooms:\n self.add_task(\n self.on_room_deinit(self.rooms.pop(room_id)),\n transient=True,\n )\n\n # add content to proper room\n if isinstance(self.rooms.get(room_id, None), room.Room):\n self.rooms[room_id].add_content(inp)\n\n self.add_task(\n self.on_receive(room_id, inp_type, params), transient=True\n )", "async def handle_websocket(self, websocket: Any):", "def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))", "async def _handle_agent_message(self, envelope: Envelope) -> None:\n destination = envelope.to\n\n if destination not in self._out_queues.keys():\n msg = OEFMessage(\n type=OEFMessage.Type.DIALOGUE_ERROR,\n id=STUB_DIALOGUE_ID,\n dialogue_id=STUB_DIALOGUE_ID,\n origin=destination,\n )\n msg_bytes = OEFSerializer().encode(msg)\n error_envelope = Envelope(\n to=envelope.sender,\n sender=DEFAULT_OEF,\n protocol_id=OEFMessage.protocol_id,\n message=msg_bytes,\n )\n await self._send(error_envelope)\n return\n else:\n await self._send(envelope)", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 
'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client", "def handle_agent_credentials_callback(agent, topic, payload):\n # handle callbacks during credential exchange protocol handshake\n # - update credential status\n test = settings.REVOCATION\n\n state = payload[\"state\"]\n\n cred_exch_id = payload[\"credential_exchange_id\"]\n connection_id = payload[\"connection_id\"]\n\n print(\">>> callback:\", agent.agent_name, topic, state, cred_exch_id)\n\n connection = AgentConnection.objects.filter(agent=agent, guid=connection_id).get()\n\n cred_exches = AgentConversation.objects.filter(connection__agent=agent, guid=cred_exch_id).all()\n\n if state == \"offer_received\":\n # holder receives a credential offer - create a new AgentConversation\n conversation = AgentConversation(\n connection = connection,\n conversation_type = CRED_EXCH_CONVERSATION,\n guid = cred_exch_id,\n status = state)\n conversation.save()\n\n elif state == \"basicmessages\":\n # issuer receives a credential request (no action, we have \"auto submit\")\n conversation = cred_exches[0]\n\n# elif state == \"request_received\":\n# # issuer receives a credential request (no action, we have \"auto submit\")\n# print('request_received', cred_exches)\n# conversation = cred_exches[0]\n# conversation.status = state\n# conversation.save()\n\n elif state == \"revocation_registry\":\n # holder receives a credential (no action; \"auto store\")\n conversation = cred_exches[0]\n conversation.status = state\n conversation.save()\n\n elif state == \"credential_received\":\n # holder receives a credential (no action; \"auto store\")\n conversation = cred_exches[0]\n conversation.status = state\n conversation.save()\n\n elif state == \"credential_issued\":\n # holder receives a credential (no action; \"auto store\")\n conversation = cred_exches[0]\n conversation.status = state\n conversation.save()\n\n elif state == \"credential_acked\":\n # issuer receives an acknowledgement that the credential was recevied (no action)\n\n if test == True:\n conversation = AgentConversation(\n connection=connection,\n conversation_type=CRED_EXCH_CONVERSATION,\n guid=cred_exch_id,\n status=state,\n rev_reg_id = payload[\"revoc_reg_id\"],\n cred_rev_id = payload[\"revocation_id\"])\n conversation.save()\n else:\n conversation = cred_exches[0]\n conversation.status = state\n conversation.save()\n \n elif state == \"proposal_received\":\n # holder save a credential propose (no action; \"auto store\")\n conversation = AgentConversation(\n connection = connection,\n conversation_type = CRED_EXCH_CONVERSATION,\n guid = cred_exch_id,\n status = \"proposal_received\")\n conversation.save()\n \n else:\n # ignore all other statuses (but update state)\n if 0 < len(cred_exches):\n conversation = cred_exches[0]\n conversation.status = state\n conversation.save()\n\n return Response(\"{}\")", "def listen_to_agents(self):\n while True:\n msg = self.queue.get()\n uid = msg.pop('uid')\n\n # Do something with message (for now, send it to the game router)\n msg_sig = sorted(msg.keys())\n \n # Find appropriate handler\n for handler in self.gr.handlers:\n hand_sig = sorted(handler.signature)\n if msg_sig == sorted(handler.signature):\n # Package msg as a Message object\n message = Message(msg, source=uid)\n handler.respond(message)", "def handle_message(self, message):\n if not self._connected:\n print('CommandForwarder received message \"{}\" but nobody 
is connected', message)\n return\n if 'command' not in message:\n print('CommandForwarder no command in command message')\n return\n\n if message['command'] not in self.VALID_COMMANDS:\n print(\n 'CommandForwarder unknown command: \"{command}\"'.format(\n command=message['command']\n )\n )\n return\n\n try:\n if message['command'] == 'line-up':\n # TODO: Right now, line-up just means start recording the camera\n pass\n else:\n self._connection.sendall(message['command'].encode('utf-8'))\n\n if message['command'] == 'stop':\n # TODO: We also want to stop the camera when someone clicks stop\n pass\n\n except Exception as exc:\n print(\n 'Unable to forward command \"{}\": {}'.format(\n message['command'],\n exc\n )\n )", "async def on_receive(self, websocket: WebSocket, data: typing.Any) -> None:", "async def _handle_battle_message(self, split_messages: List[List[str]]) -> None:\n # Battle messages can be multiline\n if (\n len(split_messages) > 1\n and len(split_messages[1]) > 1\n and split_messages[1][1] == \"init\"\n ):\n battle_info = split_messages[0][0].split(\"-\")\n battle = await self._create_battle(battle_info)\n else:\n battle = await self._get_battle(split_messages[0][0])\n\n for split_message in split_messages[1:]:\n if len(split_message) <= 1:\n continue\n elif split_message[1] in self.MESSAGES_TO_IGNORE:\n pass\n elif split_message[1] == \"request\":\n if split_message[2]:\n request = orjson.loads(split_message[2])\n battle._parse_request(request)\n if battle.move_on_next_request:\n await self._handle_battle_request(battle)\n battle.move_on_next_request = False\n elif split_message[1] == \"win\" or split_message[1] == \"tie\":\n if split_message[1] == \"win\":\n battle._won_by(split_message[2])\n else:\n battle._tied()\n await self._battle_count_queue.get()\n self._battle_count_queue.task_done()\n self._battle_finished_callback(battle)\n async with self._battle_end_condition:\n self._battle_end_condition.notify_all()\n elif split_message[1] == \"error\":\n self.logger.log(\n 25, \"Error message received: %s\", \"|\".join(split_message)\n )\n if split_message[2].startswith(\n \"[Invalid choice] Sorry, too late to make a different move\"\n ):\n if battle.trapped:\n await self._handle_battle_request(battle)\n elif split_message[2].startswith(\n \"[Unavailable choice] Can't switch: The active Pokémon is \"\n \"trapped\"\n ) or split_message[2].startswith(\n \"[Invalid choice] Can't switch: The active Pokémon is trapped\"\n ):\n battle.trapped = True\n await self._handle_battle_request(battle)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't switch: You can't switch to an active \"\n \"Pokémon\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't switch: You can't switch to a fainted \"\n \"Pokémon\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't move: Invalid target for\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't move: You can't choose a target for\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't move: \"\n ) and split_message[2].endswith(\"needs a target\"):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif (\n split_message[2].startswith(\"[Invalid choice] Can't move: Your\")\n and 
\" doesn't have a move matching \" in split_message[2]\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Incomplete choice: \"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Unavailable choice]\"\n ) and split_message[2].endswith(\"is disabled\"):\n battle.move_on_next_request = True\n elif split_message[2].startswith(\"[Invalid choice]\") and split_message[\n 2\n ].endswith(\"is disabled\"):\n battle.move_on_next_request = True\n elif split_message[2].startswith(\n \"[Invalid choice] Can't move: You sent more choices than unfainted\"\n \" Pokémon.\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't move: You can only Terastallize once per battle.\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n else:\n self.logger.critical(\"Unexpected error message: %s\", split_message)\n elif split_message[1] == \"turn\":\n battle._parse_message(split_message)\n await self._handle_battle_request(battle)\n elif split_message[1] == \"teampreview\":\n battle._parse_message(split_message)\n await self._handle_battle_request(battle, from_teampreview_request=True)\n elif split_message[1] == \"bigerror\":\n self.logger.warning(\"Received 'bigerror' message: %s\", split_message)\n else:\n battle._parse_message(split_message)", "def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")", "def _websocket_message(self, msg):\n if msg is None:\n self._logger.warn(\"Websocket server disconnected!\")\n if not self._disconnect_issued:\n if self._ws is not None:\n self._ws.close()\n self._ws = None\n yield self._connect(reconnecting=True)\n return\n try:\n msg = json.loads(msg)\n self._logger.debug(\"Message received: %s\", msg)\n msg_id = str(msg['id'])\n if msg_id.startswith('redis-pubsub'):\n self._process_redis_message(msg, msg_id)\n elif msg_id.startswith('redis-reconnect'):\n # only resubscribe to namespaces, the server will still\n # publish sensor value updates to redis because the client\n # did not disconnect, katportal lost its own connection\n # to redis\n yield self._resend_subscriptions()\n else:\n self._process_json_rpc_message(msg, msg_id)\n except Exception:\n self._logger.exception(\n \"Error processing websocket message! 
{}\".format(msg))\n if self._on_update:\n self._io_loop.add_callback(self._on_update, msg)\n else:\n self._logger.warn('Ignoring message (no on_update_callback): %s',\n msg)", "def handle_websocket():\n\n websocket = request.environ.get('wsgi.websocket')\n if not websocket:\n abort(400, 'Expected WebSocket request.')\n\n print('connection recieved')\n\n websocket_metadata = {}\n\n while not websocket.closed:\n try:\n message = websocket.receive()\n if message is None:\n continue\n\n decoded_message = json.loads(message)\n message_type = decoded_message.get('messageType', None)\n if message_type not in _ws_routes:\n print('Unrecognized message_type %s' % message_type)\n continue\n\n _ws_routes[message_type](decoded_message, websocket,\n websocket_metadata)\n except WebSocketError:\n break\n\n # If we have the API key, we can waste a little less time searching for the\n # WebSocket.\n ws_api_key = websocket_metadata.get('apiKey', '')\n if websocket in _web_ui_ws_connections.get(ws_api_key, []):\n _web_ui_ws_connections[ws_api_key].remove(websocket)\n # ... Otherwise we have to search everywhere to find and delete it.\n else:\n for api_key, websockets_for_api_key in _web_ui_ws_connections.items():\n if websocket in websockets_for_api_key:\n websockets_for_api_key.remove(websocket)\n break\n\n for api_key, websockets in list(_web_ui_ws_connections.items()):\n if not websockets:\n del _web_ui_ws_connections[api_key]", "def handle_handshake(self, message):\n message_type = messages.get_message_type(message)\n if message_type == \"OFPT_HELLO\":\n self.hello_received = True\n if message_type == \"OFPT_FEATURES_REPLY\":\n self.features_reply_received = True\n self.dpid = message.datapath_id\n if self.features_reply_received and self.hello_received:\n #print \"Switch on: %s:%s has the datapath ID: %s\" % (\n # self.address, self.port, self.dpid)\n if self.needs_migration:\n #print \"Migrating switch...\"\n self.handle_migration(message)\n else:\n self.activate_controller()\n self.controller.start_sending_to_switch()", "def handle(self):\n for request in self._each_msg():\n r_len, r_type = struct.unpack_from('> I B', request)\n\n if r_type == self.SSH2_AGENTC_REQUEST_IDENTITIES:\n response = self._merge_identities(request)\n elif r_type == self.SSH2_AGENTC_SIGN_REQUEST:\n # Extract key blob from request\n key_blob_len = struct.unpack_from('> I', request, 5)[0]\n key_blob = request[9:9 + key_blob_len]\n hex_blob = ''.join('{:02x}'.format(b) for b in key_blob)\n\n agent = self._identity_map[hex_blob]\n\n if agent:\n if agent == self.server.alternate_agent:\n key_digest = self._key_digest(key_blob)\n LOG.info(\"identity %s used by %s: %s\", key_digest,\n self.username, self.process_info)\n\n response = agent.forward_request(request)\n else:\n response = \\\n self.server.default_agent.forward_request(request)\n else:\n response = self.server.default_agent.forward_request(request)\n\n self.request.sendall(response)", "async def handle_websocket(self, request):\n print('got sth from websocket')\n self.ws = web.WebSocketResponse()\n await self.ws.prepare(request)\n\n async for msg in self.ws:\n if msg.type == aiohttp.WSMsgType.TEXT:\n if msg.data == 'close':\n await self.ws.close()\n elif msg.data == 'ready-for-update':\n asyncio.ensure_future(self.start_update())\n elif msg.type == aiohttp.WSMsgType.ERROR or\\\n msg.type == aiohttp.WSMsgType.CLOSING or\\\n msg.type == aiohttp.WSMsgType.CLOSE or\\\n msg.type == aiohttp.WSMsgType.closed:\n await self.ws.close()\n print(\"websocket closed\")\n\n return 
self.ws" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tile an image to a given width and height.
def tile_image( im: Image.Image, width: int, height: int, mode: Optional[str] = "RGB", **kwargs: Any ) -> Image.Image: im_out = Image.new(mode, (width, height), **kwargs) h_tiles = ceil(width / im.width) v_tiles = ceil(height / im.height) for i in range(v_tiles): y = im.height * i for j in range(h_tiles): x = im.width * j im_out.paste(im, box=(x, y)) return im_out
[ "def tile(img):\n rows, cols, res = img.rows, img.cols, img.res\n pixels, pixsize = img.pixels, channels[img.pixtype] # assumes 8-bit channels\n width, height = cols/res, rows/res\n\n def tiled(x, y):\n h = (x + width/2.0) % width # horz, vert offset from top left\n v = (height/2.0 - y) % height \n r, c = int(v*res), int(h*res)\n offset = (cols*r + c)*pixsize\n return pixels[offset:offset+pixsize]\n return (tiled, img.pixtype)", "def create_image(self, image_location, width, height):\n tile_image = pygame.image.load(image_location).convert_alpha()\n # The tile is a square and the height is expected to be smaller than the width\n tile_width = width\n tile_height = height\n tile_image = pygame.transform.scale(tile_image, (tile_width, tile_height))\n\n # The self.image attribute expects a Surface, so we can manually create one and \"blit\" the tile image onto the surface (i.e. paint an image onto a surface).\n # We use list comprehension to quickly make the blits_data list of tuples (each tuple has the tile image, and the X and Y coordinates)\n # Don't know what list comprehension is? Go look it up on the Internet. That's what all professional software engineers do ;)\n image = pygame.Surface((width, height))\n blits_data = [(tile_image, (tile_width * i, 0)) for i in range(math.ceil(width / tile_width))]\n image.blits(blits_data)\n\n return image", "def stitch(self):\n\n image = Image.new('RGB', (self.width * TILE_SIZE, self.height * TILE_SIZE))\n for x in range(0, self.width):\n for y in range(0, self.height):\n image.paste(self.maptiles[x][y].image, (x * TILE_SIZE, y * TILE_SIZE))\n self.image = image", "def generateImage(self, **kwargs):\n\n start_x = kwargs.get('start_x', None)\n start_y = kwargs.get('start_y', None)\n tile_width = kwargs.get('tile_width', 5)\n tile_height = kwargs.get('tile_height', 5)\n\n # Check that we have x and y tile coordinates\n if start_x == None or start_y == None:\n start_x, start_y = self.getXY()\n\n # Determine the size of the image\n width, height = 256 * tile_width, 256 * tile_height\n\n # Create a new image of the size require\n map_img = Image.new('RGB', (width, height))\n print (tile_width, tile_height)\n for x in range(0, tile_width):\n for y in range(0, tile_height):\n # url = 'https://mt0.google.com/vt/lyrs=y&hl=en&x=' + str(start_x + x) + '&y=' + str(\n # start_y + y) + '&z=' + str(self._zoom)\n url = 'https://mt0.google.com/vt?x='+str(start_x+x)+'&y='+str(start_y+y)+'&z='+str(\n self._zoom)\n print (x, y, url)\n current_tile = str(x) + '-' + str(y)\n urllib.urlretrieve(url, current_tile)\n\n im = Image.open(current_tile)\n map_img.paste(im, (x * 256, y * 256))\n\n os.remove(current_tile)\n\n return map_img", "def test_tiled():\n size = [25, 25]\n img = Image.new('RGB', (10, 10))\n img.putpixel((5, 5), (0, 255, 0))\n\n parameters = {'data': [img], 'size': size}\n\n tiled = images.tiled(parameters)\n\n assert_equal(tiled.size, tuple(size))\n assert_equal(tiled.getpixel((5, 5)), (0, 255, 0))\n assert_equal(tiled.getpixel((15, 5)), (0, 255, 0))", "def test_smoke(self):\n image_data = Image.open(\"test_photo.jpg\")\n num_tiles = 10\n max_shift = 30\n new_image_data = tileify(image_data, num_tiles, max_shift)\n assert new_image_data", "def tile_image(im):\n r1 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n r2 = np.concatenate((im[:,::-1], im, im[:, ::-1]), 1)\n r3 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n return(np.concatenate((r1, r2,r3), 0))", "def get_and_save_image(pano_id, identif, size, vertical_tiles, 
horizontal_tiles, out_path, cropped=False, full=True):\n \n first_url_img = f'http://cbk0.google.com/cbk?output=tile&panoid={pano_id}&zoom=5&x={0}&y={0}'\n first = Image.open(requests.get(first_url_img, stream=True).raw)\n first_vert = False\n \n print(f'Starting image {identif}')\n\n for y in range(1, vertical_tiles):\n #new_img = Image.open(f'./images/test_x0_y{y}.png')\n url_new_img = f'http://cbk0.google.com/cbk?output=tile&panoid={pano_id}&zoom=5&x={0}&y={y}'\n new_img = Image.open(requests.get(url_new_img, stream=True).raw)\n first = ImageTool.concat_vertically(first, new_img)\n first_slice = first\n\n for x in range(1, horizontal_tiles):\n #first = Image.open(f'./images/test_x{x}_y0.png')\n first_url_img = f'http://cbk0.google.com/cbk?output=tile&panoid={pano_id}&zoom=5&x={x}&y={0}'\n first = Image.open(requests.get(first_url_img, stream=True).raw)\n \n for y in range(1, vertical_tiles):\n #new_img = Image.open(f'./images/test_x{x}_y{y}.png')\n url_new_img = f'http://cbk0.google.com/cbk?output=tile&panoid={pano_id}&zoom=5&x={x}&y={y}'\n new_img = Image.open(requests.get(url_new_img, stream=True).raw)\n first = ImageTool.concat_vertically(first, new_img)\n\n new_slice = first\n first_slice = ImageTool.concat_horizontally(first_slice, new_slice)\n\n first_slice.thumbnail(size, Image.ANTIALIAS)\n name = f'{out_path}PANORAMA_{identif}'\n if full:\n first_slice.save(f'{name}.jpg')\n if cropped:\n first_slice.crop((0, 0, size[1], size[1])).save(f'{name}_p1.jpg')\n first_slice.crop((size[1], 0, size[0], size[1])).save(f'{name}_p2.jpg')\n \n return identif", "def im_tilecut(img, tile_no=None, tile_size=None):\n dx,dy = img.shape[0:2]\n if tile_no is not None and tile_size is None:\n nx,ny = entuple(tile_no)\n wx,wy = ceil(dx/nx),ceil(dy/ny)\n elif tile_no is None and tile_size is not None:\n wx,wy = entuple(tile_size)\n nx,ny = ceil(dx/wx),ceil(dy/wy)\n else:\n return None\n sx,sy = (dx-wx)//(nx-1),(dy-wy)//(ny-1) # TODO: fix a problem when nx=1 or ny=1\n for i in range(0,dx,sx):\n for j in range(0,dy,sy):\n if i+wx>=dx or j+wy>=dy: continue\n yield img[i:i+wx,j:j+wy]", "def _tile_image(self, data):\n image = Image.open(StringIO(data))\n return image.convert('RGBA')", "def make_floor(self):\n\n for y in range(0, self.num_tiles[1] + 1):\n for x in range(0, self.num_tiles[0] + 1):\n offset = (x * self.tile.size[0], y * self.tile.size[1])\n self.image.blit(self.tile.image, offset)", "def blit_source_image(output, template, image, panel):\n img = Image.open(image)\n screen = TEMPLATES[template]['screen']\n factor = float(screen[0]) / float(img.size[0])\n dimensions = [int(i * factor) for i in img.size]\n if panel:\n dimensions[1] -= TEMPLATES[template]['panel']\n img = img.resize(dimensions, Image.ANTIALIAS)\n img = img.crop([0, 0] + [min(*i) for i in zip(dimensions, screen)])\n offset = list(TEMPLATES[template]['offset'])\n if panel:\n offset[1] += TEMPLATES[template]['panel']\n output.paste(img, tuple(offset))", "def picture_as_tile(self):\n any_valid_tile = list(self.tilebag.values())[0]\n individual_tile_dimension = any_valid_tile.picture_dimension()\n tile_dimension = individual_tile_dimension * self.max_dimension\n print(f\"we will create a tile of {tile_dimension}x{tile_dimension}\")\n\n t = Tile(42, tile_size=tile_dimension)\n\n target_x = 0\n target_y = 0\n\n # so now we need to iterate through each row of placed tiles..\n for tile_y in range(self.min_y, self.max_y + 1):\n # for this row we need to build up per-line wide tile values..\n for in_tile_y in 
range(individual_tile_dimension):\n # now iterate across each tile in the board\n target_x = 0\n for tile_x in range(self.min_x, self.max_x + 1):\n # and through each tile\n for in_tile_x in range(individual_tile_dimension):\n # get this value and add it to the row\n if self.board[(tile_x, tile_y)].is_picture_populated(\n in_tile_x, in_tile_y\n ):\n t.set_populated(target_x, target_y)\n # next x\n target_x += 1\n # and next y\n target_y += 1\n print(f\"Target line is now: {target_y}\")\n\n return t", "def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)", "def openImage(self, fn):\n\n #get the image, and make sure it's pixel dimensions are consistent\n #tilesets have 1 spacing between each tile,\n #so adding 1 should give a multiple of the tilesize+1\n tilesetImage = data.getImage(fn)\n tilesetImage.set_colorkey(self.transparency)\n \n data.check(((tilesetImage.get_width()+1)%(self.tileSize[0]+1))==0, fn)\n data.check(((tilesetImage.get_height()+1)%(self.tileSize[1]+1))==0, fn)\n dimensions = ((tilesetImage.get_width()+1)/(self.tileSize[0]+1),\n (tilesetImage.get_height()+1)/(self.tileSize[1]+1))\n\n #iterate over each tile, cutting it out and adding to our list\n #go across each row in turn to get index numbering correct\n self.tiles = []\n for y in range(0, dimensions[1]):\n for x in range(0, dimensions[0]):\n tile = tilesetImage.subsurface((x*(self.tileSize[0]+1), y*(self.tileSize[1]+1), self.tileSize[0], self.tileSize[1]))\n self.tiles.append(tile)\n\n #calculate offset\n self.tileOffset = ((globs.TILESIZE[0]-self.tileSize[0])/2,\n globs.TILESIZE[1]-self.tileSize[1])", "def _tile_image(self, data):\n image = Image.open(BytesIO(data))\n return image.convert('RGBA')", "def _tile_image(self, plane, image_shape):\n assert 3 <= len(image_shape) <= 4\n plane = tf.convert_to_tensor(plane)\n plane = tf.expand_dims(plane, -1)\n channels = image_shape[-1]\n image = tf.tile(plane, (1, 1, channels))\n\n if len(image_shape) == 4:\n batch_size = image_shape[0]\n image = tf.expand_dims(image, 0)\n image = tf.tile(image, (batch_size, 1, 1, 1))\n\n return image", "def tile_images(imgs, rows, cols, padding=0):\n assert imgs.dim()==4\n N,C,H,W = list(imgs.size())\n assert C in [3,1]\n assert N==rows*cols\n tiled = imgs.new().resize_(C, (H+padding)*rows-padding, (W+padding)*cols-padding).fill_(0)\n for i in xrange(N):\n x = (i % cols)*(W+padding)\n y = (i / cols)*(H+padding)\n tiled[:, y:y+H, x:x+W] = imgs[i]\n return tiled", "def tile(self, x: int, y: int):\n return self.awmap.tile(x, y)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch an image from a given URL.
def fetch_image(url: str) -> Image.Image: r = httpx.get(url) if not r.status_code == httpx.codes.OK: raise HTTPException(r.status_code, detail=r.reason_phrase) f = BytesIO(r.content) im = handle_image_file(f) return im
[ "def fetch_image(img_url):\n\n r = requests.get(img_url)\n return r.content", "def _download_img_from_url(self, img_url):\r\n response = requests.get(img_url)\r\n img = Image.open(BytesIO(response.content))\r\n print(\"Downloaded image from url\")\r\n return img", "def get_image(url):\n img = urlretrieve(url)\n with open(img[0], \"rb\") as f:\n return f.read()", "def fetch_image(url: str) -> BytesIO:\n http = urllib3.PoolManager()\n return BytesIO(http.request(\"GET\", url).data)", "def download_pil_image(self, url):\r\n return Image.open(urlopen(url))", "def http_get_image(url_in):\n try:\n req = urllib.request.Request(url_in)\n req.add_header('User-Agent', USER_AGENT)\n req.add_header('Referer', HOST_NAME) # Need referer\n req = urllib.request.urlopen(req)\n image = req.read()\n req.close()\n except urllib.error.HTTPError as error:\n image = None\n print('[{}] image request {} failed...'.format(error.code, url_in))\n return image", "def download_image(url):\n buffer = BytesIO()\n download_from_url(url, buffer, pbar=False)\n buffer.seek(0)\n return Image.open(buffer)", "def read_image(url):\n f = urllib2.urlopen(url)\n img = StringIO(f.read())\n return Image.open(img)", "def open_image_url(url):\n response = requests.get(url)\n img = Image.open(urlopen(Request(url, headers={'User-Agent': 'Mozilla/5.0'})))\n width, height = img.size\n\n return img, width, height", "def __fetch_image(self):\n \n # Note: this can raise a Value Error\n response = urllib2.urlopen(self._image_url)\n \n # Create image file in memory so PIL can use it\n self._image = StringIO.StringIO(response.read())", "def load_image(url):\n\tfd = urllib2.urlopen(url)\n\treturn StringIO.StringIO(fd.read())", "def download_image(url):\n request = urllib.request.Request(\n url, headers={'Authorization': 'Bearer %s' % ACCESS_TOKEN})\n return urllib.request.urlopen(request).read()", "def download_image(self, image_url):\n response = requests.get(image_url, verify=True)\n response.raw.decode_content = True\n image_data = io.BytesIO(response.content)\n return Image.open(image_data)", "def download_image(url):\n request = urllib.request.Request(url, headers={'Authorization': 'Bearer %s' % BOT_TOKEN})\n return urllib.request.urlopen(request).read()", "def from_url(self) -> PngImagePlugin.PngImageFile:\n response = requests.get(self.url)\n img = Image.open(BytesIO(response.content))\n\n return img", "def load_remote_image(image_url):\n response = requests.get(image_url, stream=True)\n img = Image.open(BytesIO(response.content))\n image = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)\n return image", "def getResponseFromHttpRequest(url):\n try:\n response = HTTP.Request(url, headers = {'User-agent': USER_AGENT, 'Accept': 'image/jpeg'})\n return response\n except:\n Log.Debug('Error fetching URL: \"%s\".' 
% url)\n return None", "def download_img(self, url):\n filename = url.split('/')[-1]\n dest = os.path.join(self.images_dir, filename)\n # http://stackoverflow.com/questions/13137817/how-to-download-image-using-requests\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(dest, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)\n print 'Downloaded image from', url, 'to', dest\n return True\n else:\n # @todo should note failure in db.images\n # otherwise we'll just perpetually try to download\n # the image over & over, always failing\n print 'ERROR: HTTP Response != 200 from', url\n return False", "def set_image_from_url(self, url: str):\n response = httpx.get(url)\n if response.status_code == 200:\n file = ContentFile(response.content)\n file.name = \"url-\" + shortuuid.uuid()\n self.image = file\n self.save()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for command_trigger_webhook_post. Launch a command via a Trigger.
def test_command_trigger_webhook_post(self): pass
[ "async def test_webhook_endpoint_generates_telegram_command_event(\n hass: HomeAssistant,\n webhook_platform,\n hass_client: ClientSessionGenerator,\n update_message_command,\n) -> None:\n client = await hass_client()\n events = async_capture_events(hass, \"telegram_command\")\n\n response = await client.post(TELEGRAM_WEBHOOK_URL, json=update_message_command)\n assert response.status == 200\n assert (await response.read()).decode(\"utf-8\") == \"\"\n\n # Make sure event has fired\n await hass.async_block_till_done()\n\n assert len(events) == 1\n assert events[0].data[\"command\"] == update_message_command[\"message\"][\"text\"]", "def test_api_onfido_webhook_post(self):\n pass", "def test_create_webhook(self):\n pass", "def test_event_post_git_command(self, setup: Any, capsys: pytest.CaptureFixture[str]) -> None:\n\n @Event.PostGitCommand.subscribe\n def hook(command: GitCommand) -> None:\n print(command.largs, file=sys.stderr)\n\n assert Event.PostGitCommand.validate()\n\n GitCommand(\"--version\").execute()\n\n assert \"--version\" in capsys.readouterr().err", "def trigger_build(self, postdata):\n pass", "def test_webhooks_create(self):\n pass", "def triggerHook(self, *args, **kwargs):\n\n return self._makeApiCall(self.funcinfo[\"triggerHook\"], *args, **kwargs)", "def test_update_webhook(self):\n pass", "def menu_send_job(slack_webhook_url, host):\n try:\n menu = Menu.objects.get(published_date=datetime.date.today())\n\n menu_url = host + 'menu/'+ menu.uuid\n\n text = f\"Hello! This is a reminder for today's Menu! <{menu_url}|Click here> for details!\"\n\n payload = {\"text\": text}\n\n response = requests.post(slack_webhook_url, data=json.dumps(payload))\n\n logger.info('Cronjob message status=%s', response.status_code)\n except Menu.DoesNotExist:\n logger.error('Cronjob message status=failed')", "async def trigger_build(self, *, branch=None, message=None):", "def test_post_to_procedures_endpoint_sends_git_arguments(client):\n spec = {\n topics.request.procedure.create: [\n ([topics.procedure.lifecycle.created], dict(result=CREATE_GIT_SUMMARY))\n ],\n }\n helper = PubSubHelper(spec)\n\n client.post(PROCEDURES_ENDPOINT, json=CREATE_GIT_JSON)\n\n # verify message sequence and topics\n assert helper.topic_list == [\n topics.request.procedure.create, # procedure creation requested\n topics.procedure.lifecycle.created, # CREATED ProcedureSummary returned\n ]\n\n # now verify arguments were extracted from JSON and passed into command\n expected_cmd = PrepareProcessCommand(\n script=CREATE_GIT_SUMMARY.script,\n init_args=CREATE_SUMMARY.script_args[0].fn_args,\n )\n assert helper.messages[0][1][\"cmd\"] == expected_cmd", "def test_get_webhook(self):\n pass", "def handle_github_webhook():\n\n verify_signature(request)\n logger.info(\"Received webhook\")\n\n if should_deploy(request):\n schedule_deploy()\n\n return \"\"", "def test_webhook():\n data = {\n \"username\": CONFIG['USERNAME'],\n \"avatar_url\": CONFIG['AVATAR_URL'],\n \"embeds\": [{\n \"title\": \"Testing Webhook\",\n \"description\": \"This is just a quick test to ensure the webhook works. 
Thanks again for using these monitors!\",\n \"color\": int(CONFIG['COLOUR']),\n \"footer\": {'text': 'Made by Yasser'},\n \"timestamp\": str(datetime.utcnow())\n }]\n }\n\n result = requests.post(CONFIG['WEBHOOK'], data=json.dumps(data), headers={\"Content-Type\": \"application/json\"})\n\n try:\n result.raise_for_status()\n except requests.exceptions.HTTPError as err:\n logging.error(err)\n else:\n print(\"Payload delivered successfully, code {}.\".format(result.status_code))\n logging.info(msg=\"Payload delivered successfully, code {}.\".format(result.status_code))", "def test_post_to_procedures_endpoint_sends_default_git_arguments(client):\n summary = copy.deepcopy(CREATE_GIT_SUMMARY)\n summary.script.git_args = GitArgs()\n spec = {\n topics.request.procedure.create: [\n ([topics.procedure.lifecycle.created], dict(result=summary))\n ],\n }\n helper = PubSubHelper(spec)\n\n summary_json = copy.deepcopy(CREATE_GIT_JSON)\n del summary_json[\"script\"][\"git_args\"]\n\n client.post(PROCEDURES_ENDPOINT, json=summary_json)\n\n # verify message sequence and topics\n assert helper.topic_list == [\n topics.request.procedure.create, # procedure creation requested\n topics.procedure.lifecycle.created, # CREATED ProcedureSummary returned\n ]\n\n # now verify arguments were extracted from JSON and passed into command\n expected_cmd = PrepareProcessCommand(\n script=GitScript(\n CREATE_GIT_SUMMARY.script.script_uri, # pylint: disable=no-member\n git_args=GitArgs(),\n create_env=True,\n ),\n init_args=CREATE_GIT_SUMMARY.script_args[0].fn_args,\n )\n assert helper.messages[0][1][\"cmd\"] == expected_cmd", "def test_issue_post_issue_reaction(self):\n pass", "def test_webhook_build_success(self):\n payload = json.dumps({\n 'matrix': [\n {\n 'config': {\n 'env': [\n 'REVIEWBOARD_STATUS_UPDATE_ID=%d'\n % self.status_update.pk,\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'\n % self.config.pk,\n ],\n },\n },\n ],\n 'build_url': 'https://example.com/build',\n 'state': 'passed',\n })\n self.spy_on(TravisCIWebHookView._validate_signature,\n owner=TravisCIWebHookView,\n call_fake=lambda self, request, integration_config: True)\n\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 200)\n\n self.status_update = StatusUpdate.objects.get(pk=self.status_update.pk)\n self.assertEqual(self.status_update.url, 'https://example.com/build')\n self.assertEqual(self.status_update.state,\n StatusUpdate.DONE_SUCCESS)", "async def test_trigger_on_running_wait_successfully(self, mock_hook, caplog, trigger):\n mock_hook.return_value.get_cloud_build.return_value = self._mock_build_result(\n Build(id=TEST_BUILD_ID, status=Build.Status.WORKING, steps=[BuildStep(name=\"ubuntu\")])\n )\n caplog.set_level(logging.INFO)\n\n task = asyncio.create_task(trigger.run().__anext__())\n await asyncio.sleep(0.5)\n\n # TriggerEvent was not returned\n assert task.done() is False\n\n assert \"Build is still running...\" in caplog.text\n assert f\"Sleeping for {TEST_POLL_INTERVAL} seconds.\" in caplog.text\n\n # Prevents error when task is destroyed while in \"pending\" state\n asyncio.get_event_loop().stop()", "def test_valid_webhook(self, mock_send):\n send_notification(\"valid_webhook\", self.message)\n mock_send.assert_called()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if specified operation is allowed on the resource.
def _operation_allowed(self, headers_dict, operation): if 'allow' in headers_dict: if operation in headers_dict['allow']: return True return False
[ "def supports_operation(self, operation: str) -> bool:\n return operation in OPERATION_SUPPORT_BY_TYPE[self.backing_type]", "def validate_operation(self, operation: 'cirq.Operation') -> None:", "def is_valid_operation(self):\n\n if self.operation_value < 100:\n return True\n else:\n return False", "def IsOperationSpecificationAllowed(specName):\n result = False\n where = \"name='%s' and type='Operation'\" % specName\n if bool(acm.FComponent.Select(where)):\n u = acm.User()\n componentType = acm.GetDomain(\"enum(ComponentType)\")\n result = u.IsAllowed(specName, componentType.Enumeration('Operation'))\n \n return result", "def _is_valid_fetch_operation(operation):\n if operation in FetchQuantity._supported_fetch_operations():\n return True\n else:\n return False", "def is_action_valid(self, method, action):\n if method in self.api_methods:\n if action in self.api_methods[method]:\n if getattr(self, action, None):\n return True\n\n return False", "def is_authorized(self, user, operation, resource, item):\n return True", "def permit_required(self):\n return \"permission\" in self.description.lower()", "async def contains(self, operation: Operation) -> bool:\n return operation.instance_name in self.operations", "def is_action_allowed(self, action, raise_if_disallowed=False):\n if action in self._obs_state_machine.get_triggers(\n self._obs_state_machine.state\n ):\n return True\n\n if raise_if_disallowed:\n raise StateModelError(\n f\"Action {action} is not allowed in obs state {self.obs_state.name}.\"\n )\n return False", "def _is_valid_fetch_quantity_util_operation(operation):\n if operation in FetchQuantityUtil.\\\n _supported_fetch_quantity_util_operations():\n return True\n else:\n return False", "def check_rights(self, resources, request=None):\r\n if not self.auth:\r\n return True\r\n\r\n try:\r\n if not self.auth.test_rights(resources, request=request):\r\n raise AssertionError()\r\n\r\n except AssertionError, e:\r\n raise HttpError(\r\n \"Access forbiden. 
{0}\".format(e),\r\n status=status.HTTP_403_FORBIDDEN\r\n )", "def has_access(self, action_name: str, resource_name: str, user=None) -> bool:\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n\n if self.is_dag_resource(resource_name):\n if (action_name, permissions.RESOURCE_DAG) in user.perms:\n return True\n return (action_name, resource_name) in user.perms\n\n return False", "def _is_should(self, operation):\n return (\n isinstance(operation, OrOperation) or\n isinstance(operation, UnknownOperation) and\n self.default_operator == ElasticsearchQueryBuilder.SHOULD\n )", "def acl_check_entity(self, entity, auth_context, op, obj):\n acl_check = (\n entity.acl_check(auth_context, op, obj)\n if entity.has_acl()\n else self.default_acl.acl_check(auth_context, op, obj))\n if not acl_check:\n raise exceptions.AclError(\n 'unauthorized change to %s' % (\n entity.name,))", "def check_allowed(self):\n if self.state_model.op_state in [\n DevState.FAULT,\n DevState.UNKNOWN,\n DevState.DISABLE,\n ]:\n return False\n\n return True", "def can_do(user, what, data=None):\n if has_perm(user, Perms.OMNIPOTENT):\n return True\n\n if what == Actions.WRITE_SCHEDULES:\n return has_perm(user, Perms.EDIT_SCHEDULE)\n elif what == Actions.READ_ANY_PROJECT:\n return has_perm(user, Perms.READ_ANY_PROJECT)\n elif what == Actions.CREATE_VM_IN_PROJECT:\n prj = data\n return user.projects.filter(id=prj.id).count() > 0\n elif what == Actions.USE_PROVIDER:\n prov = data\n return (not prov.is_special or\n has_perm(user, Perms.USE_SPECIAL_PROVIDER))\n elif what == Actions.USE_VM_CONFIG:\n vmconf = data\n return (vmconf.is_special == False or\n has_perm(user, Perms.USE_SPECIAL_VM_CONFIG))\n elif what == Actions.POWER_ONOFF_REBOOT_DESTROY_VM_IN_PROJECT:\n prj = data\n return user.projects.filter(id=prj.id).count() > 0\n elif what == Actions.USE_SCHEDULE:\n schedule = data\n if not schedule.is_special:\n return True\n return has_perm(user, Perms.USE_SPECIAL_SCHEDULE)\n elif what == Actions.READ_ALL_AUDITS:\n return has_perm(user, Perms.READ_ALL_AUDITS)\n elif what == Actions.READ_ALL_POWER_LOGS:\n return has_perm(user, Perms.READ_ALL_POWER_LOGS)\n elif what == Actions.OVERRIDE_VM_SCHEDULE:\n vm = data\n return user.projects.filter(id=vm.project.id).count() > 0\n elif what == Actions.CHANGE_VM_SCHEDULE:\n vm, schedule = data['vm'], data['schedule']\n if user.projects.filter(id=vm.project.id).count() == 0:\n return False\n return can_do(user, Actions.USE_SCHEDULE, schedule)\n elif what == Actions.SET_ANY_EXPIRATION:\n # only omnipotent users can do this\n return False\n else:\n aud.warning('Unknown action “{}”'.format(action))\n return False", "def oper(accessing_obj, accessed_obj, *args, **kwargs):\n if not hasattr(accessing_obj, 'get_account'):\n return False\n account = accessing_obj.get_account()\n if not args or not args[0]:\n return False\n return account.operations.check(args[0])", "def is_accessible(self, user, method, resource):\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the ExtendedError object and returns the message. Build a list of decoded messages from the extended_error using the message registries. An ExtendedError JSON object is a response with its own schema. This function knows how to parse the ExtendedError object and, using any loaded message registries, render an array of plain language strings that represent the response.
def _render_extended_error_message_list(self, extended_error): messages = [] if isinstance(extended_error, dict): if ('Type' in extended_error and extended_error['Type'].startswith('ExtendedError.')): for msg in extended_error['Messages']: message_id = msg['MessageID'] x = message_id.split('.') registry = x[0] msgkey = x[len(x) - 1] # if the correct message registry is loaded, # do string resolution if (registry in self.message_registries and msgkey in self.message_registries[registry]['Messages']): rmsgs = self.message_registries[registry]['Messages'] msg_dict = rmsgs[msgkey] msg_str = message_id + ': ' + msg_dict['Message'] for argn in range(0, msg_dict['NumberOfArgs']): subst = '%' + str(argn+1) m = str(msg['MessageArgs'][argn]) msg_str = msg_str.replace(subst, m) if ('Resolution' in msg_dict and msg_dict['Resolution'] != 'None'): msg_str += ' ' + msg_dict['Resolution'] messages.append(msg_str) else: # no message registry, simply return the msg object # in string form messages.append(str(message_id)) return messages
[ "def _get_extended_error(self, extended_error):\n return self._render_extended_error_message_list(extended_error)", "def _get_resp_body_errors(self):\n\n if self._resp_body_errors and len(self._resp_body_errors) > 0:\n return self._resp_body_errors\n\n errors = []\n warnings = []\n resp_codes = []\n\n if self.verb is None:\n return errors\n\n dom = self.response.dom()\n if dom is None:\n return errors\n\n for e in dom.findall('Errors'):\n eSeverity = None\n eClass = None\n eShortMsg = None\n eLongMsg = None\n eCode = None\n\n try:\n eSeverity = e.findall('SeverityCode')[0].text\n except IndexError:\n pass\n\n try:\n eClass = e.findall('ErrorClassification')[0].text\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n except IndexError:\n pass\n\n try:\n eShortMsg = smart_encode(e.findall('ShortMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eLongMsg = smart_encode(e.findall('LongMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n if int(eCode) not in resp_codes:\n resp_codes.append(int(eCode))\n except IndexError:\n pass\n\n msg = str(\"Class: {eClass}, Severity: {severity}, Code: {code}, {shortMsg} {longMsg}\") \\\n .format(eClass=eClass, severity=eSeverity, code=eCode, shortMsg=eShortMsg,\n longMsg=eLongMsg)\n\n # from IPython import embed; embed()\n\n if eSeverity == 'Warning':\n warnings.append(msg)\n else:\n errors.append(msg)\n\n self._resp_body_warnings = warnings\n self._resp_body_errors = errors\n self._resp_codes = resp_codes\n\n if self.config.get('warnings') and len(warnings) > 0:\n log.warn(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(warnings)))\n\n if self.response.reply.Ack == 'Failure':\n if self.config.get('errors'):\n log.error(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(errors)))\n\n return errors\n\n return []", "def convert_error(self, data, status_code):\n errors = []\n for name, value in data.items():\n if name == 'detail':\n fmt_error = self.dict_class((\n ('detail', value),\n ('status', str(status_code)),\n ))\n errors.append(fmt_error)\n elif name == '_view_extra':\n for rname, error_dict in value.items():\n assert rname != 'meta'\n for seq, seq_errors in error_dict.items():\n if seq is None:\n # TODO: diagnose how subject feature errors are\n # getting into view_extra.\n seq = 'subject'\n for fieldname, error_list in seq_errors.items():\n path = '/linked.%s.%s.%s' % (rname, seq, fieldname)\n assert isinstance(error_list, list)\n for error in error_list:\n fmt_error = self.dict_class((\n ('detail', error),\n ('path', path),\n ('status', str(status_code)),\n ))\n errors.append(fmt_error)\n else:\n for error in value:\n fmt_error = self.dict_class((\n ('status', str(status_code)),\n ('detail', error),\n ('path', '/%s' % name),\n ))\n errors.append(fmt_error)\n assert errors, data\n return self.dict_class((('errors', errors),))", "def errors(self) -> List[str]:\n return [e.get('message')\n for e in self._error.response.json().get('errors', [])]", "def __message_decoder(self, message):\n\n message_json = json.loads(message)\n message_id = message_json['id']\n message_payload = message_json['payload']\n plugin_name = message_payload['plugin_name']\n\n return [message_id, plugin_name, message_payload]", "def _decode_error(self):\r\n error_qname = self._ns_prefix + \"error\"\r\n for child in self._element:\r\n if child.tag == error_qname:\r\n self._error = StanzaErrorElement(child)\r\n return\r\n raise 
BadRequestProtocolError(\"Error element missing in\"\r\n \" an error stanza\")", "def __init__(self, message, code=None, params=None):\n\n # PY2 can't pickle naive exception: http://bugs.python.org/issue1692335.\n super(ValidationError, self).__init__(message, code, params)\n\n if isinstance(message, ValidationError):\n if hasattr(message, 'error_dict'):\n message = message.error_dict\n # PY2 has a `message` property which is always there so we can't\n # duck-type on it. It was introduced in Python 2.5 and already\n # deprecated in Python 2.6.\n elif not hasattr(message, 'message' if six.PY3 else 'code'):\n message = message.error_list\n else:\n message, code, params = message.message, message.code, message.params\n\n if isinstance(message, dict):\n self.error_dict = {}\n for field, messages in message.items():\n if not isinstance(messages, ExtensibleValidationError):\n # Let's pass on code and params along with messages from dict\n messages = ExtensibleValidationError(messages, code, params)\n self.error_dict[field] = messages.error_list\n\n elif isinstance(message, list):\n self.error_list = []\n for item in message:\n if not isinstance(item, ExtensibleValidationError):\n # Normalize plain strings to instances of ValidationError.\n item = ExtensibleValidationError(item, code, params)\n if hasattr(item, 'error_dict') and hasattr(self, 'error_list'):\n # Convert self from list to dict and prepare for dict update later on\n self.error_dict = OrderedDict()\n if self.error_list:\n self.error_dict[NON_FIELD_ERRORS] = self.error_list\n del self.error_list\n if hasattr(self, 'error_list'):\n # Extend error_list with passed in item.error_list\n self.error_list.extend(item.error_list)\n elif hasattr(item, 'error_list'):\n # Extend __all__ errors with passed in item.error_list\n self.error_dict.setdefault(NON_FIELD_ERRORS, []).extend(item.error_list)\n else:\n # Concat error dictionaries\n for field, errors in item.error_dict.items():\n self.error_dict.setdefault(field, []).extend(errors)\n else:\n self.message = message\n self.code = code\n self.params = params\n self.error_list = [self]", "def errors_to_message(self, error_list):\n errors = error_list['errors']\n if not type(error_list['errors']) == list:\n errors = [error_list['errors']]\n\n # Only return the first error in list\n for error in errors:\n error_message = 'Error type: {0}, trigger: {1}, error string: {2}, field path: {3} and reason: {4}'.format(\n error['ApiError.Type'],\n error['trigger'],\n error['errorString'],\n error['fieldPath'],\n error['reason'],\n )\n\n return error_message", "def extract_error_messages(errors):\n messages = []\n if isinstance(errors, dict):\n members = []\n for e in errors.values():\n members += e\n else:\n members = errors\n for err in members:\n msg = err.get('message', '')\n if not msg:\n continue\n src = err.get('source', '')\n if src:\n msg = 'Source: {s} - {m}'.format(s=src, m=msg)\n loc = err.get('location', '')\n if loc:\n msg = '{m} - File: {f}'.format(m=msg, f=loc)\n messages.append(msg)\n return messages", "def odata_error(self, request, environ, start_response, sub_code,\n message='', code=400):\n response_headers = []\n e = core.Error(None)\n e.add_child(core.Code).set_value(sub_code)\n e.add_child(core.Message).set_value(message)\n response_type = self.content_negotiation(\n request, environ, self.ErrorTypes)\n if response_type is None:\n # this is an error response, default to text/plain anyway\n response_type = params.MediaType.from_str(\n 'text/plain; charset=utf-8')\n elif response_type 
== \"application/atom+xml\":\n # even if you didn't ask for it, you get application/xml in this\n # case\n response_type = \"application/xml\"\n if response_type == \"application/json\":\n data = str(''.join(e.generate_std_error_json()))\n else:\n data = str(e)\n data = data.encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n start_response(\"%i %s\" % (code, sub_code), response_headers)\n return [data]", "def report_transaction_error_messages(self):\n response = self.__get_transaction_response()\n\n # get response data from response object\n response_data = response.json()\n\n # get error messages\n response_error = response_data['Error']\n response_error_messages = response_error['messages']\n\n # add all error messages to the report\n error_messages_to_report = []\n for response_error_message in response_error_messages:\n error_description = response_error_message['description']\n error_messages_to_report.append(error_description)\n\n return error_messages_to_report", "def _parse_errors(self, ve, params):\n error_info = {\n \"messages\": defaultdict(dict),\n \"labels\": defaultdict(dict),\n }\n\n for pname, data in ve.messages.items():\n if pname == \"_schema\":\n error_info[\"messages\"][\"schema\"] = [\n f\"Data format error: {data}\"\n ]\n continue\n if data == [\"Unknown field.\"]:\n error_info[\"messages\"][\"schema\"] = [f\"Unknown field: {pname}\"]\n continue\n param_data = utils.ensure_value_object(params[pname])\n error_labels = []\n formatted_errors = []\n for ix, marshmessages in data.items():\n error_labels.append(\n utils.filter_labels(param_data[ix], drop=[\"value\"])\n )\n formatted_errors_ix = []\n for _, messages in marshmessages.items():\n if messages:\n if isinstance(messages, list):\n formatted_errors_ix += messages\n else:\n for _, messagelist in messages.items():\n formatted_errors_ix += messagelist\n formatted_errors.append(formatted_errors_ix)\n error_info[\"messages\"][pname] = formatted_errors\n error_info[\"labels\"][pname] = error_labels\n\n self._errors.update(dict(error_info))", "def get_aggregated_exceptions(self) -> Payload:\n return Payload(aggregated_errors=list(self._aggregated_exceptions.values()))", "def formatErrors(self):\n errorlist = []\n xepsWithErrors = sorted(\n set(self.getParseErrors() + self.getBuildErrors()),\n key=lambda x: str(x))\n if self.getErrors() or xepsWithErrors:\n if self.getErrors():\n errorlist.append(\"********** Read errors **********\")\n for error in self.getErrors():\n errorlist.append(error)\n for xep in xepsWithErrors:\n errorlist.append(\n \"********** Error report for {} **********\".format(str(xep)))\n if xep.parseErrors:\n errorlist.append(\"********** Parsing Errors **********\")\n errors = list(set(xep.parseErrors))\n for error in errors:\n errorlist.append(error)\n if xep.buildErrors:\n errorlist.append(\"********** Build Errors **********\")\n for error in xep.buildErrors:\n if len(error.splitlines()) > 4:\n error = ''.join(error.splitlines()[:4])\n errorlist.append(error)\n return '\\n'.join(errorlist)\n else:\n return None", "def parse_message(self):\n raise NotImplementedError", "def testExtendedErrorMessageWithTree(self):\n\n json_message = current.xml.json_message\n\n msg = json_message(False, 405, message=\"Test\", tree='{\"test\": \"value\"}')\n msg = json.loads(msg)\n self.assertEqual(len(msg), 4)\n self.assertEqual(msg[\"status\"], \"failed\")\n self.assertEqual(msg[\"statuscode\"], \"405\")\n 
self.assertEqual(msg[\"message\"], \"Test\")\n self.assertTrue(isinstance(msg[\"tree\"], dict))\n tree = msg[\"tree\"]\n self.assertEqual(len(tree), 1)\n self.assertEqual(tree[\"test\"], \"value\")", "def get_messages(self):\n other_user_email = request.args.get('other_user_email')\n page = request.args.get('page')\n per_page = request.args.get('per_page')\n if not other_user_email or not page or not per_page:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"query params\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"query params\", 400\n email_token = auth.current_user()[0]\n page = int(page)\n per_page = int(per_page)\n # App sends starting with 1 but we start at 0\n page -= 1\n try:\n message_list, pages = self.friend_database.get_conversation(email_token, other_user_email, per_page, page)\n except NoMoreMessagesError:\n self.logger.debug(messages.NO_MORE_PAGES_ERROR)\n return messages.NO_MORE_PAGES_ERROR, 404\n message_list = [{k:v for k,v in m._asdict().items() if k != \"hidden_to\"} for m in message_list]\n for i in range(len(message_list)):\n message_list[i][\"timestamp\"] = message_list[i][\"timestamp\"].isoformat()\n return json.dumps({\"messages\": message_list, \"pages\": pages}), 200", "def get_json_error(self):\n return {\"errno\": self.errno, \"error\": self.message}", "def process_sub_serializer_errors(self, serializer_error_dict, error_type):\n sub_serializer_errors = serializer_error_dict.get('errors', [])\n sub_serializer_non_field_errors = serializer_error_dict.get('non_field_errors', None)\n result = []\n for sub_error in sub_serializer_errors:\n if sub_error['field'] is None:\n sub_error['field'] = error_type\n result.append(sub_error)\n if sub_serializer_non_field_errors is not None:\n result.extend(\n self.get_non_field_error_entries(sub_serializer_non_field_errors)\n )\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the list of decoded messages from the extended_error.
def _get_extended_error(self, extended_error): return self._render_extended_error_message_list(extended_error)
[ "def _render_extended_error_message_list(self, extended_error):\n messages = []\n if isinstance(extended_error, dict):\n if ('Type' in extended_error and\n extended_error['Type'].startswith('ExtendedError.')):\n for msg in extended_error['Messages']:\n message_id = msg['MessageID']\n x = message_id.split('.')\n registry = x[0]\n msgkey = x[len(x) - 1]\n\n # if the correct message registry is loaded,\n # do string resolution\n if (registry in self.message_registries and msgkey in\n self.message_registries[registry]['Messages']):\n rmsgs = self.message_registries[registry]['Messages']\n msg_dict = rmsgs[msgkey]\n msg_str = message_id + ': ' + msg_dict['Message']\n\n for argn in range(0, msg_dict['NumberOfArgs']):\n subst = '%' + str(argn+1)\n m = str(msg['MessageArgs'][argn])\n msg_str = msg_str.replace(subst, m)\n\n if ('Resolution' in msg_dict and\n msg_dict['Resolution'] != 'None'):\n msg_str += ' ' + msg_dict['Resolution']\n\n messages.append(msg_str)\n else:\n # no message registry, simply return the msg object\n # in string form\n messages.append(str(message_id))\n\n return messages", "def errors(self) -> List[str]:\n return [e.get('message')\n for e in self._error.response.json().get('errors', [])]", "def errors(self) :\n if self.encoding :\n return self.stderr.decode(self.encoding)\n return self.stderr.decode()", "def get_error_messages(self):\n print(\"Error messages \", self.error_messages)\n return self.error_messages", "def extract_error_messages(errors):\n messages = []\n if isinstance(errors, dict):\n members = []\n for e in errors.values():\n members += e\n else:\n members = errors\n for err in members:\n msg = err.get('message', '')\n if not msg:\n continue\n src = err.get('source', '')\n if src:\n msg = 'Source: {s} - {m}'.format(s=src, m=msg)\n loc = err.get('location', '')\n if loc:\n msg = '{m} - File: {f}'.format(m=msg, f=loc)\n messages.append(msg)\n return messages", "def _get_errors(exc):\n if hasattr(exc, 'message'):\n errors = exc.messages\n else:\n errors = [str(exc)]\n return errors", "def get_msg_list(tx: dict) -> list:\n decoded_msg_list = []\n msg_list = tx[\"events\"][0][\"sub\"][0][\"additional\"][\"execute_message\"]\n for msg in msg_list:\n decoded_msg = json.loads(base64.b64decode(msg))\n decoded_msg_list.append(decoded_msg)\n\n return decoded_msg_list", "def get_errors(response):\n errors = response.get(\"error\")\n if errors:\n return [e.get(\"message\") for e in errors]\n return None", "def getTestErrorMessages(self):\n return self.testErrorMessages", "def get_encoding_errors(self):\n return self._encoding_errors", "def _get_resp_body_errors(self):\n\n if self._resp_body_errors and len(self._resp_body_errors) > 0:\n return self._resp_body_errors\n\n errors = []\n warnings = []\n resp_codes = []\n\n if self.verb is None:\n return errors\n\n dom = self.response.dom()\n if dom is None:\n return errors\n\n for e in dom.findall('Errors'):\n eSeverity = None\n eClass = None\n eShortMsg = None\n eLongMsg = None\n eCode = None\n\n try:\n eSeverity = e.findall('SeverityCode')[0].text\n except IndexError:\n pass\n\n try:\n eClass = e.findall('ErrorClassification')[0].text\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n except IndexError:\n pass\n\n try:\n eShortMsg = smart_encode(e.findall('ShortMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eLongMsg = smart_encode(e.findall('LongMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n if int(eCode) not in resp_codes:\n 
resp_codes.append(int(eCode))\n except IndexError:\n pass\n\n msg = str(\"Class: {eClass}, Severity: {severity}, Code: {code}, {shortMsg} {longMsg}\") \\\n .format(eClass=eClass, severity=eSeverity, code=eCode, shortMsg=eShortMsg,\n longMsg=eLongMsg)\n\n # from IPython import embed; embed()\n\n if eSeverity == 'Warning':\n warnings.append(msg)\n else:\n errors.append(msg)\n\n self._resp_body_warnings = warnings\n self._resp_body_errors = errors\n self._resp_codes = resp_codes\n\n if self.config.get('warnings') and len(warnings) > 0:\n log.warn(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(warnings)))\n\n if self.response.reply.Ack == 'Failure':\n if self.config.get('errors'):\n log.error(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(errors)))\n\n return errors\n\n return []", "def messages(self):\n return self._messages", "def getErrorsFromErrorQueue(self):\n \n entries = []\n while True:\n errorCheck = self._receive(':SYST:ERR?')\n errorCheckText = errorCheck.split(',')[1]\n if errorCheckText == '\"No error\"':\n break;\n entries.append(errorCheck)\n return entries", "def __message_decoder(self, message):\n\n message_json = json.loads(message)\n message_id = message_json['id']\n message_payload = message_json['payload']\n plugin_name = message_payload['plugin_name']\n\n return [message_id, plugin_name, message_payload]", "def getErrorsList(self):\n return self.__errors", "def get_errors(self):\n return [result for result in self.values() if result.outcome == Result.ERROR]", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def get_message_list(self):\n (resp_message, mail_list, octets) = self.server_connection.list()\n return mail_list", "def msgs_from_bytes(self, b):\n msgs = []\n # User remainder bytes\n parse_bytes = self.remainder + b.decode('ascii')\n # Find the first frame delimiter\n i = parse_bytes.find('\\r\\n')\n while i >= 0:\n # Try to parse a single message\n m = self._parse_msg(parse_bytes[:i])\n # Remove parsed bytes and delimter\n parse_bytes = parse_bytes[i+2:]\n # Add parsed message, if any\n if m:\n msgs.append(m)\n self.logger.debug('Parsed ASCII frame: address={}, function={}, len={}'.format(m.address, m.function, len(m.data) if m.data else 0))\n #else - warn?\n i = parse_bytes.find('\\r\\n')\n # Store any remaining bytes for the next pass\n self.remainder = parse_bytes\n return msgs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the PCI devices.
def _get_pci_devices(self):
    system = self._get_host_details()
    if ('links' in system['Oem']['Hp'] and
            'PCIDevices' in system['Oem']['Hp']['links']):
        # Get the PCI URI and Settings
        pci_uri = system['Oem']['Hp']['links']['PCIDevices']['href']
        status, headers, pci_device_list = self._rest_get(pci_uri)

        if status >= 300:
            msg = self._get_extended_error(pci_device_list)
            raise exception.IloError(msg)

        return pci_device_list
    else:
        msg = ('links/PCIDevices section in ComputerSystem/Oem/Hp'
               ' does not exist')
        raise exception.IloCommandNotSupportedError(msg)
[ "def test_get_pci_device_list(self):\n pass", "def get_devices():\n devices = []\n for device_id in range(pm.lib.Pm_CountDevices()):\n devices.append(DeviceInfo(device_id))\n\n return devices", "def get_all_devices(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_GetAllDevices', self.handle))", "def list_devices():\r\n return sd.query_devices()", "def GetAllDevices(self):\n\n return list(self.YieldAllDevices())", "def enumerateDevices(self):\n results = []\n \n devices = usb.core.find(find_all=True, idVendor = 0x2AB9, idProduct = 0x0001)\n for device in devices:\n results.append(str(device.serial_number))\n return results", "def list_devices(cls):\n # get all matching devices\n return usb.core.find(\n find_all=True,\n custom_match=lambda dev: (\n dev.idVendor == cls.vendor_id and dev.idProduct in cls.product_ids\n ),\n )", "def get_pci_inventory(self):\n result = {}\n try:\n list_pci_info = []\n chassis_url = self._find_chassis_resource()\n result_pci = self._get_collection(chassis_url + '/PCIeDevices')\n if result_pci['ret'] == False:\n # Try to find PCIeDevices under ComputerSystem.\n system_url = self._find_system_resource()\n result = self._get_url(system_url)\n if result['ret'] == False:\n return result\n if 'PCIeDevices' not in result['entries'].keys():\n return {'ret': False, 'msg': \"Failed to find 'PCIeDevices' in ComputerSystem.\"}\n for member in result['entries']['PCIeDevices']:\n result = self._get_url(member['@odata.id'])\n if result['ret'] == False:\n return result\n data_filtered = propertyFilter(result['entries'])\n list_pci_info.append(data_filtered)\n else:\n list_pci_info = propertyFilter(result_pci['entries'])\n\n for member in list_pci_info:\n if 'PCIeFunctions' in member:\n result_pci_func = self._get_collection(member['PCIeFunctions']['@odata.id'])\n if result_pci_func['ret'] == False:\n return result_pci_func\n data_filtered = propertyFilter(result_pci_func['entries'])\n member['PCIeFunctions'] = data_filtered\n return {'ret': True, 'entries': list_pci_info}\n except Exception as e:\n LOGGER.debug(\"%s\" % traceback.format_exc())\n msg = \"Failed to get chassis pci devices inventory. 
Error message: %s\" % repr(e)\n LOGGER.error(msg)\n return {'ret': False, 'msg': msg}", "def get_pci_list(ns, pcis, bus=0, level=0):\n if level > 99:\n return []\n result = []\n # For deeper levels, count items on the bus\n count = 0\n if level > 0:\n for p in pcis:\n if p.BusNumber == bus:\n count += 1\n # Print PCI devices\n i = 1\n for p in pcis:\n if p.BusNumber == bus:\n if level > 0:\n if i == count:\n sign = u'└─ '\n else:\n sign = u'├─ '\n i += 1\n else:\n sign = ''\n dsc_line = ' ' + ' ' * level + sign + p.DeviceID\n if p.CreationClassName == 'LMI_PCIBridge':\n dsc_line += ' %s bridge: ' % \\\n ns.LMI_PCIBridge.BridgeTypeValues.value_name(p.BridgeType)\n else:\n dsc_line += ' %s: ' % \\\n ns.LMI_PCIDevice.ClassCodeValues.value_name(p.ClassCode)\n dsc_line += p.Name\n result += [(dsc_line, '')]\n if p.CreationClassName == 'LMI_PCIBridge' and p.SecondayBusNumber:\n result += get_pci_list(ns, pcis, p.SecondayBusNumber, level + 1)\n return result", "def get_devices(self):\n e = ctypes.POINTER(rs_error)()\n n_devices = lrs.rs_get_device_count(self.ctx, ctypes.byref(e))\n _check_error(e)\n\n lrs.rs_get_device.restype = ctypes.POINTER(rs_device)\n for idx in range(n_devices):\n dev = lrs.rs_get_device(self.ctx, idx, ctypes.byref(e))\n _check_error(e)\n\n name = pp(lrs.rs_get_device_name, dev, ctypes.byref(e))\n _check_error(e)\n\n serial = pp(lrs.rs_get_device_serial, dev, ctypes.byref(e))\n _check_error(e)\n\n version = pp(lrs.rs_get_device_firmware_version, dev, ctypes.byref(e))\n _check_error(e)\n\n is_streaming = lrs.rs_is_device_streaming(dev, ctypes.byref(e))\n _check_error(e)\n\n yield {'id': idx, 'name': name, 'serial': serial,\n 'firmware': version, 'is_streaming': is_streaming}", "def get_all_pci_device_by_node(node_id):\n return _get_dbdriver_instance().get_all_pci_device_by_node(node_id)", "def get_pci_info(ns):\n result = [('PCI Devices:', '')]\n\n tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})\n if STANDALONE:\n tf.print_host(get_hostname(ns))\n\n try:\n pci_dev = get_all_instances(ns, 'LMI_PCIDevice')\n pci_br = get_all_instances(ns, 'LMI_PCIBridge')\n except Exception:\n result += [(get_colored_string('error:', RED_COLOR),\n 'Missing PCI related classes. 
Is openlmi-hardware package installed on the server?')]\n tf.produce_output(result)\n return []\n\n pci = pci_dev + pci_br\n pci.sort(key=lambda x: x.DeviceID)\n\n if not pci:\n result += [(' N/A', 'No PCI Device was detected on the system.')]\n tf.produce_output(result)\n return []\n\n result += get_pci_list(ns, pci)\n\n if not STANDALONE:\n result += [EMPTY_LINE]\n\n tf.produce_output(result)\n return []", "def scan_devices(self):\n return self._get_devices()", "def devices(self):\n result = self.tk.call('snack::mixer', 'devices')\n return self.tk.splitlist(result)", "def get_device_list(self):\n dl = []\n resp = self._message(b\"host:devices-l\")\n for line in resp.splitlines():\n dl.append(_device_factory(line))\n return dl", "def list_devices(self): # pylint: disable=no-self-use\n if not CAN_ENUMERATE_DEVICES:\n raise TypeError('Server does not support DAQ device enumeration')\n return list_available_devices()", "def get_all(self):\n self._api_access.logger().info(\"Getting all devices\")\n return self._api_access.http_get(\"devices\").json()", "def get_devices():\n devices = []\n for path in hookenv.action_get('osd-devices').split(' '):\n path = path.strip()\n if not os.path.isabs(path):\n raise Error('{}: Not absolute path.'.format(path))\n devices.append(path)\n return devices", "def retrieve_pci_addresses(self):\n debug('Retrieve PCI addresses...')\n try:\n lshw_json = self.run_ssh('lshw -json').stdout\n except SSHError:\n fatal('Cannot connect to node:', self.ip_address)\n lshw = json.loads(lshw_json)\n pci_addresses = []\n for component in lshw[\"children\"][0][\"children\"]:\n if component[\"class\"] == \"bridge\":\n for subsystem in component[\"children\"]:\n if subsystem[\"class\"] == \"network\":\n index = int(subsystem[\"id\"].split(':')[1])\n pci_addresses.append((index, subsystem[\"businfo\"]))\n pci_addresses = [v.strip('pci@') for k, v in sorted(pci_addresses)]\n # iterate over interfaces and set pci address\n i = 0\n for interface in self.interfaces:\n self.interfaces[interface]['pci_address'] = pci_addresses[i]\n i += 1\n if i >= len(pci_addresses):\n break" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the BIOS settings resource.
def _get_bios_settings_resource(self, data):
    try:
        bios_settings_uri = data['links']['Settings']['href']
    except KeyError:
        msg = ('BIOS Settings resource not found.')
        raise exception.IloError(msg)

    status, headers, bios_settings = self._rest_get(bios_settings_uri)
    if status != 200:
        msg = self._get_extended_error(bios_settings)
        raise exception.IloError(msg)

    return headers, bios_settings_uri, bios_settings
[ "def get_system_bios_settings( context, bios, system_id ):\n\n if \"SettingsObject\" in bios.dict[\"@Redfish.Settings\"]:\n bios_settings = context.get( bios.dict[\"@Redfish.Settings\"][\"SettingsObject\"][\"@odata.id\"] )\n else:\n if config.__workarounds__:\n warnings.warn( \"System '{}' BIOS resource contains the settings term, but no 'SettingsObject'. Contact your vendor. Attempting workarounds...\".format( system_id ) )\n settings_uris = [ \"Settings\", \"SD\" ]\n for setting_ext in settings_uris:\n bios_settings = context.get( bios.dict[\"@odata.id\"] + \"/\" + setting_ext )\n if bios_settings.status == 200:\n break\n try:\n verify_response( bios_settings )\n except:\n raise RedfishSystemBiosInvalidSettingsError( \"System '{}' BIOS resource contains the settings term, but no 'SettingsObject'. Workarounds exhausted. Contact your vendor.\".format( system_id ) ) from None\n else:\n raise RedfishSystemBiosInvalidSettingsError( \"System '{}' BIOS resource contains the settings term, but no 'SettingsObject'. Contact your vendor, or retry with the '__workarounds__' flag.\".format( system_id ) )\n\n return bios_settings", "def get_system_bios( context, system_id = None ):\n\n # Locate the system\n system = get_system( context, system_id )\n\n # Get the Bios resource\n if \"Bios\" not in system.dict:\n raise RedfishSystemBiosNotFoundError( \"System '{}' does not support representing BIOS\".format( system.dict[\"Id\"] ) )\n bios = context.get( system.dict[\"Bios\"][\"@odata.id\"] )\n current_settings = bios.dict[\"Attributes\"]\n future_settings = bios.dict[\"Attributes\"]\n\n # Get the Settings object if present\n if \"@Redfish.Settings\" in bios.dict:\n try:\n bios_settings = get_system_bios_settings( context, bios, system.dict[\"Id\"] )\n future_settings = bios_settings.dict[\"Attributes\"]\n except:\n if config.__workarounds__:\n warnings.warn( \"System '{}' BIOS resource contains the settings term, but no 'SettingsObject'. Contact your vendor. Workarounds exhausted for reading the settings data and falling back on using the active attributes.\".format( system_id ) )\n else:\n raise\n\n return current_settings, future_settings", "def _get_bios_setting(self, bios_property):\n headers, bios_uri, bios_settings = self._check_bios_resource([\n bios_property])\n return bios_settings[bios_property]", "def get_current_bios_settings(self, only_allowed_settings=True):\n\n sushy_system = self._get_sushy_system()\n try:\n current_settings = sushy_system.bios.json\n except sushy.exceptions.SushyError as e:\n msg = (self._('The current BIOS Settings were not found. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n\n attributes = current_settings.get(\"Attributes\")\n return attributes", "def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })", "def _get_bios_boot_resource(self, data):\n try:\n boot_uri = data['links']['Boot']['href']\n except KeyError:\n msg = ('Boot resource not found.')\n raise exception.IloCommandNotSupportedError(msg)\n\n status, headers, boot_settings = self._rest_get(boot_uri)\n\n if status != 200:\n msg = self._get_extended_error(boot_settings)\n raise exception.IloError(msg)\n\n return boot_settings", "def get_pending_bios_settings(self, only_allowed_settings=True):\n\n sushy_system = self._get_sushy_system()\n try:\n settings = sushy_system.bios.pending_attributes\n except sushy.exceptions.SushyError as e:\n msg = (self._('The pending BIOS Settings were not found. 
Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n\n return settings", "def settings(self):\r\n return SettingResource(self)", "def get_bios_config(module, bmc):\n if not module.params['bios_config']:\n debug(module, \"No BIOS settings requested\")\n # Return an empty config object.\n config = BIOSConfig({}, False)\n config.process({})\n return config\n\n debug(module, \"Checking BIOS settings\")\n try:\n bios_settings = bmc.list_bios_settings()\n unfinished_jobs = bmc.list_jobs(only_unfinished=True)\n except drac_exc.BaseClientException as e:\n module.fail_json(msg=\"Failed to list BIOS settings: %s\" % repr(e))\n\n settings_descs = {key: {\"current\": value.current_value,\n \"pending\": value.pending_value}\n for key, value in bios_settings.items()}\n debug(module, \"Existing BIOS settings: %s\" % repr(settings_descs))\n\n committed_job = has_committed_bios_job(unfinished_jobs)\n config = BIOSConfig(bios_settings, committed_job)\n try:\n config.validate(module.params['bios_config'])\n except UnknownSetting as e:\n module.fail_json(msg=repr(e))\n config.process(module.params['bios_config'])\n\n debug(module, \"Created BIOS config: state=%s\" % config.state)\n\n return config", "def set_system_bios( context, settings, system_id = None ):\n\n # Locate the system\n system = get_system( context, system_id )\n\n # Get the BIOS resource and determine if the settings need to be applied to the resource itself or the settings object\n if \"Bios\" not in system.dict:\n raise RedfishSystemBiosNotFoundError( \"System '{}' does not support representing BIOS\".format( system.dict[\"Id\"] ) )\n bios_uri = system.dict[\"Bios\"][\"@odata.id\"]\n bios = context.get( bios_uri )\n etag = bios.getheader( \"ETag\" )\n if \"@Redfish.Settings\" in bios.dict:\n bios_settings = get_system_bios_settings( context, bios, system.dict[\"Id\"] )\n bios_uri = bios_settings.dict[\"@odata.id\"]\n etag = bios_settings.getheader( \"ETag\" )\n\n # Update the settings\n payload = { \"Attributes\": settings }\n headers = None\n if etag is not None:\n headers = { \"If-Match\": etag }\n response = context.patch( bios_uri, body = payload, headers = headers )\n verify_response( response )\n return response", "def fabric_settings(self) -> Optional[Sequence['outputs.SettingsSectionDescriptionResponse']]:\n return pulumi.get(self, \"fabric_settings\")", "def settings(self):\n if self._settings is None:\n self._settings = ServerSettings(\"%s/settings\" % self._href, self.rsapi)\n return self._settings", "def get_one(self, setting_name):\n cdict = api.request.context.to_policy_values()\n policy.authorize('baremetal:node:bios:get', cdict, cdict)\n\n node = api_utils.get_rpc_node(self.node_ident)\n try:\n setting = objects.BIOSSetting.get(api.request.context, node.id,\n setting_name)\n except exception.BIOSSettingNotFound:\n raise exception.BIOSSettingNotFound(node=node.uuid,\n name=setting_name)\n\n return {setting_name: BIOSSetting.convert_with_links(setting,\n node.uuid)}", "def _get_profile(self):\n return {\n \"name\": \"autoscaleSettingsProfile_%s\" % self._heat_resource_name,\n \"capacity\": self._get_capacity(),\n \"rules\": self._get_rules(),\n }", "def settings(self):\r\n url = '{0}/userSettings'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def get(isamAppliance, check_mode=False, force=False, ignore_error=False):\n return isamAppliance.invoke_get(\"Retrieving a list of firmware settings\",\n \"/firmware_settings\", ignore_error=ignore_error, 
requires_model=requires_model)", "def advanced_settings(self):\n settings = ADVANCEDSETTINGS()\n ckresult(_dll.FMOD_System_GetAdvancedSettings(self._ptr, byref(settings)))\n return settings", "def get_settings(self):\n return self.__settings", "def get_settings(self):\n return self.settings" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the PATCH Operation is allowed on the resource.
def _validate_if_patch_supported(self, headers, uri):
    if not self._operation_allowed(headers, 'PATCH'):
        msg = ('PATCH Operation not supported on the resource '
               '"%s"' % uri)
        raise exception.IloError(msg)
[ "def test_PATCH(self):\n if not self.url:\n return\n response = self.client.patch(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])", "def test_client_can_do_patch_request(self):\n response = self.httpbin_4.test_requests_patch_method()\n self.assertEqual(response.request.method, 'PATCH')\n self.assertEqual(response.status_code, 200)", "def _check_iscsi_rest_patch_allowed(self):\n\n headers, bios_uri, bios_settings = self._check_bios_resource()\n # Check if the bios resource exists.\n\n if('links' in bios_settings and 'iScsi' in bios_settings['links']):\n iscsi_uri = bios_settings['links']['iScsi']['href']\n status, headers, settings = self._rest_get(iscsi_uri)\n\n if status != 200:\n msg = self._get_extended_error(settings)\n raise exception.IloError(msg)\n\n if not self._operation_allowed(headers, 'PATCH'):\n headers, iscsi_uri, settings = (\n self._get_iscsi_settings_resource(settings))\n self._validate_if_patch_supported(headers, iscsi_uri)\n\n return iscsi_uri\n\n else:\n msg = ('\"links/iScsi\" section in bios'\n ' does not exist')\n raise exception.IloCommandNotSupportedError(msg)", "def _check_patch_requirements(region_name,\n applied_patches=None,\n available_patches=None):\n\n api_token = None\n if applied_patches:\n patches_applied = patch_api.patch_is_applied(\n token=api_token,\n timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS,\n region_name=region_name,\n patches=applied_patches\n )\n if not patches_applied:\n raise wsme.exc.ClientSideError(_(\n \"The following patches must be applied before doing \"\n \"the kubernetes upgrade: %s\" % applied_patches))\n\n if available_patches:\n patches_available = patch_api.patch_is_available(\n token=api_token,\n timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS,\n region_name=region_name,\n patches=available_patches\n )\n if not patches_available:\n raise wsme.exc.ClientSideError(_(\n \"The following patches must be available before doing \"\n \"the kubernetes upgrade: %s\" %\n available_patches))", "def can_be_modified(self):\n return self.state in {RequestState.pending, RequestState.accepted}", "def has_update_permissions(self, obj):\n return True", "def validate_patch(patch):\n\n if not isinstance(patch, list):\n patch = [patch]\n\n for p in patch:\n path_pattern = re.compile(\"^/[a-zA-Z0-9-_]+(/[a-zA-Z0-9-_]+)*$\")\n\n if not isinstance(p, dict) or \\\n any(key for key in [\"path\", \"op\"] if key not in p):\n raise wsme.exc.ClientSideError(_(\"Invalid patch format: %s\")\n % str(p))\n\n path = p[\"path\"]\n op = p[\"op\"]\n\n if op not in [\"add\", \"replace\", \"remove\"]:\n raise wsme.exc.ClientSideError(_(\"Operation not supported: %s\")\n % op)\n\n if not path_pattern.match(path):\n raise wsme.exc.ClientSideError(_(\"Invalid path: %s\") % path)\n\n if op == \"add\":\n if path.count('/') == 1:\n raise wsme.exc.ClientSideError(_(\"Adding an additional \"\n \"attribute (%s) to the \"\n \"resource is not allowed\")\n % path)", "def can_edit(self):\n utility = self.utility\n poll = self.poll()\n return utility.allowed_to_edit(poll)", "def has_modify_permissions(self, request, obj, *args, **kwargs):\n return False", "def can_update_comments(self):\n # Implemented from template for\n # osid.resource.ResourceAdminSession.can_update_resources\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n return True", "def can_modify(self):\n return self._can_modify", "def 
test_patch_not_allowed(self, parse_args):\n parse_args.side_effect = [{\n _ATTEMPT.attempt_id: 'forbidden'\n }, {\n _ATTEMPT.run_id: 'forbidden'\n }]\n _, err = self.resource.patch(self.attempts[1][_ATTEMPT.attempt_id])\n self.assertEqual(403, err)", "def _operation_allowed(self, headers_dict, operation):\n\n if 'allow' in headers_dict:\n if operation in headers_dict['allow']:\n return True\n return False", "def is_catastrophic(self):\n if (self.request.method.upper() == 'PUT'\n and 'PLURAL_PUT' not in self.http_methods) \\\n or (self.request.method.upper() == 'DELETE'\n and 'PLURAL_DELETE' not in self.http_methods):\n return True\n return False", "def test_put_method_not_allowed(self):\n url = self.setup_http_not_allowed_item_test(self.user)\n\n with override_feature_checks(self.override_features):\n self.api_put(url, {}, expected_status=405)", "def test_review_list_other_method_not_allowed(self):\n client = Client()\n client.login(username='TEST_USER_1',\n email='TEST_EMAIL_1', password='TEST_PW_1')\n response = client.put('/api/review/')\n\n self.assertEqual(response.status_code, 405)", "def can_batch_update(self):\n # type: () -> bool\n return self.batch_flags & BatchFlags.BATCH_UPDATE", "def patch(self):\n\n @ErrorHandler\n def fn_patch(_id, request_doc=None):\n \"\"\"Handles PATCH requests to the given resource.\"\"\"\n\n if request.method == \"OPTIONS\":\n return\n\n request_doc = request_doc or json.loads(request.body.getvalue())\n JsonAPIValidator.validate_patch(\n request_doc,\n _id,\n self.model._meta.name\n )\n\n self.listener.before_patch(request_doc)\n\n entry = self.model.select().where(\n self.model._meta.primary_key == _id\n ).get()\n\n if \"attributes\" in request_doc[\"data\"]:\n # each attribute that is present will be updated\n for key, val in request_doc[\"data\"][\"attributes\"].iteritems():\n setattr(entry, key, val)\n\n entry.save()\n\n if \"relationships\" in request_doc[\"data\"]:\n # patch given relationships\n self.__patch_relationships(\n _id,\n request_doc[\"data\"][\"relationships\"]\n )\n\n if self.listener.after_patch(response):\n # if the listener changed something else then return the object\n return self.__get(_id)\n\n else:\n # nothing changed, we return a 204 No Content status\n response.status = 204\n\n return fn_patch", "def _check_mutating_imethod(self):\n if self._properties(_props.ERROR, True) == _props.ERROR:\n raise RuntimeError(\"Operation failed\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves bios settings of the server.
def _get_bios_setting(self, bios_property):
    headers, bios_uri, bios_settings = self._check_bios_resource([
        bios_property])
    return bios_settings[bios_property]
[ "def get_system_bios_settings( context, bios, system_id ):\n\n if \"SettingsObject\" in bios.dict[\"@Redfish.Settings\"]:\n bios_settings = context.get( bios.dict[\"@Redfish.Settings\"][\"SettingsObject\"][\"@odata.id\"] )\n else:\n if config.__workarounds__:\n warnings.warn( \"System '{}' BIOS resource contains the settings term, but no 'SettingsObject'. Contact your vendor. Attempting workarounds...\".format( system_id ) )\n settings_uris = [ \"Settings\", \"SD\" ]\n for setting_ext in settings_uris:\n bios_settings = context.get( bios.dict[\"@odata.id\"] + \"/\" + setting_ext )\n if bios_settings.status == 200:\n break\n try:\n verify_response( bios_settings )\n except:\n raise RedfishSystemBiosInvalidSettingsError( \"System '{}' BIOS resource contains the settings term, but no 'SettingsObject'. Workarounds exhausted. Contact your vendor.\".format( system_id ) ) from None\n else:\n raise RedfishSystemBiosInvalidSettingsError( \"System '{}' BIOS resource contains the settings term, but no 'SettingsObject'. Contact your vendor, or retry with the '__workarounds__' flag.\".format( system_id ) )\n\n return bios_settings", "def _get_bios_settings_resource(self, data):\n try:\n bios_settings_uri = data['links']['Settings']['href']\n except KeyError:\n msg = ('BIOS Settings resource not found.')\n raise exception.IloError(msg)\n\n status, headers, bios_settings = self._rest_get(bios_settings_uri)\n if status != 200:\n msg = self._get_extended_error(bios_settings)\n raise exception.IloError(msg)\n\n return headers, bios_settings_uri, bios_settings", "def get_system_bios( context, system_id = None ):\n\n # Locate the system\n system = get_system( context, system_id )\n\n # Get the Bios resource\n if \"Bios\" not in system.dict:\n raise RedfishSystemBiosNotFoundError( \"System '{}' does not support representing BIOS\".format( system.dict[\"Id\"] ) )\n bios = context.get( system.dict[\"Bios\"][\"@odata.id\"] )\n current_settings = bios.dict[\"Attributes\"]\n future_settings = bios.dict[\"Attributes\"]\n\n # Get the Settings object if present\n if \"@Redfish.Settings\" in bios.dict:\n try:\n bios_settings = get_system_bios_settings( context, bios, system.dict[\"Id\"] )\n future_settings = bios_settings.dict[\"Attributes\"]\n except:\n if config.__workarounds__:\n warnings.warn( \"System '{}' BIOS resource contains the settings term, but no 'SettingsObject'. Contact your vendor. Workarounds exhausted for reading the settings data and falling back on using the active attributes.\".format( system_id ) )\n else:\n raise\n\n return current_settings, future_settings", "def get_current_bios_settings(self, only_allowed_settings=True):\n\n sushy_system = self._get_sushy_system()\n try:\n current_settings = sushy_system.bios.json\n except sushy.exceptions.SushyError as e:\n msg = (self._('The current BIOS Settings were not found. 
Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n\n attributes = current_settings.get(\"Attributes\")\n return attributes", "def get_all(self):\n cdict = api.request.context.to_policy_values()\n policy.authorize('baremetal:node:bios:get', cdict, cdict)\n\n node = api_utils.get_rpc_node(self.node_ident)\n settings = objects.BIOSSettingList.get_by_node_id(\n api.request.context, node.id)\n return BIOSSettingsCollection.collection_from_list(self.node_ident,\n settings)", "def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })", "def fetch_settings(self) -> None:\n self.server_settings.fetch()", "def get_bios_config(module, bmc):\n if not module.params['bios_config']:\n debug(module, \"No BIOS settings requested\")\n # Return an empty config object.\n config = BIOSConfig({}, False)\n config.process({})\n return config\n\n debug(module, \"Checking BIOS settings\")\n try:\n bios_settings = bmc.list_bios_settings()\n unfinished_jobs = bmc.list_jobs(only_unfinished=True)\n except drac_exc.BaseClientException as e:\n module.fail_json(msg=\"Failed to list BIOS settings: %s\" % repr(e))\n\n settings_descs = {key: {\"current\": value.current_value,\n \"pending\": value.pending_value}\n for key, value in bios_settings.items()}\n debug(module, \"Existing BIOS settings: %s\" % repr(settings_descs))\n\n committed_job = has_committed_bios_job(unfinished_jobs)\n config = BIOSConfig(bios_settings, committed_job)\n try:\n config.validate(module.params['bios_config'])\n except UnknownSetting as e:\n module.fail_json(msg=repr(e))\n config.process(module.params['bios_config'])\n\n debug(module, \"Created BIOS config: state=%s\" % config.state)\n\n return config", "def _get_bios_boot_resource(self, data):\n try:\n boot_uri = data['links']['Boot']['href']\n except KeyError:\n msg = ('Boot resource not found.')\n raise exception.IloCommandNotSupportedError(msg)\n\n status, headers, boot_settings = self._rest_get(boot_uri)\n\n if status != 200:\n msg = self._get_extended_error(boot_settings)\n raise exception.IloError(msg)\n\n return boot_settings", "def settings(self):\n if self._settings is None:\n self._settings = ServerSettings(\"%s/settings\" % self._href, self.rsapi)\n return self._settings", "def test_get_server_settings(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/api/v1/server_settings',\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get_pending_bios_settings(self, only_allowed_settings=True):\n\n sushy_system = self._get_sushy_system()\n try:\n settings = sushy_system.bios.pending_attributes\n except sushy.exceptions.SushyError as e:\n msg = (self._('The pending BIOS Settings were not found. 
Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n\n return settings", "def fusion_api_get_server_hardware_bios(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/bios')", "def set_system_bios( context, settings, system_id = None ):\n\n # Locate the system\n system = get_system( context, system_id )\n\n # Get the BIOS resource and determine if the settings need to be applied to the resource itself or the settings object\n if \"Bios\" not in system.dict:\n raise RedfishSystemBiosNotFoundError( \"System '{}' does not support representing BIOS\".format( system.dict[\"Id\"] ) )\n bios_uri = system.dict[\"Bios\"][\"@odata.id\"]\n bios = context.get( bios_uri )\n etag = bios.getheader( \"ETag\" )\n if \"@Redfish.Settings\" in bios.dict:\n bios_settings = get_system_bios_settings( context, bios, system.dict[\"Id\"] )\n bios_uri = bios_settings.dict[\"@odata.id\"]\n etag = bios_settings.getheader( \"ETag\" )\n\n # Update the settings\n payload = { \"Attributes\": settings }\n headers = None\n if etag is not None:\n headers = { \"If-Match\": etag }\n response = context.patch( bios_uri, body = payload, headers = headers )\n verify_response( response )\n return response", "def get_common_settings(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Settings/\"))", "def get_nic_settings(bmc):\n nic_settings = bmc.list_nics()\n return nic_settings", "def get_one(self, setting_name):\n cdict = api.request.context.to_policy_values()\n policy.authorize('baremetal:node:bios:get', cdict, cdict)\n\n node = api_utils.get_rpc_node(self.node_ident)\n try:\n setting = objects.BIOSSetting.get(api.request.context, node.id,\n setting_name)\n except exception.BIOSSettingNotFound:\n raise exception.BIOSSettingNotFound(node=node.uuid,\n name=setting_name)\n\n return {setting_name: BIOSSetting.convert_with_links(setting,\n node.uuid)}", "def settings(self):\r\n url = '{0}/userSettings'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def print_system_bios( current_settings, future_settings ):\n\n print( \"\" )\n print( \"BIOS Settings:\" )\n\n bios_line_format = \" {:30s} | {:30s} | {:30s}\"\n print( bios_line_format.format( \"Attribute Name\", \"Current Setting\", \"Future Setting\" ) )\n for attribute, value in sorted( current_settings.items() ):\n if attribute in future_settings:\n print( bios_line_format.format( attribute, str( current_settings[attribute] ), str( future_settings[attribute] ) ) )\n else:\n print( bios_line_format.format( attribute, str( current_settings[attribute] ), str( current_settings[attribute] ) ) )\n\n print( \"\" )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the iscsi settings resource.
def _get_iscsi_settings_resource(self, data):
    try:
        iscsi_settings_uri = data['links']['Settings']['href']
    except KeyError:
        msg = ('iscsi settings resource not found.')
        raise exception.IloCommandNotSupportedError(msg)

    status, headers, iscsi_settings = self._rest_get(iscsi_settings_uri)
    if status != 200:
        msg = self._get_extended_error(iscsi_settings)
        raise exception.IloError(msg)

    return headers, iscsi_settings_uri, iscsi_settings
[ "def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })", "def settings(self):\n if self._settings is None:\n self._settings = ServerSettings(\"%s/settings\" % self._href, self.rsapi)\n return self._settings", "def get_settings(self):\n return self.settings", "def get_settings(self):\n return self.__settings", "def get_common_settings(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Settings/\"))", "def get_network_settings():\n return _read_file(_SUSE_NETWORK_FILE)", "def get_settings(self):\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/settings\" % self.url_index_name, self.client.timeout)", "def _get_conf(self):\n self.press_conf = self.sysconf['PressureRegulators']\n return self.press_conf['PressureRegulator%d' % self.id_]", "def fabric_settings(self) -> Optional[Sequence['outputs.SettingsSectionDescriptionResponse']]:\n return pulumi.get(self, \"fabric_settings\")", "def iscsi(self):\n return self._iscsi", "def get_thermoregulator_settings(self) -> dict:\n command = Command(\"get-thermoregulator-settings\")\n value = self.scheme_manager.execute([command])\n return self.parser.parse_thermoregulator_settings(value)", "def _get_bios_settings_resource(self, data):\n try:\n bios_settings_uri = data['links']['Settings']['href']\n except KeyError:\n msg = ('BIOS Settings resource not found.')\n raise exception.IloError(msg)\n\n status, headers, bios_settings = self._rest_get(bios_settings_uri)\n if status != 200:\n msg = self._get_extended_error(bios_settings)\n raise exception.IloError(msg)\n\n return headers, bios_settings_uri, bios_settings", "def get_raw(self):\n return self.settings", "def test_get_antivirus_settings(self):\n pass", "def interrupt_settings(self):\n return self._interrupt_settings", "def getResolution(self):\n # load it each time, since this setting is not limited to a single user\n projectSettingsDB = self.loadProjectSettings()\n try:\n resolution = projectSettingsDB[\"Resolution\"]\n return resolution\n except KeyError:\n msg = \"Database Error while reading projectSettings.json\"\n logger.error(msg)\n return None", "def getUserInterfaceSettings(self):\n data = locals().copy()\n del data[\"self\"]\n resp = client.make_request(self.url_path, \"SettingsRouter\", \"getUserInterfaceSettings\", [data])\n return create_response(resp)", "def settings(self):\n return self._settings", "def settings(self) -> BaseSettings:\n return self._context.settings" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the Boot resource like BootSources.
def _get_bios_boot_resource(self, data):
    try:
        boot_uri = data['links']['Boot']['href']
    except KeyError:
        msg = ('Boot resource not found.')
        raise exception.IloCommandNotSupportedError(msg)

    status, headers, boot_settings = self._rest_get(boot_uri)
    if status != 200:
        msg = self._get_extended_error(boot_settings)
        raise exception.IloError(msg)

    return boot_settings
[ "def scan_source_boot_loader_configuration():\n\n boot_loader_configuration = SourceBootLoaderConfiguration(\n entries=scan_boot_entries()\n )\n\n api.produce(boot_loader_configuration)", "def _get_persistent_boot_devices(self):\n # Check if the BIOS resource if exists.\n headers_bios, bios_uri, bios_settings = self._check_bios_resource()\n\n # Get the Boot resource.\n boot_settings = self._get_bios_boot_resource(bios_settings)\n\n # Get the BootSources resource\n try:\n boot_sources = boot_settings['BootSources']\n except KeyError:\n msg = (\"BootSources resource not found.\")\n raise exception.IloError(msg)\n\n try:\n boot_order = boot_settings['PersistentBootConfigOrder']\n except KeyError:\n msg = (\"PersistentBootConfigOrder resource not found.\")\n raise exception.IloCommandNotSupportedError(msg)\n\n return boot_sources, boot_order", "def Sources():\n return _sources", "def get_resource_loader(self):\n return self.game.resource_loader", "def GetBootDisk(self) -> 'GoogleComputeDisk':\n\n for disk in self.GetValue('disks'):\n if disk['boot']:\n disk_name = disk['source'].split('/')[-1]\n return GoogleCloudCompute(self.project_id).GetDisk(disk_name=disk_name)\n raise errors.ResourceNotFoundError(\n 'Boot disk not found for instance {0:s}'.format(self.name), __name__)", "def boot_configuration(self):\n bootconfs = self.get_logical_configuration(gdef.BOOT_LOG_CONF)\n if not bootconfs:\n return bootconfs\n assert len(bootconfs) == 1 # Only one boot configuration can exist for each device instance.\n return bootconfs[0]", "def data_sources():\n\n return __sources", "def boot_chip(self):\n return self._chips[0, 0]", "def get_resources(self):\n\n # Initialise the empty dictionary\n sources = {}\n # Loop through the resources\n for resource in self.descriptor['resources']:\n # Create a resource dictionary\n source = {'location':\n # Location is url path or None (in that order)\n resource.get('url', resource.get('path', None)),\n 'encoding':\n # The encoding of the file - defaults to utf-8\n resource.get('encoding','utf-8'),\n 'fields':\n # Fields are found in schema.fields\n resource.get('schema', {}).get('fields', [])\n }\n # Add the resource to the resource dictionary collection\n sources[resource.get('id', u'')] = source\n\n # Return the resource collection\n return sources", "def bootstrap_servers(self):\n return self._bootstrap_servers", "def resource_loader(self):\n return self._loader", "def getbootinfo(self):\n self.mount()\n kernel = None\n inits = []\n for line in self.xlist(\"get-bootinfo\", IBASE)[1]:\n if line.startswith('+++'):\n kernel = line.split()[1]\n else:\n inits.append(line)\n self.unmount()\n if not inits:\n run_error(_(\"No initramfs found\"))\n return None\n if not kernel:\n run_error(_(\"GRUB problem:\\n\") + inits[0])\n return None\n return (kernel, inits)", "def _get_source_rd(self):\n return self.__source_rd", "def lookup(self):\r\n return resources.Lookup(self)", "def get_system_boot_once(self, type='current'):\n result = {}\n try:\n if type == None:\n type = 'current'\n if type not in ['current', 'allow']:\n return {'ret': False, 'msg': \"Type '%s' is not correct.\" % type}\n system_url = self._find_system_resource()\n result = self._get_url(system_url)\n if result['ret'] == False:\n return result\n boot_once = {}\n boot_info = result['entries']['Boot']\n if type == 'current':\n for key in boot_info.keys():\n if 'BootSourceOverride' in key and '@Redfish' not in key:\n boot_once[key] = boot_info[key]\n return {'ret': True, 'entries': boot_once}\n else:\n 
allow_value = boot_info['BootSourceOverrideTarget@Redfish.AllowableValues']\n return {'ret': True, 'entries': allow_value}\n except Exception as e:\n LOGGER.debug(\"%s\" % traceback.format_exc())\n msg = \"Failed to get system's boot once info. Error message: %s\" % repr(e)\n LOGGER.error(msg)\n return {'ret': False, 'msg': msg}", "def source(self) -> XMLResource:\n return self.schema.source", "def get_bokeh_resources() -> TemplateResourcesData:\n template_resources = TemplateResourcesData()\n template_resources.js = CDN.js_files[0]\n template_resources.css = CDN.css_files[0]\n\n return template_resources", "def get_resources(self) -> Dict[Reference, Resources]:\n if self._allocations is None:\n raise RuntimeError(\n 'Tried to get resources but we are running without'\n ' --start-all')\n\n return self._allocations", "def getresource(self, schemacls, name):\n return self.builders[name].getresource(schemacls=schemacls)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the Mappings resource.
def _get_bios_mappings_resource(self, data):
    try:
        map_uri = data['links']['Mappings']['href']
    except KeyError:
        msg = ('Mappings resource not found.')
        raise exception.IloCommandNotSupportedError(msg)

    status, headers, map_settings = self._rest_get(map_uri)
    if status != 200:
        msg = self._get_extended_error(map_settings)
        raise exception.IloError(msg)

    return map_settings
[ "def get_mapping(self, ksf: str) -> InfoResMapping:\n irm = self.InfoResMapping(self, ksf)\n return irm", "def MappingApi(self):\n return self.__MappingApi", "def mappings(self):\n return self._mappings", "def get_mapping(self):\n if self.role:\n return self.role.get_mapping(self.mapping)\n\n return self.mapping", "def map( self ) :\n\n self.readMap( )\n\n return( self.__map )", "def Mapping(self):\n return self.__Mapping", "def map(self) -> ba.Map:\n if self._map is None:\n raise NotFoundError\n return self._map", "def MappingFiles(self):\n return self.__MappingFiles", "def readMap( self ) :\n\n if self.__map is None:\n mapFilePath = pathlib.Path(self.path)\n if not mapFilePath.is_absolute():\n mapFilePath = self.derivedPath / mapFilePath\n self.__map = Map.readXML_file(mapFilePath)\n self.__map.setAncestor(self)\n\n return self.__map", "def GetMappingFromPackage(self):\n callResult = self._Call(\"GetMappingFromPackage\", )\n\n if callResult is None:\n return None\n\n\n objId = callResult\n classInstance = globals()[self._xmlRpc.ObjectApi.GetClassNameForId(objId)]\n return classInstance(self._xmlRpc, objId)", "def mappings(self) -> Sequence[OpMapping]:\n self._finalize_pending()\n return self._mappings", "def get(self):\n maps = Map.all()\n results = [map_object.serialize() for map_object in maps]\n return results, status.HTTP_200_OK", "def request_map():\n\n rospy.loginfo(\"Requesting the map\")\n rospy.wait_for_service('dynamic_map')\n getMap = rospy.ServiceProxy('dynamic_map', GetMap)\n g = getMap().map\n\n return g", "def map(self, resources: Set[Resource], add_prefixes: bool = True) -> List[Dict]:\n return []", "def GetAlternativeMapping(self):\n callResult = self._Call(\"GetAlternativeMapping\", )\n\n if callResult is None:\n return None\n\n\n objId = callResult\n classInstance = globals()[self._xmlRpc.ObjectApi.GetClassNameForId(objId)]\n return classInstance(self._xmlRpc, objId)", "def mappings(self, group_id):\n hashmap = db_api.get_instance()\n mapping_list = []\n mappings_uuid_list = hashmap.list_mappings(group_uuid=group_id)\n for mapping_uuid in mappings_uuid_list:\n mapping_db = hashmap.get_mapping(uuid=mapping_uuid)\n mapping_list.append(mapping_models.Mapping(\n **mapping_db.export_model()))\n res = mapping_models.MappingCollection(mappings=mapping_list)\n return res", "def mapping(self, map=None):\n return self._sweeper.mapping(map=map)", "def maps(self) -> np.ndarray:\n n_maps = self.n_maps\n # force 2D array\n # healpy tried to be smart and return 1D array only if there's only 1 map\n return (\n hp.ma(hp.read_map(self.path)).reshape(1, -1)\n ) if n_maps == 1 else (\n hp.ma(hp.read_map(self.path, field=range(n_maps)))\n )", "def get_resources(self):\n\n # Initialise the empty dictionary\n sources = {}\n # Loop through the resources\n for resource in self.descriptor['resources']:\n # Create a resource dictionary\n source = {'location':\n # Location is url path or None (in that order)\n resource.get('url', resource.get('path', None)),\n 'encoding':\n # The encoding of the file - defaults to utf-8\n resource.get('encoding','utf-8'),\n 'fields':\n # Fields are found in schema.fields\n resource.get('schema', {}).get('fields', [])\n }\n # Add the resource to the resource dictionary collection\n sources[resource.get('id', u'')] = source\n\n # Return the resource collection\n return sources" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if patch is supported on iscsi.
def _check_iscsi_rest_patch_allowed(self):
    headers, bios_uri, bios_settings = self._check_bios_resource()
    # Check if the bios resource exists.
    if('links' in bios_settings and 'iScsi' in bios_settings['links']):
        iscsi_uri = bios_settings['links']['iScsi']['href']
        status, headers, settings = self._rest_get(iscsi_uri)

        if status != 200:
            msg = self._get_extended_error(settings)
            raise exception.IloError(msg)

        if not self._operation_allowed(headers, 'PATCH'):
            headers, iscsi_uri, settings = (
                self._get_iscsi_settings_resource(settings))
            self._validate_if_patch_supported(headers, iscsi_uri)

        return iscsi_uri
    else:
        msg = ('"links/iScsi" section in bios'
               ' does not exist')
        raise exception.IloCommandNotSupportedError(msg)
[ "def _validate_if_patch_supported(self, headers, uri):\n if not self._operation_allowed(headers, 'PATCH'):\n msg = ('PATCH Operation not supported on the resource '\n '\"%s\"' % uri)\n raise exception.IloError(msg)", "def _check_supported(kernel_info):\n try:\n # import module\n op_name = kernel_info['op_info']['name']\n impl_path = build_in_impl_path\n custom_flag = False\n if 'impl_path' in kernel_info and kernel_info['impl_path'] is not None:\n op_impl_path = os.path.realpath(kernel_info['impl_path'])\n if os.path.isfile(op_impl_path):\n path, file_name = os.path.split(op_impl_path)\n op_name, _ = os.path.splitext(file_name)\n impl_path = path\n custom_flag = True\n if impl_path not in sys.path:\n sys.path.insert(0, impl_path)\n\n if custom_flag:\n op_module = __import__(op_name)\n else:\n op_module = __import__(\"impl.\" + op_name, globals(), locals(), [op_name], 0)\n # get function\n if not hasattr(op_module, \"check_supported\"):\n return \"\"\n op_func = getattr(op_module, \"check_supported\", None)\n\n # call function\n inputs_args = get_args(kernel_info['op_info'], 'inputs')\n outputs_args = get_args(kernel_info['op_info'], 'outputs')\n attrs_args = get_args(kernel_info['op_info'], 'attrs')\n kernel_name = kernel_info['op_info']['kernel_name']\n ret = op_func(*inputs_args, *outputs_args, *attrs_args, kernel_name=kernel_name)\n\n except Exception as e:\n raise TBEException(str(e))\n\n return ret", "def support_eip(self) -> bool:\n return pulumi.get(self, \"support_eip\")", "def is_supported_volume(self):\n\n\t\tsignature = self.volume_start_data[3 : 7]\n\t\treturn signature in [ b'NTFS', b'ReFS' ] # This is a relaxed check.", "def test_patch_hyperflex_server_firmware_version(self):\n pass", "def test_guest_os(self):\n self.check_guest_os()", "def test_supported(self):\n self.logTestName()\n\n for dev in self.dev.values():\n ops = dev.operations\n exps = dev.expectations\n self.assertTrue(len(ops) > 0)\n self.assertTrue(len(exps) > 0)\n\n for op in ops.union(exps):\n self.assertTrue(dev.supported(op))", "def is_ida_version_supported():\n major, minor = map(int, idaapi.get_kernel_version().split(\".\"))\n if major >= 7:\n return True\n print(\"GhIDA:: [!] 
IDA Pro 7.xx supported only\")\n return False", "def verify_support():\n ostype, majorrelease, _ = get_os_release_data()\n if ostype not in _supported_os:\n _logger.info('OS type %s is not supported.', ostype)\n return False\n if majorrelease not in _supported_release:\n _logger.info('OS %s %s is not supported', ostype, majorrelease)\n return False\n return True", "def is_ctu_capable():\n\n context = package_context.get_context()\n ctu_func_map_cmd = context.ctu_func_map_cmd\n try:\n version = subprocess.check_output([ctu_func_map_cmd, '-version'])\n except (subprocess.CalledProcessError, OSError):\n version = 'ERROR'\n return version != 'ERROR'", "def test_patch_hyperflex_capability_info(self):\n pass", "def testCheckAvailable(self):\n img = self.img\n img.inspect()\n with converter.RootMounted(img.converter._h,\n '/dev/VolGroup00/LogVol00'):\n c = img.converter\n installer = redhat.LocalInstaller(\n c._h, '/dev/VolGroup00/LogVol00',\n db.DB(['{}/conf/guestconv.db'.format(env.topdir)]),\n log.get_logger_object(test_helper.logger)\n )\n\n kernel = redhat.Package('kernel',\n version='2.6.9', release='89.EL',\n arch='i686')\n self.assertTrue(installer.check_available([kernel]))", "def supported():\n return os.path.isfile(OPENCOR)", "def should_upgrade_firmware(request):\n yield request.config.getoption(\"--force-upgrade\")", "def check(self):\r\n return os.path.isfile('/etc/fedora-release')", "def Supported(self) -> bool:", "def isSupported(fs):\n return fs in ProcessingInfo.getFileSystems()", "def is_system_usable_block_device(pydev_device):\n if pydev_device.get(\"ID_BUS\") == \"usb\":\n # Skip USB devices\n return False\n if pydev_device.get(\"DM_VG_NAME\") or pydev_device.get(\"DM_LV_NAME\"):\n # Skip LVM devices\n return False\n if constants.DEVICE_NAME_MPATH in pydev_device.get(\"DM_NAME\", \"\") and pydev_device.get(\"DM_PART\", \"\"):\n # Skip mpath partition devices\n return False\n if pydev_device.get(\"ID_FS_TYPE\") == constants.DEVICE_FS_TYPE_MPATH:\n # Skip mpath member devices\n return False\n id_path = pydev_device.get(\"ID_PATH\", \"\")\n if \"iqn.\" in id_path or \"eui.\" in id_path:\n # Skip all iSCSI devices, they are links for volume storage.\n # As per https://www.ietf.org/rfc/rfc3721.txt, \"iqn.\" or \"edu.\"\n # have to be present when constructing iSCSI names.\n return False\n if ((\"-fc-\" in id_path or \"-lun-\" in id_path) and\n is_valid_multipath(pydev_device.get('DEVNAME'))):\n return False\n if pydev_device.get(\"ID_VENDOR\") == constants.VENDOR_ID_LIO:\n # LIO devices are iSCSI, should be skipped above!\n LOG.error(\"Invalid id_path. 
Device %s (%s) is iSCSI!\" %\n (id_path, pydev_device.get('DEVNAME')))\n return False\n return True", "def _check_patch_requirements(region_name,\n applied_patches=None,\n available_patches=None):\n\n api_token = None\n if applied_patches:\n patches_applied = patch_api.patch_is_applied(\n token=api_token,\n timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS,\n region_name=region_name,\n patches=applied_patches\n )\n if not patches_applied:\n raise wsme.exc.ClientSideError(_(\n \"The following patches must be applied before doing \"\n \"the kubernetes upgrade: %s\" % applied_patches))\n\n if available_patches:\n patches_available = patch_api.patch_is_available(\n token=api_token,\n timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS,\n region_name=region_name,\n patches=available_patches\n )\n if not patches_available:\n raise wsme.exc.ClientSideError(_(\n \"The following patches must be available before doing \"\n \"the kubernetes upgrade: %s\" %\n available_patches))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change secure boot settings on the server.
def _change_secure_boot_settings(self, property, value):
    system = self._get_host_details()

    # find the BIOS URI
    if ('links' not in system['Oem']['Hp'] or
            'SecureBoot' not in system['Oem']['Hp']['links']):
        msg = (' "SecureBoot" resource or feature is not '
               'supported on this system')
        raise exception.IloCommandNotSupportedError(msg)

    secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']

    # Change the property required
    new_secure_boot_settings = {}
    new_secure_boot_settings[property] = value

    # perform the patch
    status, headers, response = self._rest_patch(
        secure_boot_uri, None, new_secure_boot_settings)
    if status >= 300:
        msg = self._get_extended_error(response)
        raise exception.IloError(msg)

    # Change the bios setting as a workaround to enable secure boot
    # Can be removed when fixed for Gen9 snap2
    val = self._get_bios_setting('CustomPostMessage')
    val = val.rstrip() if val.endswith(" ") else val+" "
    self._change_bios_setting({'CustomPostMessage': val})
[ "def set_secure_boot_mode(self, secure_boot_enable):\n sushy_system = self._get_sushy_system()\n try:\n sushy_system.secure_boot.enable_secure_boot(secure_boot_enable)\n except exception.InvalidInputError as e:\n msg = (self._('Invalid input. Error %(error)s')\n % {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish controller failed to set secure '\n 'boot settings on the server. Error: %(error)s')\n % {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)", "def set_secure_boot_state(self, task, state):\n return irmc_common.set_secure_boot_mode(task.node, state)", "def reset_secure_boot_keys(self):\n if self._is_boot_mode_uefi():\n self._change_secure_boot_settings('ResetToDefaultKeys', True)\n else:\n msg = ('System is not in UEFI boot mode. \"SecureBoot\" related '\n 'resources cannot be changed.')\n raise exception.IloCommandNotSupportedInBiosError(msg)", "def enable_secure_boot(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_secure_boot\")", "def setBootargs(self):\n\t\tif self.testType == 'auto' or self.testType == 'manual':\n\t\t\tself.bootargs = self.settings.getKeyValue('nfs.fs')\n\t\t\tself.bootargs = self.bootargs.replace('<nfsroot>', self.nfsroot)\n\t\t\tself.bootargs = self.bootargs.replace('<ipaddr>', self.ipaddr)\n\t\t\tself.socket.send('setenv bootargs ' + self.bootargs+'\\r', 1)\n\t\telse:\n\t\t\tself.bootargs = self.settings.getKeyValue('ramdisk.fs')\n\t\t\tself.bootargs = self.bootargs.replace('<ipaddr>', self.ipaddr)\n\t\t\tself.socket.send('setenv bootargs ' + self.bootargs+'\\r', 1)\t\t\n\t\treturn None", "def secure_boot_level(self, secure_boot_level):\n allowed_values = [\"NO_SECURITY\", \"MEDIUM_SECURITY\", \"FULL_SECURITY\", \"NOT_SUPPORTED\", \"UNKNOWN\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and secure_boot_level not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `secure_boot_level` ({0}), must be one of {1}\" # noqa: E501\n .format(secure_boot_level, allowed_values)\n )\n\n self._secure_boot_level = secure_boot_level", "def get_secure_boot_mode(self):\n sushy_system = self._get_sushy_system()\n try:\n secure_boot_enabled = GET_SECUREBOOT_CURRENT_BOOT_MAP.get(\n sushy_system.secure_boot.current_boot)\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish controller failed to provide '\n 'information about secure boot on the server. '\n 'Error: %(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexCommandNotSupportedError(msg)\n\n if secure_boot_enabled:\n LOG.debug(self._(\"Secure boot is Enabled\"))\n else:\n LOG.debug(self._(\"Secure boot is Disabled\"))\n return secure_boot_enabled", "def secure_mode(self, secure_mode):\n\n self._secure_mode = secure_mode", "def start_salt():\n with fabric_settings(warn_only=True):\n if env.host == env.master_server.public_ip:\n sudo(\"systemctl start salt-master\")\n time.sleep(3)\n sudo(\"systemctl start salt-minion\")", "def node_change_secure_boot(task, secure_boot_target):\n try:\n secure_boot_current = task.driver.management.get_secure_boot_state(\n task)\n except Exception as exc:\n secure_boot_current = None\n LOG.warning('Unexpected exception when trying to detect secure_boot '\n 'state while changing secure_boot for node '\n '%(node)s. 
%(class)s: %(exc)s',\n {'node': task.node.uuid,\n 'class': type(exc).__name__, 'exc': exc},\n exc_info=not isinstance(exc, exception.IronicException))\n\n if (secure_boot_current is not None\n and secure_boot_target == secure_boot_current):\n LOG.info(\"Target secure_boot state '%(target)s', and current \"\n \"secure_boot state '%(current)s' are identical. \"\n \"No change being made for node %(node)s\",\n {'target': secure_boot_target,\n 'current': secure_boot_current,\n 'node': task.node.uuid})\n return\n try:\n task.driver.management.set_secure_boot_state(task, secure_boot_target)\n except Exception as exc:\n LOG.error('Unexpected exception when trying to change secure_boot '\n 'to %(target)s for node %(node)s. %(class)s: %(exc)s',\n {'node': task.node.uuid, 'target': secure_boot_target,\n 'class': type(exc).__name__, 'exc': exc},\n exc_info=not isinstance(exc, exception.IronicException))\n task.node.last_error = (\n \"Failed to change secure_boot state to '%(target)s': %(err)s\" % {\n 'target': secure_boot_target, 'err': exc})\n task.node.save()\n else:\n LOG.info(\"Changed secure_boot state to %(state)s for node %(node)s\",\n {'state': secure_boot_target, 'node': task.node.uuid})\n task.node.secure_boot = secure_boot_target\n task.node.save()", "def system_protection_config():\n\n\tprint_section_header(\"GENERAL SYSTEM PROTECTION\", Fore.BLUE)\n\n\t# Enable Gatekeeper\n\tif prompt_yes_no(top_line=\"-> Enable Gatekeeper?\",\n\t bottom_line=\"Defend against malware by enforcing code signing and verifying downloaded applications before letting them to run.\"):\n\t\tprint_confirmation(\"Enabling Gatekeeper...\")\n\t\tsp.run('sudo spctl --master-enable', shell=True, stdout=sp.PIPE)\n\t\tsp.run('sudo spctl --enable --label \"Developer ID\"', shell=True, stdout=sp.PIPE)\n\n\t# Disable automatic software whitelisting\n\tif prompt_yes_no(top_line=\"-> Prevent automatic software whitelisting?\",\n\t bottom_line=\"Both built-in and downloaded software will require user approval for whitelisting.\"):\n\t\tprint_confirmation(\"Preventing automatic whitelisting...\")\n\t\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\t\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\n\t# Captive Portal\n\tif prompt_yes_no(top_line=\"-> Disable Captive Portal Assistant and force login through browser on untrusted networks?\",\n\t bottom_line=\"Captive Portal could be triggered and direct you to a malicious site WITHOUT any user interaction.\"):\n\t\tprint_confirmation(\"Disabling Captive Portal Assistant...\")\n\t\tsp.run(['sudo', 'defaults', 'write', '/Library/Preferences/SystemConfiguration/com.apple.captive.control', 'Active', '-bool', 'false'], stdout=sp.PIPE)", "def setprivileged(miner: Miner, login, allowsetting):\n commands = get_changeconfigcommands(getminerfilename(miner), 'api-allow', allowsetting)\n sendcommands_and_restart(miner, login, commands)", "async def _hardcore_setheist(self, ctx):\r\n guild = ctx.guild\r\n config = await self.thief.get_guild_settings(guild)\r\n\r\n if config[\"Hardcore\"]:\r\n config[\"Hardcore\"] = False\r\n msg = \"Hardcore mode now OFF.\"\r\n else:\r\n config[\"Hardcore\"] = True\r\n msg = \"Hardcore mode now ON! 
**Warning** death will result in credit **and chip wipe**.\"\r\n await self.thief.config.guild(guild).Config.set(config)\r\n await ctx.send(msg)", "def test_update_bios_boot_mode(self):\n pass", "def bootconfig(self, value): # pylint: disable-msg=E0102,E0202\n del self.bootconfig\n if value[\"kernel\"] != self.bootconfig[\"kernel\"]:\n self._logger.info(\"Setting kernel partition to mtd%s\"\n % value[\"kernel\"])\n self.conn.cmd(\"bootconfig set-kernel mtd%d\" % value[\"kernel\"])\n\n if value[\"rootfs\"] != self.bootconfig[\"rootfs\"]:\n self._logger.info(\"Setting rootfs partition to mtd%s\"\n % value[\"rootfs\"])\n self.conn.cmd(\"bootconfig set-rootfs mtd%d\" % value[\"rootfs\"])\n del self.bootconfig", "def get_secure_boot_mode(self):\n system = self._get_host_details()\n\n if ('links' not in system['Oem']['Hp'] or\n 'SecureBoot' not in system['Oem']['Hp']['links']):\n msg = ('\"SecureBoot\" resource or feature is not supported'\n ' on this system')\n raise exception.IloCommandNotSupportedError(msg)\n\n secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']\n\n # get the Secure Boot object\n status, headers, secure_boot_settings = self._rest_get(secure_boot_uri)\n\n if status >= 300:\n msg = self._get_extended_error(secure_boot_settings)\n raise exception.IloError(msg)\n\n return secure_boot_settings['SecureBootCurrentState']", "def secure(self, secure):\n\n self._secure = secure", "def safe_boot_disabled(self, safe_boot_disabled):\n\n self._safe_boot_disabled = safe_boot_disabled", "def boot(self, boot):\n\n self._boot = boot" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the system is in uefi boot mode.
def _is_boot_mode_uefi(self):
    boot_mode = self.get_current_boot_mode()
    if boot_mode == 'UEFI':
        return True
    else:
        return False
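To make the check concrete, here is a small self-contained sketch (not part of this row) showing how the helper behaves; the stub client and its hard-coded boot-mode values are illustrative assumptions:

class _StubIloClient(object):
    """Illustrative stand-in for the real client; only fakes get_current_boot_mode."""

    def __init__(self, mode):
        self._mode = mode

    def get_current_boot_mode(self):
        return self._mode

    def _is_boot_mode_uefi(self):
        # Same decision as the document above: only the literal 'UEFI' counts.
        return self.get_current_boot_mode() == 'UEFI'


print(_StubIloClient('UEFI')._is_boot_mode_uefi())    # True
print(_StubIloClient('LEGACY')._is_boot_mode_uefi())  # False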
[ "def is_bootable(self):\n return self.bootable_flag == 0x80", "def get_boot_mode():\n boot_mode = 'Legacy'\n try:\n reg_key = winreg.OpenKey(\n winreg.HKEY_LOCAL_MACHINE, r'System\\CurrentControlSet\\Control')\n reg_value = winreg.QueryValueEx(reg_key, 'PEFirmwareType')[0]\n if reg_value == 2:\n boot_mode = 'UEFI'\n except:\n boot_mode = 'Unknown'\n\n return boot_mode", "def test_update_bios_boot_mode(self):\n pass", "def get_boot_mode(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def test_patch_bios_boot_mode(self):\n pass", "def is_http_boot_requested(node):\n http_boot_requested = (\n str(node.driver_info.get('enable_uefi_httpboot', 'false')).lower())\n return http_boot_requested == 'true'", "def get_bootable(self) -> bool:\n try:\n return self.session.xenapi.VBD.get_bootable(self.vbd)\n except Exception as e:\n print(\"VBD.get_bootable Exception\", e)\n return False", "def boot_lun(self) -> bool:\n return pulumi.get(self, \"boot_lun\")", "def non_root_available(self):\n return self._adb_available and self._dev_emu", "def system_valid(self):\n return self.udev.devices_exist", "def test_get_bios_boot_mode_by_moid(self):\n pass", "def check_fw_mode(self, cat_cpuinfo_out):\n for line in cat_cpuinfo_out.splitlines():\n if \"firmware\" in line:\n if \"OPAL\" in line:\n return True\n else:\n return False\n return False", "def sstbf_enabled():\n return common.SSTBF_CAP in SYSTEM_CAPS", "def _is_device_available():\n result = get_serial_no()\n if result[1].strip() == \"unknown\":\n return False\n else:\n return True", "def test_get_bios_boot_mode_list(self):\n pass", "def needs_reboot():\n # Initialize the PyCom system\n with salt.utils.winapi.Com():\n # Create an AutoUpdate object\n try:\n obj_sys = win32com.client.Dispatch(\"Microsoft.Update.SystemInfo\")\n except pywintypes.com_error as exc:\n _, msg, _, _ = exc.args\n log.debug(\"Failed to create SystemInfo object: %s\", msg)\n return False\n return salt.utils.data.is_true(obj_sys.RebootRequired)", "def safe_boot_disabled(self):\n return self._safe_boot_disabled", "def is_boot_code_present(self):\n\n\t\treturn struct.unpack('<H', self.boot_sector_data[0 : 2])[0] != 0 and struct.unpack('<H', self.boot_sector_data[510 : 512])[0] == 0xAA55", "def get_pending_boot_mode(self):\n headers, uri, bios_settings = self._check_bios_resource(['BootMode'])\n _, _, settings = self._get_bios_settings_resource(bios_settings)\n boot_mode = settings.get('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n return boot_mode.upper()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the status of secure boot.
def get_secure_boot_mode(self):
    system = self._get_host_details()

    if ('links' not in system['Oem']['Hp'] or
            'SecureBoot' not in system['Oem']['Hp']['links']):
        msg = ('"SecureBoot" resource or feature is not supported'
               ' on this system')
        raise exception.IloCommandNotSupportedError(msg)

    secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']

    # get the Secure Boot object
    status, headers, secure_boot_settings = self._rest_get(secure_boot_uri)

    if status >= 300:
        msg = self._get_extended_error(secure_boot_settings)
        raise exception.IloError(msg)

    return secure_boot_settings['SecureBootCurrentState']
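A hedged usage sketch for the method above. The import path, host address and credentials are assumptions about a proliantutils-style client rather than values taken from this dataset:

from proliantutils import exception
from proliantutils.ilo import client  # assumed import path

ilo = client.IloClient('10.0.0.10', 'Administrator', 'password')  # placeholder host/credentials
try:
    state = ilo.get_secure_boot_mode()
    print('SecureBootCurrentState:', state)
except exception.IloCommandNotSupportedError:
    print('This system does not expose a SecureBoot resource.')
except exception.IloError as err:
    print('iLO returned an error:', err)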
[ "def get_secure_boot_mode(self):\n sushy_system = self._get_sushy_system()\n try:\n secure_boot_enabled = GET_SECUREBOOT_CURRENT_BOOT_MAP.get(\n sushy_system.secure_boot.current_boot)\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish controller failed to provide '\n 'information about secure boot on the server. '\n 'Error: %(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexCommandNotSupportedError(msg)\n\n if secure_boot_enabled:\n LOG.debug(self._(\"Secure boot is Enabled\"))\n else:\n LOG.debug(self._(\"Secure boot is Disabled\"))\n return secure_boot_enabled", "def current_secure_boot(self):\n return self._get_val_int(_BP_CURR_SECURE_BOOT, default=0)", "def get_secure_boot_state(self, task):\n return irmc_common.get_secure_boot_mode(task.node)", "def pending_secure_boot(self):\n return self._get_val_int(_BP_PENDING_SECURE_BOOT, default=0)", "def enable_secure_boot(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_secure_boot\")", "def get_one_time_boot(self):\n data = self._execute_command(\n 'GET_ONE_TIME_BOOT', 'SERVER_INFO', 'read')\n return data['ONE_TIME_BOOT']['BOOT_TYPE']['VALUE']", "def boot_progress(self):\n self._get_data()\n if self._boot_progress:\n return self._boot_progress\n return \"booted\" if self.power else \"powered off\"", "def get_status(self):\n return self.o.read_register(self.dev_id, STATUS)", "def get_bootvar(self):\n module = 'bootimage/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n bootdefault = response.json()['bootimage']['oper']['hd-default']\n print(self.device + ' The device is set to boot from: ' + bootdefault + ' in the future')\n return bootdefault", "def get_system_boot_once(self, type='current'):\n result = {}\n try:\n if type == None:\n type = 'current'\n if type not in ['current', 'allow']:\n return {'ret': False, 'msg': \"Type '%s' is not correct.\" % type}\n system_url = self._find_system_resource()\n result = self._get_url(system_url)\n if result['ret'] == False:\n return result\n boot_once = {}\n boot_info = result['entries']['Boot']\n if type == 'current':\n for key in boot_info.keys():\n if 'BootSourceOverride' in key and '@Redfish' not in key:\n boot_once[key] = boot_info[key]\n return {'ret': True, 'entries': boot_once}\n else:\n allow_value = boot_info['BootSourceOverrideTarget@Redfish.AllowableValues']\n return {'ret': True, 'entries': allow_value}\n except Exception as e:\n LOGGER.debug(\"%s\" % traceback.format_exc())\n msg = \"Failed to get system's boot once info. 
Error message: %s\" % repr(e)\n LOGGER.error(msg)\n return {'ret': False, 'msg': msg}", "def get_system_status(self):\n return self._request_withdraw_api('get', 'systemStatus.html')", "def wait_boot(self) -> int:\n return self._data[ATTR_WAIT_BOOT]", "def safe_boot_disabled(self):\n return self._safe_boot_disabled", "def _check_status(self):\n self.system_status_lock.acquire()\n info = self.system_status_proxy._getvalue()\n self.system_status_lock.release()\n return info", "def set_secure_boot_state(self, task, state):\n return irmc_common.set_secure_boot_mode(task.node, state)", "def get_pending_boot_mode(self):\n headers, uri, bios_settings = self._check_bios_resource(['BootMode'])\n _, _, settings = self._get_bios_settings_resource(bios_settings)\n boot_mode = settings.get('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n return boot_mode.upper()", "def _service_state(self):\n cmd = 'shell dumpsys telephony.registry'\n res = self.adb.cmd(*cmd.split()).communicate()[0]\n if 'mServiceState=0' in res:\n return 'InService'\n if 'mServiceState=1' in res:\n return 'OutOfService'\n if 'mServiceState=2' in res:\n return 'EmergencyOnly'\n if 'mServiceState=3' in res:\n return 'NetworkOff'\n return None", "def status(self):\n return self.microblaze.state", "def read_Load_Shave_Status(self):\n bitstream = self._port.read_holding_registers(0x01B2, 1) # 0x017E Low Battery Cut Out Delay uint16 r/w\n result = unpack('<H', pack('<H', bitstream[0]))[0]\n if result == 0:\n return str('Disable')\n if result == 1:\n return str('Enable')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset secure boot keys to manufacturing defaults.
def reset_secure_boot_keys(self):
    if self._is_boot_mode_uefi():
        self._change_secure_boot_settings('ResetToDefaultKeys', True)
    else:
        msg = ('System is not in UEFI boot mode. "SecureBoot" related '
               'resources cannot be changed.')
        raise exception.IloCommandNotSupportedInBiosError(msg)
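A short caller-side sketch, assuming the same kind of client object as in the previous sketch: guard the reset behind an explicit boot-mode check so the BIOS-mode exception raised above is expected rather than surprising.

from proliantutils import exception

def reset_keys_if_uefi(ilo):
    """ilo is assumed to expose get_current_boot_mode() and reset_secure_boot_keys()."""
    if ilo.get_current_boot_mode() != 'UEFI':
        print('Skipping: secure boot keys can only be reset in UEFI mode.')
        return False
    try:
        ilo.reset_secure_boot_keys()
        return True
    except exception.IloCommandNotSupportedInBiosError:
        # The boot mode changed underneath us; report "not reset".
        return False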
[ "def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n click.echo(\n 'Success! All OATH credentials have been cleared from your YubiKey.')", "def reset(ctx, force):\n force or click.confirm(\n \"WARNING! This will delete all stored PIV data and restore factory \"\n \"settings. Proceed?\",\n abort=True,\n err=True,\n )\n\n click.echo(\"Resetting PIV data...\")\n ctx.obj[\"session\"].reset()\n\n click.echo(\"Success! All PIV data have been cleared from the YubiKey.\")\n click.echo(\"Your YubiKey now has the default PIN, PUK and Management Key:\")\n click.echo(\"\\tPIN:\\t123456\")\n click.echo(\"\\tPUK:\\t12345678\")\n click.echo(\"\\tManagement Key:\\t010203040506070801020304050607080102030405060708\")", "def set_old_defaults(self):\n if not self.public_key_file and not self.secret_key_file:\n self.public_key_file = os.path.join(self.directory_effective, 'pubring.gpg')\n self.secret_key_file = os.path.join(self.directory_effective, 'secring.gpg')", "def reset(ctx, force):\n\n force or click.confirm(\n \"WARNING! This will delete all stored OATH accounts and restore factory \"\n \"settings. Proceed?\",\n abort=True,\n err=True,\n )\n\n session = ctx.obj[\"session\"]\n click.echo(\"Resetting OATH data...\")\n old_id = session.device_id\n session.reset()\n\n keys = ctx.obj[\"oath_keys\"]\n if old_id in keys:\n del keys[old_id]\n keys.write()\n logger.info(\"Deleted remembered access key\")\n\n click.echo(\"Success! All OATH accounts have been deleted from the YubiKey.\")", "def reset_from_flash(self):\n self._enable_boot0(False)\n self._reset()", "def reset_keys(self):\n self.UP_KEY, self.DOWN_KEY, self.START_KEY, self.BACK_KEY = False, False, False, False", "def reset_secret_key(self):\n self.secret_key = utils.generate_uuid(3)\n self.save()", "def reboot_system():\n reboot()", "def restoreKbStgs(old_settings): #restore KeyBoard Settings\n\ttermios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)", "def stop_crypto(self):\n self.clear_bitmask(0x08, 0x08)\n self.authed = False", "def reboot_fpga(self):\n log.info(\"Booting FPGA from SPI prom\")\n self.set(\"FPGA_CTRL\", \"boot_fpga\", 1);", "def reset(ctx, force):\n\n conn = ctx.obj[\"conn\"]\n\n if isinstance(conn, CtapPcscDevice): # NFC\n readers = list_ccid(conn._name)\n if not readers or readers[0].reader.name != conn._name:\n raise CliFail(\"Unable to isolate NFC reader.\")\n dev = readers[0]\n logger.debug(f\"use: {dev}\")\n is_fips = False\n\n def prompt_re_insert():\n click.echo(\n \"Remove and re-place your YubiKey on the NFC reader to perform the \"\n \"reset...\"\n )\n\n removed = False\n while True:\n sleep(0.5)\n try:\n with dev.open_connection(FidoConnection):\n if removed:\n sleep(1.0) # Wait for the device to settle\n break\n except CardConnectionException:\n pass # Expected, ignore\n except NoCardException:\n removed = True\n return dev.open_connection(FidoConnection)\n\n else: # USB\n n_keys = len(list_ctap_devices())\n if n_keys > 1:\n raise CliFail(\"Only one YubiKey can be connected to perform a reset.\")\n is_fips = is_yk4_fips(ctx.obj[\"info\"])\n\n ctap2 = ctx.obj.get(\"ctap2\")\n if not is_fips and not ctap2:\n raise CliFail(\"This YubiKey does not support FIDO reset.\")\n\n def prompt_re_insert():\n click.echo(\"Remove and re-insert your YubiKey to perform the reset...\")\n\n removed = 
False\n while True:\n sleep(0.5)\n keys = list_ctap_devices()\n if not keys:\n removed = True\n if removed and len(keys) == 1:\n return keys[0].open_connection(FidoConnection)\n\n if not force:\n click.confirm(\n \"WARNING! This will delete all FIDO credentials, including FIDO U2F \"\n \"credentials, and restore factory settings. Proceed?\",\n err=True,\n abort=True,\n )\n if is_fips:\n destroy_input = click_prompt(\n \"WARNING! This is a YubiKey FIPS device. This command will also \"\n \"overwrite the U2F attestation key; this action cannot be undone and \"\n \"this YubiKey will no longer be a FIPS compliant device.\\n\"\n 'To proceed, please enter the text \"OVERWRITE\"',\n default=\"\",\n show_default=False,\n )\n if destroy_input != \"OVERWRITE\":\n raise CliFail(\"Reset aborted by user.\")\n\n conn = prompt_re_insert()\n\n try:\n with prompt_timeout():\n if is_fips:\n fips_reset(conn)\n else:\n Ctap2(conn).reset()\n logger.info(\"FIDO application data reset\")\n except CtapError as e:\n if e.code == CtapError.ERR.ACTION_TIMEOUT:\n raise CliFail(\n \"Reset failed. You need to touch your YubiKey to confirm the reset.\"\n )\n elif e.code in (CtapError.ERR.NOT_ALLOWED, CtapError.ERR.PIN_AUTH_BLOCKED):\n raise CliFail(\n \"Reset failed. Reset must be triggered within 5 seconds after the \"\n \"YubiKey is inserted.\"\n )\n else:\n raise CliFail(f\"Reset failed: {e.code.name}\")\n except ApduError as e: # From fips_reset\n if e.code == SW.COMMAND_NOT_ALLOWED:\n raise CliFail(\n \"Reset failed. Reset must be triggered within 5 seconds after the \"\n \"YubiKey is inserted.\"\n )\n else:\n raise CliFail(\"Reset failed.\")\n except Exception:\n raise CliFail(\"Reset failed.\")", "def reset(self):\n\t\tfor i in range(0, len(self.reg_numbers)):\n\t\t\tself.regs[self.reg_numbers[i]] = BitArray(uint=self.reg_defaults[i], length=8)", "def set_secure_boot_state(self, task, state):\n return irmc_common.set_secure_boot_mode(task.node, state)", "def set_secure_boot_mode(self, secure_boot_enable):\n sushy_system = self._get_sushy_system()\n try:\n sushy_system.secure_boot.enable_secure_boot(secure_boot_enable)\n except exception.InvalidInputError as e:\n msg = (self._('Invalid input. Error %(error)s')\n % {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish controller failed to set secure '\n 'boot settings on the server. Error: %(error)s')\n % {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)", "def restore_config(self):\n self._clear_previous_windows_assigment()\n self._restart_i3_config()", "def system_reset(self):\n self.send([SYSTEM_RESET])", "def reboot(self):\n self._control( self._get_enums(3))", "def hotkeys_resetAll():\n _set = validate_hotkeySet(False)\n log.warning(\"All hotkeys on '{0}' set reset to maya defaults\".format(_set))\n mc.hotkey(fs = True )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform requested power operation.
def _perform_power_op(self, oper):
    power_settings = {"Action": "Reset",
                      "ResetType": oper}
    systems_uri = "/rest/v1/Systems/1"

    status, headers, response = self._rest_post(systems_uri, None,
                                                power_settings)
    if status >= 300:
        msg = self._get_extended_error(response)
        raise exception.IloError(msg)
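For reference, a minimal sketch of the REST payload the private helper above posts; the 'On'/'ForceOff'/'ForceRestart' reset types are assumptions based on common iLO values, not read from this row:

import json

def build_reset_payload(oper):
    # Mirrors the request body built by _perform_power_op above.
    return {"Action": "Reset", "ResetType": oper}

for oper in ('On', 'ForceOff', 'ForceRestart'):  # assumed ResetType values
    print('POST /rest/v1/Systems/1', json.dumps(build_reset_payload(oper)))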
[ "def power_on(self):\n print(\"Cpu.power_on()\")", "def get_setPower(self):\n self.read(\":POW?\")", "def powerIP(self,power):\n np.power(self.t, power, out=self.t)\n return self", "def calculate_power(self):\n # Convert Command Speed, Current Speed, and Setpoint Speed into m/s\n self.convert_command_speed = self.command_speed * Converters.KmHr_to_mps\n self.convert_current_speed = self.current_speed * Converters.MPH_to_mps\n self.convert_setpoint_speed = self.setpoint_speed * Converters.MPH_to_mps\n\n # Find Verror depending on mode\n velocity_error = 0\n if self.mode == 0: # Automatic Mode\n velocity_error = self.convert_command_speed - self.convert_current_speed\n else: # Manual Mode\n velocity_error = self.convert_setpoint_speed - self.convert_current_speed\n\n # Set ek as the kth sample of velocity error\n self.ek = velocity_error\n\n # Determine uk as shown in slide 65 of lecture 2\n if self.power_command < MAX_POWER:\n # Find uk\n self.uk = self.uk1 + (T/2) * (self.ek + self.ek1)\n else:\n self.uk = self.uk1\n\n # Set previous power command\n self.previous_power_command = self.power_command\n\n # Find power command\n if self.service_brake == True or self.emergency_brake == True:\n self.power_command = 0\n self.uk = 0\n self.ek = 0\n else:\n power1 = (self.kp * self.ek) + (self.ki * self.uk)\n power2 = (self.kp * self.ek) + (self.ki * self.uk)\n power3 = (self.kp * self.ek) + (self.ki * self.uk)\n # If all three power commands are different, turn the emergency brake on\n if (power1 != power2) or (power1 != power3) or (power2 != power3):\n self.emergency_brake == True\n else:\n self.power_command = (self.kp * self.ek) + (self.ki * self.uk)\n # Cut off power at appropriate limits\n if(self.power_command > 120000):\n self.power_command = 120000\n elif(self.power_command < -120000):\n self.power_command = -120000\n\n # Set past values of uk and ek\n self.uk1 = self.uk\n self.ek1 = self.ek", "def Incrpower(self, increment):\n self.power += increment", "def power(num, P):\n return num**P", "def __pow__(self,*args):\r\n pass", "def human_test_get_power(self):\n print(\"testing the antenna power measure!\")\n result = self.communicator.get_power()\n print(result)\n time.sleep(1)", "def get_power(self):\r\n return self._api.get_power()", "def power(self, host, action):\n response = self.put(self.foreman_url(\"hosts/%s/power\" % host), data=json.dumps({\"power_action\": action}))\n print(response.text)", "def _pow_(self, n):\n assert n > 0\n return generic_power(self, n)", "def power_on(self):\n self.send_command(0x11, 0x1)", "def get_power(self):\r\n return self.p", "def _doPowerState(self, state=False):\n if state:\n self._cmdPowerOn()\n else:\n self._cmdPowerOff()", "def pow(self, exponent: float) -> PhysicalQuantity:\n return self.__pow__(exponent)", "def vm_power(self, name, action):\n if action not in [\"start\", \"stop\"]:\n raise OpenStackConnectorIntegrationTestSuiteException(f\"The requested action {action} is incorrect. 
Must be either start or stop\")\n \n logging.info(f\"Received the power change request ({action}) for the virtual machine {name}\")\n connection = self._openstack_connect()\n if action == \"start\":\n connection.vm_power(name, \"on\")\n else:\n connection.vm_power(name, \"off\")\n # Closing connection and deleting the openstacksdk instance to avoid timeout errors\n connection.close_connection()\n del connection\n logging.info(f\"Power state of the virtual machine {name} was successfully changed\")\n\n return True", "def power(x, y):\n\n return x**y", "def power(num, exponent):\n return num ** exponent", "def _ipmi_poweron(self):\n import os\n self.class_logger.info(\"Performing PowerON via IPMI\")\n\n return os.system(\"ipmitool -H %s -U %s -P %s raw %s\" % (self.ipmi[\"ipmi_host\"],\n self.ipmi[\"ipmi_user\"],\n self.ipmi[\"ipmi_pass\"],\n self.ipmi[\"ipmi_on_cmd\"].format(slot_id=self.ipmi[\"ipmi_slot\"])))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simulates a physical press of the server power button.
def press_pwr_btn(self):
    self._press_pwr_btn()
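A hedged usage sketch: press the virtual power button and poll the host afterwards. The get_host_power_status method and the 10-second wait are assumptions, not part of this row:

import time

def toggle_power_via_button(ilo, wait=10):
    """ilo is assumed to expose press_pwr_btn() and get_host_power_status()."""
    before = ilo.get_host_power_status()
    ilo.press_pwr_btn()
    time.sleep(wait)  # give the host firmware time to react
    after = ilo.get_host_power_status()
    return before, after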
[ "def press_power_button():\r\n Android.send_adb_command(command='adb shell input keyevent 26')\r\n logging.info('ADB command to press power button has been sent')", "def press_pwr_btn(self):\n data = self._execute_command('PRESS_PWR_BTN', 'SERVER_INFO', 'write')\n return data", "def power_on(self):\n self.send_command(0x11, 0x1)", "def double_click_power(self):\n get_power_event_cmd = (\"getevent -pl 2>&1 | sed -n \"\n \"'/^add/{h}/KEY_POWER/{x;s/[^/]*//p}'\")\n input_event = self.adb.exec_adb_cmd(\n \"shell '{cmd}'\".format(cmd=get_power_event_cmd)).communicate()[0]\n\n self.android_device_driver.adb.exec_adb_cmd(\"shell '{cmd}'\".format(\n cmd=DOUBLE_CLICK_POWER_EVENT_TEMPLATE.format(input_event=input_event)))", "def hold_pwr_btn(self):\n dic = {'TOGGLE': 'NO'}\n data = self._execute_command(\n 'HOLD_PWR_BTN', 'SERVER_INFO', 'write', dic)\n return data", "def press(self, key: list, t):\n if not self.ser.alive:\n return\n k = '00'\n for v in key:\n k = hex(int(v, 16) ^ int(k, 16))\n if len(k) == 3:\n k = k.replace('0x', '0x0')\n if \"-\" in t:\n val = t.split(\"-\")\n delay = round(random.uniform(float(val[0]), float(val[1])), 4)\n else:\n delay = float(t)\n k = k.replace('0x', '')\n # close relay\n self.ser.write(k.encode('utf-8'), isHex=True)\n # How long do you need to press\n self.log.info('button press time={}'.format(delay))\n time.sleep(delay)\n # release relay\n self.ser.write('00'.encode('utf-8'), isHex=True)", "def press(self):\n self._lutron.send(Lutron.OP_EXECUTE, Keypad._CMD_TYPE, self._keypad.id,\n self.component_number, Button._ACTION_PRESS)", "def power_on(self):\n print(\"Cpu.power_on()\")", "def ctl_power_on(self):\n # start bluetoothctl, then input 'power on'\n exp = os.path.join(os.path.dirname(__file__), \"files/power_on.exp\")\n target_ip = self.target.ip\n status, output = shell_cmd_timeout('expect %s %s' % (exp, target_ip), timeout=200)\n if type(output) is bytes:\n output = output.decode(\"ascii\")\n assert status == 2, \"power on command fails: %s\" % output", "def power(self, host, action):\n response = self.put(self.foreman_url(\"hosts/%s/power\" % host), data=json.dumps({\"power_action\": action}))\n print(response.text)", "def power_server(self, id, name, action):\n\n return None", "def _doPowerState(self, state=False):\n if state:\n self._cmdPowerOn()\n else:\n self._cmdPowerOff()", "def power_cycle(self):\n self.power_off()\n time.sleep(self.powercycle_timeout)\n self.power_on()", "def press(state, button): \n \n\n # Press the button\n state[button] = True\n send_state(state)\n\n # Wait\n time.sleep(0.5)\n\n # Release the button\n state[button] = False\n send_state(state)", "def press(self, button, wait=0.0, port=0):\n self._check_button_args(\"press\", button, port, wait=wait)\n\n log_message = \"pressing button {} on port {} and waiting {}s - begin\".format(\n button, port, wait)\n self.add_log_note(log_message)\n self._button_list[port].press(button, wait)\n log_message = \"pressing button {} on port {} and waiting {}s - end\".format(\n button, port, wait)\n self.add_log_note(log_message)", "def test_api_ucs_power(self):\n # first power off all servers\n self.set_all_server_power_state(\"off\")\n # verify power state is down\n self.check_all_server_power_state(\"down\")\n # now power on the servers\n self.set_all_server_power_state(\"on\")\n # verify power state is up\n self.check_all_server_power_state(\"up\")", "def _windows_power_control(self):\n\n os_power_command = 'shutdown /r /t 3' if self._power_event_type == 'restart' \\\n else 'shutdown /h /t 
3'\n\n exit_code, out = self._staf_start_proc(os_power_command,\n self._sut.bespoke_root,\n self._command_timeout,\n location = self._sut.network_address)\n\n if exit_code != 0:\n raise CoreError('Power control event \"{0}\" failed: {1}'.format(self._name, out))", "def tap(self):\n self.press()\n self.release()", "async def sendKeyPress(self, key):\n key = str(key)\n await self.director.sendPostRequest(\n \"/api/v1/items/{}/commands\".format(self.item_id),\n \"KEY_PRESS\",\n {\"KeyName\": key},\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Request the http boot url from system in uefi boot mode.
def get_http_boot_url(self):
    if(self._is_boot_mode_uefi() is True):
        return self._get_bios_setting('UefiShellStartupUrl')
    else:
        msg = 'get_http_boot_url is not supported in the BIOS boot mode'
        raise exception.IloCommandNotSupportedInBiosError(msg)
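A small hedged wrapper that tolerates the BIOS-mode case the method above raises for; the client object is assumed to be the same proliantutils-style client as in the earlier sketches:

from proliantutils import exception

def read_http_boot_url(ilo):
    try:
        return ilo.get_http_boot_url()
    except exception.IloCommandNotSupportedInBiosError:
        # Legacy BIOS mode: there is no UefiShellStartupUrl to read.
        return None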
[ "def set_http_boot_url(self, url):\n if(self._is_boot_mode_uefi() is True):\n self._change_bios_setting({'UefiShellStartupUrl': url})\n else:\n msg = 'set_http_boot_url is not supported in the BIOS boot mode'\n raise exception.IloCommandNotSupportedInBiosError(msg)", "def get_http_boot_uri(self):\n try:\n sushy_system = self._get_sushy_system()\n http_boot_uri = sushy_system.http_boot_uri.httpbooturi\n except sushy.exceptions.SushyError as e:\n msg = (self._('Not able to find HTTP Boot URI. Error: '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n return http_boot_uri", "def set_http_boot_uri(self, url):\n try:\n sushy_system = self._get_sushy_system()\n sushy_system.http_boot_uri.set_http_boot_uri(url)\n except sushy.exceptions.SushyError as e:\n msg = (self._('Unable to set HTTP Boot URI. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)", "def is_http_boot_requested(node):\n http_boot_requested = (\n str(node.driver_info.get('enable_uefi_httpboot', 'false')).lower())\n return http_boot_requested == 'true'", "def _get_bios_boot_resource(self, data):\n try:\n boot_uri = data['links']['Boot']['href']\n except KeyError:\n msg = ('Boot resource not found.')\n raise exception.IloCommandNotSupportedError(msg)\n\n status, headers, boot_settings = self._rest_get(boot_uri)\n\n if status != 200:\n msg = self._get_extended_error(boot_settings)\n raise exception.IloError(msg)\n\n return boot_settings", "def test_boot_from_network(self):\n testflow.step(\"Run once vm - boot from network\")\n assert helper.run_once_with_boot_dev(\n config.ENUMS['boot_sequence_network']\n )", "def elim_bootstrap_fetch(tree):\n\n boot = tree.find('.//target[@name=\"boot\"]')\n for child in boot.findall(\"./exec\"):\n boot.remove(child)\n echo = boot.find(\"./echo\")\n echo.attrib[\"message\"] = \"Not fetching bootstrap libraries in the Fedora build\"", "def fetch_firmware(self, args):\n print(\"[+] Please browse to http://builder1204.mios.com/mt7620a_betafirmware/ for up-to-date firmware\")", "def flashUboot(self):\n\t\tif self.settings.getKeyValue('flash.uboot?') == 'y':\n\t\t\tloadAddress = self.settings.getKeyValue('u-boot.flash.address')\n\t\t\tcmd = self.settings.getKeyValue('u-boot.load.command')\n\t\t\tcmd = cmd.replace('<u-boot>', 'u-boot.bin.12x.2430')\n\t\t\tself.socket.send(cmd, 5)\n\t\t\t#self.socket.send('protect off 1:0-1\\r', 2)\n\t\t\t#self.socket.send('erase 1:0-1\\r', 2)\n\t\t\t#self.socket.send('cp.b 80000000 %s 2ffff\\r' % loadAddress)\n\t\t\treturn None\n\t\t\t#cmd = cmd.replace('<u-bootloadadress>', self.u-bootloadaddress)", "def _get_boot_path():\n boot_path = _get_default_kernel()\n return boot_path[: boot_path.rindex(\"/\")] or \"/\"", "def boot(self):\n\t\tmesslen, received = self.socket.send('bootm\\r', 25)\t\t\n\t\treturn None", "def get_bootvar(self):\n module = 'bootimage/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n bootdefault = response.json()['bootimage']['oper']['hd-default']\n print(self.device + ' The device is set to boot from: ' + bootdefault + ' in the future')\n return bootdefault", "def bootloader() -> NoReturn:", "def test_get_bios_boot_mode_by_moid(self):\n pass", "def boot(self):\n pass", "def actionFromweb(self):\n print(\"Grabbing %x firmware.\" % self.dev_id)\n print(\"%s\" % firmware[self.dev_id])\n fn=\"/tmp/.goodfet.hex\"\n os.system(\"curl %s >%s\" % (firmware[self.dev_id],fn))\n\n fw=Memory(fn)\n #fw.loadIhex(open(fn,\"rb\"))\n\n 
sys.stderr.write(\"Program ...\\n\")\n sys.stderr.flush()\n self.programData(fw, self.ACTION_PROGRAM | self.ACTION_VERIFY)\n sys.stderr.write(\"%i bytes programmed.\\n\" % self.byteCtr)\n sys.stderr.flush()", "def bootloader():\n pass", "def get_start_on_boot(host,guest):\n return run(['xec-vm', '-n', guest, 'get', 'start-on-boot'], host=host)", "def boot(self):\n\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the given url as the UefiShellStartupUrl of the system in uefi boot mode.
def set_http_boot_url(self, url):
    if(self._is_boot_mode_uefi() is True):
        self._change_bios_setting({'UefiShellStartupUrl': url})
    else:
        msg = 'set_http_boot_url is not supported in the BIOS boot mode'
        raise exception.IloCommandNotSupportedInBiosError(msg)
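A companion sketch for the setter; the example startup-script URL and the read-back step are illustrative assumptions:

def point_uefi_shell_at(ilo, url='http://10.0.0.1/startup.nsh'):  # example URL, not from the dataset
    ilo.set_http_boot_url(url)
    # Optional read-back, relying on get_http_boot_url from the previous row.
    return ilo.get_http_boot_url()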
[ "def set_http_boot_uri(self, url):\n try:\n sushy_system = self._get_sushy_system()\n sushy_system.http_boot_uri.set_http_boot_uri(url)\n except sushy.exceptions.SushyError as e:\n msg = (self._('Unable to set HTTP Boot URI. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)", "def get_http_boot_url(self):\n if(self._is_boot_mode_uefi() is True):\n return self._get_bios_setting('UefiShellStartupUrl')\n else:\n msg = 'get_http_boot_url is not supported in the BIOS boot mode'\n raise exception.IloCommandNotSupportedInBiosError(msg)", "def set_home_url(self, url: str):\n ok = _api.SciterSetHomeURL(self.hwnd, url)\n if not ok:\n raise sciter.SciterError(\"Could not set home url \" + str(url))\n return self", "def set_HomepageURL(self, value):\n super(UpdateConnectAppInputSet, self)._set_input('HomepageURL', value)", "def get_http_boot_uri(self):\n try:\n sushy_system = self._get_sushy_system()\n http_boot_uri = sushy_system.http_boot_uri.httpbooturi\n except sushy.exceptions.SushyError as e:\n msg = (self._('Not able to find HTTP Boot URI. Error: '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n return http_boot_uri", "def go_to_setup_home(self):\n url = self.cumulusci.org.lightning_base_url\n self.selenium.go_to(url + \"/lightning/setup/SetupOneHome/home\")\n self.wait_until_loading_is_complete()", "def setUbootFlashAddress(self):\n\t\tself.ubootflashaddress = self.settings.getKeyValue('u-boot.flash.address')\n\t\treturn None", "def _set_url(self, full_url):\r\n self._chrome_driver.get(full_url)", "def os_set(self):\n if self.mod:\n path_startup = fr\"C:\\Users\\{environ['USER']}\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup\"\n copyfile(self.path_file, path_startup)", "def add_base_url(cli, container_name):\n edit_config = cli.exec_create(container=container_name,\n cmd = 'bash -exec \\'echo c.Application.base_url = \\\\\\\"/%s/jupyter\\\\\\\" >> /srv/jupyterhub_config.py\\'' % (container_name, ))\n return cli.exec_start(edit_config.get('Id'))", "def set_url(self, url):\n self.base_url = url\n self.set_option(pycurl.URL, self.base_url)", "def __init__(self, uid=None, name=\"RW.MC.UI\", config_ready=True,\n recovery_action=core.RecoveryType.FAILCRITICAL.value,\n start=True, data_storetype=core.DataStore.NOSTORE.value,\n mode_active=True,\n ):\n super(UIServerLauncher, self).__init__(\n uid=uid,\n name=name,\n exe=\"./usr/share/rw.ui/skyquake/scripts/launch_ui.sh\",\n config_ready=config_ready,\n recovery_action=recovery_action,\n start=start,\n data_storetype=data_storetype,\n mode_active=mode_active,\n )", "def pibooth_startup(cfg, app):", "def _set_stub_link_onstartup(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"stub-link-onstartup\", rest_name=\"stub\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Advertise maximum metric in Router LSA for stub links', u'alt-name': u'stub'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='empty', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"stub_link_onstartup must be of a type compatible with empty\"\"\",\n 'defined-type': \"empty\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"stub-link-onstartup\", rest_name=\"stub\", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Advertise maximum metric in Router LSA for stub links', u'alt-name': u'stub'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='empty', is_config=True)\"\"\",\n })\n\n self.__stub_link_onstartup = t\n if hasattr(self, '_set'):\n self._set()", "def update_user_environment(c, config):\n user_env = config['userEnvironment']\n\n # Set default application users are launched into\n if user_env['defaultApp'] == 'jupyterlab':\n c.Spawner.default_url = '/lab'\n elif user_env['defaultApp'] == 'nteract':\n c.Spawner.default_url = '/nteract'", "def _set_url(self): \n self.url = self.geturl()", "def SetBootloaderEnv(script, name, val):\n script.AppendExtra('set_bootloader_env(\"%s\", \"%s\");' % (name, val))", "def flashUboot(self):\n\t\tif self.settings.getKeyValue('flash.uboot?') == 'y':\n\t\t\tloadAddress = self.settings.getKeyValue('u-boot.flash.address')\n\t\t\tcmd = self.settings.getKeyValue('u-boot.load.command')\n\t\t\tcmd = cmd.replace('<u-boot>', 'u-boot.bin.12x.2430')\n\t\t\tself.socket.send(cmd, 5)\n\t\t\t#self.socket.send('protect off 1:0-1\\r', 2)\n\t\t\t#self.socket.send('erase 1:0-1\\r', 2)\n\t\t\t#self.socket.send('cp.b 80000000 %s 2ffff\\r' % loadAddress)\n\t\t\treturn None\n\t\t\t#cmd = cmd.replace('<u-bootloadadress>', self.u-bootloadaddress)", "def goto_environment_url(self):\n try:\n self._browser.get(self._environment.url)\n except Exception as e:\n self.logger.error(\"Error going to environment '\" + self._environment.url + \"' : \" + str(e))\n raise" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set iscsi details of the system in uefi boot mode. The iSCSI initiator is identified by the MAC provided. The initiator system is set with the target details like IQN, LUN, IP, Port etc.
def set_iscsi_boot_info(self, mac, target_name, lun, ip_address,
                        port='3260', auth_method=None, username=None,
                        password=None):
    if(self._is_boot_mode_uefi() is True):
        iscsi_info = {}
        iscsi_info['iSCSITargetName'] = target_name
        iscsi_info['iSCSIBootLUN'] = lun
        iscsi_info['iSCSITargetIpAddress'] = ip_address
        iscsi_info['iSCSITargetTcpPort'] = int(port)
        iscsi_info['iSCSITargetInfoViaDHCP'] = False
        iscsi_info['iSCSIBootEnable'] = 'Enabled'
        if (auth_method == 'CHAP'):
            iscsi_info['iSCSIAuthenticationMethod'] = 'Chap'
            iscsi_info['iSCSIChapUsername'] = username
            iscsi_info['iSCSIChapSecret'] = password
        self._change_iscsi_settings(mac.upper(), iscsi_info)
    else:
        msg = 'iscsi boot is not supported in the BIOS boot mode'
        raise exception.IloCommandNotSupportedInBiosError(msg)
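To make the NIC-settings side of the call concrete, this self-contained sketch rebuilds the same iSCSI dictionary the method above sends for a CHAP-authenticated target; every target value below is made up:

def build_iscsi_info(target_name, lun, ip_address, port='3260',
                     auth_method=None, username=None, password=None):
    # Same key/value construction as set_iscsi_boot_info above.
    info = {
        'iSCSITargetName': target_name,
        'iSCSIBootLUN': lun,
        'iSCSITargetIpAddress': ip_address,
        'iSCSITargetTcpPort': int(port),
        'iSCSITargetInfoViaDHCP': False,
        'iSCSIBootEnable': 'Enabled',
    }
    if auth_method == 'CHAP':
        info['iSCSIAuthenticationMethod'] = 'Chap'
        info['iSCSIChapUsername'] = username
        info['iSCSIChapSecret'] = password
    return info

print(build_iscsi_info('iqn.2011-07.com.example:disk-1', '1',
                       '192.168.1.30', auth_method='CHAP',
                       username='chapuser', password='chapsecret'))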
[ "def unset_iscsi_boot_info(self, mac):\n if(self._is_boot_mode_uefi() is True):\n iscsi_info = {'iSCSIBootEnable': 'Disabled'}\n self._change_iscsi_settings(mac.upper(), iscsi_info)\n else:\n msg = 'iscsi boot is not supported in the BIOS boot mode'\n raise exception.IloCommandNotSupportedInBiosError(msg)", "def iscsi(self, iscsi):\n self._iscsi = iscsi", "def test_update_bios_boot_mode(self):\n pass", "def setMAC( self, intf, mac ):\n result = self.cmd( 'ifconfig', intf, 'down' )\n result += self.cmd( 'ifconfig', intf, 'hw', 'ether', mac )\n result += self.cmd( 'ifconfig', intf, 'up' )\n return result", "def set_boot_options(self, image_name, **vendor_specifics):\n current_boot = self.show(\"show running-config | inc ^boot system \")\n file_system = vendor_specifics.get(\"file_system\")\n if file_system is None:\n file_system = self._get_file_system()\n\n file_system_files = self.show(f\"dir {file_system}\")\n if re.search(image_name, file_system_files) is None:\n log.error(\"Host %s: File not found error for image %s.\", self.host, image_name)\n raise NTCFileNotFoundError(\n # TODO: Update to use hostname\n hostname=self.host,\n file=image_name,\n directory=file_system,\n )\n\n current_images = current_boot.splitlines()\n commands_to_exec = [f\"no {image}\" for image in current_images]\n commands_to_exec.append(f\"boot system {file_system}/{image_name}\")\n self.config(commands_to_exec)\n\n self.save()\n if self.boot_options[\"sys\"] != image_name:\n log.error(\"Host %s: Setting boot command did not yield expected results\", self.host)\n raise CommandError(\n command=f\"boot system {file_system}/{image_name}\",\n message=\"Setting boot command did not yield expected results\",\n )\n\n log.info(\"Host %s: boot options have been set to %s\", self.host, image_name)", "def set_interface_mac(device, mac):\n # turn off device\n cmd = \"ip link set {} down\".format(device)\n subprocess.call(cmd.split())\n print('Shutting Down Interface...')\n sleep(1)\n # print (cmd)\n # set mac\n cmd = \"ip link set {} address {}\".format(device, mac)\n subprocess.call(cmd.split())\n print('Setting MAC Address....!!!')\n sleep(1)\n # print (cmd)\n # turn on device\n cmd = \"ip link set {} up\".format(device)\n subprocess.call(cmd.split())\n print('Switching ON Interface...')\n sleep(1)\n # print (cmd)", "def set_interface_mac(ihost, interface):\n selected_mac = None\n selected_ifname = None\n if interface['iftype'] == constants.INTERFACE_TYPE_VIRTUAL:\n selected_mac = constants.ETHERNET_NULL_MAC\n available_macs = _get_lower_interface_macs(ihost, interface)\n if interface['iftype'] == constants.INTERFACE_TYPE_AE:\n boot_interface = _get_boot_interface(ihost)\n if boot_interface:\n boot_ifname = boot_interface['ifname']\n boot_uuid = boot_interface['uuid']\n if (any(x in interface['uses'] for x in [boot_ifname, boot_uuid])):\n selected_mac = boot_interface['imac']\n selected_ifname = boot_interface['ifname']\n else:\n LOG.warn(\"No boot interface found for host {}\".format(\n ihost['hostname']))\n\n # If one of the interfaces in the bond is the original mgmt mac\n # of controller-0, select that interface and its mac.\n if (ihost['hostname'] == constants.CONTROLLER_0_HOSTNAME and\n ihost['mgmt_mac'] is not None and\n interface['ifclass'] == constants.INTERFACE_CLASS_PLATFORM):\n for ifname in interface['uses']:\n if ihost['mgmt_mac'] == available_macs[ifname]:\n selected_ifname = ifname\n selected_mac = available_macs[selected_ifname]\n break\n if not selected_mac:\n # Fallback to selecting the first interface 
in the list.\n selected_ifname = sorted(available_macs)[0]\n selected_mac = available_macs[selected_ifname]\n if interface.get('imac') != selected_mac:\n interface['imac'] = selected_mac\n LOG.info(\"Setting MAC of interface {} to {}; taken from {}\".format(\n interface['ifname'], interface['imac'], selected_ifname))\n return interface", "def setmac(self, ifname, mac):\n # untested, perhaps I should use /bin/ip or ioctl instead.\n if len(mac) == 6:\n mac = strmac(mac)\n self.run_vyatta_interfaces(ifname, \"--set-mac\", mac)", "def iscsi_target(self, iscsi_target):\n\n self._iscsi_target = iscsi_target", "def mgmt_mac_set_by_ihost(self, context, host, mgmt_mac):\n if (os.path.isfile(constants.ANSIBLE_BOOTSTRAP_FLAG) and\n host.hostname == constants.CONTROLLER_0_HOSTNAME):\n\n self.dbapi.ihost_update(host.uuid,\n {'mgmt_mac': mgmt_mac})\n else:\n LOG.error(\"Received a request to update management mac for host \"\n \"%s under the wrong condition.\" % host.hostname)", "def test_patch_bios_boot_mode(self):\n pass", "def set_vif_host_backend_ethernet_config(conf, tapname):\n\n conf.net_type = \"ethernet\"\n conf.target_dev = tapname\n conf.script = \"\"", "def set_os_mtu(self, iface=None, mtu=None):\n pytest.skip(\"Method is not supported by Iperf TG\")", "def target_hciconfig_init(self):\n (status, output) = self.target.run('hciconfig hci0 reset')\n assert status == 0, \"reset hci0 fails, please check if your BT device exists\"\n time.sleep(1)\n self.target.run('hciconfig hci0 up')\n self.target.run('hciconfig hci0 piscan')\n self.target.run('hciconfig hci0 noleadv')\n time.sleep(1)", "def set_boot_sel():\n\n bus = SMBus(1)\n\n # reset BOOT_SEL\n bus.write_byte_data(0x20, 3, 0xFE)\n bus.write_byte_data(0x20, 7, 0xFF)\n\n state = {}\n for i in [2, 6]:\n state[i] = bus.read_byte_data(0x20, i)\n\n # start reset\n data_to_write = 0x00 | (state[2] & 0xDF)\n bus.write_byte_data(0x20, 2, data_to_write)\n data_to_write = 0x00 | (state[6] & 0xDF)\n bus.write_byte_data(0x20, 6, data_to_write)\n # set BOOT_SEL high\n data_to_write = 0x01\n bus.write_byte_data(0x20, 3, data_to_write)\n data_to_write = 0xFE\n bus.write_byte_data(0x20, 7, data_to_write)\n # stop reset\n data_to_write = 0x20 | (state[2] & 0xDF)\n bus.write_byte_data(0x20, 2, data_to_write)\n data_to_write = 0x20 | (state[6] & 0xDF)\n bus.write_byte_data(0x20, 6, data_to_write)", "def setupInterrupts(self):\n self.cpu.createInterruptController()\n self.cpu.interrupts[0].pio = self.membus.master\n self.cpu.interrupts[0].int_master = self.membus.slave\n self.cpu.interrupts[0].int_slave = self.membus.master", "def evemu_target_setup(self, ic = None):\n assert ic == None or isinstance(ic, tc.target_c)\n target = self.target\n\n # Download the new evemu binaries with statically linked\n # libraries.\n output = target.shell.run(\"evemu-event --fifo || true\", output = True)\n # if evemu-event is installed, it will print it's help\n ## Usage: evemu-event [--sync] <device> --type <type> --code <code> --value <value>\n #\n # if the --fifo extensions are installed, there will be a\n # --fifo=<file_name> in there\n if '<device>' in output:\n # evemu-event is installed\n self.evemu_event = \"evemu-event\"\n self.evemu_device = \"evemu-device\"\n if '--fifo=<file_name>' in output:\n # evemu-event has --fifo support\n self.evemu_event_fifo = \"evemu-event --fifo=\"\n target.report_info(\"INPUT/evemu: distro's with --fifo\")\n else:\n # upload helper\n with msgid_c():\n target.shell.string_copy_to_file(\n # Each line is in the format:\n #\n # - <DEVICE> 
<TYPE> <CODE> <VALUE> [SYNC]\n # - WAIT <MILLISECS>\n # - empty (ignored)\n #\n # the helper is a wee hack since it has more\n # overhead with evemu with the support\n \"\"\"\\\n #! /bin/bash\n rm -f $1; mkfifo $1\n tail -f $1 | while read dev typetime code value sync; do\n [ \"$dev\" == \"WAIT\" ] && sleep $typetime && continue\n [ \"${sync:-}\" == SYNC ] && sync=\"--sync\"\n echo evemu-event ${sync:-} /dev/input/$dev --type $type --code $code --value $value\n done\n \"\"\",\n \"/usr/local/bin/evemu-event-fifo\")\n target.shell.run(\"chmod a+x /usr/local/bin/evemu-event-fifo\")\n self.evemu_event_fifo = \"/usr/local/bin/evemu-event-fifo \"\n target.report_info(\n \"INPUT/evemu: distro's with FIFO shell helper\")\n\n else:\n with msgid_c():\n arch = target.shell.run(\"uname -m\", output = True,\n trim = True).strip()\n # There is no evemu in the system, so let's upload our\n # semistatic build from the POS cache.\n rsync_server = target.kws.get(\n 'pos_rsync_server',\n ic.kws.get('pos_rsync_server', None))\n if rsync_server == None:\n raise tc.error_e(\n \"INPUT/evemu: there is no place where to download\"\n \" evemu.bin.%s.tar.gz for, need the\"\n \" target or interconnect to export\"\n \" *pos_rsync_server* with the location\" % arch)\n http_server = \"http://\" \\\n + rsync_server.replace(\"::images\", \"/ttbd-images-misc\")\n target.shell.run(\n \"curl --noproxy '*' -sk %s/evemu.bin.%s.tar.gz\"\n \" --output /tmp/evemu.bin.tar.gz\" % (http_server, arch))\n target.shell.run(\n \"tar xvvf /tmp/evemu.bin.tar.gz --overwrite -C /\")\n self.evemu_event = \"/opt/evemu/bin/evemu-event\"\n self.evemu_device = \"/opt/evemu/bin/evemu-device\"\n self.evemu_event_fifo = \"/opt/evemu/bin/evemu-event --fifo=\"\n target.report_info(\n \"INPUT/evemu: TCF's static build w/ --fifo\")\n\n self.evemu_create_device(\"default_mouse\", descriptor_mouse)\n self.evemu_create_device(\"default_keyboard\", descriptor_kbd)\n # start the FIFO pipe\n target.shell.run(\"nohup %s/tmp/evemu.fifo >& /tmp/evemu.log &\"\n % self.evemu_event_fifo)\n target.shell.run(\"chmod a+rw /tmp/evemu.fifo\")", "def test_node_set_bootdev(self):\n\n api.node_register('node-99', obm={\n \"type\": \"http://schema.massopencloud.org/haas/v0/obm/ipmi\",\n \"host\": \"ipmihost\",\n \"user\": \"root\",\n \"password\": \"tapeworm\"})\n\n # throw BadArgumentError for an invalid bootdevice\n with pytest.raises(api.BadArgumentError):\n api.node_set_bootdev('node-99', 'invalid-device')", "def login_iscsi_target(self, portal_config, target_config):\n ip = portal_config.get('ip')\n port = portal_config.get('port')\n iqn = target_config.get('iqn')\n if ip and port and iqn:\n command = 'iscsiadm -m node -l -T %s -p %s:%d' % (iqn, ip, port)\n self.cmd(command)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Disable iscsi boot option in uefi boot mode.
def unset_iscsi_boot_info(self, mac):
    if(self._is_boot_mode_uefi() is True):
        iscsi_info = {'iSCSIBootEnable': 'Disabled'}
        self._change_iscsi_settings(mac.upper(), iscsi_info)
    else:
        msg = 'iscsi boot is not supported in the BIOS boot mode'
        raise exception.IloCommandNotSupportedInBiosError(msg)
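A brief hedged pairing for the disable path; the MAC address is a placeholder and the client object is assumed as before:

MAC = 'aa:bb:cc:dd:ee:ff'  # placeholder NIC MAC

def disable_iscsi_boot(ilo, mac=MAC):
    # Sends {'iSCSIBootEnable': 'Disabled'} for the NIC, per the document above.
    ilo.unset_iscsi_boot_info(mac)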
[ "def disable_start_on_boot(host,guest):\n run(['xec-vm', '-n', guest, 'set', 'start-on-boot', 'false'], host=host)", "def reset_secure_boot_keys(self):\n if self._is_boot_mode_uefi():\n self._change_secure_boot_settings('ResetToDefaultKeys', True)\n else:\n msg = ('System is not in UEFI boot mode. \"SecureBoot\" related '\n 'resources cannot be changed.')\n raise exception.IloCommandNotSupportedInBiosError(msg)", "def test_patch_bios_boot_mode(self):\n pass", "def test_update_bios_boot_mode(self):\n pass", "def safe_boot_disabled(self, safe_boot_disabled):\n\n self._safe_boot_disabled = safe_boot_disabled", "def test_negative_boot_from_network(self):\n testflow.step(\n \"run once VM %s without nic, boot from network\", config.VM_RUN_ONCE\n )\n assert not helper.run_once_with_boot_dev(\n config.ENUMS['boot_sequence_network']\n )", "def unconfigure_global_dual_active_recovery_reload_disable(device):\n # build a list of commands to send\n # Add stackwise-virtual as first element in the list\n # Enables dual-active recovery-reload\n command_list = ['stackwise-virtual']\n command_list.append(f'no dual-active recovery-reload-disable')\n try:\n output = device.configure(command_list)\n except SubCommandFailure:\n raise SubCommandFailure('Failed to Disable global stackwise-virtual dual-active recovery-reload')\n return output", "def disableSledIO(self):\n resp = self.sledIOCmdProxy('set mode', 'off')", "def _disable_emulation(self):\n self.command(\"EMUL 0\", check_errors=False)", "def safe_boot_disabled(self):\n return self._safe_boot_disabled", "def iscsi_interface_disable(self, interface_name):\n return self.request( \"iscsi-interface-disable\", {\n 'interface_name': [ interface_name, 'interface-name', [ basestring, 'None' ], False ],\n }, {\n } )", "def configure_global_dual_active_recovery_reload_disable(device):\n # build a list of commands to send\n # Add stackwise-virtual as first element in the list\n # Disables dual-active recovery-reload\n command_list = ['stackwise-virtual']\n command_list.append(f'dual-active recovery-reload-disable')\n try:\n output = device.configure(command_list)\n except SubCommandFailure:\n raise SubCommandFailure('Failed to Enable global stackwise-virtual dual-active recovery-reload')\n return output", "def _DisableRootFsVerification(self):\n # 2 and 4 are the kernel partitions.\n for partition in [2, 4]:\n self.RunCmdOnDevice(['/usr/share/vboot/bin/make_dev_ssd.sh',\n '--partitions', str(partition),\n '--remove_rootfs_verification', '--force'])\n\n # Restart, wait a bit, and re-establish the SSH master connection.\n # We need to close the connection gracefully, then run the shutdown command\n # without using a master connection. port_forward=True bypasses the master\n # connection.\n self.CloseConnection()\n self.RunCmdOnDevice(['reboot'], port_forward=True)\n time.sleep(30)\n self.OpenConnection()", "def set_boot_mode(self, task, mode):\n raise exception.UnsupportedDriverExtension(\n driver=task.node.driver, extension='set_boot_mode')", "def disable_backlight(self):\n\n self.write_byte(0x00)\n self.backlight = False", "def hal_backlight_off(self):\n self.i2c.writeto(self.i2c_addr, bytes([0x00]))", "def enable_start_on_boot(host,guest):\n run(['xec-vm', '-n', guest, 'set', 'start-on-boot', 'true'], host=host)", "def setDetachLoadOff(self):\n self.querier.setMsgHandler(DefaultMsgHandler(\"Set Detach Load Off\\n\"))\n return self.querier.queryext(0x20, 0x1A, [0x00, 0x00, 0x00]);", "def disable_irq() -> bool:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves the current boot mode of the server.
def get_current_boot_mode(self):
    boot_mode = self._get_bios_setting('BootMode')
    if boot_mode == 'LegacyBios':
        boot_mode = 'legacy'

    return boot_mode.upper()
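A tiny self-contained sketch of the normalisation performed above ('LegacyBios' becomes 'LEGACY', anything else is upper-cased); the raw BIOS values fed in are assumptions:

def normalize_boot_mode(raw):
    # Same mapping as get_current_boot_mode above.
    if raw == 'LegacyBios':
        raw = 'legacy'
    return raw.upper()

for raw in ('Uefi', 'LegacyBios'):  # assumed raw BIOS values
    print(raw, '->', normalize_boot_mode(raw))  # Uefi -> UEFI, LegacyBios -> LEGACY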
[ "def get_current_boot_mode(self):\n data = self._execute_command(\n 'GET_CURRENT_BOOT_MODE', 'SERVER_INFO', 'read')\n return data['GET_CURRENT_BOOT_MODE']['BOOT_MODE']['VALUE']", "def get_supported_boot_mode(self):\n data = self._execute_command(\n 'GET_SUPPORTED_BOOT_MODE', 'SERVER_INFO', 'read')\n return data['GET_SUPPORTED_BOOT_MODE']['SUPPORTED_BOOT_MODE']['VALUE']", "def bootmode(self):\n return self._get_val_str(_BP_BOOT_MODE)", "def get_pending_boot_mode(self):\n data = self._execute_command(\n 'GET_PENDING_BOOT_MODE', 'SERVER_INFO', 'read')\n return data['GET_PENDING_BOOT_MODE']['BOOT_MODE']['VALUE']", "def get_boot_mode():\n boot_mode = 'Legacy'\n try:\n reg_key = winreg.OpenKey(\n winreg.HKEY_LOCAL_MACHINE, r'System\\CurrentControlSet\\Control')\n reg_value = winreg.QueryValueEx(reg_key, 'PEFirmwareType')[0]\n if reg_value == 2:\n boot_mode = 'UEFI'\n except:\n boot_mode = 'Unknown'\n\n return boot_mode", "def get_pending_boot_mode(self):\n headers, uri, bios_settings = self._check_bios_resource(['BootMode'])\n _, _, settings = self._get_bios_settings_resource(bios_settings)\n boot_mode = settings.get('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n return boot_mode.upper()", "def get_one_time_boot(self):\n data = self._execute_command(\n 'GET_ONE_TIME_BOOT', 'SERVER_INFO', 'read')\n return data['ONE_TIME_BOOT']['BOOT_TYPE']['VALUE']", "def get_boot_mode(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def get_secure_boot_mode(self):\n system = self._get_host_details()\n\n if ('links' not in system['Oem']['Hp'] or\n 'SecureBoot' not in system['Oem']['Hp']['links']):\n msg = ('\"SecureBoot\" resource or feature is not supported'\n ' on this system')\n raise exception.IloCommandNotSupportedError(msg)\n\n secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']\n\n # get the Secure Boot object\n status, headers, secure_boot_settings = self._rest_get(secure_boot_uri)\n\n if status >= 300:\n msg = self._get_extended_error(secure_boot_settings)\n raise exception.IloError(msg)\n\n return secure_boot_settings['SecureBootCurrentState']", "def node_get_boot_mode(task):\n task.driver.management.validate(task)\n return task.driver.management.get_boot_mode(task)", "def get_secure_boot_mode(self):\n sushy_system = self._get_sushy_system()\n try:\n secure_boot_enabled = GET_SECUREBOOT_CURRENT_BOOT_MAP.get(\n sushy_system.secure_boot.current_boot)\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish controller failed to provide '\n 'information about secure boot on the server. '\n 'Error: %(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexCommandNotSupportedError(msg)\n\n if secure_boot_enabled:\n LOG.debug(self._(\"Secure boot is Enabled\"))\n else:\n LOG.debug(self._(\"Secure boot is Disabled\"))\n return secure_boot_enabled", "def get_bios_bootmode(self):\n result = {}\n try:\n result_bios = self.get_all_bios_attributes()\n if result_bios['ret'] == False:\n return result_bios\n\n attributes = result_bios['entries']\n bios_attribute = {}\n attribute_bootmode = None\n \n # firstly, search boot mode name which match with key exactly. 
\n for attribute in attributes.keys():\n if attribute == \"BootMode\" or attribute == \"SystemBootMode\":\n attribute_bootmode = attribute\n break\n \n # secondly, if no key matchs perfectly, then search the attribute which contain boot mode name \n if attribute_bootmode == None:\n for attribute in attributes.keys():\n if \"SystemBootMode\" in attribute or \"Boot Mode\" in attribute or \"Boot_Mode\" in attribute:\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n return {'ret': False, 'msg': \"Failed to find BootMode attribute in BIOS attributes.\"}\n bios_attribute[attribute_bootmode] = attributes[attribute_bootmode]\n return {'ret': True, 'entries': bios_attribute}\n except Exception as e:\n LOGGER.debug(\"%s\" % traceback.format_exc())\n msg = \"Failed to get boot mode. Error message: %s\" % repr(e)\n LOGGER.error(msg)\n return {'ret': False, 'msg': msg}", "def set_bootloader_mode(self, mode):\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletThermalImaging.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 'B')", "def get_mode(self) -> str:\n try:\n return self.session.xenapi.VBD.get_mode(self.vbd)\n\n except Exception as e:\n print(\"VBD.get_mode Exception\", e)\n return None", "def set_bootloader_mode(self, mode):\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 'B')", "def set_bootloader_mode(self, mode):\n self.check_validity()\n\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 9, 'B')", "def test_get_bios_boot_mode_by_moid(self):\n pass", "def set_bootloader_mode(self, mode):\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletXMC1400Breakout.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 'B')", "def get_mode(hass):\n return hass.data[DOMAIN]['mode']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves the pending boot mode of the server. Gets the boot mode to be set on next reset.
def get_pending_boot_mode(self):
    headers, uri, bios_settings = self._check_bios_resource(['BootMode'])
    _, _, settings = self._get_bios_settings_resource(bios_settings)
    boot_mode = settings.get('BootMode')
    if boot_mode == 'LegacyBios':
        boot_mode = 'legacy'
    return boot_mode.upper()
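A hedged sketch contrasting the current and pending values, for example to detect a boot-mode change that is still waiting on a reboot; the client object is assumed as in the earlier sketches:

def boot_mode_change_pending(ilo):
    current = ilo.get_current_boot_mode()
    pending = ilo.get_pending_boot_mode()
    return pending != current, current, pending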
[ "def get_pending_boot_mode(self):\n data = self._execute_command(\n 'GET_PENDING_BOOT_MODE', 'SERVER_INFO', 'read')\n return data['GET_PENDING_BOOT_MODE']['BOOT_MODE']['VALUE']", "def get_current_boot_mode(self):\n data = self._execute_command(\n 'GET_CURRENT_BOOT_MODE', 'SERVER_INFO', 'read')\n return data['GET_CURRENT_BOOT_MODE']['BOOT_MODE']['VALUE']", "def bootmode(self):\n return self._get_val_str(_BP_BOOT_MODE)", "def get_supported_boot_mode(self):\n data = self._execute_command(\n 'GET_SUPPORTED_BOOT_MODE', 'SERVER_INFO', 'read')\n return data['GET_SUPPORTED_BOOT_MODE']['SUPPORTED_BOOT_MODE']['VALUE']", "def get_boot_mode():\n boot_mode = 'Legacy'\n try:\n reg_key = winreg.OpenKey(\n winreg.HKEY_LOCAL_MACHINE, r'System\\CurrentControlSet\\Control')\n reg_value = winreg.QueryValueEx(reg_key, 'PEFirmwareType')[0]\n if reg_value == 2:\n boot_mode = 'UEFI'\n except:\n boot_mode = 'Unknown'\n\n return boot_mode", "def get_current_boot_mode(self):\n boot_mode = self._get_bios_setting('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n\n return boot_mode.upper()", "def node_get_boot_mode(task):\n task.driver.management.validate(task)\n return task.driver.management.get_boot_mode(task)", "def get_boot_mode(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def get_one_time_boot(self):\n data = self._execute_command(\n 'GET_ONE_TIME_BOOT', 'SERVER_INFO', 'read')\n return data['ONE_TIME_BOOT']['BOOT_TYPE']['VALUE']", "def get_secure_boot_mode(self):\n system = self._get_host_details()\n\n if ('links' not in system['Oem']['Hp'] or\n 'SecureBoot' not in system['Oem']['Hp']['links']):\n msg = ('\"SecureBoot\" resource or feature is not supported'\n ' on this system')\n raise exception.IloCommandNotSupportedError(msg)\n\n secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']\n\n # get the Secure Boot object\n status, headers, secure_boot_settings = self._rest_get(secure_boot_uri)\n\n if status >= 300:\n msg = self._get_extended_error(secure_boot_settings)\n raise exception.IloError(msg)\n\n return secure_boot_settings['SecureBootCurrentState']", "def get_secure_boot_state(self, task):\n return irmc_common.get_secure_boot_mode(task.node)", "def get_mode(self) -> str:\n try:\n return self.session.xenapi.VBD.get_mode(self.vbd)\n\n except Exception as e:\n print(\"VBD.get_mode Exception\", e)\n return None", "def set_bootloader_mode(self, mode):\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletThermalImaging.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 'B')", "def get_secure_boot_mode(self):\n sushy_system = self._get_sushy_system()\n try:\n secure_boot_enabled = GET_SECUREBOOT_CURRENT_BOOT_MAP.get(\n sushy_system.secure_boot.current_boot)\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish controller failed to provide '\n 'information about secure boot on the server. 
'\n 'Error: %(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexCommandNotSupportedError(msg)\n\n if secure_boot_enabled:\n LOG.debug(self._(\"Secure boot is Enabled\"))\n else:\n LOG.debug(self._(\"Secure boot is Disabled\"))\n return secure_boot_enabled", "def set_bootloader_mode(self, mode):\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletXMC1400Breakout.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 'B')", "def set_bootloader_mode(self, mode):\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 'B')", "def set_bootloader_mode(self, mode):\n self.check_validity()\n\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 9, 'B')", "def get_bios_bootmode(self):\n result = {}\n try:\n result_bios = self.get_all_bios_attributes()\n if result_bios['ret'] == False:\n return result_bios\n\n attributes = result_bios['entries']\n bios_attribute = {}\n attribute_bootmode = None\n \n # firstly, search boot mode name which match with key exactly. \n for attribute in attributes.keys():\n if attribute == \"BootMode\" or attribute == \"SystemBootMode\":\n attribute_bootmode = attribute\n break\n \n # secondly, if no key matchs perfectly, then search the attribute which contain boot mode name \n if attribute_bootmode == None:\n for attribute in attributes.keys():\n if \"SystemBootMode\" in attribute or \"Boot Mode\" in attribute or \"Boot_Mode\" in attribute:\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n return {'ret': False, 'msg': \"Failed to find BootMode attribute in BIOS attributes.\"}\n bios_attribute[attribute_bootmode] = attributes[attribute_bootmode]\n return {'ret': True, 'entries': bios_attribute}\n except Exception as e:\n LOGGER.debug(\"%s\" % traceback.format_exc())\n msg = \"Failed to get boot mode. Error message: %s\" % repr(e)\n LOGGER.error(msg)\n return {'ret': False, 'msg': msg}", "def set_pending_boot_mode(self, boot_mode):\n boot_mode = boot_mode.lower()\n if boot_mode not in ['uefi', 'legacy']:\n msg = 'Invalid Boot mode specified'\n raise exception.IloInvalidInputError(msg)\n\n boot_properties = {'BootMode': boot_mode}\n\n if boot_mode == 'legacy':\n boot_properties['BootMode'] = 'LegacyBios'\n else:\n # If Boot Mode is 'Uefi' set the UEFIOptimizedBoot first.\n boot_properties['UefiOptimizedBoot'] = \"Enabled\"\n\n # Change the Boot Mode\n self._change_bios_setting(boot_properties)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the boot mode of the system for next boot.
def set_pending_boot_mode(self, boot_mode):
    boot_mode = boot_mode.lower()
    if boot_mode not in ['uefi', 'legacy']:
        msg = 'Invalid Boot mode specified'
        raise exception.IloInvalidInputError(msg)

    boot_properties = {'BootMode': boot_mode}

    if boot_mode == 'legacy':
        boot_properties['BootMode'] = 'LegacyBios'
    else:
        # If Boot Mode is 'Uefi' set the UEFIOptimizedBoot first.
        boot_properties['UefiOptimizedBoot'] = "Enabled"

    # Change the Boot Mode
    self._change_bios_setting(boot_properties)
[ "def set_boot_mode(self, task, mode):\n raise exception.UnsupportedDriverExtension(\n driver=task.node.driver, extension='set_boot_mode')", "def set_bootloader_mode(self, mode):\n self.check_validity()\n\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 9, 'B')", "def set_bootloader_mode(self, mode):\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletThermalImaging.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 'B')", "def set_bootloader_mode(self, mode):\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletXMC1400Breakout.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 'B')", "def set_bootloader_mode(self, mode):\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 'B')", "def boot(self, boot):\n\n self._boot = boot", "def node_set_boot_mode(task, mode):\n if task.node.provision_state == states.ADOPTING:\n return\n\n task.driver.management.validate(task)\n try:\n supported_boot_modes = (\n task.driver.management.get_supported_boot_modes(task)\n )\n except exception.UnsupportedDriverExtension:\n LOG.debug(\n \"Cannot determine supported boot modes of driver \"\n \"%(driver)s. Will make an attempt to set boot mode %(mode)s\",\n {'driver': task.node.driver, 'mode': mode})\n supported_boot_modes = ()\n\n if supported_boot_modes and mode not in supported_boot_modes:\n msg = _(\"Unsupported boot mode %(mode)s specified for \"\n \"node %(node_id)s. Supported boot modes are: \"\n \"%(modes)s\") % {'mode': mode,\n 'modes': ', '.join(supported_boot_modes),\n 'node_id': task.node.uuid}\n raise exception.InvalidParameterValue(msg)\n\n task.driver.management.set_boot_mode(task, mode=mode)", "def _setBootable(self, bootable):\n if self.partedPartition:\n if arch.isS390():\n return\n if self.flagAvailable(parted.PARTITION_BOOT):\n if bootable:\n self.setFlag(parted.PARTITION_BOOT)\n else:\n self.unsetFlag(parted.PARTITION_BOOT)\n else:\n raise errors.DeviceError(\"boot flag not available for this partition\", self.name)\n\n self._bootable = bootable\n else:\n self.req_bootable = bootable", "def _set_mode(self, mode):\n self._spi_device.mode = mode", "def test_update_bios_boot_mode(self):\n pass", "def set_system_boot_source(self, device, mode=None,\n enabled=constants.BOOT_SOURCE_ENABLED_ONCE):\n json = {\n 'Boot': {\n 'BootSourceOverrideTarget': device,\n 'BootSourceOverrideEnabled': enabled,\n }\n }\n if mode:\n json['Boot']['BootSourceOverrideMode'] = mode\n\n self._conn.make_req('PATCH', self._system_url(), json=json)\n LOG.debug('Set boot device for iBMC finished')", "def node_change_boot_mode(task, target_boot_mode):\n try:\n current_boot_mode = task.driver.management.get_boot_mode(task)\n except Exception as exc:\n current_boot_mode = None\n LOG.warning('Unexpected exception when trying to detect boot_mode '\n 'while changing boot mode for node '\n '%(node)s. %(class)s: %(exc)s',\n {'node': task.node.uuid,\n 'class': type(exc).__name__, 'exc': exc},\n exc_info=not isinstance(exc, exception.IronicException))\n\n if (current_boot_mode is not None\n and target_boot_mode == current_boot_mode):\n LOG.info(\"Target boot mode '%(target)s', and current boot mode \"\n \"'%(current)s' are identical. 
No change being made \"\n \"for node %(node)s\",\n {'target': target_boot_mode, 'current': current_boot_mode,\n 'node': task.node.uuid})\n return\n try:\n task.driver.management.set_boot_mode(task, mode=target_boot_mode)\n except Exception as exc:\n LOG.error('Unexpected exception when trying to change boot_mode '\n 'to %(target)s for node %(node)s. %(class)s: %(exc)s',\n {'node': task.node.uuid, 'target': target_boot_mode,\n 'class': type(exc).__name__, 'exc': exc},\n exc_info=not isinstance(exc, exception.IronicException))\n task.node.last_error = (\n \"Failed to change boot mode to '%(target)s: %(err)s\" % {\n 'target': target_boot_mode, 'err': exc})\n task.node.save()\n else:\n LOG.info(\"Changed boot_mode to %(mode)s for node %(node)s\",\n {'mode': target_boot_mode, 'node': task.node.uuid})\n task.node.boot_mode = target_boot_mode\n task.node.save()", "def set_boot_sel():\n\n bus = SMBus(1)\n\n # reset BOOT_SEL\n bus.write_byte_data(0x20, 3, 0xFE)\n bus.write_byte_data(0x20, 7, 0xFF)\n\n state = {}\n for i in [2, 6]:\n state[i] = bus.read_byte_data(0x20, i)\n\n # start reset\n data_to_write = 0x00 | (state[2] & 0xDF)\n bus.write_byte_data(0x20, 2, data_to_write)\n data_to_write = 0x00 | (state[6] & 0xDF)\n bus.write_byte_data(0x20, 6, data_to_write)\n # set BOOT_SEL high\n data_to_write = 0x01\n bus.write_byte_data(0x20, 3, data_to_write)\n data_to_write = 0xFE\n bus.write_byte_data(0x20, 7, data_to_write)\n # stop reset\n data_to_write = 0x20 | (state[2] & 0xDF)\n bus.write_byte_data(0x20, 2, data_to_write)\n data_to_write = 0x20 | (state[6] & 0xDF)\n bus.write_byte_data(0x20, 6, data_to_write)", "def bootmode(self):\n return self._get_val_str(_BP_BOOT_MODE)", "def test_patch_bios_boot_mode(self):\n pass", "def set_vehicle_mode(self, new_mode: str):\n logging.info(\"Set vehice mode: %s\", new_mode)\n self.vehicle.mode = VehicleMode(new_mode)\n while self.vehicle.mode.name != new_mode:\n time.sleep(1)", "def __update_saved_mode(self, value):\n self._current_mode = value\n self.adb._current_mode = value\n self.shell._current_mode = value\n self.fastboot._current_mode = value", "def set_mode(self, Mode):\n if Mode == MrBit_PID.AUTOMATIC:\n self.inAuto = True\n #self.reinitialse()\n elif Mode == MrBit_PID.MANUAL:\n self.inAuto = False", "def boot(self):\n\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets the iLO password.
def reset_ilo_credential(self, password):
    acc_uri = '/rest/v1/AccountService/Accounts'

    for status, hds, account, memberuri in self._get_collection(acc_uri):
        if account['UserName'] == self.login:
            mod_user = {}
            mod_user['Password'] = password
            status, headers, response = self._rest_patch(memberuri, None,
                                                         mod_user)
            if status != 200:
                msg = self._get_extended_error(response)
                raise exception.IloError(msg)
            return

    msg = "iLO Account with specified username is not found."
    raise exception.IloError(msg)
[ "def reset_ilo_credential(self, password):\n\n dic = {'USER_LOGIN': self.login}\n root = self._create_dynamic_xml(\n 'MOD_USER', 'USER_INFO', 'write', dic)\n\n element = root.find('LOGIN/USER_INFO/MOD_USER')\n etree.SubElement(element, 'PASSWORD', VALUE=password)\n d = self._request_ilo(root)\n self._parse_output(d)", "def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n click.echo(\n 'Success! All OATH credentials have been cleared from your YubiKey.')", "def change_password(self):\n self.password = utils.generate_password()", "def reset(self):\r\n self.email.text = \"\"\r\n self.password.text = \"\"", "def SetPassword(self, password):\r\n self.ims_password = password", "def reset_password(newpass, challenge):", "def _setNewPass(self, np):\n op = self._oldPass\n self._oldPass = None\n self.askForAuth(b\"password\", b\"\\xff\" + NS(op) + NS(np))", "def reset_password(self):\n # choose random password\n rng = random.SystemRandom()\n pw = ''.join(\n rng.choice(string.ascii_letters + string.digits)\n for _ in range(0, 16)\n )\n self.pwsalted = hash_secret_strong(pw)\n return pw", "def clear_password(self, e):\n\n self.password.label.config(show='*')\n if self.password.get() == 'Enter Enovia Password':\n self.password.clear()", "def change_password(self, password):\n \n self.password = self.hash_password(password)", "def reset_password(self, password):\n self.set_password(password)\n self.save()\n AccessToken.objects.filter(user=self).delete()", "def reset(ctx, force):\n force or click.confirm(\n \"WARNING! This will delete all stored PIV data and restore factory \"\n \"settings. Proceed?\",\n abort=True,\n err=True,\n )\n\n click.echo(\"Resetting PIV data...\")\n ctx.obj[\"session\"].reset()\n\n click.echo(\"Success! All PIV data have been cleared from the YubiKey.\")\n click.echo(\"Your YubiKey now has the default PIN, PUK and Management Key:\")\n click.echo(\"\\tPIN:\\t123456\")\n click.echo(\"\\tPUK:\\t12345678\")\n click.echo(\"\\tManagement Key:\\t010203040506070801020304050607080102030405060708\")", "def LdapResetPassword(self, record):\n password = self.login_pwd.generate_password()\n attrs = {}\n attrs['userPassword'] = self.login_pwd.encrypt_password(password)\n logger.debug(\"LDAP LdapResetPassword encrypt_password %s\"\n % (attrs['userPassword']))\n result = self.LdapModifyUser(record, attrs)\n return result", "def reset(self):\n self._i2c.send(6, 0x00)", "def set_password(self, password):\n\t\tself.password = password", "def reset_merchant_pass(self, newpass):\n self.refresh()\n if not newpass:\n raise ValueError(\"Password must be defined\")\n\n updateshopobj = self.sc.get_updateshop_obj(\n {\n 'Alias': self.Alias,\n 'MerchantPassword': newpass,\n }\n )\n self.sc.update(updateshopobj)\n self.refresh()", "def resetPassword(self, customerguid, password, jobguid=\"\", executionparams=None):", "def reset(ctx, force):\n\n force or click.confirm(\n \"WARNING! This will delete all stored OATH accounts and restore factory \"\n \"settings. Proceed?\",\n abort=True,\n err=True,\n )\n\n session = ctx.obj[\"session\"]\n click.echo(\"Resetting OATH data...\")\n old_id = session.device_id\n session.reset()\n\n keys = ctx.obj[\"oath_keys\"]\n if old_id in keys:\n del keys[old_id]\n keys.write()\n logger.info(\"Deleted remembered access key\")\n\n click.echo(\"Success! 
All OATH accounts have been deleted from the YubiKey.\")", "def go_to_change_password(self):\n self.clear_frame()\n self.change_password_panel()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets the BIOS settings to default values.
def reset_bios_to_default(self):
    # Check if the BIOS resource if exists.
    headers_bios, bios_uri, bios_settings = self._check_bios_resource()
    # Get the BaseConfig resource.
    try:
        base_config_uri = bios_settings['links']['BaseConfigs']['href']
    except KeyError:
        msg = ("BaseConfigs resource not found. Couldn't apply the BIOS "
               "Settings.")
        raise exception.IloCommandNotSupportedError(msg)

    # Check if BIOS resource supports patch, else get the settings
    if not self._operation_allowed(headers_bios, 'PATCH'):
        headers, bios_uri, _ = self._get_bios_settings_resource(
            bios_settings)
        self._validate_if_patch_supported(headers, bios_uri)

    status, headers, config = self._rest_get(base_config_uri)
    if status != 200:
        msg = self._get_extended_error(config)
        raise exception.IloError(msg)

    new_bios_settings = {}
    for cfg in config['BaseConfigs']:
        default_settings = cfg.get('default', None)
        if default_settings is not None:
            new_bios_settings = default_settings
            break
    else:
        msg = ("Default Settings not found in 'BaseConfigs' resource.")
        raise exception.IloCommandNotSupportedError(msg)

    request_headers = self._get_bios_hash_password(self.bios_password)
    status, headers, response = self._rest_patch(bios_uri, request_headers,
                                                 new_bios_settings)
    if status >= 300:
        msg = self._get_extended_error(response)
        raise exception.IloError(msg)
[ "def set_bios_default(self):\n result = {}\n try:\n system_url = self._find_system_resource()\n result = self._get_url(system_url + '/Bios')\n if result['ret'] == False:\n return result\n\n reset_bios_url = result['entries']['Actions']['#Bios.ResetBios']['target']\n body = {}\n # get parameter requirement if ActionInfo is provided\n if \"@Redfish.ActionInfo\" in result['entries'][\"Actions\"][\"#Bios.ResetBios\"]:\n actioninfo_url = result['entries'][\"Actions\"][\"#Bios.ResetBios\"][\"@Redfish.ActionInfo\"]\n result_actioninfo = self._get_url(actioninfo_url)\n if result_actioninfo['ret'] == False:\n return result_actioninfo\n if \"Parameters\" in result_actioninfo['entries']:\n for parameter in result_actioninfo['entries'][\"Parameters\"]:\n if (\"Name\" in parameter) and (\"AllowableValues\" in parameter):\n body[parameter[\"Name\"]] = parameter[\"AllowableValues\"][0]\n\n # Reset bios default\n headers = {\"Content-Type\":\"application/json\"}\n if body:\n response = self.post(reset_bios_url, body=body, headers=headers)\n elif \"settings\" in reset_bios_url:\n body = {\"ResetType\": \"default\"}\n response = self.post(reset_bios_url, body=body, headers=headers)\n else:\n response = self.post(reset_bios_url, headers=headers, body=body)\n if response.status in [200, 204]:\n return {'ret': True, 'msg': 'Succeed to reset bios attributes.'}\n else:\n LOGGER.error(str(response))\n return {'ret': False, 'msg': \"Failed to reset bios attributes. Error code is %s. Error message is %s.\" % \\\n (response.status, response.text)}\n except Exception as e:\n LOGGER.debug(\"%s\" % traceback.format_exc())\n msg = \"Failed to reset bios attributes. Error message: %s\" % repr(e)\n LOGGER.error(msg)\n return {'ret': False, 'msg': msg}", "def resetToDefault(self):\n \n pass", "def reset(self):\n\t\tfor i in range(0, len(self.reg_numbers)):\n\t\t\tself.regs[self.reg_numbers[i]] = BitArray(uint=self.reg_defaults[i], length=8)", "def system_reset(self):\n self.send([SYSTEM_RESET])", "def reset(self):\n self.manager.delete_all()\n for name, val in DEFAULT_SETTINGS.items():\n val['name'] = name\n val['default_value'] = val['value']\n self.manager.from_dict(val)", "def hard_reset(self):\n self._get_controller().hard_reset()", "def reset_settings():\n settings = Settings()\n settings.reset()\n settings.save()", "def reset():\n reload(config)\n reload(main)\n reload(dv)", "def reset(useDefaults=True):\n configs.clear()\n if useDefaults:\n update(default_global)", "def reset_stage_to_defaults(self):\r\n self.KCube.CC_ResetStageToDefaults(self.serial)", "def reset( self ):\n self.conf = self.defaults", "def reset(self):\n\n game.reset()\n sm.get_screen('game_screen').reset()", "def reset(self):\n self.v = bytearray(Config.NUMBER_OF_REGISTERS)\n self.pc = Config.PROGRAM_COUNTER\n self.sp = Config.STACK_POINTER\n self.i = 0\n self.timer_dt = 0\n self.timer_st = 0\n self.memory = bytearray(Config.MAX_MEMORY)\n self.load_fontset()", "def resetBoard(self):\n #Reset registers\n self.femb.write_reg( self.REG_RESET, 2)\n time.sleep(1.)\n\n #Reset state machines\n self.femb.write_reg( self.REG_RESET, 4)\n time.sleep(1.)\n\n #Reset reset register to 0\n self.femb.write_reg( self.REG_RESET, 0)\n time.sleep(0.2)", "def reset(self):\n self._unset_defaults_and_overrides()\n self.clear()", "def reset_from_flash(self):\n self._enable_boot0(False)\n self._reset()", "def reset_to_factory(self):\n self._log_msg_start(\"Reset to factory settings\")\n # Order of execution is clear, save, load. 
This will copy the factory default\n # settings from ROM to flash, load from flash, and activate.\n device_mask_dict = dict(\n deviceDevBbr=1, # devSpiFlash device battery backed RAM\n deviceDevFlash=1, # device Flash\n deviceDevEeprom=1, # device EEPROM\n deviceDeviceSpiFlash=1, # device SPI Flash\n )\n # self._ubx.send(\n # \"CFG-CFG\",\n # clearMask=0xFFFF,\n # saveMask=0xFFFF,\n # loadMask=0xFFFF,\n # deviceMask=device_mask_dict,\n # )\n self._ubx.send(\n \"CFG-CFG\",\n clearMask=0xFFFF,\n saveMask=0x0000,\n loadMask=0xFFFF,\n deviceMask=device_mask_dict,\n )\n self._ubx.send(\n \"CFG-CFG\",\n clearMask=0x0000,\n saveMask=dict(\n msgConf=1,\n ),\n loadMask=dict(),\n deviceMask=device_mask_dict,\n )", "def revertToDefaults(self):\n for setting in self.settings.values():\n setting.revertToDefault()", "def _reset_settings(self):\n self.settingsHandler.reset_to_original(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the iLO firmware version for server capabilities.
def _get_ilo_firmware_version(self):
    manager, reset_uri = self._get_ilo_details()
    ilo_firmware_version = manager['Firmware']['Current']['VersionString']
    return {'ilo_firmware_version': ilo_firmware_version}
[ "def firmware_version(self):\n return self._get_system_status()[\"firmware\"]", "def firmware_version(self):\n try:\n return self._fw_version\n except AttributeError:\n self._fw_version = self.conn.cmd( # pylint: disable-msg=W0201\n \"lsb_release -r | awk '{ print $2 }'\" )[1].strip()\n return self._fw_version", "def GetFirmwareVersion(self):\n return self.SerialSendReceive(self.CMD_GET_FIRMWARE_VERSION,\n expect_in='Ver',\n msg='getting firmware version')", "def readFirmwareVersion(self):\n rtn = self.lw.ctrl_transfer(bmRequestType=0xC0, bRequest=34,\n wValue=0, wIndex=0,\n data_or_wLength=8, timeout=USB_TIMEOUT)\n version = rtn.pop()\n return str((version & 0xF0) >> 4 )+\".\" + str((version & 0x0F))", "def get_firmware_version(self):\n\t\treturn self._firmware", "def firmware_version(self) -> str:\n return \"unknown\" if self._fwversion is None else self._fwversion", "def hardware_version(self):\n return self.data.get('hw_ver')", "def firmware_version(self):\n return self._fw_version", "def hardware_version(self):\n\t\thex_data = char_read(self.mac, _MI_BATTERY_AND_VERSION)\n\t\treturn hex_to_ascii(hex_data[6:])", "def test_get_hyperflex_server_firmware_version_by_moid(self):\n pass", "def get_hardware_revision():\n return _pigpio_command(_control, _PI_CMD_HWVER, 0, 0)", "def firmware_version(self) -> str:\n resp = self._mycam.devicemgmt.GetDeviceInformation()\n return resp.FirmwareVersion", "def hardware_version(self) -> str:\n return self.camera_info[\"main_hw_version\"]", "def get_hardware_version(self):\n return self._hardware_version", "def firmware_version(self):\n return self._firmware_version", "def getFirmwareVersion(self):\n if (self._belt_connection_state != BeltConnectionState.CONNECTED or\n self._belt_mode == BeltMode.UNKNOWN):\n return None\n else:\n return self._belt_firm_version", "def hcl_firmware_version(self):\n return self._hcl_firmware_version", "def get_software_version():\n return get_byte(0x80000BC)", "def firmware_revision(self):\n self.pi.i2c_write_device(self._h, [0x84, 0xB8])\n c, rev = self.pi.i2c_read_device(self._h, 1) # rev\n return rev[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return whether SR-IOV is enabled or not.
def _is_sriov_enabled(self):
    return (self._get_bios_setting('Sriov') == 'Enabled')
[ "def swo_enabled(self):\n return self._swo_enabled", "def _get_redist_enabled(self):\n return self.__redist_enabled", "def ms_get_rstp_enabled(self):\n self.open_route('/configure/switch_settings', \"Switch\")\n dropdown_value = page_utils.get_dropdown_value(\n self.get_page(),\n var_id='node_group_use_stp')\n return dropdown_value == 'Enable RSTP'", "def Enabled(self) -> bool:", "def get_prog_enable(self):\n #en = self._get_prop(\"enabled\")\n #return bool( en == \"true\" )\n if \"enabled\" in self._mydict:\n return bool(self._mydict[\"enabled\"] == \"true\")\n return True", "def get_isenabled(self):\n return self.isenabled", "def getEnabled(self):\r\n return self.isEnabled", "def isSirenActive(self) -> bool:\r\n if self.visprotocol is not None:\r\n return self.visprotocol.isSirenActive()\r\n return False", "def roi_enabled(self):\n return bool(self.get('roi_enabled'))", "def has_sriovdp_enabled(labels):\n if not labels:\n return False\n\n for label in labels:\n if label.label_key == helm_common.LABEL_SRIOVDP and label.label_value:\n return helm_common.LABEL_VALUE_ENABLED == label.label_value.lower()\n\n # We haven't found the sriovdp node key. Return False\n return False", "def shortenable(s):\n return s, True", "def enabled(self) -> bool:\n return self._controller[\"enabled\"]", "def getEnabled(self):\n return self.isEnabled", "def is_i2s_enabled(self):\n return ((self.get_control() & CONTROL_ENABLE) > 0)", "def is_enabled(self):\n\t\treturn bool(call_sdk_function('PrlShare_IsEnabled', self.handle))", "def is_sensitive(self):\n return self._is_sensitive", "async def enabled(self) -> bool:\n response = await self.adguard.request(\"parental/status\")\n return response[\"enabled\"]", "def get_is_show_powered_by(self):\n return self.is_show_powered_by", "def event_status_enable(self):\n return self.query_int('*ESE?')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the given virtual media device status and device URI
def _get_vm_device_status(self, device='FLOPPY'):
    valid_devices = {'FLOPPY': 'floppy', 'CDROM': 'cd'}

    # Check if the input is valid
    if device not in valid_devices:
        raise exception.IloInvalidInputError(
            "Invalid device. Valid devices: FLOPPY or CDROM.")

    manager, uri = self._get_ilo_details()
    try:
        vmedia_uri = manager['links']['VirtualMedia']['href']
    except KeyError:
        msg = ('"VirtualMedia" section in Manager/links does not exist')
        raise exception.IloCommandNotSupportedError(msg)

    for status, hds, vmed, memberuri in self._get_collection(vmedia_uri):
        status, headers, response = self._rest_get(memberuri)
        if status != 200:
            msg = self._get_extended_error(response)
            raise exception.IloError(msg)
        if (valid_devices[device] in
                [item.lower() for item in response['MediaTypes']]):
            vm_device_uri = response['links']['self']['href']
            return response, vm_device_uri

    # Requested device not found
    msg = ('Virtualmedia device "' + device + '" is not'
           ' found on this system.')
    raise exception.IloError(msg)
[ "def get_vmedia_device_uri(self, device):\n\n try:\n sushy_system = self._get_sushy_system()\n uri = utils.get_subresource_path_by(sushy_system, 'VirtualMedia')\n resp = sushy_system._conn.get(uri)\n vmedia_resp = json.loads(resp.text)\n for val in vmedia_resp.get(\"Members\"):\n for key in val:\n if device in val[key]:\n return val[key]\n except sushy.exceptions.SushyError as e:\n msg = (self._('Not able to find find vmedia device URI. Error: '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)", "def get_vmedia_device_status(self, device=\"cd0\"):\n\n if device not in VALID_VMEDIA_DEVICES:\n raise exception.InvalidInputError(\n \"Invalid device. Valid devices: cd0 or cd1 or hd0 or hd1.\")\n sushy_system = self._get_sushy_system()\n device = VALID_VMEDIA_DEVICES.get(device)\n\n vmedia_device_uri = self.get_vmedia_device_uri(device)\n\n try:\n resp = sushy_system._conn.get(vmedia_device_uri)\n return resp.text\n except sushy.exceptions.SushyError as e:\n msg = (self._('Error: %(error)s') %\n {'error': str(e)})\n raise exception.SDFlexError(msg)", "def insert_virtual_media(self, url, device='FLOPPY'):\n dic = {\n 'DEVICE': device.upper(),\n 'IMAGE_URL': url,\n }\n data = self._execute_command(\n 'INSERT_VIRTUAL_MEDIA', 'RIB_INFO', 'write', dic)\n return data", "def getMediaObjectStatus( mediaObjectId, params ) :\n\n\turl = params['endpoint_base'] + '/' + mediaObjectId # endpoint url\n\n\tendpointParams = dict() # parameter to send to the endpoint\n\tendpointParams['fields'] = 'status_code' # fields to get back\n\tendpointParams['access_token'] = params['access_token'] # access token\n\n\treturn makeApiCall( url, endpointParams, 'GET' ) # make the api call", "def get_status(self) -> MagneticStatus:", "def get_camera_status():\n\n\ttarget = send_command('getstatus cam')\n\tsplit_ans = target.split()\n\t\n\treturn split_ans", "def _media_status(self):\n media_status = self.media_status\n media_status_received = self.media_status_received\n\n if (\n media_status is None\n or media_status.player_state == MEDIA_PLAYER_STATE_UNKNOWN\n ):\n groups = self.mz_media_status\n for k, val in groups.items():\n if val and val.player_state != MEDIA_PLAYER_STATE_UNKNOWN:\n media_status = val\n media_status_received = self.mz_media_status_received[k]\n break\n\n return (media_status, media_status_received)", "def getVirtualStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/qemu/%s/status/current' % (node,vmid),None)\n return data", "def guess_vserver_device():\n\n s = commands.getoutput('/bin/mount | /bin/grep tagxid | /usr/bin/head -n 1')\n device = s.split()[0]\n\n return device", "def dev_status(self):\n self.debug_stream(\"In dev_status()\")\n argout = \"\"\n #----- PROTECTED REGION ID(SynchroMotorDS.Status) ENABLED START -----#\n self.argout = \"Status is ON\"\n #----- PROTECTED REGION END -----#\t//\tSynchroMotorDS.Status\n self.set_status(self.argout)\n self.__status = PyTango.Device_4Impl.dev_status(self)\n return self.__status", "def _manufacturer_from_status(status: dict[str, str]) -> str | None:\n return (\n status.get(\"device.mfr\")\n or status.get(\"ups.mfr\")\n or status.get(\"ups.vendorid\")\n or status.get(\"driver.version.data\")\n )", "def _get_mount_status(self, vm=None):\n result = Shell.run(f\"multipass info {vm} --format=json\")\n\n if f'instance \"{vm}\" does not exist' in result:\n dict_result = {\n 'name': vm,\n 'status': \"instance does not exist\"\n }\n else:\n result = json.loads(result)\n dict_result = {\n 'name': vm,\n 
'status': result[\"info\"][vm]['state'],\n 'mounts': result[\"info\"][vm]['mounts']\n }\n return dict_result", "def get_member_device(self, device):\n for vmedia_device in self.get_members():\n if device in vmedia_device.media_types:\n return vmedia_device", "def _rest_call(self, data, action):\n path = '/wm/device/?ipv4=' + data\n conn = httplib.HTTPConnection(self.host, self.port)\n conn.request('GET', path)\n response = conn.getresponse()\n ret = (response.status, response.reason, response.read())\n conn.close()\n return ret", "def media_image_url(self) -> Optional[str]:\n if self._current_device is None:\n return None\n return self._track.get('image', '')", "def _get_get_media_detail(self):\n return self.__get_media_detail", "def ProcessStatusUploadRequest(self, device_status, session_status):\n # Empty responses indicate a successful upload.\n device_status_report_response = dm.DeviceStatusReportResponse()\n session_status_report_response = dm.SessionStatusReportResponse()\n\n response = dm.DeviceManagementResponse()\n response.device_status_report_response.CopyFrom(\n device_status_report_response)\n response.session_status_report_response.CopyFrom(\n session_status_report_response)\n\n return (200, response)", "def get_virtual_media( context, system_id = None ):\n\n # Get the virtual media collection\n virtual_media_collection = get_virtual_media_collection( context, system_id = system_id )\n\n # Iterate through the members and pull out each of the instances\n virtual_media_list = []\n for member in virtual_media_collection.dict[\"Members\"]:\n virtual_media = context.get( member[\"@odata.id\"] )\n virtual_media_list.append( virtual_media.dict )\n return virtual_media_list", "async def test_camera_hass_urls(\n hass: HomeAssistant, mock_stream: None, url: str, result: int\n) -> None:\n device = (\n \"camera.test\",\n \"idle\",\n {\"friendly_name\": \"Test camera\", \"supported_features\": 3},\n )\n await async_process_ha_core_config(hass, {\"external_url\": url})\n\n appliance = await discovery_test(device, hass)\n assert len(appliance[\"capabilities\"]) == result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the Virtual Media drive status. It sets the boot option for the virtual media device.
def set_vm_status(self, device='FLOPPY', boot_option='BOOT_ONCE',
                  write_protect='YES'):
    # CONNECT is a RIBCL call. There is no such property to set in RIS.
    if boot_option == 'CONNECT':
        return

    boot_option_map = {'BOOT_ONCE': True,
                       'BOOT_ALWAYS': False,
                       'NO_BOOT': False
                       }

    if boot_option not in boot_option_map:
        msg = ('Virtualmedia boot option "' + boot_option + '" is '
               'invalid.')
        raise exception.IloInvalidInputError(msg)

    response, vm_device_uri = self._get_vm_device_status(device)

    # Update required property
    vm_settings = {}
    vm_settings['Oem'] = (
        {'Hp': {'BootOnNextServerReset': boot_option_map[boot_option]}})

    # perform the patch operation
    status, headers, response = self._rest_patch(
        vm_device_uri, None, vm_settings)

    if status >= 300:
        msg = self._get_extended_error(response)
        raise exception.IloError(msg)
[ "def set_volume_bootable_status(self, volume, bootable):\n volume = self._get_resource(_volume.Volume, volume)\n volume.set_bootable_status(self, bootable)", "def set_vm_status(self, device='FLOPPY',\n boot_option='BOOT_ONCE', write_protect='YES'):\n dic = {'DEVICE': device.upper()}\n xml = self._create_dynamic_xml(\n 'SET_VM_STATUS', 'RIB_INFO', 'write', dic)\n\n if six.PY2:\n child_iterator = xml.getiterator()\n else:\n child_iterator = xml.iter()\n\n for child in child_iterator:\n if child.tag == 'SET_VM_STATUS':\n etree.SubElement(child, 'VM_BOOT_OPTION',\n VALUE=boot_option.upper())\n etree.SubElement(child, 'VM_WRITE_PROTECT',\n VALUE=write_protect.upper())\n\n d = self._request_ilo(xml)\n data = self._parse_output(d)\n return data", "def enable_vmedia(self, set_vmedia_state):\n\n if not isinstance(set_vmedia_state, bool):\n msg = ('The parameter \"%(parameter)s\" value \"%(value)s\" for '\n 'vmedia is invalid. Valid values are: True/False.' %\n {'parameter': 'ServiceEnabled',\n 'value': set_vmedia_state})\n raise exception.InvalidInputError(msg)\n sushy_system = self._get_sushy_system()\n sdflex_virtual_media.VirtualMedia.enable_vmedia(sushy_system,\n set_vmedia_state)", "def step7(self):\n for indx, mr in enumerate(self.mrs):\n self.log.info(\"Set boot drive on controller:%d\"\n % (mr.ctrl_id))\n for vd in self.mr_vds[indx]:\n if (int(mr.cli.bootdrive_vd_get()) != vd):\n mr.cli.bootdrive_vd_set(vd_id=self.mr_vds[indx][indx],\n setting=\"On\")\n break", "def virtualmediaautomatictesting(self):\n self.rdmc.ui.printer(\n \"\\n*************************************************\"\n \"********************************\\n\"\n )\n self.rdmc.ui.printer(\n \"*********************VIRTUAL MEDIA \"\n \"AUTOMATIC TESTING*****************************\\n\"\n )\n self.rdmc.ui.printer(\n \"***************************************************\"\n \"******************************\\n\\n\"\n )\n return\n\n self.auxcommands[\"login\"].run(\"\")\n self.auxcommands[\"virtualmedia\"].run(\"\")\n\n try:\n self.auxcommands[\"virtualmedia\"].run(\"2 --remove\")\n except:\n pass\n\n # Needs to be updated\n self.auxcommands[\"virtualmedia\"].run(\"2 http://10.0.0.1/vm.iso --bootnextreset\")\n results = self.rdmc.app.get_handler(\n \"/rest/v1/Managers/1/VirtualMedia/2\", service=True, silent=True\n ).dict\n\n if (\n not results[\"Inserted\"]\n or not results[\"Oem\"][self.rdmc.app.typepath.defs.oemhp][\n \"BootOnNextServerReset\"\n ]\n ):\n raise InvalidOrNothingChangedSettingsError(\"VM not found.\")\n\n self.auxcommands[\"virtualmedia\"].run(\"2 --remove\")\n\n results = self.rdmc.app.get_handler(\n \"/rest/v1/Managers/1/VirtualMedia/2\", service=True, silent=True\n ).dict\n\n if (\n results[\"Inserted\"]\n or results[\"Oem\"][self.rdmc.app.typepath.defs.oemhp][\"BootOnNextServerReset\"]\n ):\n raise InvalidOrNothingChangedSettingsError(\"VM not removed.\")\n\n self.auxcommands[\"logout\"].run(\"\")", "def set_media_volume_sync(self, dut_name, enable=True):\n try:\n if self.phone_info.phone_type == PhoneType.ANDROID and 'SM' in self._get_android_phone_model():\n is_bt_connected_to_device = self.bt_is_connected_to(dut_name)\n if not is_bt_connected_to_device:\n logger.debug(\n 'For phone found that DUT {} is not connected with {} , '\n 'So Media Volume Sync option is not available '.format(\n dut_name,\n self.phone_info.bluetooth_name))\n return False\n\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.bluetooth_more_options,\n 5)\n self.find_element(self.driver.appium_driver,\n 
self.bluetooth_more_options, 0).click()\n\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.media_volume_text,\n 10)\n self.find_element(self.driver.appium_driver,\n self.media_volume_text, 0).click()\n\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.media_volume_sync_switch,\n 10)\n\n volume_sync_switch = self.find_element(\n self.driver.appium_driver, self.media_volume_sync_switch,\n 0)\n\n # Now click that button if we're in the wrong state.\n is_media_volume_sync = self._toggle_switch(volume_sync_switch,\n enable)\n self.driver.appium_driver.back()\n logger.debug(\n \"Media Volume option is set to {} on connected bluetooth devices {}\".format(\n enable, dut_name))\n return is_media_volume_sync\n logger.warning(\n \"Media Volume Sync Option is not available on {} connected bluetooth devices\".format(\n self.phone_info.bluetooth_name))\n except Exception as e:\n logger.warning(\n \"Could not enable/disable Media Volume Sync on connected mobile devices {}\"\n .format(self.phone_info.bluetooth_name))\n logger.warning(repr(e))\n return False", "def set_status(self, status):\n self.set_attr('procstatus', status)", "def set_status(self, status, status_extra, last_command=None, last_device_command=None, delay=None):\n if delay is None:\n delay = 0.100\n\n if last_device_command is not None:\n command = last_device_command.command\n request_id = last_device_command.request_id\n else:\n command = None\n request_id = None\n\n if last_command is not None:\n command = last_command\n\n if status is None:\n self.yombo_device.set_status_delayed(\n delay=delay,\n machine_status_extra=status_extra,\n request_id=request_id,\n reported_by=\"Wemo node\"\n )\n else:\n self.yombo_device.set_status_delayed(\n delay=delay,\n command=command,\n request_id=request_id,\n machine_status=status,\n machine_status_extra=status_extra,\n reported_by=\"Wemo node\"\n )", "def firmware_status(self, firmware_status):\n self._firmware_status = firmware_status", "def set_virtual_stage(self, virtual_stage: int) -> None:\n self.virtual_stage = virtual_stage", "def api_set_drive_mode():\n webserver_node = webserver_publisher_node.get_webserver_node()\n drive_mode = request.json.get(\"drive_mode\")\n if drive_mode is None:\n return jsonify({\"success\": False, \"reason\": \"drive_mode must be set.\"})\n\n webserver_node.get_logger().info(f\"Changed the vehicle state to {drive_mode}\")\n if drive_mode == \"manual\":\n drive_mode_state = 0\n elif drive_mode == \"auto\":\n drive_mode_state = 1\n else:\n # RoboCat mode.\n drive_mode_state = 3\n\n try:\n vehicle_state_req = ActiveStateSrv.Request()\n vehicle_state_req.state = drive_mode_state\n vehicle_state_res = call_service_sync(webserver_node.vehicle_state_cli,\n vehicle_state_req)\n if vehicle_state_res and (vehicle_state_res.error == 0):\n return jsonify(success=True)\n else:\n webserver_node.get_logger().error(\"Vehicle state service call failed\")\n return jsonify(success=False, reason=\"Error\")\n\n except Exception as ex:\n webserver_node.get_logger().error(f\"Unable to reach vehicle state server: {ex}\")\n return jsonify({\"success\": False,\n \"reason\": \"Unable to reach vehicle state server.\"})", "def set_drive_mode(mode):", "def setDriveMode(self, mode):\n self.driveMode = mode", "def switchToDriveMode(self, state):\n #base.direct.minimumConfiguration()\n #base.direct.manipulationControl.disableManipulation()\n # Update vis data\n self.initVisibilityData()\n## # Switch to drive mode\n## base.useDrive()\n## # Move cam up 
and back\n## base.cam.setPos(0, -5, 4)\n## # And move down and forward to compensate\n## base.camera.setPos(base.camera, 0, 5, -4)\n## # Make sure we're where we want to be\n## pos = base.direct.camera.getPos()\n## pos.setZ(0.0)\n## hpr = base.direct.camera.getHpr()\n## hpr.set(hpr[0], 0.0, 0.0)\n## # Fine tune the drive mode\n## base.mouseInterface.node().setPos(pos)\n## base.mouseInterface.node().setHpr(hpr)\n## base.mouseInterface.node().setForwardSpeed(0)\n## base.mouseInterface.node().setReverseSpeed(0)\n\n cameraPos = base.camera.getPos(self.avatar)\n base.camera.reparentTo(self.avatar)\n base.camera.setPos(cameraPos)\n base.camera.setHpr(0, 0, 0)\n #base.camera.setPos(0, 0, 0)\n base.camera.setPos(0, -11.8125, 3.9375)\n\n base.camLens.setFov(VBase2(60, 46.8265))\n\n #self.initializeSmartCameraCollisions()\n #self._smartCamEnabled = False\n\n # Turn on collisions\n if self.panel.fColl.get():\n self.collisionsOn()\n # Turn on visiblity\n if self.panel.fVis.get():\n self.visibilityOn()\n # Turn on collision traversal\n if self.panel.fColl.get() or self.panel.fVis.get():\n self.traversalOn()\n\n if (self.controlManager == None):\n # create player movement controls,camera controls, and avatar\n self.controlManager = ControlManager.ControlManager()\n avatarRadius = 1.4\n floorOffset = OTPGlobals.FloorOffset\n reach = 4.0\n\n #walkControls=GravityWalker.GravityWalker(gravity = -32.1740 * 2.0)\n walkControls=NonPhysicsWalker.NonPhysicsWalker()\n walkControls.setWallBitMask(OTPGlobals.WallBitmask)\n walkControls.setFloorBitMask(OTPGlobals.FloorBitmask)\n walkControls.initializeCollisions(self.cTrav, self.avatar,\n avatarRadius, floorOffset, reach)\n self.controlManager.add(walkControls, \"walk\")\n self.controlManager.use(\"walk\", self)\n\n # set speeds after adding controls to the control manager\n self.controlManager.setSpeeds(\n OTPGlobals.ToonForwardSpeed,\n OTPGlobals.ToonJumpForce,\n OTPGlobals.ToonReverseSpeed,\n OTPGlobals.ToonRotateSpeed\n )\n else:\n self.controlManager.enable()\n\n self.avatarAnimTask = taskMgr.add(self.avatarAnimate, 'avatarAnimTask', 24)\n self.avatar.startUpdateSmartCamera()\n\n self.avatarMoving = 0", "def SetStatus(self, status):\r\n self.status = status", "def set_status(self, status):\n if status == 'qw':\n status = 'Waiting'\n elif status == 'hqw':\n status = 'Held'\n elif status == 'Eqw':\n status = 'Error'\n else:\n sys.exit(20)\n self.status = status\n return", "def _set_boot_device(cls, task, device, persistent=False):\n # NOTE(etingof): always treat CD/floppy as virtual\n if device not in cls.VIRTUAL_MEDIA_DEVICES:\n LOG.debug(\n 'Treating boot device %(device)s as a non-virtual '\n 'media device for node %(node)s',\n {'device': device, 'node': task.node.uuid})\n super(DracRedfishVirtualMediaBoot, cls)._set_boot_device(\n task, device, persistent)\n return\n\n device = cls.VIRTUAL_MEDIA_DEVICES[device]\n\n system = redfish_utils.get_system(task.node)\n\n for manager in system.managers:\n\n # This call makes Sushy go fishing in the ocean of Sushy\n # OEM extensions installed on the system. If it finds one\n # for 'Dell' which implements the 'Manager' resource\n # extension, it uses it to create an object which\n # instantiates itself from the OEM JSON. The object is\n # returned here.\n #\n # If the extension could not be found for one manager, it\n # will not be found for any others until it is installed, so\n # abruptly exit the for loop. 
The vendor and resource name,\n # 'Dell' and 'Manager', respectively, used to search for the\n # extension are invariant in the loop.\n try:\n manager_oem = manager.get_oem_extension('Dell')\n except sushy.exceptions.OEMExtensionNotFoundError as e:\n error_msg = (_(\"Search for Sushy OEM extension Python package \"\n \"'sushy-oem-idrac' failed for node %(node)s. \"\n \"Ensure it is installed. Error: %(error)s\") %\n {'node': task.node.uuid, 'error': e})\n LOG.error(error_msg)\n raise exception.RedfishError(error=error_msg)\n\n try:\n manager_oem.set_virtual_boot_device(\n device, persistent=persistent, manager=manager,\n system=system)\n except sushy.exceptions.SushyError as e:\n LOG.debug(\"Sushy OEM extension Python package \"\n \"'sushy-oem-idrac' failed to set virtual boot \"\n \"device with system %(system)s manager %(manager)s \"\n \"for node %(node)s. Will try next manager, if \"\n \"available. Error: %(error)s\",\n {'system': system.uuid if system.uuid else\n system.identity,\n 'manager': manager.uuid if manager.uuid else\n manager.identity,\n 'node': task.node.uuid,\n 'error': e})\n continue\n\n LOG.info(\"Set node %(node)s boot device to %(device)s via OEM\",\n {'node': task.node.uuid, 'device': device})\n break\n\n else:\n error_msg = (_('iDRAC Redfish set boot device failed for node '\n '%(node)s, because system %(system)s has no '\n 'manager%(no_manager)s.') %\n {'node': task.node.uuid,\n 'system': system.uuid if system.uuid else\n system.identity,\n 'no_manager': '' if not system.managers else\n ' which could'})\n LOG.error(error_msg)\n raise exception.RedfishError(error=error_msg)", "def _set_status(self, value):\n print(self._pv)\n CaChannelWrapper.set_pv_value(self._pv, value)", "def virtual_media(self, virtual_media):\n self._virtual_media = virtual_media" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Notifies iLO of the location of a virtual media diskette image.
def insert_virtual_media(self, url, device='FLOPPY'):
    response, vm_device_uri = self._get_vm_device_status(device)

    # Eject media if there is one. RIBCL was tolerant enough to overwrite
    # existing media, RIS is not. This check is to take care of that
    # assumption.
    if response.get('Inserted', False):
        self.eject_virtual_media(device)

    # Update required property
    vm_settings = {}
    vm_settings['Image'] = url

    # Perform the patch operation
    status, headers, response = self._rest_patch(
        vm_device_uri, None, vm_settings)

    if status >= 300:
        msg = self._get_extended_error(response)
        raise exception.IloError(msg)
[ "def insert_virtual_media(self, url, device='FLOPPY'):\n dic = {\n 'DEVICE': device.upper(),\n 'IMAGE_URL': url,\n }\n data = self._execute_command(\n 'INSERT_VIRTUAL_MEDIA', 'RIB_INFO', 'write', dic)\n return data", "def update_volume_after_attached_to_vm(self, info, vms):\n path = info[0]['path']\n path_list = path.split(sep='/')\n machine_path_list = [\"~\", \"Home\"]\n machine_path_list.extend(path_list[3:])\n info[0]['machine_path'] = \"/\".join(machine_path_list)\n info[0]['AttachedToVm'] = vms\n info[0]['State'] = 'in-use'\n info[0]['time'] = datetime.datetime.now()\n return info", "def __virtual__():\n if \"imgadm.list\" in __salt__:\n return True\n else:\n err_msg = \"Only available on SmartOS compute nodes.\"\n log.error(\"Unable to load %s beacon: %s\", __virtualname__, err_msg)\n return False, err_msg", "def mount_disk_image(vm_object, image_object, device_identifier=None):\n raise NotARealImplementationError", "def test_view_volume(self, volume, volumes_steps):\n volumes_steps.view_volume(volume.name)", "def gen_virtual(self, provider = ''):\n self.create()\n\n image = self.data['image']\n if image:\n self.add_config('vm', self.kve(\"box_url\", image))\n\n if provider:\n self.add_provider(provider, self.kve('memory', self.data['memory']))", "def image_url(self):\n cat = getToolByName(self.portal_state.context, 'portal_catalog')\n path = '/'.join(self.portal_state.context.getPhysicalPath())\n results = cat.searchResults(portal_type='Vignette',\n path={'query': path})\n if results:\n return results[0].getURL()\n else:\n return super(PackageATMetatags, self).image_url", "def plug_vifs(self, instance, network_info):", "def test_02_attach_volume(self):\n\n self.debug(\n \"Attaching volume (ID: %s) to VM (ID: %s)\" % (\n self.volume.id,\n self.virtual_machine.id\n ))\n self.virtual_machine.attach_volume(self.apiClient, self.volume)\n self.attached = True\n list_volume_response = Volume.list(\n self.apiClient,\n id=self.volume.id\n )\n self.assertEqual(\n isinstance(list_volume_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n self.assertNotEqual(\n list_volume_response,\n None,\n \"Check if volume exists in ListVolumes\"\n )\n volume = list_volume_response[0]\n self.assertNotEqual(\n volume.virtualmachineid,\n None,\n \"Check if volume state (attached) is reflected\"\n )\n try:\n # Format the attached volume to a known fs\n format_volume_to_ext3(self.virtual_machine.get_ssh_client())\n\n except Exception as e:\n\n self.fail(\"SSH failed for VM: %s - %s\" %\n (self.virtual_machine.ipaddress, e))\n return", "def update_volume_after_detach(self, info, vms):\n info[0]['AttachedToVm'] = vms\n if len(vms) == 0:\n info[0]['machine_path'] = None\n info[0]['State'] = 'available'\n info[0]['time'] = datetime.datetime.now()\n return info", "def test_v1vmi_addvolume(self):\n pass", "def eject_virtual_media(self, device='FLOPPY'):\n response, vm_device_uri = self._get_vm_device_status(device)\n\n # Check if virtual media is connected.\n if response.get('Inserted') is False:\n return\n\n # Update required property\n vm_settings = {}\n vm_settings['Image'] = None\n\n # perform the patch operation\n status, headers, response = self._rest_patch(\n vm_device_uri, None, vm_settings)\n\n if status >= 300:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)", "def plug(self, vif, instance_info):", "def _attach_volume_vmdk(self, connection_info, instance,\n adapter_type=None):\n vm_ref = vm_util.get_vm_ref(self._session, instance)\n 
LOG.debug(\"_attach_volume_vmdk: %s\", connection_info,\n instance=instance)\n data = connection_info['data']\n volume_ref = self._get_volume_ref(data)\n\n # Get details required for adding disk device such as\n # adapter_type, disk_type\n vmdk = vm_util.get_vmdk_info(self._session, volume_ref)\n adapter_type = adapter_type or vmdk.adapter_type\n\n # IDE does not support disk hotplug\n if adapter_type == constants.ADAPTER_TYPE_IDE:\n state = vm_util.get_vm_state(self._session, instance)\n if state != power_state.SHUTDOWN:\n raise exception.Invalid(_('%s does not support disk '\n 'hotplug.') % adapter_type)\n\n # Attach the disk to virtual machine instance\n self.attach_disk_to_vm(vm_ref, instance, adapter_type, vmdk.disk_type,\n vmdk_path=vmdk.path)\n\n # Store the uuid of the volume_device\n self._update_volume_details(vm_ref, data['volume_id'],\n vmdk.device.backing.uuid)\n\n LOG.debug(\"Attached VMDK: %s\", connection_info, instance=instance)", "def mount_image(self, mount_point=None):\n raise NotImplementedError(\"mount_image method is not implemented\")", "def test_aws_service_api_volume_attachment_put(self):\n pass", "def test_03_download_attached_volume(self):\n\n self.debug(\"Extract attached Volume ID: %s\" % self.volume.id)\n\n self.virtual_machine.attach_volume(self.apiClient, self.volume)\n self.attached = True\n cmd = extractVolume.extractVolumeCmd()\n cmd.id = self.volume.id\n cmd.mode = \"HTTP_DOWNLOAD\"\n cmd.zoneid = self.services[\"zoneid\"]\n # A proper exception should be raised;\n # downloading attach VM is not allowed\n with self.assertRaises(Exception):\n self.apiClient.extractVolume(cmd)", "def pv(self, *args, **kwargs):\n return _image.image_pv(self, *args, **kwargs)", "def insert_virtual_media( context, image, system_id = None, media_id = None, media_types = None, inserted = None, write_protected = None ):\n\n # Set up acceptable media types based on the image URI if not specified\n if media_types is None:\n if image.lower().endswith( \".iso\" ):\n media_types = [ \"CD\", \"DVD\" ]\n elif image.lower().endswith( \".img\" ):\n media_types = [ \"USBStick\" ]\n elif image.lower().endswith( \".bin\" ):\n media_types = [ \"USBStick\" ]\n\n # Get the virtual media collection\n virtual_media_collection = get_virtual_media_collection( context, system_id = system_id )\n\n # Scan the virtual media for an appropriate slot\n match = False\n for member in virtual_media_collection.dict[\"Members\"]:\n media = context.get( member[\"@odata.id\"] )\n if media.dict[\"Image\"] is not None:\n # In use; move on\n continue\n\n # Check for a match\n if media_id is not None:\n if media.dict[\"Id\"] == media_id:\n # Identifier match\n match = True\n else:\n if media_types is None:\n # No preferred media type; automatic match\n match = True\n else:\n # Check if the preferred media type is in the reported list\n for type in media_types:\n if type in media.dict[\"MediaTypes\"]:\n # Acceptable media type found\n match = True\n\n # If a match was found, attempt to insert the media\n if match:\n payload = {\n \"Image\": image\n }\n if inserted:\n payload[\"Inserted\"] = inserted\n if write_protected:\n payload[\"WriteProtected\"] = write_protected\n try:\n # Preference for using the InsertMedia action\n response = context.post( media.dict[\"Actions\"][\"#VirtualMedia.InsertMedia\"][\"target\"], body = payload )\n except:\n # Fallback to PATCH method\n if \"Inserted\" not in payload:\n payload[\"Inserted\"] = True\n headers = None\n etag = media.getheader( \"ETag\" )\n if etag is not None:\n 
headers = { \"If-Match\": etag }\n response = context.patch( media.dict[\"@odata.id\"], body = payload, headers = headers )\n verify_response( response )\n return response\n\n # No matches found\n if media_id is not None:\n reason = \"'{}' not found or is already in use\".format( media_id )\n elif media_types is not None:\n reason = \"No available slots of types {}\".format( \", \".join( media_types ) )\n else:\n reason = \"No available slots\"\n raise RedfishNoAcceptableVirtualMediaError( \"No acceptable virtual media: {}\".format( reason ) )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ejects the Virtual Media image if one is inserted.
def eject_virtual_media(self, device='FLOPPY'):
    response, vm_device_uri = self._get_vm_device_status(device)

    # Check if virtual media is connected.
    if response.get('Inserted') is False:
        return

    # Update required property
    vm_settings = {}
    vm_settings['Image'] = None

    # perform the patch operation
    status, headers, response = self._rest_patch(
        vm_device_uri, None, vm_settings)

    if status >= 300:
        msg = self._get_extended_error(response)
        raise exception.IloError(msg)
[ "def _handle_removed_media(self):\r\n if self.has_media():\r\n try:\r\n image = str(self.image)\r\n os.remove(image)\r\n except OSError:\r\n raise('Failure trying to remove image from filesystem.')\r\n return True", "def eject_image(self, identity, device):\n device_info = self._get_device(identity, device)\n\n device_info['Image'] = ''\n device_info['ImageName'] = ''\n device_info['Inserted'] = False\n device_info['WriteProtected'] = False\n device_info['UserName'] = ''\n device_info['Password'] = ''\n\n self._devices.update({(identity, device): device_info})\n\n local_file = device_info.pop('_local_file', None)\n if local_file:\n try:\n os.unlink(local_file)\n\n self._logger.debug(\n 'Removed local file %(file)s for %(identity)s' % {\n 'identity': identity, 'file': local_file})\n except FileNotFoundError:\n # Ignore error as we are trying to remove the file anyway\n pass", "def eject_virtual_media( context, media_id, system_id = None ):\n\n # Get the virtual media collection\n virtual_media_collection = get_virtual_media_collection( context, system_id = system_id )\n\n # Scan the virtual media for the selected slot\n for member in virtual_media_collection.dict[\"Members\"]:\n media = context.get( member[\"@odata.id\"] )\n if media.dict[\"Id\"] == media_id:\n # Found the selected slot; eject it\n try:\n # Preference for using the EjectMedia action\n response = context.post( media.dict[\"Actions\"][\"#VirtualMedia.EjectMedia\"][\"target\"], body = {} )\n except:\n # Fallback to PATCH method\n payload = {\n \"Image\": None,\n \"Inserted\": False\n }\n headers = None\n etag = media.getheader( \"ETag\" )\n if etag is not None:\n headers = { \"If-Match\": etag }\n response = context.patch( media.dict[\"@odata.id\"], body = payload, headers = headers )\n verify_response( response )\n return response\n\n # No matches found\n raise RedfishNoAcceptableVirtualMediaError( \"No acceptable virtual media: '{}' not found\".format( media_id ) )", "def eject_vmedia(self, device):\n device_name = VALID_VMEDIA_DEVICES.get(device)\n if not device_name:\n raise exception.InvalidInputError(\n \"Invalid device. Valid devices: cd0 or cd1 or hd0 or hd1.\")\n vmedia_partition_id = self.get_vmedia_device_uri(device_name)\n try:\n virtual_media_object = virtual_media.VirtualMedia(\n self._sushy._conn, vmedia_partition_id)\n virtual_media_object.eject_media()\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish System \"%(partition_id)s\" was '\n 'not found. 
Error %(error)s') %\n {'partition_id': vmedia_partition_id, 'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)", "def on_delete_pre(sender, instance, **kwargs):\n if instance.big_image and os.path.isfile(instance.big_image.path):\n os.remove(instance.big_image.path)\n if instance.min_image and os.path.isfile(instance.min_image.path):\n os.remove(instance.min_image.path)", "def test_eject_virtual_media_no_media(\n self, request_ilo_mock, get_vm_status_mock):\n get_vm_status_mock.return_value = {'IMAGE_INSERTED': 'NO'}\n self.ilo.eject_virtual_media(device='FLOPPY')\n get_vm_status_mock.assert_called_once_with(device='FLOPPY')\n self.assertFalse(request_ilo_mock.called)", "def delete_product_image(self):\n self.product.image.delete()", "def delete(self):\n self._cc._delete(\"/1/media/%s\" % self.media_id)\n self.__dict__.clear()", "def clearImage(self):\n if self.hasImage():\n self.scene.removeItem(self._image)\n self._image = None", "def remove_image(self):\n self.client.remove_container(self.image_tag)", "def delete_image(self, image):", "def click_remove_file(self):\n if self.attached_file is not None: # if an image was already loaded\n self.canvas_solution_sim.delete(self.attached_file.image)\n self.attached_file = None", "def destroy(self):\n\n if not self.imgdir or not os.path.exists(self.imgdir):\n return\n\n if os.path.abspath(self.imgdir) == \"/\":\n # Paranoia.\n return\n\n try:\n shutil.rmtree(self.imgdir)\n except EnvironmentError as e:\n raise apx._convert_error(e)", "def delete_screenshots(self, model):\n try:\n os.remove(\"uploads/{}.png\".format(model.screenshot))\n os.remove(\"uploads/small_{}.png\".format(model.screenshot))\n except:\n pass\n\n self.delete_from_s3(\"{}.png\".format(model.screenshot))\n self.delete_from_s3(\"small_{}.png\".format(model.screenshot))", "def removeTexture():\n\ttry:\n\t\tdel logic.skyTexture\n\t\tdel logic.skyTextureAnimated\n\texcept:\n\t\tpass", "def export_destroyMachine(self, name):\n\n new_image_path = running_images_dir+\"/\"+name+\".disk\"\n\n if not self.destroyDomain(name):\n return False\n\n try:\n os.remove(new_image_path)\n except:\n errMsg = name+\" - Unable to remove disk image \\\"\"\n errMsg += new_image_path+\"\\\"\"\n logging.error(errMsg)\n return False\n\n return True", "def release(self):\n if self._video is not None:\n del self._video", "def tearDown(self):\n self.image.delete()", "def clearImage(self):\n if self.hasImage():\n self.scene.removeItem(self._pixmapHandle)\n self._pixmapHandle = None\n self.zoom=-1\n self.scene.clear()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get details of persistent boot devices and their order.
def _get_persistent_boot_devices(self):
    # Check if the BIOS resource if exists.
    headers_bios, bios_uri, bios_settings = self._check_bios_resource()

    # Get the Boot resource.
    boot_settings = self._get_bios_boot_resource(bios_settings)

    # Get the BootSources resource
    try:
        boot_sources = boot_settings['BootSources']
    except KeyError:
        msg = ("BootSources resource not found.")
        raise exception.IloError(msg)

    try:
        boot_order = boot_settings['PersistentBootConfigOrder']
    except KeyError:
        msg = ("PersistentBootConfigOrder resource not found.")
        raise exception.IloCommandNotSupportedError(msg)

    return boot_sources, boot_order
[ "def list_devices():\r\n return sd.query_devices()", "def getbootinfo(self):\n self.mount()\n kernel = None\n inits = []\n for line in self.xlist(\"get-bootinfo\", IBASE)[1]:\n if line.startswith('+++'):\n kernel = line.split()[1]\n else:\n inits.append(line)\n self.unmount()\n if not inits:\n run_error(_(\"No initramfs found\"))\n return None\n if not kernel:\n run_error(_(\"GRUB problem:\\n\") + inits[0])\n return None\n return (kernel, inits)", "def print_devices():\n print(device_lib.list_local_devices())", "def get_device_info(handle, timeout):\n device_info = dict()\n device_info['ls'] = ceph_mon_command(handle, 'device ls', timeout)\n\n return device_info", "def load_devices():", "def get_registered_devices():\n return registeredDevices[\"devices\"]", "def test_get_bios_boot_mode_list(self):\n pass", "def GetDeviceSerials(cls):\n cls._CheckAdb()\n adb_cmd = [cls._adb_command, _ADB_DEVICE]\n device_info = utils.CheckOutput(adb_cmd)\n serials = []\n # Skip the first line which is \"List of devices attached\". Each of the\n # following lines consists of the serial number, a tab character, and\n # the state. The last line is empty.\n for line in device_info.splitlines()[1:]:\n serial_state = line.split()\n if len(serial_state) > 1:\n serials.append(serial_state[0])\n return serials", "def get_system_boot_order(self):\n result = {}\n try:\n system_url = self._find_system_resource()\n bmc_type = 'TSM' if 'Self' in system_url else 'XCC'\n \n if bmc_type == 'XCC':\n result = self._get_url(system_url)\n if result['ret'] == False: \n return result\n # for current products\n oem = result['entries']['Oem']\n if 'Lenovo' in oem and 'BootSettings' in oem['Lenovo']:\n boot_settings_url = oem['Lenovo']['BootSettings']['@odata.id']\n result = self._get_collection(boot_settings_url)\n if result['ret'] == False:\n return result\n\n boot_order_member = result['entries'][0]\n data_filtered = propertyFilter(boot_order_member)\n result = {'ret': True, 'entries': data_filtered}\n return result\n\n # for next generation, TBU.\n if 'BootOrder' in result['entries']['Boot']:\n pass\n \n if bmc_type == 'TSM':\n result = self._get_url(system_url + '/Bios')\n if result['ret'] == False:\n return result\n\n attributes = result['entries']['Attributes']\n attribute_name = ''\n attribute_value = ''\n if 'Q00999_Boot_Option_Priorities' in attributes:\n attribute_name = 'Q00999_Boot_Option_Priorities'\n attribute_value = attributes[attribute_name]\n elif 'Q00999 Boot Option Priorities' in attributes:\n attribute_name = 'Q00999 Boot Option Priorities'\n attribute_value = attributes[attribute_name]\n else:\n rsult = {'ret': False, 'msg': \"Failed to find boot options in Bios attributes.\"}\n return result\n\n # Get BootOrderNext\n attribute_value_next = None\n bios_settings_url = result['entries']['@Redfish.Settings']['SettingsObject']['@odata.id']\n result = self._get_url(bios_settings_url)\n if result['ret'] == False:\n return result\n if 'Attributes' in result['entries'] and attribute_name in result['entries']['Attributes']:\n attribute_value_next = result['entries']['Attributes'][attribute_name]\n\n # Parse attribute value string to get currnt/supported/next boot order settings\n boot_order_current = list()\n boot_order_supported = list()\n for boot_order_item in attribute_value.split(';'):\n boot_order_name = boot_order_item.split(',')[0]\n boot_order_supported.append(boot_order_name)\n if 'true' in boot_order_item:\n boot_order_current.append(boot_order_name)\n if attribute_value_next is None:\n boot_order_next = 
boot_order_current\n else:\n boot_order_next = list()\n for boot_order_item in attribute_value_next.split(';'):\n boot_order_name = boot_order_item.split(',')[0]\n if 'true' in boot_order_item:\n boot_order_next.append(boot_order_name)\n\n # Set result\n boot_order_info = {}\n boot_order_info['BootOrderNext'] = boot_order_next\n boot_order_info['BootOrderSupported'] = boot_order_supported\n boot_order_info['BootOrderCurrent'] = boot_order_current\n result = {'ret': True, 'entries': boot_order_info}\n return result\n except Exception as e:\n LOGGER.debug(\"%s\" % traceback.format_exc())\n msg = \"Failed to get system boot order. Error message: %s\" % repr(e)\n LOGGER.error(msg)\n return {'ret': False, 'msg': msg}", "def GetDeviceSerials(self):\n return self._device_serial_index.keys()", "def get_partitions():\n partitions_list = []\n with open(\"/proc/partitions\") as partitions:\n lines = partitions.readlines()\n for line in lines:\n if \"major\" not in line:\n info = line.split()\n if len(info) > 0:\n if len(info[3]) > len(\"sdX\") and \"loop\" not in info[3]:\n partitions_list.append(\"/dev/\" + info[3])\n return partitions_list", "def get_devices():\n devices = []\n for device_id in range(pm.lib.Pm_CountDevices()):\n devices.append(DeviceInfo(device_id))\n\n return devices", "def get_devices(adb=DEFAULT_ADB):\n # Check that adb is running\n Device.__start_adb(adb)\n # Split by newline and remove first line (\"List of devices attached\")\n # TODO: surround with try/except?\n devices = subprocess.check_output(\n [adb, \"devices\", \"-l\"]).decode().split('\\n')[1:]\n tmp = {}\n for dev in devices:\n if dev:\n tmp[dev.split()[0]] = dev\n return tmp", "def device_info(self) -> dict[str, any]:\n return system_info(self.hacs)", "def get_persistent_boot_device(self):\n system = self._get_host_details()\n try:\n # Return boot device if it is persistent.\n if system['Boot']['BootSourceOverrideEnabled'] == 'Continuous':\n device = system['Boot']['BootSourceOverrideTarget']\n if device in DEVICE_RIS_TO_COMMON:\n return DEVICE_RIS_TO_COMMON[device]\n return device\n except KeyError as e:\n msg = \"get_persistent_boot_device failed with the KeyError:%s\"\n raise exception.IloError((msg) % e)\n\n # Check if we are in BIOS boot mode.\n # There is no resource to fetch boot device order for BIOS boot mode\n if not self._is_boot_mode_uefi():\n return None\n\n # Get persistent boot device order for UEFI\n boot_sources, boot_devices = self._get_persistent_boot_devices()\n\n boot_string = \"\"\n try:\n for source in boot_sources:\n if (source[\"StructuredBootString\"] == boot_devices[0]):\n boot_string = source[\"BootString\"]\n break\n except KeyError as e:\n msg = \"get_persistent_boot_device failed with the KeyError:%s\"\n raise exception.IloError((msg) % e)\n\n if 'HP iLO Virtual USB CD' in boot_string:\n return 'CDROM'\n\n elif ('NIC' in boot_string or\n 'PXE' in boot_string or\n \"iSCSI\" in boot_string):\n return 'NETWORK'\n\n elif common.isDisk(boot_string):\n return 'HDD'\n\n else:\n return None", "def devices_dict(self):\n return self.devices.dict", "def devices(self):\n return self._recordings.keys()", "def enumerateDevices(self):\n results = []\n \n devices = usb.core.find(find_all=True, idVendor = 0x2AB9, idProduct = 0x0001)\n for device in devices:\n results.append(str(device.serial_number))\n return results", "def devices(self):\n result = self.tk.call('snack::mixer', 'devices')\n return self.tk.splitlist(result)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get current persistent boot device set for the host
def get_persistent_boot_device(self): system = self._get_host_details() try: # Return boot device if it is persistent. if system['Boot']['BootSourceOverrideEnabled'] == 'Continuous': device = system['Boot']['BootSourceOverrideTarget'] if device in DEVICE_RIS_TO_COMMON: return DEVICE_RIS_TO_COMMON[device] return device except KeyError as e: msg = "get_persistent_boot_device failed with the KeyError:%s" raise exception.IloError((msg) % e) # Check if we are in BIOS boot mode. # There is no resource to fetch boot device order for BIOS boot mode if not self._is_boot_mode_uefi(): return None # Get persistent boot device order for UEFI boot_sources, boot_devices = self._get_persistent_boot_devices() boot_string = "" try: for source in boot_sources: if (source["StructuredBootString"] == boot_devices[0]): boot_string = source["BootString"] break except KeyError as e: msg = "get_persistent_boot_device failed with the KeyError:%s" raise exception.IloError((msg) % e) if 'HP iLO Virtual USB CD' in boot_string: return 'CDROM' elif ('NIC' in boot_string or 'PXE' in boot_string or "iSCSI" in boot_string): return 'NETWORK' elif common.isDisk(boot_string): return 'HDD' else: return None
[ "def _get_persistent_boot_devices(self):\n # Check if the BIOS resource if exists.\n headers_bios, bios_uri, bios_settings = self._check_bios_resource()\n\n # Get the Boot resource.\n boot_settings = self._get_bios_boot_resource(bios_settings)\n\n # Get the BootSources resource\n try:\n boot_sources = boot_settings['BootSources']\n except KeyError:\n msg = (\"BootSources resource not found.\")\n raise exception.IloError(msg)\n\n try:\n boot_order = boot_settings['PersistentBootConfigOrder']\n except KeyError:\n msg = (\"PersistentBootConfigOrder resource not found.\")\n raise exception.IloCommandNotSupportedError(msg)\n\n return boot_sources, boot_order", "def get_boot_device(self):\n root_vol = None\n boot_vol = None\n for volume in self.volumes:\n if not volume.partitions:\n continue\n for partition in volume.partitions:\n if partition.mount_point == \"/\":\n root_vol = volume\n elif partition.mount_point == '/boot':\n boot_vol = volume\n\n if not boot_vol:\n return root_vol\n return boot_vol", "def get_bootvar(self):\n module = 'bootimage/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n bootdefault = response.json()['bootimage']['oper']['hd-default']\n print(self.device + ' The device is set to boot from: ' + bootdefault + ' in the future')\n return bootdefault", "def get_root_device():\r\n return utils.system_output('rootdev -s -d')", "def get_boot_device(self):\n operation = 'get_boot_device'\n try:\n boot_device = self.sp_manager.get_boot_device()\n return boot_device\n except UcsException as ex:\n print(_(\"Cisco client exception: %(msg)s.\"), {'msg': ex})\n raise exception.UcsOperationError(operation=operation, error=ex)", "def get_one_time_boot(self):\n system = self._get_host_details()\n try:\n if system['Boot']['BootSourceOverrideEnabled'] == 'Once':\n device = system['Boot']['BootSourceOverrideTarget']\n if device in DEVICE_RIS_TO_COMMON:\n return DEVICE_RIS_TO_COMMON[device]\n return device\n else:\n # value returned by RIBCL if one-time boot setting are absent\n return 'Normal'\n\n except KeyError as e:\n msg = \"get_one_time_boot failed with the KeyError:%s\"\n raise exception.IloError((msg) % e)", "def setPlatformBootPartition(self):\n return [PartSpec(mountpoint=\"/boot\", size=Size(\"1GiB\"),\n weight=self.weight(mountpoint=\"/boot\"), lv=False)]", "def setPlatformBootPartition(self):\n return [PartSpec(mountpoint=\"/boot\", size=Size(\"1GiB\"),\n weight=self.weight(mountpoint=\"/boot\"))]", "def getDevice(self):\n return self.myRamdiskDev", "def GetRootDev(cls, device):\n rootdev = device.RunCommand(\n ['rootdev', '-s'], capture_output=True).output.strip()\n logging.debug('Current root device is %s', rootdev)\n return rootdev", "def _get_boot_mount(boot_path):\n cmd = \"findmnt -no source -T \" + boot_path\n return __salt__[\"cmd.run\"](cmd, python_shell=True, output_loglevel=\"quiet\")", "def boot_configuration(self):\n bootconfs = self.get_logical_configuration(gdef.BOOT_LOG_CONF)\n if not bootconfs:\n return bootconfs\n assert len(bootconfs) == 1 # Only one boot configuration can exist for each device instance.\n return bootconfs[0]", "def get_registered_devices():\n return registeredDevices[\"devices\"]", "def _set_boot_device(cls, task, device, persistent=False):\n # NOTE(etingof): always treat CD/floppy as virtual\n if device not in cls.VIRTUAL_MEDIA_DEVICES:\n LOG.debug(\n 'Treating boot device %(device)s as a non-virtual '\n 'media device for node %(node)s',\n {'device': device, 'node': task.node.uuid})\n super(DracRedfishVirtualMediaBoot, 
cls)._set_boot_device(\n task, device, persistent)\n return\n\n device = cls.VIRTUAL_MEDIA_DEVICES[device]\n\n system = redfish_utils.get_system(task.node)\n\n for manager in system.managers:\n\n # This call makes Sushy go fishing in the ocean of Sushy\n # OEM extensions installed on the system. If it finds one\n # for 'Dell' which implements the 'Manager' resource\n # extension, it uses it to create an object which\n # instantiates itself from the OEM JSON. The object is\n # returned here.\n #\n # If the extension could not be found for one manager, it\n # will not be found for any others until it is installed, so\n # abruptly exit the for loop. The vendor and resource name,\n # 'Dell' and 'Manager', respectively, used to search for the\n # extension are invariant in the loop.\n try:\n manager_oem = manager.get_oem_extension('Dell')\n except sushy.exceptions.OEMExtensionNotFoundError as e:\n error_msg = (_(\"Search for Sushy OEM extension Python package \"\n \"'sushy-oem-idrac' failed for node %(node)s. \"\n \"Ensure it is installed. Error: %(error)s\") %\n {'node': task.node.uuid, 'error': e})\n LOG.error(error_msg)\n raise exception.RedfishError(error=error_msg)\n\n try:\n manager_oem.set_virtual_boot_device(\n device, persistent=persistent, manager=manager,\n system=system)\n except sushy.exceptions.SushyError as e:\n LOG.debug(\"Sushy OEM extension Python package \"\n \"'sushy-oem-idrac' failed to set virtual boot \"\n \"device with system %(system)s manager %(manager)s \"\n \"for node %(node)s. Will try next manager, if \"\n \"available. Error: %(error)s\",\n {'system': system.uuid if system.uuid else\n system.identity,\n 'manager': manager.uuid if manager.uuid else\n manager.identity,\n 'node': task.node.uuid,\n 'error': e})\n continue\n\n LOG.info(\"Set node %(node)s boot device to %(device)s via OEM\",\n {'node': task.node.uuid, 'device': device})\n break\n\n else:\n error_msg = (_('iDRAC Redfish set boot device failed for node '\n '%(node)s, because system %(system)s has no '\n 'manager%(no_manager)s.') %\n {'node': task.node.uuid,\n 'system': system.uuid if system.uuid else\n system.identity,\n 'no_manager': '' if not system.managers else\n ' which could'})\n LOG.error(error_msg)\n raise exception.RedfishError(error=error_msg)", "def getbootinfo(self):\n self.mount()\n kernel = None\n inits = []\n for line in self.xlist(\"get-bootinfo\", IBASE)[1]:\n if line.startswith('+++'):\n kernel = line.split()[1]\n else:\n inits.append(line)\n self.unmount()\n if not inits:\n run_error(_(\"No initramfs found\"))\n return None\n if not kernel:\n run_error(_(\"GRUB problem:\\n\") + inits[0])\n return None\n return (kernel, inits)", "def get_one_time_boot(self):\n data = self._execute_command(\n 'GET_ONE_TIME_BOOT', 'SERVER_INFO', 'read')\n return data['ONE_TIME_BOOT']['BOOT_TYPE']['VALUE']", "def get_device(l):\n if not l.device:\n l.device = find_device()\n setup_device(l.device)\n return l.device", "def list_devices():\r\n return sd.query_devices()", "def get_default_device(self):\n if not ADB().devices(state=\"device\"):\n raise IndexError(\"ADB devices not found\")\n return ADB().devices(state=\"device\")[0][0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves the current setting for the one time boot.
def get_one_time_boot(self): system = self._get_host_details() try: if system['Boot']['BootSourceOverrideEnabled'] == 'Once': device = system['Boot']['BootSourceOverrideTarget'] if device in DEVICE_RIS_TO_COMMON: return DEVICE_RIS_TO_COMMON[device] return device else: # value returned by RIBCL if one-time boot setting are absent return 'Normal' except KeyError as e: msg = "get_one_time_boot failed with the KeyError:%s" raise exception.IloError((msg) % e)
[ "def get_bootvar(self):\n module = 'bootimage/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n bootdefault = response.json()['bootimage']['oper']['hd-default']\n print(self.device + ' The device is set to boot from: ' + bootdefault + ' in the future')\n return bootdefault", "def myCurrentSetting(self):\n paramDict = self.getCurrentSetting()\n return paramDict", "def get_one_time_boot(self):\n data = self._execute_command(\n 'GET_ONE_TIME_BOOT', 'SERVER_INFO', 'read')\n return data['ONE_TIME_BOOT']['BOOT_TYPE']['VALUE']", "def bootstrap_setting(value):\n return get_bootstrap_setting(value)", "def find_setting(self):\n return self._sqt.boolSetting(AM.AsterModule.name, self.key)", "def find_setting(self):\n return str(self._sqt.stringSetting(AM.AsterModule.name, self.key))", "def get_setting(self, name):\n\n cursor = self.db.cursor()\n cursor.execute(\"SELECT value FROM settings WHERE name='\" + name + \"';\")\n results = cursor.fetchone()\n cursor.close()\n return results[0]", "def get_setting(self, key, default=NOT_SET):\n if key in self.settings:\n return self.settings[key]\n app_key = 'tangled.app.' + key\n if app_key in self.settings:\n return self.settings[app_key]\n if default is NOT_SET:\n raise KeyError(\"'{}' not present in settings\".format(key))\n return default", "def current_settings(self):\n return {\n 'power_state': self.power_state,\n 'brightness': self.brightness,\n }", "def get_setting(self, setting):\n return self.do_rpc(\"get_setting\", key=key)", "def get(self):\n self.value = os.getenv(self.name, self.default)\n return self.value", "def getGlobalSetting(self, setting):\n self._cacheConfig()\n settingVal = None\n try:\n settingVal = self._fileCache[setting]\n except KeyError:\n # if no global setting exists, try finding the value as a daily setting\n # (if all days are the same it'll be a global, but otherwise we'll just give today's setting)\n settingVal = self.getDailySetting(getDayFromNum(datetime.datetime.today().weekday()), setting)\n\n return settingVal", "def get_current_boot_mode(self):\n data = self._execute_command(\n 'GET_CURRENT_BOOT_MODE', 'SERVER_INFO', 'read')\n return data['GET_CURRENT_BOOT_MODE']['BOOT_MODE']['VALUE']", "def getSystemAwake(self):\n print 'start of getSystemAwak() system_awake = {0}'.format(self.system_awake) # TESTING ++++++++++++++++\n try:\n self.db = shelve.open(os.path.join(self.xlocal, 'Launch Manager Utils\\\\launch.data'))\n if self.db['system_awake'] == False:\n print 'start of if true - getSystemAwak() system_awake = {0}'.format(self.system_awake) # TESTING ++++++++++++++++\n self.system_awake = self.db['system_awake']\n self.db.close()\n else:\n self.system_awake = True\n self.db['system_awake'] = self.system_awake\n self.db.close()\n \n print 'End of getSystemAwak() system_awake = {0}'.format(self.system_awake) # TESTING ++++++++++++++++\n \n except Exception, e:\n self.log_file.logEntry('{0}\\nUnable to load previous system_awake value, setting value to True'.format(e))\n self.system_awake = True", "def get_settings(self):\n return self.settings", "def get_config():\n return CONFIG", "def get_settings(self):\n return self.__settings", "def get_setting(setting):\n import settings\n return getattr(settings, setting)", "def settings(self):\r\n return self.application.settings" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the firmware update service uri.
def _get_firmware_update_service_resource(self): manager, uri = self._get_ilo_details() try: fw_uri = manager['Oem']['Hp']['links']['UpdateService']['href'] except KeyError: msg = ("Firmware Update Service resource not found.") raise exception.IloCommandNotSupportedError(msg) return fw_uri
[ "def _get_uri(plex_server):\n return plex_server.url(\n \"/:/websockets/notifications\", includeToken=True\n ).replace(\"http\", \"ws\")", "def get_http_boot_uri(self):\n try:\n sushy_system = self._get_sushy_system()\n http_boot_uri = sushy_system.http_boot_uri.httpbooturi\n except sushy.exceptions.SushyError as e:\n msg = (self._('Not able to find HTTP Boot URI. Error: '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n return http_boot_uri", "def update_url(self):\r\n return self._update_url", "def host_service_uri(self) -> str:\n return pulumi.get(self, \"host_service_uri\")", "def get_service_url(opts, config):\n\n protocol = path = ''\n server = conf.get_value(opts, config, 'server')\n if not server.startswith('http'):\n protocol = 'https://'\n if server.split('/')[-1] != 'sdk':\n path = '/sdk'\n return ''.join([protocol, server, path])", "def EndpointURI(self):\n return '/'.join(str(x) for x in [self.base_endpoint,self.match,self.resource] if x)", "def uri(self) -> str:\n host = (self._host if self._host not in ('', '0.0.0.0')\n else socket.gethostname())\n port = self.socket.getsockname()[1]\n return f'http://{host}:{port}'", "def service_path(self):\n return self._service_path", "def service_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_url\")", "def get_update_url_rule(self):\n\n return getattr(self, 'update_url_rule', '.update')", "def get_overpass_uri() -> str:\n Config.__get()\n assert Config.__config is not None\n return Config.__config.get(\"wsgi\", \"overpass_uri\", fallback=\"https://overpass-api.de\").strip()", "def endpoint_uri(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"endpoint_uri\")", "def uri_for_service(self, region, service_id, base_uri):\n return str(URLPath.fromString(base_uri)\n .child(\"service\").child(region).child(service_id).child(\"\"))", "def _get_webservice_url(self, ws_key):\n if self._webservices.get(ws_key) is None:\n raise PyiCloudServiceNotActivatedException(\n \"Webservice not available\", ws_key\n )\n return self._webservices[ws_key][\"url\"]", "def get_latest_version_link(self):\n return self.get_latest_version().dbgap_link", "def get_wsdl_url(self):\n return self.mycam.devicemgmt.GetWsdlUrl()", "def uri(self):\n return self._connection_data[\"uri\"]", "def endpoint_url(self, endpoint):\n return urljoin(current_app.lastuser_config['lastuser_server'], endpoint)", "def application_service_path(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_service_path\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the given firmware on the server for the given component.
def update_firmware(self, file_url, component_type): fw_update_uri = self._get_firmware_update_service_resource() action_data = { 'Action': 'InstallFromURI', 'FirmwareURI': file_url, } # perform the POST LOG.debug(self._('Flashing firmware file: %s ...'), file_url) status, headers, response = self._rest_post( fw_update_uri, None, action_data) if status != 200: msg = self._get_extended_error(response) raise exception.IloError(msg) # wait till the firmware update completes. common.wait_for_ris_firmware_update_to_complete(self) try: state, percent = self.get_firmware_update_progress() except exception.IloError: msg = 'Status of firmware update not known' LOG.debug(self._(msg)) # noqa return if state == "ERROR": msg = 'Unable to update firmware' LOG.debug(self._(msg)) # noqa raise exception.IloError(msg) elif state == "UNKNOWN": msg = 'Status of firmware update not known' LOG.debug(self._(msg)) # noqa else: # "COMPLETED" | "IDLE" LOG.info(self._('Flashing firmware file: %s ... done'), file_url)
[ "def update_firmware(self):\n self.execute_command(CMD_UPDATE_FIRMWARE)", "def update_firmware(self) -> str:", "def update_firmware(self, node, port):\n return hpsum_controller.update_firmware(node)", "def updateFirmware(self, device=None, version=\"latest\"):\n if device in ('sender', 'model'):\n result = {}\n update = Update(firmware_path=firmware_dir, firmware_pattern=firmware_pattern, device_function=device, requested_version=version)\n update_allowed, message_list = update.check_go()\n #print(update_allowed, message_list)\n if update_allowed:\n #print(\"geen update met bestand\", update.filename)\n #return json.dumps({\"nu\": \"niets\"}).encode('utf-8', 'replace')\n return update.send_file()\n else:\n cherrypy.response.headers[\"Content-Type\"] = \"application/json\"\n cherrypy.response.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n cherrypy.response.headers[\"Access-Control-Allow-Methods\"] = \"POST\"\n cherrypy.response.headers[\"Cache-Control\"] = \"no-cache\"\n cherrypy.response.headers[\"Connection\"] = \"keep-alive\"\n cherrypy.response.headers[\"Pragma\"] = \"no-cache\"\n result[\"Message\"] = message_list\n return json.dumps(result).encode('utf-8', 'replace')\n else:\n return json.dumps({\"Error\": \"Device unknown, should be sender or model\"}).encode('utf-8', 'replace')", "def send_firmware(self, firmware):\n pass", "def fusion_api_edit_server_hardware_mp_firmware_version(self, body, uri, api=None, headers=None):\n return self.sh.update(body, uri, api, headers, param='/mpFirmwareVersion')", "def fusion_api_update_sas_li_firmware(self, body=None, uri=None, api=None, headers=None):\n param = \"/firmware\" # put method expecting a param\n return self.sasli.put(body=body, uri=uri, param=param, api=api, headers=headers)", "async def update_firmware(module, firmware_file_path, loop):\n # TODO: Make sure the module isn't in the middle of operation\n\n ports_before_update = await _discover_ports()\n config_file_path = os.path.join(package_root,\n 'config', 'modules', 'avrdude.conf')\n proc = await asyncio.create_subprocess_exec(\n 'avrdude', '-C{}'.format(config_file_path), '-v',\n '-p{}'.format(PART_NO),\n '-c{}'.format(PROGRAMMER_ID),\n '-P{}'.format(module.port),\n '-b{}'.format(BAUDRATE), '-D',\n '-Uflash:w:{}:i'.format(firmware_file_path),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE, loop=loop)\n await proc.wait()\n\n _result = await proc.communicate()\n result = _result[1].decode()\n log.debug(result)\n log.debug(\"Switching back to non-bootloader port\")\n module._port = _port_on_mode_switch(ports_before_update)\n\n return _format_avrdude_response(result)", "def do_external_firmware_upgrade(self, component=None):\n Logger.info(\"Start firmware upgrade test!\")\n components_to_upgrade = []\n upgraded_components = []\n json_data = {}\n\n # Set up json data for individual test\n if component is not None:\n json_data[component] = self.json.get(component, \"\")\n if json_data[component] == \"\":\n self.skipTest('Component \"{}\" does not exist'.format(component))\n self.json = json_data\n\n if G_VERBOSE:\n print(\"Start firmware upgrade test!\")\n\n # Connect to UUT\n self.connect_to_remote_host(logging=G_VERBOSE)\n\n # Test connection\n if not self.is_connection_ready():\n if not self.reconnect_to_remote_host(\n self.bmc_reconnect_timeout, logging=G_VERBOSE\n ):\n self.fail(\"Cannot establish the connection to UUT!\")\n\n self.extract_subentity()\n\n # Check version and prepare upgrade list\n if not self.checking_components_version(\n 
components_to_upgrade, logging=G_VERBOSE\n ):\n # No need upgrading for all components\n # Summary the test with verbose flag\n self.summary_test(components_to_upgrade, verbose=True, logging=G_VERBOSE)\n return\n\n # Verify binaries checksom on UUT\n self.verify_binary_checksum_on_remote_target(logging=G_VERBOSE)\n\n # Main upgrading process\n self.upgrade_components(components_to_upgrade, logging=G_VERBOSE)\n\n # Send power cycle command to UUT and clear old ssh session\n self.power_cycle_UUT_and_close_session(logging=G_VERBOSE)\n\n # Wait and try to reconnect once the UUT is come back\n if not self.reconnect_to_remote_host(\n self.bmc_reconnect_timeout, logging=G_VERBOSE\n ):\n # Print test result and fail the test.\n self.summary_test(components_to_upgrade, verbose=True, logging=True)\n self.fail(\n \"Cannot get the running firmware version, UUT is no longer accesible\"\n )\n\n # Get the current version of components on UUT\n self.checking_components_version(upgraded_components, logging=G_VERBOSE)\n\n # Summary test result for only collective test\n self.summary_test(\n components_to_upgrade, upgraded_components, G_VERBOSE, logging=G_VERBOSE\n )", "def install_component_firmware(self, component_name, image_path):\n raise NotImplementedError", "def update_firmware(self):\n return self._dll.JLINKARM_UpdateFirmwareIfNewer()", "def fusion_api_edit_server_hardware(self, body, uri, api=None, headers=None):\n return self.sh.update(body, uri, api, headers)", "def test_update_hyperflex_server_firmware_version(self):\n pass", "def upgrade_example_device_model1234_firmware(self, node, ports):\n # Any commands needed to perform the firmware upgrade should go here.\n # If you plan on actually flashing firmware every cleaning cycle, you\n # should ensure your device will not experience flash exhaustion. A\n # good practice in some environments would be to check the firmware\n # version against a constant in the code, and noop the method if an\n # upgrade is not needed.\n\n def _is_latest_firmware():\n \"\"\"Detect if device is running latest firmware.\"\"\"\n # Actually detect the firmware version instead of returning here.\n node.get('created_at')\n return True\n\n def _upgrade_firmware():\n \"\"\"Upgrade firmware on device.\"\"\"\n # Actually perform firmware upgrade instead of returning here.\n return True\n\n if _is_latest_firmware():\n LOG.debug('Latest firmware already flashed, skipping')\n # Return values are ignored here on success\n return True\n else:\n LOG.debug('Firmware version X found, upgrading to Y')\n # Perform firmware upgrade.\n try:\n _upgrade_firmware()\n except Exception as e:\n # Log and pass through the exception so cleaning will fail\n LOG.exception(e)\n raise\n return True", "def performFirmwareUpdate(self, deviceIndex) -> None:\r\n fn = self.function_table.performFirmwareUpdate\r\n error = fn(deviceIndex)\r\n openvr.error_code.FirmwareError.check_error_value(error)", "def update_fw(board, board_name, fw_path, test_log):\n try:\n from tests import pyboard # pylint: disable=import-outside-toplevel\n\n with board:\n if not board.bootloader:\n test_log.write(\" - Resetting into bootloader mode...\")\n board.reset_to_bootloader(repl=True)\n time.sleep(10)\n\n boot_board = pyboard.CPboard.from_build_name_bootloader(board_name)\n with boot_board:\n test_log.write(\n \" - In bootloader mode. 
Current bootloader: \"\n f\"{boot_board.firmware.info['header']}\"\n )\n test_log.write(\" - Uploading firmware...\")\n\n boot_board.firmware.upload(fw_path)\n\n time.sleep(10)\n\n with board:\n pass\n test_log.write(\"Firmware upload successful!\")\n\n except BaseException as brd_err:\n err_msg = [\n \"Updating firmware failed:\",\n f\" - {brd_err.args}\",\n ]\n raise RuntimeError(\"\\n\".join(err_msg)) from None", "def upgrade(\n self, firmware: Union[bytes, str], status_tracker=_default_progress_tracker\n ) -> None:\n\n def _update_tracker(\n status_tracker,\n preamble_count,\n indexf,\n indexl,\n firmware_len,\n unknown_count,\n done,\n upload,\n ):\n if status_tracker is not None:\n status_tracker(\n preamble_count,\n indexf,\n indexl,\n firmware_len,\n unknown_count,\n done,\n upload,\n )\n\n if isinstance(firmware, str):\n # load binary from file\n with open(firmware, \"rb\") as f:\n firmware = f.read()\n\n firmware_len = int(len(firmware))\n half_len = int(firmware_len / 2)\n\n # split firmware in 2 halves (low and high part) and compute the half file length (2 bytes).\n two_bytes_half_len = half_len.to_bytes(2, \"big\")\n lenf = two_bytes_half_len[0].to_bytes(1, \"big\")\n lenl = two_bytes_half_len[1].to_bytes(1, \"big\")\n\n buff = firmware[:half_len]\n bufl = firmware[half_len:]\n\n # save RS-232 parameters\n baudrate = self.serial.baudrate\n timeout = self.serial.timeout\n\n indexf = -1\n indexl = -1\n preamble_count = 0\n unknown_count = 0\n\n # switch serial to upgrade mode\n self.serial.baudrate = 57600\n self.timeout = 5\n\n # Process upgrade preamble. Wait to have enough \"c\" chars to consider the preamble valid.\n # This helps getting rid of potential garbage in the buffer which could mess up with the protocol\n while preamble_count < 10:\n msg = self.serial.read(1)\n if msg == b\"\":\n # Timeout. Abort upgrade\n raise CloudWatcherException(\"Upgrade failed - timeout before transfer\")\n elif msg == b\"c\" or msg == b\"\\xff\":\n # 0xFF may occur after a B!O!O!T! -triggered reboot. Not on power-on. Funny.\n preamble_count += 1\n else:\n # Unknown message from CW. Should we abort ? Count just in case.\n unknown_count += 1\n _update_tracker(\n status_tracker,\n preamble_count,\n indexf,\n indexl,\n firmware_len,\n unknown_count,\n False,\n False,\n )\n\n # Signal CW that we are ready to transfer\n self.serial.write(b\"d\")\n\n # remain at 57600 bps but lower the timeout.\n self.timeout = 1\n\n # Actual firmware upload\n while indexf < half_len or indexl < half_len:\n msg = self.serial.read(1)\n if msg == b\"\":\n # Timeout. End transfer\n raise CloudWatcherException(\"Upgrade failed - timeout during transfer\")\n elif msg == b\"c\" or msg == b\"\\xff\":\n # Absorb excess \"c\" that may occur after sending \"d\". 0xFF occur after sending \"d\" in B!O!O!T! triggered sequences but not on power-on.\n preamble_count += 1\n elif msg == b\"0\":\n if indexf < 0:\n self.serial.write(lenf)\n else:\n self.serial.write(buff[indexf].to_bytes(1, \"big\"))\n indexf += 1\n elif msg == b\"1\":\n if indexl < 0:\n self.serial.write(lenl)\n else:\n self.serial.write(bufl[indexl].to_bytes(1, \"big\"))\n indexl += 1\n else:\n # Unknown message from CW. Should we abort ? 
Count just in case.\n unknown_count += 1\n\n _update_tracker(\n status_tracker,\n preamble_count,\n indexf,\n indexl,\n firmware_len,\n unknown_count,\n False,\n True,\n )\n\n # Tell the progress tracker we're done\n _update_tracker(\n status_tracker,\n preamble_count,\n indexf,\n indexl,\n firmware_len,\n unknown_count,\n True,\n True,\n )\n\n # CW should now be rebooting. It will be in upgrade mode for a few more seconds and send a bunch of \"c\"\n # Let's wait until it is over.\n char_count = 0\n err_count = 0\n while char_count < 1000:\n msg = self.serial.read()\n char_count += 1\n if msg == b\"\":\n # Timeout. Done with the upgrade-ready pattern.\n # restore RS-232 parameters and return to end the upload process\n self.serial.baudrate = baudrate\n self.serial.timeout = timeout\n return\n elif msg != b\"c\":\n err_count += 1\n\n # If the loop ended, CW is still in upgrade mode. This means the upgrade failed. Troubleshoot.\n raise CloudWatcherException(\"Upgrade failed - stuck in upgrade mode\")", "def test_patch_hyperflex_server_firmware_version(self):\n pass", "def enable_update(password):\n\n local_print(\"Enabling firmware update mode\")\n nks = NitrokeyStorage()\n nks.connect()\n try:\n if nks.enable_firmware_update(password) == 0:\n local_print(\"setting firmware update mode - success!\")\n except DeviceNotFound:\n local_print(\"No Nitrokey Storage device found\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the progress of the firmware update.
def get_firmware_update_progress(self): try: fw_update_uri = self._get_firmware_update_service_resource() except exception.IloError as e: LOG.debug(self._('Progress of firmware update not known: %s'), str(e)) return "UNKNOWN", "UNKNOWN" # perform the GET status, headers, response = self._rest_get(fw_update_uri) if status != 200: msg = self._get_extended_error(response) raise exception.IloError(msg) fw_update_state = response.get('State') fw_update_progress_percent = response.get('ProgressPercent') LOG.debug(self._('Flashing firmware file ... in progress %d%%'), fw_update_progress_percent) return fw_update_state, fw_update_progress_percent
[ "def get_firmware_update_status(self):\n\n response = self.execute_command(CMD_GET_FIRMWARE_UPDATE_STATUS)[0]\n inprogress = (response & 0x80) == 0x80\n return {\n \"inprogress\": inprogress,\n \"error\": response & 0x7f,\n }", "def progress(self):\n return 100 if self.status == STATUS_COMPLETE else 0", "def UpgradeProgress(self):\n if self.force_auto_sync:\n self.get('UpgradeProgress')\n return self._UpgradeProgress", "def getProgress(self):", "def get_progress(self):\n\n data = self.make_request(REQUEST_PROGRESS_MESSAGE)\n regex_res = PROGRESS_REGEX.search(data)\n current = int(regex_res.group(1))\n max_val = int(regex_res.group(2))\n percentage = current / max_val * 100\n return round(percentage, PERCENTAGE_DIGITS)", "def progress(self):\n return self._progress", "def progress(self):\n return self.progressValue", "def progress(self):\n\t\treturn core.BNGetBackgroundTaskProgressText(self.handle)", "def get_progress(self):\n\t\treturn call_sdk_function('PrlJob_GetProgress', self.handle)", "def get_print_progress(self):\n r = self.s.get(self.base_address + '/api/job')\n if r.status_code != 200:\n raise Exception(\"Error: {code} - {content}\".format(code=r.status_code, content=r.content.decode('utf-8')))\n\n data = r.content.decode('utf-8').split('\\n')\n for line in data:\n if 'completion' in line:\n # check if null\n if 'null' in line:\n raise Exception('Error reading print progress')\n else:\n return int(float(line[line.find(':')+1:line.find(',')]))\n return 0", "def sound_install_progress(self):\n return SoundInstallStatus(self.send(\"get_sound_progress\")[0])", "def get_progress(self):\n raise NotImplementedError", "def ilo_ris_fw_update_progress_status(self):\n time.sleep(30)\n iLO_REBOOT_TIME = BuiltIn().get_variable_value(\"${iLO_Reboot_Time}\")\n updateDetails = self.ilo_ris_get_firmware_update_details()\n updateState = self.ilo_ris_get_firmware_update_state()\n update_timeout = 0\n while updateDetails == \"Firmware flash in progress\" or updateState == 'PROGRESSING':\n time.sleep(3)\n update_timeout += 1\n # Exit out of function if FW update takes longer than 5 minutes\n if update_timeout >= 100:\n return False\n try:\n updateDetails = self.ilo_ris_get_firmware_update_details()\n updateState = self.ilo_ris_get_firmware_update_state()\n except:\n updateDetails = \"Firmware flash completed\"\n updateState = \"COMPLETED\"\n # Sleep here to wait for iLO to reboot after update\n time.sleep(iLO_REBOOT_TIME)\n return True", "def percent_updated(self):\n return self.percent_complete - self.previous_percent_complete", "def build_progress(self) -> Union[int, float]:\n return self.proto.build_progress", "def boot_progress(self):\n self._get_data()\n if self._boot_progress:\n return self._boot_progress\n return \"booted\" if self.power else \"powered off\"", "def get_sync_progress(self):\n\n if self.total_batches > 0:\n self.progress = round((self.batches_completed / self.total_batches) * 100, 2)\n LOGGER.info(\n f\"{self.stream} SYNC: {self.progress}% COMPLETE\"\n )\n return self.progress", "def status(self):\n\t\tstatus = self.thread.status()\n#\t\tprint_array(status)\n\t\tmessage = [\"------ RSYNC PROGRESS ------ \"]\n\t\tif self.log_message:\n\t\t\tmessage.append(self.log_message)\n\t\tmessage.append(\"Current file: %s\" % status['current_file'])\n\t\tmessage.append(\"\\tBytes Copied: %s\" % status['bytes_copied'])\n\t\tmessage.append(\"\\tPercent Done: %s\" % status['percent_done'])\n\t\tmessage.append(\"\\tTransfer Rate: %s\" % status['transfer_rate'])\n\t\tmessage.append(\"\\tTime 
Remaining: %s\" % status['est_remain'])\n\t\tmessage.append(\"\\tTransfer Number: %s\" % status['xfer_num'])\n\t\tmessage.append(\"\\tTransfers Remaining: %s\" % status['xfer_remain'])\n\t\tmessage.append(\"\\tTransfers Total: %s\" % status['xfer_total'])\n\t\tmessage.append(\"\\t----------------------------------\")\n\t\ttry:\n\t\t\toverall_percent = int(round((int(status['xfer_num'])*1.0)/int(status['xfer_total']),2)*100)\n\t\texcept: overall_percent = 0\n\t\tmessage.append(\"\\tTotal Rsync done: %s%%\\n\" % overall_percent)\n\t\tp = open(self.progress_file,'w+',0)\n\t\tfor line in message:\n\t\t\t#print line\n\t\t\tp.write(\"%s\\n\" % line)\n\t\tp.flush()\n\t\tp.close()", "def get_progress(self):\n progress = self.get_time()\n if progress:\n return progress / self.current_track.duration\n else:\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves if server is TPM capable or not.
def _get_tpm_capability(self): tpm_values = {"NotPresent": False, "PresentDisabled": True, "PresentEnabled": True} try: tpm_state = self._get_bios_setting('TpmState') except exception.IloCommandNotSupportedError: tpm_state = "NotPresent" tpm_result = tpm_values[tpm_state] return tpm_result
[ "def is_available():", "def has_tpu():\n def _check():\n with session.Session() as sess:\n sess.run(tpu.initialize_system())\n sess.run(tpu.shutdown_system())\n\n try:\n _check()\n return True\n except errors.OpError as _:\n return False", "def is_vtd_supported(self):\n\t\treturn bool(call_sdk_function('PrlSrvCfg_IsVtdSupported', self.handle))", "def evaluate_hardware_support(self):\n return hardware.HardwareSupport.SERVICE_PROVIDER", "def is_available (self):\r\n\r\n\t\t# dance with the API.\r\n\t\tresponse = self.__API(\"server/available\")\r\n\r\n\t\ttry:\r\n\t\t\tif response.content == \"1\":\r\n\t\t\t\treturn True\r\n\t\texcept:\r\n\t\t\tpass\r\n\r\n\t\treturn False", "def is_snmp_available(args):\n try:\n data = get_host_attributes(args.node_name)\n data['sensors']['power']['via']['pdu']\n except KeyError:\n return False\n return True", "def support_kvm(self):\n return self._support_kvm", "def is_ctu_capable():\n\n context = package_context.get_context()\n ctu_func_map_cmd = context.ctu_func_map_cmd\n try:\n version = subprocess.check_output([ctu_func_map_cmd, '-version'])\n except (subprocess.CalledProcessError, OSError):\n version = 'ERROR'\n return version != 'ERROR'", "def support_kvm_gpu_type(self):\n return self._support_kvm_gpu_type", "def is_accelerator_available():\n if edgeiq.find_ncs1():\n return True\n if edgeiq.find_ncs2():\n return True\n return False", "def available_on_system(cls):\n return (cls.reason_to_be_disabled() is None)", "def is_vserver_kernel():\n\n kinfo = commands.getoutput('/bin/uname -a').split()[2]\n return '-vs' in kinfo", "def is_vm():\n hostname = platform.node()\n return (hostname == \"racecar-vm\")", "def check_hyperv() -> bool:\n try:\n out = subprocess.check_output(\n ['DISM', '/Online', '/Get-FeatureInfo', '/FeatureName:Microsoft-Hyper-V']\n )\n except subprocess.CalledProcessError:\n return False\n\n if 'State : Disabled' in out.decode():\n return False\n\n return True", "def _is_device_available():\n result = get_serial_no()\n if result[1].strip() == \"unknown\":\n return False\n else:\n return True", "def is_gti_enabled(self):\n if not self.type.startswith('virtual'):\n gti = self.data['gti_settings'].get('file_reputation_context')\n if gti == 'disabled':\n return False\n return True\n raise UnsupportedEngineFeature(\n 'GTI should be enabled on the Master Engine not directly on the '\n 'virtual engine.')", "def is_GPU_available():\n code = os.system(\"nvidia-smi\")\n return code == 0", "def is_system(self) -> bool:", "def hasaccelerator():\n\n return torch.cuda.is_available() or torch.backends.mps.is_available() or bool(Models.finddevice())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get cpu virtualization status.
def _get_cpu_virtualization(self): try: cpu_vt = self._get_bios_setting('ProcVirtualization') except exception.IloCommandNotSupportedError: return False if cpu_vt == 'Enabled': vt_status = True else: vt_status = False return vt_status
[ "def _vm_get_cpu(self, vm_instance):\n pass", "def vCPU_calculator(self):\r\n\r\n return self.vm_obj.config.hardware.numCPU", "def getVirtualStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/qemu/%s/status/current' % (node,vmid),None)\n return data", "def get_cpu_mode(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCpuMode', self.handle)", "def VMStatus(self):\n try:\n status = self.vmInstance.get_status()\n LOGGER.info('Current status of virtual machine \"{}\": {}'.format(VM_NAME, status))\n\n except Exception as e:\n status = None\n LOGGER.debug(e)\n LOGGER.error(traceback.format_exc())\n LOGGER.error('An error occured while getting status of virtual machine \"{}\"!'.format(VM_NAME))\n\n return status", "def cpuStats():", "def get_cpu_accel_level(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCpuAccelLevel', self.handle)", "def get_cpu_mode(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetCpuMode', self.handle)", "def get_vcpu_total(self):\n\n # On certain platforms, this will raise a NotImplementedError.\n try:\n return multiprocessing.cpu_count()\n except NotImplementedError:\n LOG.warn(_(\"Cannot get the number of cpu, because this \"\n \"function is not implemented for this platform. \"\n \"This error can be safely ignored for now.\"))\n return 0", "def cpu(self) -> int:\n return pulumi.get(self, \"cpu\")", "def checkCpu(self):\n cpu = self.getCpu()\n err_msg = []\n task_result = device_status = 0\n\n if cpu is None:\n err_msg.append('Get CPU info failed')\n task_result = device_status = 1\n else:\n # 以后可扩展告警条件\n pass\n return cpu, err_msg, task_result, device_status", "def CPU(self):\n return self._core.runtime.cpu", "def vcpus(self):\n return self._vcpus", "def _get_cpu(self, vm_size):\n return self.cc._size_get_cpu(vm_size) if self.cc else None", "def _get_cpu_util_current(self):\n return self.__cpu_util_current", "def _size_get_cpu(self, vm_size):\n pass", "def cpu(self):\n return self._cpu", "def cpu(self):\n return self.no_params_func(\"cpu\", delete_after_use=False)", "def get_cpu_info():\n try:\n cpu_info = subprocess.check_output('lscpu')\n return cpu_info\n except OSError:\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get status of NVDIMM_N.
def _get_nvdimm_n_status(self): try: nvdimm_n_status = self._get_bios_setting('NvDimmNMemFunctionality') if nvdimm_n_status == 'Enabled': nvn_status = True else: nvn_status = False except exception.IloCommandNotSupportedError: nvn_status = False return nvn_status
[ "def get_thinet_status_n(self):\r\n return self.ask(\"MOTTE:STATUS?\",\"int\")", "def get_brf_status_n(self):\r\n return self.ask(\"MOTBI:STATUS?\",\"int\")", "def avail_status(self):\n return self._nfvi_image.avail_status # assume one-to-one mapping", "def get_status(self):\n return self.o.read_register(self.dev_id, STATUS)", "def node_status(self) -> Optional['outputs.CSIVXFlexOSStatusNodeStatus']:\n return pulumi.get(self, \"node_status\")", "def getNDV(self):\n return len(self.globalDVList)", "def _read_status_register(self):\n cmd = self._DEVMEM_CMD + \" \" + self._WDT1_STATUS_REG\n cmd_out = run_shell_cmd(cmd)\n return int(cmd_out, 16)", "def nwmetricmepstatus(self) :\n\t\ttry :\n\t\t\treturn self._nwmetricmepstatus\n\t\texcept Exception as e:\n\t\t\traise e", "def getnumbarvar(self): # 3\n res,resargs = self.__obj.getnumbarvar()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numbarvar_return_value = resargs\n return _numbarvar_return_value", "def getVirtualStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/qemu/%s/status/current' % (node,vmid),None)\n return data", "def nodata_value(self):\n # Get value and nodata exists flag\n nodata_exists = c_int()\n value = capi.get_band_nodata_value(self._ptr, nodata_exists)\n if not nodata_exists:\n value = None\n # If the pixeltype is an integer, convert to int\n elif self.datatype() in GDAL_INTEGER_TYPES:\n value = int(value)\n return value", "def node_status(self) -> Optional['outputs.CSIUnityStatusNodeStatus']:\n return pulumi.get(self, \"node_status\")", "def getSnmpStatus(self):\n return getattr(self, \"_snmpStatus\", 0)", "def showLogicalNvdimmConfig(self, options):\n\n if self._rdmc.app.config._ac__format.lower() == 'json':\n options.json = True #pragma: no cover\n\n validator = LogicalNvdimmValidator()\n\n scalable_pmem_config = ScalablePersistentMemoryConfig(self._restHelpers,\\\n validator, self._chif_lib)\n scalable_pmem_config.refresh()\n\n # overall config status\n self._helpers.validateAllConfigurationPolicies(scalable_pmem_config, \\\n output_as_json=options.json)\n\n if options.available:\n if not options.json:\n self._helpers.writeHeader2(u\"Available Scalable Persistent Memory\")\n sys.stdout.write(\"Available capacity to create logical NVDIMMs \"\\\n \"is constrained by the system\\n hardware, including\"\\\n \" the number of backup storage devices selected.\\n\")\n\n self._helpers.displayAvailableCapacity(scalable_pmem_config, \\\n output_as_json=options.json)\n else:\n if not options.json:\n # overall config enabled and capacity graph\n self._helpers.writeHeader2(u\"Overall Allocated Scalable Persistent Memory\")\n sys.stdout.write(u\"\\n\")\n self._helpers.displayOverallCapacityBarGraph(scalable_pmem_config, 60)\n self._helpers.printBackupBootTimeMessage(scalable_pmem_config)\n if len(scalable_pmem_config.drives.selectedDrives) == 0:\n sys.stdout.write(\"* No backup storage devices have been selected\")\n sys.stdout.write(u\"\\n\")\n # allocated logical nvdimms\n self._helpers.writeHeader2(u\"Logical NVDIMMs\")\n self._helpers.displayRegionConfiguration(scalable_pmem_config, \\\n output_as_json=options.json, \\\n print_backup_time_message=False)\n\n sys.stdout.write(u\"\\n\\n\")", "def dev_status(self):\n self.debug_stream(\"In dev_status()\")\n argout = \"\"\n #----- PROTECTED REGION ID(SynchroMotorDS.Status) ENABLED START -----#\n self.argout = \"Status is ON\"\n #----- PROTECTED REGION END -----#\t//\tSynchroMotorDS.Status\n self.set_status(self.argout)\n 
self.__status = PyTango.Device_4Impl.dev_status(self)\n return self.__status", "def _nvidia_smi():\n\n status = check_output(['nvidia-smi', \n '--query-gpu=utilization.gpu,utilization.memory', \n '--format=csv'])\n status = pd.read_csv(StringIO(status.decode('utf-8')))\n \n # Reformat column names.\n # (Need the col.strip() because sometimes there are preceding spaces)\n map_cols = {'utilization.gpu [%]': 'Utilization (%)',\n 'utilization.memory [%]': 'Memory (%)'}\n status.columns = [map_cols[col.strip()] for col in status.columns]\n\n # Convert to numerical data\n for col in status.columns:\n status[col] = status[col].apply(lambda x: int(x.rstrip('%')))\n\n return status", "def get_nova_volume_status(self):\n cmd = (\"sudo nova-manage volume list |grep %s\" % self.volume)\n\n response, err = self._exec_ssh_cmd(self.nova_volume_status_node, cmd,\n exec_timeout=600)\n logging.info((\"running:\\n%s\\non: %s\\nresponse:\\n%s\") %\n (cmd, self.nova_volume_status_node, response))\n\n if 'exception' in response or 'vol' not in response:\n logging.error((\"get_nova_volume_status: Volume not found in DB\\n\"\n \"Response:%s\\nErr:%s\") % (response, err))\n self.exceptions += 1\n self._check_exit_condition()\n return \"volume not found\"\n\n logging.info(\"Nova DB POV:\\n %s\" % response)\n nova_volume_status_list = response.split()\n\n self.instance_ID = nova_volume_status_list[5]\n self.nova_status = nova_volume_status_list[8]\n\n if 'None' not in self.instance_ID:\n self.get_instance_hex()\n if self.compute_hostname is None:\n self._get_nova_host()\n\n return \"OK\"", "def get_nvram_info(\n self,):\n\n self._check_connection_type(\"get_nvram_info\", \"Node\")\n\n params = { \n }\n \n # There is no adaptor.\n return self.send_request(\n 'GetNvramInfo',\n GetNvramInfoResult,\n params,\n since=5\n )", "def get_ndvi(self):\n\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines numbers divisible by k from a list
def get_longest_div_k(lst, k): rezultat = [] for x in lst: if x % k == 0: rezultat.append(x) return rezultat
[ "def divides(k, n):\n return n % k == 0", "def subarraysDivByK(A: List[int], K: int) -> int:\n from collections import defaultdict\n s = defaultdict(int)\n s[0] += 1\n res, x = 0, 0\n for a in A:\n x = (x + a) % K\n res += s[x]\n s[x] += 1\n return res", "def getDivisors(n):", "def smallestRepunitDivByK(self, K: int) -> int:\n if K%2 == 0 or K%5 == 0:\n return -1\n r = 1\n for digits in range(1,K+1):\n if r % K == 0:\n return digits\n else:\n r = (r*10 + 1)%K\n return -1", "def divisor(k, num):\n\n if k < 0:\n raise Exception('k must be >= 0: {}'.format(k))\n\n factors = prime_factorization(num)\n result = 1\n if k == 0:\n for prime in factors:\n result *= prime + 1\n\n for prime in factors:\n result *= ((pow(prime, (factors[prime] + 1) * k) - 1) //\n (prime ** k - 1))\n return result", "def is_possible_divide(nums, k):\n cnts = collections.Counter(nums)\n \n for num in sorted(cnts):\n cnt = cnts[num]\n if cnt > 0:\n for i in range(num + 1, num + k):\n cnts[i] -= cnt\n if cnts[i] < 0: return False\n \n return True", "def drop_factors(n, k):\n i = 0\n while n > 1:\n (d, r) = divmod(n, k)\n if r != 0: break\n i += 1\n n = d\n return (i, n)", "def split_num_into_parts(total, k):\n\n while True:\n sum_so_far = 0\n return_list = list()\n\n for index in range(0, k-1):\n\n avg_remaining_val = (total - sum_so_far) / (k - index)\n\n val = random.randint(0, (avg_remaining_val * 2))\n sum_so_far += val\n return_list.append(val)\n\n if sum_so_far <= total:\n return_list.append(total - sum_so_far)\n return return_list", "def split_num_into_parts(total, k):\n\n while True:\n sum_so_far = 0\n return_list = list()\n\n for index in range(0, k-1):\n\n avg_remaining_val = (total - sum_so_far) / (k - index)\n\n val = random.randint(0, (avg_remaining_val * 2))\n sum_so_far += val\n return_list.append(val)\n\n if sum_so_far <= total:\n return_list.append(total - sum_so_far)\n assert len(return_list) == k\n\n return return_list", "def count_k(n, k):\n if n == 0:\n return 1\n elif n < 0:\n return 0\n else:\n total = 0\n i = 1\n while i <= k:\n total += count_k(n - i, k)\n i += 1\n return total", "def arranjos(n, k: int) -> float:\r\n\r\n # quando n for um conjunto de elementos (list), então\r\n # atribui à n a quantidade de elementos do conjunto\r\n n = len(n) if isinstance(n, list) else n\r\n\r\n return fatorial(n) / fatorial(n-k)", "def subarraysDivByK(self, A, K):\n if not A:\n return []\n\n if K == 0:\n raise Exception(\"K is zero! 
\")\n\n # key is mod and value is the frequency of indexes.\n modmap = dict()\n prefix_sum = 0\n for i, a in enumerate(A):\n prefix_sum += a\n # mod function of a positive always return positive\n # -9 % 2 == 1\n mod = prefix_sum % K\n modmap[mod] = modmap.get(mod, 0) + 1\n\n # then count the pair of prefix sum sharing the same reminder by % K.\n ans = 0\n for mod, c in modmap.items():\n # there are c subarries whose sum % k == mod\n # pair c subarrays to get c * (c - 1) // 2 subarrays whose sum % K == 0\n ans += c * (c - 1) // 2\n\n # there are also modmap[0] subarrys (if exists) whose sum is divisible by K.\n # they do not need pair.\n return ans + modmap.get(0, 0)", "def solveProblem(self, A, k):\n count = 0\n for p in range(0, len(A)):\n for q in range(p + 1, len(A)):\n for r in range(q + 1, len(A)):\n if A[p] * A[q] * A[r] % k == 0: count += 1\n return count", "def _compute_required_number_of_particles_kld(k, epsilon, upper_quantile):\n # Helper variable (part between curly brackets in (7) in Fox paper\n x = 1.0 - 2.0 / (9.0 * (k - 1)) + np.sqrt(2.0 / (9.0 * (k - 1))) * upper_quantile\n return np.ceil((k - 1) / (2.0 * epsilon) * x * x * x)", "def _get_m(self, ks: List[int]) -> int:\n\n base = 1\n for c in ks:\n base = base * c // gcd(base, c)\n return base", "def divisor_k_lookup(up_to, k):\n div = defaultdict(lambda: 1)\n div[1] = 1\n\n for i in xrange(2, up_to):\n for j in xrange(i, up_to, i):\n div[j] += i**k\n\n return div", "def split(n, k):\n new_size_floor = math.floor(n/k)\n additional = n % k\n for i in range(k):\n if i < additional:\n yield new_size_floor + 1\n else:\n yield new_size_floor", "def divisores(n):\n\n if n != 0:\n n = int(abs(n))\n listaDivisores = []\n for i in range(1, n + 1):\n if n % i == 0:\n listaDivisores.append(i)\n else:\n listaDivisores = 'infinitos'\n return listaDivisores", "def kmers_of(self, k):\n if k < 1:\n raise ValueError(\"There is no such thing as %s-mer\" % (k,))\n\n return max(0, len(self) - k + 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Linac phasing. Note that these overlays override individual klystron phases.
def bmad_linac_phasing_lines(epics): lines = [ '! Linac overall phasing', 'O_L1[phase_deg] = 0 ! K21_1 sets this directly. This is a delta on top of that.', 'O_L2[phase_deg] = '+str(epics.caget('SIOC:SYS0:ML00:CALC204')), 'O_L3[phase_deg] = '+str(epics.caget('SIOC:SYS0:ML00:AO499')) ] return lines
[ "def set_display_from_lines(self):\n y = 1\n maxlin = CA_World.ca_display_size - 1\n limy = len(self.ca_lines) + maxlin\n for i in self.ca_lines:\n x = 1\n if limy >= maxlin:\n if SimEngine.gui_get('init') == \"Right\": # Right\n limx = len(i) + maxlin + 2\n for j in range(len(i) - 2):\n if limx >= maxlin:\n b = bool(i[j])\n self.pixel_tuple_to_patch(\n ((maxlin - len(i) + 2 + x) * 4, (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n x += 1\n else:\n limx -= 1\n elif SimEngine.gui_get('init') == \"Left\": # Left\n limx = 0\n for j in range(len(i) - 2):\n if limx <= maxlin + 2:\n b = bool(i[j])\n self.pixel_tuple_to_patch(((x - 3) * 4, (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(\n b)\n x += 1\n limx += 1\n else: # Center and Random\n limx = int((len(i) - maxlin) / 2)\n k = 0\n for j in range(len(i)):\n if limx < 0:\n b = bool(i[j])\n self.pixel_tuple_to_patch(((maxlin - len(i) + x - 1 + limx) * 4,\n (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n else:\n if k < maxlin + 1:\n b = bool(i[j + limx])\n self.pixel_tuple_to_patch((k * 4,\n (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n x += 1\n k += 1\n y += 1\n else:\n limy -= 1", "def landmarking(centerline, folder):\n\n line, max_point_ids, min_point_ids = splineCenterline(centerline)\n WritePolyData(line, path.join(folder, \"surface\", \"centerline_splined.vtp\"))\n curvature = get_array(\"Curvature\", line)\n length = get_curvilinear_coordinate(line)\n k1 = get_array(\"k1\", line)\n k2 = get_array(\"k2\", line)\n\n # Remove a min / max point that is in reality a saddle point\n for i in min_point_ids:\n for j in max_point_ids:\n if abs(i-j) < 5 and abs(curvature[i] - curvature[j]) < 0.01:\n min_point_ids.remove(i)\n max_point_ids.remove(j)\n\n k1_points = k1[max_point_ids]\n k2_points = k2[max_point_ids]\n k_points = np.zeros((k1_points.shape[0], 2))\n k_points[:,0] = k1_points[:,0]\n k_points[:,1] = k2_points[:,0]\n tetha = np.zeros(k1_points.shape[0] - 1)\n for i in range(tetha.shape[0]):\n a = k_points[i,:] / np.sqrt(np.sum(k_points[i,:]**2))\n b = k_points[i+1,:] / np.sqrt(np.sum(k_points[i+1,:]**2))\n tetha[i] = math.acos(np.dot(a, b))\n tetha[i] = tetha[i] * 180 / math.pi\n \n Z = np.zeros(length.shape[0])\n Y = np.zeros(length.shape[0])\n X = np.zeros(length.shape[0])\n for i in range(Z.shape[0]):\n X[i] = line.GetPoints().GetPoint(i)[0]\n Y[i] = line.GetPoints().GetPoint(i)[1]\n Z[i] = line.GetPoints().GetPoint(i)[2]\n\n\n from matplotlib.pyplot import plot, hold, show, legend\n plot(length, X, label=\"X\")\n hold(\"on\")\n plot(length, Y, label=\"Y\")\n plot(length, Z, label=\"Z\")\n legend()\n\n # Tolerance parameters from Bogunevic et al. 
(2012)\n tol_ant_post = 60\n tol_sup_ant = 45\n tol_post_inf = 45\n tol_inf_end = 110\n\n # Find max coronal coordinate\n if Z.min() in Z[argrelextrema(Z, np.less)[0]]:\n value_index = Z[argrelextrema(Z, np.less)[0]].min()\n max_coronal_ids = np.array(Z.tolist().index(value_index))\n else:\n value_index = Z[argrelextrema(Z, np.greater)[0]].max()\n max_coronal_ids = np.array(Z.tolist().index(value_index))\n\n plot(length[max_coronal_ids], Z[max_coronal_ids], \"g^\")\n show()\n\n m = max_coronal_ids\n viz(line, [[X[m], Y[m], Z[m]]])\n plot(length, curvature, [length[m], length[m]], [0,1])\n hold(\"on\")\n plot(length[max_point_ids], curvature[max_point_ids], 'g^')\n show()\n\n sys.exit(0)\n # Find all interfaces\n def find_interface(start, dir, tol, part):\n stop = dir if dir == -1 else tetha.shape[0]\n sucess = False\n for i in range(start-1, stop, dir):\n if tetha[i] > tol:\n sucess = True\n break\n\n if sucess:\n start = max_point_ids[i]\n stop = max_point_ids[i + 1]\n index = ((min_point_ids > start) * (min_point_ids < stop)).nonzero()[0]\n min_point = min_point_ids[index]\n interfaces[part] = min_point\n\n elif not sucess and part == \"sup_ant\":\n print \"Where not able to identify the interface between the\" + \\\n \"anterior and superior bend. Chekc the coronal coordinates\"\n return None\n\n elif not sucess and part != \"inf_end\":\n print \"The geometry is to short to be classified with superior\" + \\\n \", anterior, posterior and inferior.\"\n return None\n\n elif not sucess and part == \"inf_end\":\n interfaces[\"inf_end\"] = 0\n i = 0\n print \"End of inferior is at the end of the geometry, this might\" + \\\n \"affect the geometry stats\"\n else:\n print \"Something happend, idea: some bend ended at the last point\"\n return None\n\n return i\n\n interfaces = {}\n min_point_ids = np.array(min_point_ids)\n index = np.array((max_coronal_ids > max_point_ids).nonzero()[0]).max()\n start = find_interface(index, -1, tol_ant_post, \"ant_post\")\n if start is None:\n return None\n start = find_interface(start, -1, tol_post_inf, \"post_inf\")\n if start is None:\n return None\n start = find_interface(start, -1, tol_inf_end, \"inf_end\")\n start = find_interface(index + 1, 1, tol_sup_ant, \"sup_ant\")\n if start is None:\n return None\n\n # Find a \"center\" of each bend\n bends = [\"inferior\", \"posterior\", \"anterior\", \"superior\"]\n values = [interfaces[\"inf_end\"], interfaces[\"post_inf\"],\n interfaces[\"ant_post\"], interfaces[\"sup_ant\"], \n curvature.shape[0]]\n\n max_landmarks = {}\n for i in range(len(bends)):\n curv_part = curvature[values[i]: values[i+1] + 1][:, 0]\n max_new = []\n\n for j in range(len(max_point_ids)):\n if values[i] < max_point_ids[j] < values[i+1] + 1:\n max_new.append(max_point_ids[j])\n max_new = np.array(max_new)\n\n while max_new.shape[0] > 1:\n Gauss = gaussian(curv_part.shape[0], std=curv_part.shape[0]//2) \n new = np.convolve(curv_part, Gauss, 'same')\n max_new = argrelextrema(new, np.greater)[0]\n\n max_landmarks[bends[i]] = max_new + values[i]\n\n # Following Bogunovic\n max_landmarks.pop(\"superior\")\n landmarks = {}\n for k, v in max_landmarks.iteritems():\n landmarks[k] = line.GetPoints().GetPoint(float(v))\n\n for k, v in interfaces.iteritems():\n landmarks[k] = line.GetPoints().GetPoint(float(v))\n\n return landmarks", "def drawWarpLines(self):\n # draw warp lines\n for item in self.game.warpLines:\n anwp.sl.engine.drawLine(item[0]+self.bufferX, item[1]+self.bufferY, item[2]+self.bufferX, item[3]+self.bufferY, pyui.colors.blue)", "def 
road_lines():\n cv2.polylines(frame_1, [pts_1], True, yellow_color)\n cv2.polylines(frame_2, [pts_2], True, yellow_color)", "def crosshairs(self):\r\n cv2.line(self.img, (0, int(self.img_dim[0] / 2)), (self.img_dim[1], int(self.img_dim[0] / 2)), (0, 0, 200), 1)\r\n cv2.line(self.img, (int(self.img_dim[1] / 2), 0), (int(self.img_dim[1] / 2), self.img_dim[0]), (0, 0, 200), 1)", "def _extra_lines(self):\n if self.nout == 1:\n xtra_lines = 2 if self.xpix>10 else 3\n else:\n xtra_lines = 1\n \n return xtra_lines", "def draw_line_pro(coeffs1, coeffs2, M, frame, h=130):\n # obten altura y ancho de la imagen\n try:\n height, width, _ = frame.shape\n # Mascara para dibujar las lineas\n mask = np.zeros_like(frame)\n\n # Crea un Array hasta heigh-1\n plot_y = np.linspace(0, height - 1, height)\n\n \n left_x = coeffs1['a'] * plot_y ** 2 \\\n + coeffs1['b'] * plot_y \\\n + coeffs1['c']\n right_x = coeffs2['a'] * plot_y ** 2 + \\\n coeffs2['b'] * plot_y + \\\n coeffs2['c']\n\n x1 = 0\n y1 = height- h\n x2 = width\n y2 = height- h\n \n #if right_x != mask:\n cv2.polylines(mask, [np.int32(np.stack((right_x, plot_y), axis=1))], False, (0, 0, 255), 20)\n #if left_x != mask:\n # Draw the lines (one red, one blue)\n cv2.polylines(mask, [np.int32(np.stack((left_x,plot_y), axis=1))], False, (255, 0, 0), 20)\n \n \n # Warpea la perspectiva\n mask = cv2.warpPerspective(mask, np.float32(M), (width, height)) # Warp back to original image space\n\n # Añade las lineas a la imagen original\n img = cv2.addWeighted(frame, 1., mask, 0.5, 0)\n x_r, x_l = right_x[height - h], left_x[height -h]\n \n \n #Calcula el punto medio \n x_medio = ((x_r - x_l)/2) + x_l\n\n \n #Lineas horizontales de corto\n #cv2.line(img, (x1,y1), (x2,y2), (102,255,102), 2)\n\n #Direction Line\n cv2.line(img, (int(width/2), height- 1), (int(x_medio),height-h), (255,102,178), 3)\n\n except ValueError:\n pass\n\n return img, x_medio", "def overlay_lane_boundaries(self):\n # Establish original image size as tuple\n undist_img_size = (self.undist_img_m.shape[1], self.undist_img_m.shape[0])\n # Create an image to draw the lines on\n warp_zero = np.zeros_like(self.binary_warped_m).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n \n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([self.left_fitx_m, self.ploty_m]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([self.right_fitx_m, self.ploty_m])))])\n pts = np.hstack((pts_left, pts_right))\n \n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n \n # Warp blank back to original image space, inverse perspective matrix (Minv)\n newwarp = cv2.warpPerspective(color_warp, self.Minv_m, undist_img_size)\n \n # Combine the result with the original image for lane boundaries to appear\n self.result_m = cv2.addWeighted(self.undist_img_m, 1, newwarp, 0.3, 0)", "def set_lanes(left_lines, right_lines, image):\n \n Y_LANE_EXTRAP = 35 # percent up from bottom of image to extrapolate lane lines\n \n image_wk = np.copy(image) # working copy\n image_lines = np.copy(image_wk)*0 # create a blank to draw lines on\n im_y = image_wk.shape[0]\n \n y1_lane = im_y\n y2_lane = np.int32(im_y - (Y_LANE_EXTRAP/100*im_y))\n \n # Process left lane\n if left_lines:\n z_left = my_linear_polyfit(left_lines)\n x1_lane = np.int32( (y1_lane - z_left[1]) / z_left[0] ) # x = (y-b)/m\n x2_lane = np.int32( (y2_lane - z_left[1]) / z_left[0] )\n \n # Draw left lane on blank image\n 
cv2.line(image_lines, (x1_lane, y1_lane), (x2_lane, y2_lane), (100,100,100), 15)\n \n # Process right lane\n if right_lines:\n z_right = my_linear_polyfit(right_lines)\n x1_lane = np.int32( (y1_lane - z_right[1]) / z_right[0] ) # x = (y-b)/m\n x2_lane = np.int32( (y2_lane - z_right[1]) / z_right[0] )\n \n # Draw right lane on blank image\n cv2.line(image_lines, (x1_lane, y1_lane), (x2_lane, y2_lane), (100,100,100), 15)\n \n # Overlay detected left/right lanes on road image\n image_wk = weighted_img(image_lines, image_wk)\n \n # Output road image with overlaid left/right lanes\n return image_wk", "def phase_lines(self):\n return phase_lines(self)", "def process_laneOffset(self):\n center_line = np.poly1d(np.mean([self.line_l.get_LinePoly().coeffs, self.line_r.get_LinePoly().coeffs], axis=0))\n # store the center line polynomial\n self.center_poly = center_line\n center_point = IMAGE_WIDTH/2 - center_line(709)\n offset_from_center =center_point* self.line_l.x_pxm\n self.lane_offset = offset_from_center\n return center_point", "def Polyline(self):\r\n pass", "def line_layer(self):\n screen_origin = self.ids.mapview.get_window_xy_from(lat1, lon1, self.ids.mapview.zoom)\n screen_destination = self.ids.mapview.get_window_xy_from(lat2, lon2, self.ids.mapview.zoom)\n point_list = [screen_origin[0], screen_origin[1], screen_destination[0], screen_destination[1]]\n\n with self.ids.line.canvas:\n self.ids.line.canvas.clear()\n\n Color(0, 0, 0, .6)\n Line(points=point_list, width=3, joint=\"bevel\")", "def plane_unwrapping():\r\n pass", "def calc_crick(self):\n\n\t\tdef ClosestPointOnLine(a, b, p):\n\t\t\tap = p-a\n\t\t\tab = b-a\n\t\t\tresult = a + np.dot(ap,ab)/np.dot(ab,ab) * ab\n\t\t\treturn result\n\n\t\tdef angle(v1, v2):\n\t\t\treturn np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))\n\n\t\tfor pos, r in enumerate(self.res[1:-1]):\n\t\t\trealpos=pos+1\n\n\t\t\tisFirstLayer = realpos==1\n\t\t\tisLastLayer = realpos==len(self.res)-2\n\n\t\t\t# parallel\n\t\t\tif not self.ap:\n\t\t\t\tif isFirstLayer:\n\t\t\t\t\tO_next = self.res[realpos+1].O.get_array()\n\t\t\t\t\tC_next = self.res[realpos+1].C.get_array()\n\t\t\t\telse:\n\t\t\t\t\tO_next = self.res[realpos-1].O.get_array()\n\t\t\t\t\tC_next = self.res[realpos-1].C.get_array()\n\n\t\t\t# anti-parallel\n\t\t\telse:\n\t\t\t\tif isLastLayer:\n\t\t\t\t\tO_next = self.res[realpos-1].O.get_array()\n\t\t\t\t\tC_next = self.res[realpos-1].C.get_array()\n\t\t\t\telse:\n\t\t\t\t\tO_next = self.res[realpos+1].O.get_array()\n\t\t\t\t\tC_next = self.res[realpos+1].C.get_array()\n\n\t\t\tO = r.O.get_array()\n\t\t\tCa = r.Ca.get_array()\n\t\t\tC = r.C.get_array()\n\n\t\t\t# Project Ca onto the helix axis (O_new)\n\t\t\tO_new = ClosestPointOnLine(O, O_next, Ca)\n\n\t\t\t# Define a plane perpendicular to the helix axis\n\t\t\t# and find intersection between this plane and the bundle axis (C_new)\n\t\t\tn = O-O_new\n\t\t\tV0 = O_new\n\t\t\tw = C - O_new\n\t\t\tu = C_next - C\n\t\t\tN = -np.dot(n, w)\n\t\t\tD = np.dot(n, u)\n\t\t\tsI = N / D\n\t\t\tC_new = C+ sI * u\n\n\t\t\t# Define sign of the Crick angle\n\t\t\tmixed = np.dot(np.cross(O_next - O_new, Ca - O_new), C_new - O_new)\n\n\t\t\tif mixed<0:\n\t\t\t\tsign=1\n\t\t\telse:\n\t\t\t\tsign=-1\n\n\t\t\tif not self.ap:\n\t\t\t\tif not isFirstLayer: sign = sign * -1\n\t\t\telse:\n\t\t\t\tif not isLastLayer: sign = sign * -1\n\n\t\t\tr.crick = np.degrees(angle(Ca-O_new, C_new-O_new)) * sign\n\n\t\tself.res[0].crick=None\n\t\tself.res[-1].crick=None\n\n\t\tcrick = [r.crick for r in 
self.res[1:-1]]\n\t\tself.crick = [None]+crick+[None]", "def _update_phase_plane(self):\n self.pp_ax.clear()\n self.pp_splt.clear()\n self.pp_splt.set_color_cycle(get_color(self.model.nvar))\n self.pp_splt.plot(numpy.arange(TRAJ_STEPS + 1) * self.integrator.dt,\n numpy.zeros((TRAJ_STEPS + 1, self.model.nvar)))\n if hasattr(self.pp_splt, 'autoscale'):\n self.pp_splt.autoscale(enable=True, axis='y', tight=True)\n self.pp_splt.legend(self.model.state_variables)\n self._plot_phase_plane()", "def route_bitlines(self):\n for j in range(self.columns):\n\n bl_offset_begin = self.mux_inst[j].get_pin(\"bl_out\").bc()\n br_offset_begin = self.mux_inst[j].get_pin(\"br_out\").bc()\n\n bl_out_offset_begin = bl_offset_begin - vector(0, (self.words_per_row + 1) * self.sel_pitch)\n br_out_offset_begin = br_offset_begin - vector(0, (self.words_per_row + 2) * self.sel_pitch)\n\n # Add the horizontal wires for the first bit\n if j % self.words_per_row == 0:\n bl_offset_end = self.mux_inst[j + self.words_per_row - 1].get_pin(\"bl_out\").bc()\n br_offset_end = self.mux_inst[j + self.words_per_row - 1].get_pin(\"br_out\").bc()\n bl_out_offset_end = bl_offset_end - vector(0, (self.words_per_row + 1) * self.sel_pitch)\n br_out_offset_end = br_offset_end - vector(0, (self.words_per_row + 2) * self.sel_pitch)\n\n self.add_path(self.sel_layer, [bl_out_offset_begin, bl_out_offset_end])\n self.add_path(self.sel_layer, [br_out_offset_begin, br_out_offset_end])\n\n # Extend the bitline output rails and gnd downward on the first bit of each n-way mux\n self.add_layout_pin_segment_center(text=\"bl_out_{}\".format(int(j / self.words_per_row)),\n layer=self.bitline_layer,\n start=bl_offset_begin,\n end=bl_out_offset_begin)\n self.add_layout_pin_segment_center(text=\"br_out_{}\".format(int(j / self.words_per_row)),\n layer=self.bitline_layer,\n start=br_offset_begin,\n end=br_out_offset_begin)\n\n else:\n self.add_path(self.bitline_layer, [bl_out_offset_begin, bl_offset_begin])\n self.add_path(self.bitline_layer, [br_out_offset_begin, br_offset_begin])\n\n # This via is on the right of the wire\n self.add_via_stack_center(from_layer=self.bitline_layer,\n to_layer=self.sel_layer,\n offset=bl_out_offset_begin,\n directions=self.via_directions)\n\n # This via is on the left of the wire\n self.add_via_stack_center(from_layer=self.bitline_layer,\n to_layer=self.sel_layer,\n offset=br_out_offset_begin,\n directions=self.via_directions)", "def __call__(self, time, hard_reset=False):\n loc = self.loc\n #print(\"\\n\\npplane call back, mouseUp =\", gui._mouseUp)\n\n fig_struct = plotter.figs['Master']\n # combine all layer information\n dynamicData = fig_struct.layers['nullclines_nV'].data.copy()\n\n ax = gui.dynamicPlots['nullclines_nV']\n #sc = fig_struct.layers['nullclines'].scale\n sc = [ax.get_xlim(), ax.get_ylim()]\n\n if hard_reset:\n force = True\n preComputed = False\n print(\"\\n HARD REFRESH for phase plane\")\n else:\n preComputed = False\n force = False\n # REPLACE WITH A PROPER CACHE STRUCTURE\n for key, val in dynamicData.items():\n # dynamicData.keys are 'yNull_<time>' or 'xNull' or keys from horiz_PP etc.\n # re-use computed nullclines if time is in \"cache\", i.e. 
it shows up in the keys\n if key[6:] == str(time):\n # cache hit!\n val['display'] = True # not clear if this updates original data structure after copy\n # also check to see whether has been rescaled\n if self.last_scale == sc:\n preComputed = True\n else:\n force = True\n elif key[:5] != 'xNull':\n # Use != to clean up collision lines and ohter nullcline that are not for\n # this time value.\n # switch off y-nullcline (V_inf) display for the other times\n # yNull stays constant so keep that display=True\n val['display'] = False\n\n pt = gui.points[gui.ix]\n\n p = fig_struct.layers['points_nV']\n p.display = True\n\n dV_dt = (pt['vinf']-pt['V'])/pt['tauv']\n dm_dt = (pt['Na.minf']-pt['Na.m'])/pt['Na.taum']\n dn_dt = (pt['K.ninf']-pt['K.n'])/pt['K.taun']\n plotter.add_data([[pt['K.n'], pt['K.n']+dn_dt*self.vel_arrow_scale],\n [pt['V'], pt['V']+dV_dt*self.vel_arrow_scale]],\n layer='state_vel_nV', name='state', style=vel_vec_style, force=True)\n\n dvinf_dt_m = ptsDSSRT[gui.ix]['Omega_m']\n # self.dQ_dt('vinf', gui.ix, gui.points)\n plotter.add_data([[pt['K.n'], pt['K.n']],\n [pt['vinf'], pt['vinf']+dvinf_dt_m*self.vel_arrow_scale]],\n layer='state_vel_nV', name='vinf', style=vinf_vec_style, force=True)\n\n if self.first_call:\n plotter.add_data([gui.points['K.n'], gui.points['V']],\n layer='vfp_nV', name='traj', style='y')\n plotter.add_data([gui.points['K.n'], gui.points['vinf']],\n layer='vfp_nV', name='quasiVnull', style='m--')\n\n vs = np.linspace(sc[1][0], sc[1][1], 100)\n x = dict(pt).copy()\n\n def vinf(n, v):\n x['K.n'] = n\n x['V'] = v\n x['Na.m'] = gen.auxfns.Na_dssrt_fn_minf(v)\n # assume autonomous system\n return model.Rhs(0, x, asarray=False)['V']\n\n vinfs_inv_n = np.array([fsolve(vinf, gen.auxfns.K_dssrt_fn_ninf(v), args=(v,)) for v in vs]).T[0]\n if self.first_call:\n plotter.add_data([vinfs_inv_n, vs], layer='vfp_nV', name='vinf_fastm', style='b--')\n else:\n plotter.set_data('vfp_nV', data={'vinf_fastm': {'data': [vinfs_inv_n, vs], 'style':'b--', 'display': True}}, display=True)\n\n # Virtual fixed point and linearized nullclines\n if 'fast_m' in model.name:\n vfp = None\n with_jac = False\n do_fps = False\n fast_vars = ['Na.m']\n else:\n loc.analyze(pt)\n vfp = loc.fp\n with_jac = False #True\n do_fps = False #True\n fast_vars = None\n lin_ns = np.linspace(sc[0][0], sc[0][1], 3)\n lin_vinfs = [loc.lin.auxfns.vinf(n) for n in lin_ns]\n lin_ninfs = [loc.lin.auxfns.fnin(n) for n in lin_ns]\n plotter.add_data([lin_ns, lin_vinfs], layer='vfp_nV',\n name='lin_nullV', style='b:', force=True)\n plotter.add_data([lin_ns, lin_ninfs], layer='vfp_nV',\n name='lin_nulln', style='r:', force=True)\n\n # update (or create) points\n try:\n plotter.set_point('state_pt', Point2D(pt['K.n'], pt['V']), 'points_nV')\n plotter.set_point('vinf_pt', Point2D(pt['K.n'], pt['vinf']), 'points_nV')\n if vfp:\n plotter.set_point('vfp_pt', Point2D(vfp['n'], vfp['v']), 'points_nV')\n except KeyError:\n plotter.add_point(Point2D(pt['K.n'], pt['V']),\n layer='points_nV', style='ko', name='state_pt')\n plotter.add_point(Point2D(pt['K.n'], pt['vinf']),\n layer='points_nV', style='bx', name='vinf_pt')\n if vfp:\n plotter.add_point(Point2D(vfp['n'], vfp['v']), layer='points_nV',\n name='vfp_pt', style={'color': 'y', 'marker': 'o',\n 'markersize': 5})\n\n d = fig_struct.layers['nullclines_nV'].data\n\n if not preComputed and gui._mouseUp:\n print(\"\\nComputing phase plane...\")\n print(\" Current time = %.4f\" % (time))\n\n if self.nullx is None or force:\n # compute m nullcline this once\n only_var = None\n 
else:\n only_var = 'V'\n\n # refresh wait notification\n ax.text(0.05, 0.95, 'wait', transform=ax.transAxes, fontsize=22,\n color='r', fontweight='bold', va='top')\n gui.masterWin.canvas.draw()\n\n # comment out for testing - use surrogate below\n nulls = computePPlaneObjects(gen, 'K.n', 'V', state=pt,\n num_x_points=self.num_x_points,\n num_y_points=self.num_y_points,\n only_var=only_var, with_jac=with_jac,\n do_fps=do_fps, fast_vars=fast_vars,\n subdomain={'V': sc[1],\n 'K.n': sc[0]})\n\n # Surrogate data - much faster to test with\n #self.nully = [[-100+time, -50+time/10., 0], [0.1, 0.4, 0.8]]\n #self.nullx = [[-130, -80, 50], [0.2, 0.3, 0.4]]\n\n self.nully = castNullArray(nulls['nullcY'])\n plotter.add_data(self.nully, layer='nullclines_nV', style=self.nullcY_style,\n name='yNull_'+str(time), force=force)\n\n # delete update 'wait' notice\n ax.texts = []\n #ax.clear()\n gui.clear_axes(ax)\n\n if only_var is None:\n # nullx is added second so will be the second line\n self.nullx = castNullArray(nulls['nullcX'])\n plotter.add_data(self.nullx, layer='nullclines_nV',\n style=self.nullcX_style,\n name='xNull', force=force)\n\n #if force:\n # rescale = sc\n #else:\n # rescale = None\n gui.build_layers(['nullclines_nV', 'points_nV', 'state_vel_nV', 'vfp_nV'],\n ax, rescale=sc, figure='Master')\n\n self.last_scale = sc\n print(\" Phase plane rebuild completed.\\n\")\n else:\n # just refresh display with the current selected data\n gui.clear_axes(ax)\n #if force:\n # rescale = sc\n #else:\n # rescale = None\n gui.build_layers(['nullclines_nV', 'points_nV', 'state_vel_nV', 'vfp_nV'],\n ax, rescale=sc, figure='Master')\n self.last_scale = sc\n\n gui.masterWin.canvas.draw()\n self.first_call = False", "def lla(self, input_poly):\n # check the input\n if type(input_poly) is not Polygon:\n #if not isinstance(input_poly, Polygon):\n # if we weren't given a polygon, turn the coordinates into one\n if (type(input_poly) is np.ndarray) or (type(input_poly) is list):\n input_poly = Polygon(input_poly)\n else:\n return\n # set the internal value for lla shape\n self._lla_shape = input_poly\n # get the vertex coordinates for the lla shape\n lla_coords_temp = np.array(self._lla_shape.exterior.xy).T\n # add a column of zeros for altitude (shapely is 2D..)\n lla_coords = np.zeros((lla_coords_temp.shape[0], 3))\n lla_coords[:,:-1] = lla_coords_temp\n # convert lla vertices to ned\n ned_coords = lla2ned(lla_coords, self._ref_pt)\n # make the ned shape out of these coordinates\n ned_exterior = Polygon(ned_coords)\n\n # make a unified shape for the keep out zones\n keep_out_list = []\n for shape in input_poly.interiors:\n # convert keepout coords to ned\n shape = Polygon(shape)\n lla_coords_temp = np.array(shape.exterior.xy).T\n # add a column of zeros for altitude (shapely is 2D..)\n lla_coords = np.zeros((lla_coords_temp.shape[0], 3))\n lla_coords[:,:-1] = lla_coords_temp\n # convert lla vertices to ned\n ned_coords = lla2ned(lla_coords, self._ref_pt)\n # add this region to the list\n keep_out_list.append(Polygon(ned_coords) )\n keep_out = cascaded_union(keep_out_list)\n\n # now make a valid mission area polygon\n self._ned_shape = ned_exterior.difference(keep_out)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes linac phasing lines to a Bmad file. Requires epics (or proxy object).
def write_bmad_linac_phasing_lines(filePath='linac_settings.bmad', epics=None, verbose=False): lines = bmad_linac_phasing_lines(epics) with open(filePath, 'w') as f: for l in lines: f.write(l+'\n') if verbose: print('Written:', filePath)
[ "def write_tao_BC_and_LEM_lines(filePath='LEM_settings.tao', epics=None, verbose=False):\n lines = tao_BC_and_LEM_lines(epics)\n with open(filePath, 'w') as f:\n for l in lines:\n f.write(l+'\\n')\n if verbose:\n print('Written:', filePath)\n\n \n \n return lines", "def bmad_linac_phasing_lines(epics):\n lines = [\n '! Linac overall phasing',\n 'O_L1[phase_deg] = 0 ! K21_1 sets this directly. This is a delta on top of that.', \n 'O_L2[phase_deg] = '+str(epics.caget('SIOC:SYS0:ML00:CALC204')),\n 'O_L3[phase_deg] = '+str(epics.caget('SIOC:SYS0:ML00:AO499'))\n ]\n return lines", "def writelines(self, seq):\n for line in seq:\n self.write(line)", "def writeLines(self,lines):\n raise NotImplementedError", "def writelines(self, lines):\n for line in lines:\n self.write(line)", "def writelines(self, seq):\n for line in seq:\n self.write(line)", "def put(output_file, lines):\n with open(output_file, \"w\") as outf:\n outf.writelines(lines)", "def WriteLines(lines, fname):\n fh = open(fname, 'w')\n for l in lines:\n print >>fh, l\n fh.close()", "def _write_lines_to_file(cls, filename, lines, metadata_dict=None):\n write_lines_to_file(cls.__name__, filename, lines, metadata_dict)", "def write_itp(atom_types,distances,all_bonds,all_types,molname,charges,elements):\n\n mass_file = open('atom_masses.dat','r') #read atom masses from file\n\n atom_masses = []\n masses_elements = []\n\n for line in mass_file:\n line_content = line.strip('\\n').split()\n masses_elements.append(line_content[0])\n atom_masses.append(line_content[1]) \n\n mass_file.close()\n\n itp_lines = []\n cnt = 1\n for i in range(len(atom_types)):\n if (atom_types[i] != 'Hw') and (atom_types[i] != 'Ow'):\n atom_name = elements[i]+str(cnt)\n space1 = (4-len(str(cnt)))*' '\n space2 = (9-len(atom_types[i]))*' '\n space3 = (7-len('1'))*' '\n space4 = (12-len(molname))*' '\n space5 = (9-len(atom_name))*' '\n space6 = (8-len(str(cnt)))*' '\n space7 = (13-len(format(charges[i],'.5f')))*' '\n for j in range(len(atom_masses)):\n if elements[i] == masses_elements[j]:\n #space8 = (8-len(str(atom_masses[j])))*' '\n mass = format(float(atom_masses[j]),'.5f')\n itp_lines.append(space1+str(cnt)+space2+atom_types[i]+space3+'1'+space4+molname+space5+atom_name+space6+str(cnt)+space7+format(charges[i],'.5f')+' '+mass+'\\n')\n cnt += 1\n\n #create bond section in itp file\n bonded_pairs = []\n\n for bond in all_bonds:\n for i in range(len(distances[0])):\n for j in range(i+1,len(distances[0])):\n if bond[0] == atom_types[i] and bond[1] == atom_types[j] and distances[i][j] <= float(bond[2]):\n space1 = (3-len(str(i+1)))*' '\n space2 = (10-len(str(j+1)))*' '\n space3 = 6*' '\n bonded_pairs.append(space1+str(i+1)+space2+str(j+1)+space3+'1\\n')\n elif bond[0] == atom_types[j] and bond[1] == atom_types[i] and distances[i][j] <= float(bond[2]):\n space1 = (3-len(str(i+1)))*' '\n space2 = (10-len(str(j+1)))*' '\n space3 = 6*' '\n bonded_pairs.append(space1+str(i+1)+space2+str(j+1)+space3+'1\\n')\n\n\n\n itp_file = open('{}.itp'.format(molname),'w') #open itp file to write\n\n itp_file.write('[ moleculetype ]\\n') \n itp_file.write('; molname nrexcl\\n')\n itp_file.write('{} 1\\n'.format(molname))\n itp_file.write('\\n')\n itp_file.write('[ atoms ]\\n')\n itp_file.write('; nr atype resnr resname atname cgnr NAC mass desc\\n')\n\n for x in itp_lines:\n itp_file.write(x)\n \n itp_file.write('\\n[ bonds ]\\n')\n itp_file.write('; i j func desc\\n')\n for y in bonded_pairs:\n itp_file.write(y)\n\n\n itp_file.close()\n\n return", "def write_lines(self, lines, 
encoding='None', errors=\"'strict'\", linesep=\"'\\\\r\\\\n'\", append='False'):\n \n pass", "def write_lines(list_of_lines, file):\r\n for i in range(0, len(list_of_lines)):\r\n file.write(list_of_lines[i] + b\"\\n\")", "def writelines(lines, filename, encoding='utf-8', mode='wb'):\r\n return write(os.linesep.join(lines), filename, encoding, mode)", "def write2lines(myItrb, out_fn):\n with open(out_fn, 'w') as writer:\n for item in myItrb:\n writer.write(str(item)+'\\n')", "def write_partition(self):\n \n# np_pv = np.array(self.part_vert,dtype=np.int32);\n# fn = 'parts.lbm';\n# np_pv.astype('int32').tofile(fn)\n parts = open('parts.lbm','w')\n for p in self.part_vert:\n parts.write('%d \\n'% p)\n\n parts.close()", "def writeKinetics(CoM_output,kinetics):\n np.save(CoM_output,kinetics)", "def write_file(file_cc,file_bim,outfile):\n\n print \"Reading ...\"\n data_cc = get_data_cc(file_cc)\n data_bim = get_data_bim(file_bim)\n\n rs = np.unique(np.append( data_cc['SNP1'],data_cc['SNP2'] ))\n\n shape_data=np.shape(data_cc)\n\n ofile = open(outfile,'w') # open file for writing \n print \"Writing ...\"\n for i in range(shape_data[0]):\n\n ofile.write(str(data_cc[i]['CHR1'])+\" \"\n +str(data_cc[i]['SNP1'])+\" \"\n +str(data_cc[i]['CHR2'])+\" \"\n +str(data_cc[i]['SNP2'])+\" \"\n +str(data_cc[i]['OR_INT'])+\" \"\n +str(data_cc[i]['STAT'])+\" \"\n +str(data_cc[i]['P'])+\" \" \n +str(data_bim['position'][data_bim['rs']==data_cc[i]['SNP1']][0])+\" \" #position1\n +str(data_bim['position'][data_bim['rs']==data_cc[i]['SNP2']][0])+\" \" #position2 \n +str(np.flatnonzero(rs==data_cc[i]['SNP1'])[0])+\" \" #id1\n +str(np.flatnonzero(rs==data_cc[i]['SNP2'])[0])+\" \" #id2 np.flatnonzero(\n +\"\\n\" )\n \n ofile.close", "def write_file(self, records):\n ...", "def compile_alignments_bookworm(as_ep_ids, file_name):\n counter = 0\n # db = db_connect()\n db = Postgres_Connect().connection\n as_client = init_as_client()\n make_bw_directories(file_name)\n ts_csv = load_timesteps(file_name)\n trans_dict = find_episode_transcript_ids(as_ep_ids)\n\n for (ep_num, trans_id) in trans_dict.items():\n phoneme_file = \\\n './alignment_data/alignments_json/{}_seg*.json'.format(ep_num)\n\n if len(glob.glob(phoneme_file)) == 0:\n print('No phoneme alignments for episode' \\\n ' {}. Skipping.'.format(ep_num))\n continue\n\n transcript = compile_episode_transcript(trans_id, db)\n meta = collect_episode_metadata(db, ep_num, as_client)\n\n if len(transcript) == 0:\n print('Unable to find transcript ID {} ' \\\n 'in AS database. Skipping.'.format(trans_id))\n continue\n\n phoneme_transcript = []\n\n for fp in np.sort(glob.glob(phoneme_file)):\n phoneme_transcript = phoneme_transcript + \\\n compile_phoneme_transcript(fp, transcript)\n\n print('Writing BW entry for {}'.format(phoneme_file))\n\n # phoneme_transcript[idx] =\n # [phoneme sentence, transcript sentence, transcript index]\n phoneme_transcript = np.asarray(phoneme_transcript)\n counter = write_bw_catalog(transcript, phoneme_transcript,\n counter, ep_num, meta, file_name)\n write_field_descriptions(file_name)\n\n shutil.move('./metadata', './' + file_name)\n shutil.move('./texts', './' + file_name)\n\n os.mkdir('./' + file_name + '_bookworm')\n\n shutil.move('./' + file_name, './' + file_name + '_bookworm')\n shutil.move('./' + file_name + '_phonemes', './' + file_name + '_bookworm')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes tao LEM lines to a .tao file. Requires epics (or proxy object).
def write_tao_BC_and_LEM_lines(filePath='LEM_settings.tao', epics=None, verbose=False): lines = tao_BC_and_LEM_lines(epics) with open(filePath, 'w') as f: for l in lines: f.write(l+'\n') if verbose: print('Written:', filePath) return lines
[ "def write_bmad_linac_phasing_lines(filePath='linac_settings.bmad', epics=None, verbose=False):\n lines = bmad_linac_phasing_lines(epics)\n with open(filePath, 'w') as f:\n for l in lines:\n f.write(l+'\\n')\n if verbose:\n print('Written:', filePath)", "def write_ead(self):\n fileio.write_text(self.dump_ead(), self.ead_path)", "def output_otis_elevation(elevation_file,h,xlim,ylim,constituents):\n fid = open(elevation_file,'wb')\n ny,nx,nc = np.shape(h)\n #-- length of header: allow for 4 character >i c_id strings\n header_length = 4*(7 + nc)\n fid.write(struct.pack('>i',header_length))\n fid.write(struct.pack('>i',nx))\n fid.write(struct.pack('>i',ny))\n fid.write(struct.pack('>i',nc))\n ylim.tofile(fid,format='>f4')\n xlim.tofile(fid,format='>f4')\n for c in constituents:\n fid.write(c.ljust(4).encode('utf-8'))\n fid.write(struct.pack('>i',header_length))\n #-- write each constituent to file\n constituent_header = 8*nx*ny\n for ic in range(nc):\n fid.write(struct.pack('>i',constituent_header))\n for m in range(ny):\n temp = np.zeros((2*nx),dtype='>f')\n temp[0:2*nx-1:2] = h.real[m,:,ic]\n temp[1:2*nx:2] = h.imag[m,:,ic]\n temp.tofile(fid,format='>f4')\n fid.write(struct.pack('>i',constituent_header))\n #-- close the output OTIS file\n fid.close()", "def write_tiem_orbit_files(self): \n # solver settings file\n self.solver_settings.write_to_file(self.analysis_name, directory=self.directory) \n\n # event file\n self.event.parameters['Event_Name'] = self.analysis_name\n self.event.write_to_file(directory=self.directory)\n \n # string file\n self.string.parameters['Event_Property_File'] = os.path.split(self.event.filename)[1]\n self.string.parameters['ModelName'] = self.analysis_name\n self.string.parameters['OutputName'] = self.analysis_name\n abs_str_file = self.string.write_to_file(directory=self.directory, publish=True) \n self.string_filename = os.path.relpath(abs_str_file, self.directory)", "def writeLines(self,lines):\n raise NotImplementedError", "def _write_lines_to_file(cls, filename, lines, metadata_dict=None):\n write_lines_to_file(cls.__name__, filename, lines, metadata_dict)", "def save_to_MTFIT_style_file(MTs, MTp, nlloc_hyp_filename, inversion_type, outdir, MTp_absolute=[], shift_idxs=[]):\n # Get uid and stations data:\n uid, stations = get_event_uid_and_station_data_MTFIT_FORMAT_from_nonlinloc_hyp_file(nlloc_hyp_filename)\n # Write all data to output dict:\n out_dict = {}\n out_dict[\"MTs\"] = MTs\n out_dict[\"MTp\"] = MTp\n out_dict[\"uid\"] = uid\n out_dict[\"stations\"] = stations\n if len(MTp_absolute)>0:\n out_dict[\"MTp_absolute\"] = MTp_absolute\n if len(shift_idxs)>0:\n out_dict[\"shift_idxs\"] = shift_idxs\n # And save to file:\n out_fname = outdir+\"/\"+uid+\"_FW_\"+inversion_type+\".pkl\"\n print(\"Saving FW inversion to file:\", out_fname)\n pickle.dump(out_dict, open(out_fname, \"wb\"))", "def writelines(self, lines):\n for line in lines:\n self.write(line)", "def write_telluric_transmission_to_file(wls,T,outpath):\n import pickle\n print('------Saving teluric transmission to '+outpath)\n with open(outpath, 'wb') as f: pickle.dump((wls,T),f)", "def put(output_file, lines):\n with open(output_file, \"w\") as outf:\n outf.writelines(lines)", "def write_body(self):\r\n if self.arguments['--out']:\r\n self.file = open(self.arguments['--out'], \"a+\")\r\n for list_item in self.list_of_body_objects:\r\n self.file.write(list_item.line)\r\n self.file.close()\r\n else:\r\n for list_item in self.list_of_body_objects:\r\n print(list_item.line)", "def 
write_file(model_name, assets_path):\n step_filenames = glob.glob(os.path.join(assets_path, model_name + \"_step*.obj\"))\n # lines_filenames = glob.glob(os.path.join(assets_path, model_name + \"_step*_lines.txt\"))\n\n # joining .obj files\n with open(os.path.join(assets_path, model_name + \".obj\"), \"w\") as f:\n f.write(\"mtllib colors.mtl\\n\")\n for fname in step_filenames:\n with open(fname) as infile:\n for line in infile:\n f.write(line)\n\n # joining lines\n # with open(os.path.join(assets_path, model_name + \"_lines.txt\"), \"w\") as f:\n # f.write(\"mtllib colors.mtl\\n\")\n # for fname in lines_filenames:\n # with open(fname) as infile:\n # for line in infile:\n # f.write(line)", "def to_file(self, elf_file):\n elf_file.write(self.ehdr)\n off = ctypes.sizeof(self.ehdr) + \\\n len(self.segments) * ctypes.sizeof(self.segments[0])\n\n for phdr in self.segments:\n phdr.p_offset = off\n elf_file.write(phdr)\n off += phdr.p_filesz\n\n for note in self.notes:\n elf_file.write(note)", "def write_file(l_dta, outputfile):\n l_dta2 = []\n for row in l_dta:\n s = '\\t'.join(row)\n l_dta2.append(s)\n s_dta = \"\\r\\n\".join(l_dta2)\n try:\n with open(outputfile, 'w') as fd:\n fd.write(s_dta)\n except (IOError,) as e:\n tracker()\n return None", "def test_file_write_attributes_for_each(self):\n\n with OrthoMultiTs(self.testfilename, n_loc=3, mode=\"w\") as dataset:\n n_data = 5\n locations = np.array([1, 2, 3])\n data = {\n \"test\": np.arange(n_data * 3).reshape(3, n_data),\n \"test2\": np.arange(n_data * 3).reshape(3, n_data)\n }\n base = datetime(2007, 1, n_data)\n dates = np.array(\n [base + timedelta(hours=i) for i in range(n_data)])\n descriptions = np.repeat([str(\"station\")], 3).tolist()\n\n dataset.write_all(locations,\n data,\n dates,\n loc_descrs=descriptions,\n lons=np.arange(3),\n lats=np.arange(3),\n alts=np.arange(3),\n attributes={\n \"test\": {\n \"testattribute\": \"teststring\"\n },\n \"test2\": {\n \"testattribute2\": \"teststring2\"\n }\n })\n\n with OrthoMultiTs(self.testfilename) as dataset:\n data = dataset.read_all(2)\n nptest.assert_array_equal(data[\"test\"], np.arange(5) + 5)\n assert dataset.dataset.variables[\n \"test\"].testattribute == \"teststring\"\n assert dataset.dataset.variables[\n \"test2\"].testattribute2 == \"teststring2\"\n test_dates = []\n for n_data in [5]:\n base = datetime(2007, 1, n_data)\n test_dates.append(\n np.array(\n [base + timedelta(hours=i) for i in range(n_data)]))\n dates = np.concatenate(test_dates)\n nptest.assert_array_equal(data[\"time\"], dates)", "def writeTazFile(tazFile_path, zones_path, tazs, zones_req=None, offset=(0,0)):\n offset = np.array(offset)\n ## read geojson\n zones = gpd.read_file(zones_path)\n ## Open output file\n fout = open(tazFile_path,'w')\n # Header\n fout.write(\"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n\\n\")\n fout.write(\"<additional xmlns:xsi=\\\"http://www.w3.org/2001/XMLSchema-instance\\\" xsi:noNamespaceSchemaLocation=\\\"http://sumo.dlr.de/xsd/additional_file.xsd\\\">\\n\")\n\n colours = [\"blue\", \"red\", \"green\"]\n n = len(zones.OBJECTID)\n\n if zones_req == None:\n zones_req = [a for a in range(n)]\n\n # For all the geometries in the geojson\n for i in zones_req:\n arr = np.array(mapping(zones.geometry[i-1])[\"coordinates\"][0]) # get coords\n if type(arr[0][0]) == np.float64: # if it's one piece\n fout.write(\"<taz id=\\\"taz_{0}\\\" color=\\\"{1}\\\" shape=\\\"\".format(i, colours[i%3]))\n\n to_print = arr[0] + offset\n fout.write(\"{0},{1}\".format(to_print[0], 
to_print[1]))\n\n for a in arr[1:]:\n to_print = a + offset\n fout.write(\" {0},{1}\".format(to_print[0], to_print[1]))\n fout.write(\"\\\">\\n\")\n else: # if there are more then one pieces\n #print(i+1)\n e=0\n for e, b in enumerate(np.array(mapping(zones.geometry[i])[\"coordinates\"])):\n fout.write(\"<taz id=\\\"taz_{0}#{2}\\\" color=\\\"{1}\\\" shape=\\\"\" .format(i+1, colours[i%3], e))\n bb = np.array(b[0])\n\n to_print = bb[0] + offset\n fout.write(\"{0},{1}\".format(to_print[0], to_print[1]))\n\n for a in bb[1:]:\n to_print = a + offset\n fout.write(\" {0},{1}\".format(to_print[0], to_print[1]))\n fout.write(\"\\\">\\n\")\n if e < len(mapping(zones.geometry[i])[\"coordinates\"])-1:\n fout.write(\"</taz>\\n\")\n # Write edges\n for lane in tazs[i]:\n fout.write(\"<tazSource weight=\\\"1.00\\\" id=\\\"{0}\\\"/> \\n\".format(lane))\n fout.write(\"<tazSink weight=\\\"1.00\\\" id=\\\"{0}\\\"/> \\n\".format(lane))\n fout.write(\"</taz>\\n\")\n\n fout.write(\"</additional>\\n\\n\") # Clousure\n\n fout.close()", "def WriteLines(lines, fname):\n fh = open(fname, 'w')\n for l in lines:\n print >>fh, l\n fh.close()", "def create_telemetry_file():\n loginfo(\"Creating telem file if it doesn't exist...\")\n with open(HAB_TELEM_FILE, \"w\"):\n pass", "def maketopo():\n outfile= \"landslide.topotype2\" \n topotools.topo2writer(outfile,topo,xlower,xupper,ylower,yupper,nxpoints,nypoints)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to get the credentials from ~/.mofplusrc
def credentials_from_rc(self): mprc_filename = os.environ["HOME"]+'/.mofplusrc' with open(mprc_filename, 'r') as mprc: username = mprc.readline().split()[0] pw = mprc.readline().split()[0] return username, pw
[ "def load_credentials():\n import os\n from dotenv import load_dotenv\n load_dotenv()\n return (\n os.getenv('MARKLOGIC_URL'),\n os.getenv('MARKLOGIC_USERNAME'),\n os.getenv('MARKLOGIC_PASSWORD')\n )", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,'credentials.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n creds = {'username': os.environ.get('OS_USERNAME'),\n 'password': os.environ.get('OS_PASSWORD'),\n 'auth_url': os.environ.get('OS_AUTH_URL'),\n 'project_name': os.environ.get('OS_PROJECT_NAME')\n }\n\n if os.getenv('OS_USER_DOMAIN_NAME'):\n creds['user_domain_name'] = os.getenv('OS_USER_DOMAIN_NAME')\n if os.getenv('OS_PROJECT_DOMAIN_NAME'):\n creds['project_domain_name'] = os.getenv('OS_PROJECT_DOMAIN_NAME')\n\n return creds", "def get_credentials(prefs_file):\n with open(prefs_file, \"rb\") as pl:\n if six.PY2:\n prefs = plistlib.readPlist(pl)\n else:\n prefs = plistlib.load(pl)\n\n try:\n jamf_url = prefs[\"JSS_URL\"]\n except KeyError:\n jamf_url = \"\"\n try:\n jamf_user = prefs[\"API_USERNAME\"]\n except KeyError:\n jamf_user = \"\"\n try:\n jamf_password = prefs[\"API_PASSWORD\"]\n except KeyError:\n jamf_password = \"\"\n return jamf_url, jamf_user, jamf_password", "def credentials_from_config(path):\n username = None\n password = None\n\n return (username, password)", "def get_credentials():\r\n global _credentials\r\n if ('username' in _credentials) and ('api_key' in _credentials):\r\n return copy.copy(_credentials)\r\n else:\r\n return tools.get_credentials_file()", "def load_credentials(self):\n if self.rc_file is None:\n return\n config = configparser.ConfigParser()\n rc = os.path.expanduser(self.rc_file)\n if os.path.exists(rc):\n config.read(rc)\n trace(1, \"load credentials from\", rc)\n try:\n self.auth(\n config[\"netatmo\"][\"client_id\"],\n config[\"netatmo\"][\"client_secret\"],\n config[\"netatmo\"][\"username\"],\n config[\"netatmo\"][\"password\"],\n )\n if config.has_option(\"netatmo\", \"default_device_id\"):\n self.default_device_id = config[\"netatmo\"][\"default_device_id\"]\n except:\n self.auth(None, None, None, None)", "def creds():\n return obj_utils.Creds()", "def _get_creds(self):\n config_file = \"\"\n if os.getenv(\"DOCKER_CONFIG\") is not None:\n config_file = os.path.join(os.getenv(\"DOCKER_CONFIG\"), \"config.json\")\n else:\n config_file = os.path.expanduser(os.path.join(\"~\", \".docker\", \"config.json\"))\n if not os.path.exists(config_file):\n config_file = os.path.expanduser(os.path.join(\"~\", \".dockercfg\"))\n if not os.path.exists(config_file):\n return None\n with open(os.path.expanduser(config_file), \"r\") as file:\n config = json.load(file)\n try:\n return config['auths'][re.sub(\"^https?://\", \"\", self._registry)]['auth']\n except KeyError:\n pass\n return None", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not 
os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n store = Storage(CREDENTIAL_PATH)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, None)\n return credentials", "def _config_credentials_get():\n user = input(\"username:\")\n password = getpass.getpass()\n url = input(\"url:\")\n return user, password, url", "def get_credentials(self):\n if getattr(self, 'credentials', None):\n return self.credentials\n\n scopes = settings.SCOPES\n client_secret_file = settings.CLIENT_SECRET_FILE\n application_name = 'Google Sheets API Python Quickstart'\n\n home_dir = os.path.expanduser(settings.CREDENTIALS_DIRECTORY)\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(client_secret_file, scopes)\n flow.user_agent = application_name\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n # print('Storing credentials to ' + credential_path)\n return credentials", "def _get_credentials(self):\n if self.config_file:\n with open(self.config_file) as f:\n config_str = f.read()\n credentials_dict = json.loads(config_str)\n self.credentials = credentials_dict[self.account][self.auth_type]\n else:\n self.credentials = {\n \"account\": os.environ.get('SNOWSQL_ACCOUNT'),\n \"user\": os.environ.get('SNOWSQL_USER'),\n \"password\": os.environ.get('SNOWSQL_PWD')\n }", "def fetch_creds():\n pass", "def get_credentials():\n try:\n netrc_path = netrc.path()\n auths = netrc(netrc_path).authenticators(\n urlparse(solvebio.api_host).netloc)\n except (IOError, TypeError, NetrcParseError) as e:\n raise CredentialsError(\n 'Could not open credentials file: ' + str(e))\n\n if auths:\n return (auths[0], auths[2])\n else:\n return None", "def get_credential():\n credential = get_token_from_environment() \\\n or get_user_basic_auth_from_environment() \\\n or get_datasource_basic_auth_from_environment()\n return credential", "def __get_credentials():\n username = os.getenv('username')\n password = os.getenv('password')\n if not username or not password:\n raise ValueError('Username or Password value is not set in .env file')\n return username, password", "def get_credentials(self):\n return self.credentials" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to get the credentials from the command line
def credentials_from_cmd(self): username = raw_input("Email:") pw = getpass.getpass() return username, pw
[ "def request_credentials_from_console():\n username = raw_input('Username: ')\n password = raw_input('Password: ')\n return username, password", "def credentials(args):\n\n if args.helper_command != 'get':\n # From api-credentials.txt:\n # For a `store` or `erase` operation, the helper's output is ignored.\n # If it fails to perform the requested operation, it may complain to\n # stderr to inform the user. If it does not support the requested\n # operation (e.g., a read-only store), it should silently ignore the\n # request.\n return 0\n\n git_config = get_git_config()\n\n for k in ('bz.username', 'bz.apikey'):\n if k not in git_config:\n raise AbortError('%s config variable must be defined; '\n 'run `git mozreview configure`' % k)\n\n username = git_config['bz.username']\n apikey = git_config['bz.apikey']\n\n for line in sys.stdin:\n print(line.rstrip('\\n'))\n print('username=%s' % username)\n print('password=%s' % apikey)\n return 0", "def _config_credentials_get():\n user = input(\"username:\")\n password = getpass.getpass()\n url = input(\"url:\")\n return user, password, url", "def credentials():\n username, password, _ = input().lstrip('[').split(';')\n return username, password", "def get_credentials(options, environment):\n if options[\"--username\"] or options[\"--auth\"]:\n if not options[\"--username\"]:\n options[\"<username>\"] = lib.prompt(\n \"Please enter the username for %s...\" % environment\n )\n if not options[\"--password\"]:\n options[\"<password>\"] = lib.prompt(\n \"Please enter the password for %s...\" % environment, secret=True\n )\n return options", "def GetCreds():\n\n _username = input(\"Router username: \")\n _password = getpass(\"Password for {}: \".format(_username))\n return [_username, _password]", "def get_creds_from_args(args):\n if args.prefs:\n (jamf_url, jamf_user, jamf_password) = get_credentials(args.prefs)\n else:\n jamf_url = \"\"\n jamf_user = \"\"\n jamf_password = \"\"\n\n # CLI arguments override any values from a prefs file\n if args.url:\n jamf_url = args.url\n elif not jamf_url:\n jamf_url = input(\"Enter Jamf Pro Server URL : \")\n if args.user:\n jamf_user = args.user\n elif not jamf_user:\n jamf_user = input(\n \"Enter a Jamf Pro user with API rights to upload a package : \"\n )\n if args.password:\n jamf_password = args.password\n elif not jamf_password:\n jamf_password = getpass.getpass(\n \"Enter the password for '{}' : \".format(jamf_user)\n )\n\n # encode the username and password into a basic auth b64 encoded string so that we can get the session token\n enc_creds = encode_creds(jamf_user, jamf_password)\n\n return jamf_url, jamf_user, jamf_password, enc_creds", "def __get_credentials():\n username = os.getenv('username')\n password = os.getenv('password')\n if not username or not password:\n raise ValueError('Username or Password value is not set in .env file')\n return username, password", "def get_credentials(args):\n\n username = args.get('username')\n password = args.get('password')\n\n try:\n password = base64.b64decode(password)\n except:\n raise ValueError(\"Password is not properly Base64 encoded.\")\n\n return username, password", "def credentials(cloud='devstack-admin'):\n return get_cloud_config(cloud=cloud).get_auth_args()", "def obtain_credentials(self, username=None):\n if username:\n print \"Using stored username {0}\".format(username)\n else:\n username = raw_input(\"GitHub Username: \")\n password = getpass.getpass(prompt=\"Password: \")\n return (username, password)", "def get_credentials():\n creds = 
{'username': os.environ.get('OS_USERNAME'),\n 'password': os.environ.get('OS_PASSWORD'),\n 'auth_url': os.environ.get('OS_AUTH_URL'),\n 'project_name': os.environ.get('OS_PROJECT_NAME')\n }\n\n if os.getenv('OS_USER_DOMAIN_NAME'):\n creds['user_domain_name'] = os.getenv('OS_USER_DOMAIN_NAME')\n if os.getenv('OS_PROJECT_DOMAIN_NAME'):\n creds['project_domain_name'] = os.getenv('OS_PROJECT_DOMAIN_NAME')\n\n return creds", "def credentials_from_config(path):\n username = None\n password = None\n\n return (username, password)", "def load_credentials():\n import os\n from dotenv import load_dotenv\n load_dotenv()\n return (\n os.getenv('MARKLOGIC_URL'),\n os.getenv('MARKLOGIC_USERNAME'),\n os.getenv('MARKLOGIC_PASSWORD')\n )", "def get_credential():\n credential = get_token_from_environment() \\\n or get_user_basic_auth_from_environment() \\\n or get_datasource_basic_auth_from_environment()\n return credential", "def creds():\n return obj_utils.Creds()", "def fetch_creds():\n pass", "def get_credentials():\r\n global _credentials\r\n if ('username' in _credentials) and ('api_key' in _credentials):\r\n return copy.copy(_credentials)\r\n else:\r\n return tools.get_credentials_file()", "def get_credentials_file(*args):\r\n ensure_local_plotly_files_exist()\r\n return utils.load_json(CREDENTIALS_FILE, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints the MFP banner
def print_banner(self): print ":##::::'##::'#######::'########:::::::::::::::'###::::'########::'####:\n\ :###::'###:'##.... ##: ##.....::::'##::::::::'## ##::: ##.... ##:. ##::\n\ :####'####: ##:::: ##: ##::::::::: ##:::::::'##:. ##:: ##:::: ##:: ##::\n\ :## ### ##: ##:::: ##: ######:::'######::::'##:::. ##: ########::: ##::\n\ :##. #: ##: ##:::: ##: ##...::::.. ##.::::: #########: ##.....:::: ##::\n\ :##:.:: ##: ##:::: ##: ##::::::::: ##:::::: ##.... ##: ##::::::::: ##::\n\ :##:::: ##:. #######:: ##:::::::::..::::::: ##:::: ##: ##::::::::'####:\n\ :..:::::..:::.......:::..:::::::::::::::::::..:::::..::..:::::::::....:"
[ "def banner():\n\n print(\n r\"\"\"\n ____ ____ ____ ____\n/ \\ / \\ / \\ / \\\n| ################################# |\n\\__#_/ \\____/ \\____/ \\_#__/\n # _ _______ _____ #\n # (_) |_ __ \\|_ _| # -*- v 2.0 -*-\n # __ ______ | |__) | | | #\n Y [ ||______|| ___/ | | # A Universal Force Engine\n 0 0 | | _| |_ _| |_ #\n # [___] |_____| |_____| #\n __#_ ____ ____ _#__\n/ # \\ / \\ / \\ / # \\\n| ################################# |\n\\____/ \\____/ \\____/ \\____/\n\n \"\"\"\n )", "def print_banner():\n print(\"\"\"\n .-\"\"-.\n (___/\\ \\\\\n ( |' ' ) ) \"\"\"\n + \"\\tEphe v\" + __version__ +\n \"\"\"\n __) _\\=_/ (\n ____(__._ ` \\ )\n .(/8-.._.88, ; (\n / /8. `88., | )\n_.`'---.._/ /.8_ ____.'_| |_/\n'-'``'-._ / | `-........'\n `;-\"`; |\"\"\"\n + 6 * \"\\t\" + \"Dionach Ltd\" + \"\"\"\n `'.__/\"\"\")", "def show_banner():\n print(\"\"\"\n _ _ _ _ _____ _______\n| | | | / \\ | | |_ _\\ \\ / / ____|\n| |_| | / _ \\ | | | | \\ \\ / /| _|\n| _ |/ ___ \\| |___ | | \\ V / | |___\n|_| |_/_/ \\_\\_____|___| \\_/ |_____|\n\n\nA super fast asynchronous http and https prober, to check who is (h)alive.\nDeveloped by gnc\n \"\"\")", "def _print_banner(out_file, banner_text):\n banner_separator = \"\".ljust(len(banner_text), \"=\")\n\n out_file.write(\"\\n{}\\n{}\\n{}\\n\".format(\n banner_separator,\n banner_text,\n banner_separator))", "def banner():\n print(\"\\033[32m\")\n print(\" ___ _ ___ _ _ _\")\n print(\" | _ )_ _ _ _| |_ ___ | __| |_ ___ _ _ _ _ __ _| | | | ___ ___ _ __\")\n print(\" | _ \\ '_| || | _/ -_) | _|| _/ -_) '_| ' \\/ _` | | | |__/ _ \\/ _ \\ '_ \\\\\")\n print(\" |___/_| \\_,_|\\__\\___| |___|\\__\\___|_| |_||_\\__,_|_|_|____\\___/\\___/ .__/\")\n print(\" |___| |_|\")\n print(\"\\033[0m\")", "def banner(self):\n from sage.misc.banner import banner_text\n return banner_text()", "def display_banner():\n msg = 'AWESOME camelCaseGenerator PROGRAM'\n stars = '*' * len(msg)\n print(f'\\n{stars} \\n{msg} \\n{stars} \\n')", "def banner(auth=None):\n\n mate_banner = \"\"\"\n __ __ _ _____ ___ __ __ _ _____\n| \\/ | / \\|_ _/ _ \\| \\/ | / \\|_ _|\n| |\\/| | / _ \\ | || | | | |\\/| | / _ \\ | |\n| | | |/ ___ \\| || |_| | | | |/ ___ \\| |\n|_| |_/_/ \\_\\_| \\___/|_| |_/_/ \\_\\_|\n\"\"\" # noqa: W605 (Invalid escape sequence)\n os.system('clear')\n log(mate_banner)\n if auth is not None:\n if auth[\"user\"][\"admin\"] is True:\n log(\"Hi %s, current credits: %.2f Euro. 
You are admin!\\n\" % (auth[\"user\"][\"username\"], auth[\"user\"][\"credits\"] / 100))\n else:\n log(\"Hi %s, current credits: %.2f Euro.\\n\" % (auth[\"user\"][\"username\"], auth[\"user\"][\"credits\"] / 100))\n\n return True", "def banner(message, border = '-'):\n line = border * len(message)\n print(line)\n print(message)\n print(line)", "def banner(*args):\n return _ida_kernwin.banner(*args)", "def show_banner():\n from x84.bbs import showart, echo, getterminal\n import os\n\n artfile = os.path.join(os.path.dirname(__file__), 'art', 'main.ans')\n\n # displays a centered main menu header in topaz encoding for utf8\n for line in showart(artfile, 'topaz', center=True):\n echo(line)", "def init_banner(self):\n if self.display_banner and self.interact:\n self.shell.show_banner()\n # Make sure there is a space below the banner.\n if self.log_level <= logging.INFO: print()", "def _show_welcome_banner() -> None:\r\n print(\"Welcome to the connectfour client!\")\r\n print()\r\n print(\"Please play game with your username.\")\r\n print()", "def print_banner(dog=True):\n banner = \"\"\n if dog:\n banner += \" ____,'`-,\\n\"\n banner += \" _,--' ,/::.;\\n\"\n banner += \" ,-' ,/::,' `---.___ ___,_\\n\"\n banner += \" | ,:';:/ ;'\\\"';\\\"`--./ ,-^.;--.\\n\"\n banner += \" |: ,:';,' ' `. ;` `-.\\n\"\n banner += \" \\\\:.,:::/;/ -:. ` | ` `-.\\n\"\n banner += \" \\\\:::,'//__.; ,; , , :.`-. :. | ; :.\\n\"\n banner += \" \\\\,',';/O)^. :' ; : '__` ` :::`. .:' )\\n\"\n banner += \" |,' |\\\\__,: ; ; '/O)`. :::`; ' ,'\\n\"\n banner += \" |`--'' \\\\__,' , ::::( ,'\\n\"\n banner += \" ` , `--' ,: :::,'\\\\ ,-'\\n\"\n banner += \" | ,; , ,::' ,::: |,'\\n\"\n banner += \" |,: .( ,:::| `\\n\"\n banner += \" ::'_ _ :: ,::/:|\\n\"\n banner += \" ,',' `-' \\\\ `. ,:::/,:|\\n\"\n banner += \" | : _ _ | ' ,::,' :::\\n\"\n banner += \" | \\\\ O`'O ,', , :,' ;::\\n\"\n banner += \" \\\\ `-'`--',:' ,' , ,,' ::\\n\"\n banner += \" ``:.:.__ ',-',' ::'\\n\"\n banner += \" -hrr- `--.__, ,::. ::'\\n\"\n banner += \" |: ::::. ::'\\n\"\n banner += \" |: :::::: ,::'\\n\"\n banner += \"########################################################\\n\"\n banner += \"# ruffer-overflow v0.2 #\\n\"\n banner += \"# don't \\\"bark\\\" up the wrong tree. #\\n\"\n banner += \"#======================================================#\\n\"\n banner += \"# weak-sauce tool for buffer-overflow #\\n\"\n banner += \"# please don't crime with it. #\\n\"\n banner += \"########################################################\\n\"\n print(banner)", "def banner(self, banner):\n self._banner = banner", "def banner(name):\n print \"#\"\n print \"# {0}\".format(name.encode('utf-8'))\n print \"#\"\n return name", "def my_banner(bannerString):\n print(len(bannerString) * \"!\")\n print(bannerString)\n print(len(bannerString) * \"!\")", "def banner(self):\n return self._banner", "def print_header(self):\n print()\n print(\"=\"*25)\n print()\n print(\"Have fun in your blackjack round!\")\n print()\n print(\"=\"*25)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of all BBS in the db
def get_list_of_bbs(self): return self.mfp.get_list_of_bbs()
[ "def get_all(session):\n try:\n vbds = session.xenapi.VBD.get_all()\n vbd_list = []\n\n for vbd in vbds:\n vbd_list.append(VBD(session, vbd))\n\n return vbd_list\n except Exception as e:\n print(\"VBD.get_all Exception\", e)\n return None", "def get_all_bouquets():\n with sqlite3.connect(Connection.db_path) as conn:\n # Uses the Bouquet model to represent data\n conn.row_factory = model_factory(Bouquet)\n\n db_cursor = conn.cursor()\n\n # Grabs all data in alphabetical order by name\n db_cursor.execute(\"\"\"\n SELECT\n *\n FROM bouquetapp_bouquet\n ORDER BY name ASC\n \"\"\")\n\n # list of Bouquet classes\n return db_cursor.fetchall()", "def get_all_bt(self):\n return list(self.collection.find({\"sensor_type\": \"bt\"}, {\"_id\": False})) # Return a list", "def get_bus_list():\n\n\tbuses = db.session.query(Bus.bus_name).all()\n\n \n\treturn buses", "def list(cls, context, filters=None, limit=3000, marker=1,\n sort_key='id', sort_dir='asc'):\n db_boars = cls.dbapi.get_boar_list(\n context, limit=limit, marker=marker, sort_key=sort_key,\n sort_dir=sort_dir, filters=filters)\n\n #import pdb; pdb.set_trace()\n return [Boar._from_db_object(cls(context), obj) for obj in db_boars]", "def __sync_bulbs__() -> list:\n\n bulbs = list()\n\n try:\n discovered_bulbs = discover_bulbs(timeout=2)\n except Exception as e:\n raise Exception(str(e))\n\n for bulb in discovered_bulbs:\n ip = bulb['ip']\n port = bulb['port']\n model = bulb['capabilities']['model']\n name = bulb['capabilities']['name']\n name = name if name != '' else ip\n identifier = bulb['capabilities']['id']\n\n found_bulb = Bulb(\n ip=ip,\n port=port,\n model=model\n )\n\n found_bulb.set_name(name)\n properties = found_bulb.get_properties()\n\n bulbs.append({\n 'bulb': found_bulb,\n 'name': name,\n 'model': model,\n 'ip': ip,\n 'metadata':\n {\n 'id': identifier,\n 'ip': ip,\n 'name': name,\n 'model': model,\n 'properties': properties\n }\n })\n\n return bulbs", "def list_all_breeds(self):\n self.client.get(f'{self.api_endpoint}{self.list_breeds_ep}')", "def list(self):\n\t\treturn http.db_list()", "def get_all(self):\n cursor = self._dbcon.cursor()\n cursor.execute(u\"select rowid,* from books\")\n result = cursor.fetchall()\n cursor.close()\n return [self._book_from_query_result(x) for x in result]", "def _get_bills(bill_identifier=None):\n bill = None\n if bill_identifier:\n bill = PymongoDB.get_db().find_one(db_constants.BILLS,\n queries.bill_query(bill_identifier))\n\n if bill:\n return [bill]\n else:\n if bill_identifier:\n logger.LOGGER.error(\"Bill not found: %s\" % bill_identifier)\n return PymongoDB.get_db().find(db_constants.BILLS)", "def get_DBList(self):\n db_list = []\n ori_db_list = self.DBClient.get_list_database()\n for num in range(1,len(ori_db_list)):\n db_list.append(ori_db_list[num]['name'])\n return db_list", "def breeds():\n conn = sqlite3.connect(db_path)\n conn.row_factory = dict_factory\n results = conn.execute(\"SELECT * FROM breeds\").fetchall()\n conn.close()\n return jsonify(results)", "def get_all_bank_names() -> List[str]:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from bank\"\n cursor.execute(query)\n data = cursor.fetchall()\n r_list = [x[0] for x in data]\n db.disconnect()\n return r_list", "def get_all_borrowed_books():\n return BorrowBook.query.all()", "def get_blocks(self):\n cmd = \"\"\" SELECT * FROM %s; \"\"\" %(TABLE_BLOCKCHAIN)\n\n self.__dbcursor.execute(cmd)\n return self.__dbcursor.fetchall()", "def get_bids(self) -> list:\n bids_qs = 
Bid.objects.filter(item_id=self.item).order_by('-bid_dt')\n\n bids_list = []\n for bid in bids_qs:\n bids_list.append({\n 'id': bid.id,\n 'bid_dt': utils.to_epoch(bid.bid_dt),\n 'price': bid.price,\n 'user_id': bid.user.id,\n 'user_name': bid.user_name\n })\n\n return bids_list", "def demo_get_all_books(self):\n results = []\n self.cursor.execute(\"\"\"SELECT ISBN FROM book\"\"\")\n for book in self.cursor.fetchall():\n results.append(book[0])\n return results", "def do_bay_list(cs, args):\n bays = cs.bays.list(marker=args.marker, limit=args.limit,\n sort_key=args.sort_key,\n sort_dir=args.sort_dir)\n columns = ['uuid', 'name', 'node_count', 'master_count', 'status']\n columns += utils._get_list_table_columns_and_formatters(\n args.fields, bays,\n exclude_fields=(c.lower() for c in columns))[0]\n utils.print_list(bays, columns,\n {'versions': magnum_utils.print_list_field('versions')},\n sortby_index=None)", "def all_bags(request):\n\n bags = Bags.objects.all()\n\n context = {\n 'bags': bags,\n }\n\n return render(request, 'sweets/bags.html', context)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the scaled topo file for a given supercell id.
def get_scaledtopo(self,id): lines = self.mfp.get_scaledtopo(id) return lines
[ "def path(self, id):\n return self._inode_to_path.get(id)", "def serve_gridfs_file_for_id(self, id):\n return File(id).serve(self.request)", "def get_coord_file(cls, modelid):\n if not PDBMapSwiss._modelid2info:\n raise Exception(\"PDBMapSwiss.load_swiss_INDEX_JSON must be called before using this method.\")\n d = PDBMapSwiss._modelid2info[modelid]\n uniprot_directories = d['unp']\n dash_or_end = uniprot_directories.rfind('-')\n # In the SwissModel filename system\n # The Uniprot Isoform is dropped to locate the directory\n # However,the coordinate_id quasi-guid could vary per isoform\n if (dash_or_end == -1):\n dash_or_end = len(uniprot_directories)\n return \"%s/%s/%s/%s/swissmodel/%d_%d_%s_%s.pdb\" % (\n PDBMapSwiss.swiss_dir,\n d['unp'][0:2],\n d['unp'][2:4],\n d['unp'][4:dash_or_end],\n # d['unp'][4:dash_or_end],\n int(d['start']),\n int(d['end']),\n d['template'],\n d['coordinate_id'] #\n )", "def cell_for_id(self, id):\n\t\tcell_id = (id & self.id2cell_mask) | u0xFFFFFFFF\n\t\tassert np.all(self.is_cell_id(cell_id))\n\n\t\t# TODO: Debugging (remove when happy)\n\t\tx, y, t, _ = self._xyti_from_id(id)\n\t\tcell_id2 = self._cell_id_for_xyt(x, y, t)\n\t\tassert np.all(cell_id2 == cell_id), 'cell_id2=%s cell_id=%s %s x=%s y=%s t=%s' % (cell_id2, cell_id, bin(cell_id), x, y, t)\n\n\t\treturn cell_id", "def get_topogram(self, _id):\n return self.make_request(\"GET\", \"topograms/\"+_id, {})", "def _get_disk_by_id(worker):\n cmd = (\n f\"oc debug nodes/{worker} --to-namespace={config.ENV_DATA['cluster_namespace']} \"\n f\"-- chroot /host ls -la /dev/disk/by-id/\"\n )\n return run_cmd(cmd)", "def tile_geom(id_):\n xmin = int(id_[1:4])\n ymax = int(id_[5:7])\n if id_[0] == \"W\":\n xmin *= -1\n if id_[4] == \"S\":\n ymax *= -1\n ymin = ymax - 20\n xmax = xmin + 20\n coords = ((xmin, ymax), (xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax))\n return Polygon(coords)", "def get_size_by_id(self, size_id):\n sizes = self._driver.list_sizes()\n size = [i for i in sizes if i.id == size_id][0]\n return size", "def getImage(self, id):\n selectImgById = \"\"\"SELECT crop_id, image_id, date_part('epoch', time_stamp), cropped_path, crop_coordinate_tl, crop_coordinate_br, tapped\n FROM \"\"\" + self.croppedTableName + \"\"\"\n WHERE crop_id = %s\n LIMIT 1;\"\"\"\n selectedImage = super(CroppedImageDAO, self).basicTopSelect(selectImgById, (id,))\n\n if selectedImage is None:\n return None\n return self.newModelFromRow(selectedImage)", "def import_super():\r\n super_cat = loadtxt('super.csv', delimiter=',', skiprows=1, usecols=[0, 1])\r\n id_coords = []\r\n for obj_id in range(int(super_cat.shape[0])):\r\n obj = super_cat[obj_id]\r\n id_coords.append((obj_id+1, obj[0], obj[1]))\r\n return id_coords", "def get_file_by_id(id):\n return my_query(construct_get_file_by_id(id))", "def get_by_id(self, id: int) -> BoundIso:\n response = self._client.request(url=f\"/isos/{id}\", method=\"GET\")\n return BoundIso(self, response[\"iso\"])", "def map_id_to_device(dev_map, osd_id):\n for elem in dev_map:\n if elem['id'] == osd_id:\n return elem['path']", "def get_single_layer_image(self, layer_id):\n for layer in self.psd_file.layers:\n if layer.layer_id == layer_id:\n return layer.as_PIL()", "def get_distribution():\n return distro.id()", "def get(cls, id):\n response = get_by_endpoint(\"computed_files/\" + str(id)).json()\n return ComputedFile(**response)", "def get_sgd(self, id, name):\n # check if id exists in group definition\n if id in self.mstats.keys() and 'df' in self.mstats[id].keys():\n # print \"id 
%s in mstats\" % id\n type = 'group' if id.endswith('/') else 'dataset'\n sgd = {'id': id, 'type': type, 'ns':self.sdef['ns'], 'df': self.mstats[id]['df'],}\n # print \"found definition for %s in mstats, mstats=\" % id\n # pp.pprint(self.mstats)\n return sgd\n else:\n # see if parent group is specified in locations; if so, check for id in \n # locations list of members of parent group. Example for nwb format is are\n # \"UnitTimes/\" inside <module>/. <module> is parent group\n pid = self.sdef['id'] # parent id, e.g. \"<module>\"\n ns = self.sdef['ns']\n if pid in self.file.ddef[ns]['locations']:\n if id in self.file.ddef[ns]['locations'][pid]:\n type = 'group' if id.endswith('/') else 'dataset'\n # add id to mstats so can register creation of group\n self.mstats[id] = {'ns':ns, 'created': [], 'qty': '+', \n 'type': type} # todo: jeff, need to check df\n sgd = self.file.get_sdef(id, ns, \"referenced in make_subgroup\")\n # print \"id %s in %s location ns %s structures\" % (id, pid, ns)\n # example output: id UnitTimes/ in <module>/ location ns core structures\n # traceback.print_stack()\n return sgd\n else:\n print \"found parent %s in locations, but %s not inside\" % (pid, id)\n print \"locations contains:\"\n pp.pprint(self.file.ddef[ns]['locations'][pid])\n else:\n print \"did not find parent %s in locations for namespace %s\" % (pid, ns)\n print \"** Error, attempting to create '%s' (name='%s') inside group:\" % (id, name)\n print self.full_path\n print \"But '%s' is not a member of the structure for the group\" % id\n print \"Valid options are:\", self.mstats.keys()\n # print \"Extra information (for debugging): Unable to find definition for node %s\" % id\n # print \"mstats=\"\n # pp.pprint(self.mstats)\n traceback.print_stack()\n sys.exit(1)", "def read_pcigale_best_model(cwd, object_id):\n\n try:\n # Open *_best_mode.fits\n hdulist = fits.open(cwd + '/' + object_id + '_best_model.fits')\n cols = hdulist[1].columns\n hdulist_data = hdulist[1].data\n hdulist_hdr = hdulist[1].header\n # Redshift\n z = hdulist_hdr['universe.redshift']\n z = float(z)\n # Luminosity distance [m]\n cosmo = FlatLambdaCDM(H0=100.*cosmo_params['h'],\n Om0=cosmo_params['omega_M_0'], Tcmb0=cosmo_params['Tcmb0'])\n dl = cosmo.luminosity_distance(z)\n dl = dl.value * 3.0857E16 * 1.E6\n\n # Extract total SED model [nm], [mJy]\n wvl_o = np.array([row[0] for row in hdulist_data])\n flx = np.array([row[1] for row in hdulist_data])\n # Convert to [m], [Hz], [Jy]\n wvl_o = wvl_o * 1.E-9\n flx = flx * 1.E-3\n frq_o = cnsts['c'] / wvl_o\n\n # Extract dust SED model [W nm^-1] --> [Jy]\n index_1 = [i for i, item in enumerate(cols.names)\n if 'dust.Umin_Umax' in item]\n index_1 = index_1[0]\n index_2 = [i for i, item in enumerate(cols.names)\n if 'dust.Umin_Umin' in item]\n index_2 = index_2[0]\n lum_dust = (np.array([row[index_1] for row in hdulist_data])\n + np.array([row[index_2] for row in hdulist_data]))\n # Convert specific luminosity to [W m^-1]\n lum_dust = lum_dust * 1.E9\n # Convert specific luminosity to flux [Jy] (note: no (1+z) correction)\n flx_dust = ((lum_dust / (4. 
* pi * dl)) / dl)\n flx_dust = flx_dust * (wvl_o * wvl_o) / cnsts['c']\n flx_dust = flx_dust / 1.E-26\n\n # Extract un/atteanuated stellar SED model [W nm^-1] --> [Jy]\n # Unattenuated\n index_1 = [i for i, item in enumerate(cols.names)\n if item == 'stellar.old']\n index_1 = index_1[0]\n index_2 = [i for i, item in enumerate(cols.names)\n if item == 'stellar.young']\n index_2 = index_2[0]\n lum_stellar_unatt = (np.array([row[index_1] for row in hdulist_data])\n + np.array([row[index_2] for row in hdulist_data]))\n # Attenuated\n index_3 = [i for i, item in enumerate(cols.names)\n if item == 'attenuation.stellar.old']\n index_3 = index_3[0]\n index_4 = [i for i, item in enumerate(cols.names)\n if item == 'attenuation.stellar.young']\n index_4 = index_4[0]\n lum_stellar_att = (np.array([row[index_1] for row in hdulist_data])\n + np.array([row[index_3] for row in hdulist_data])\n + np.array([row[index_2] for row in hdulist_data])\n + np.array([row[index_4] for row in hdulist_data]))\n # Convert specific luminosities to [W m^-1]\n lum_stellar_unatt = lum_stellar_unatt * 1.E9\n lum_stellar_att = lum_stellar_att * 1.E9\n # Convert specific luminosity to flux [Jy] (note: no (1+z) correction)\n # Unattenuated\n flx_stellar_unatt = ((lum_stellar_unatt / (4. * pi * dl)) / dl)\n flx_stellar_unatt = flx_stellar_unatt * (wvl_o * wvl_o) / cnsts['c']\n flx_stellar_unatt = flx_stellar_unatt / 1.E-26\n # Attenuated\n flx_stellar_att = ((lum_stellar_att / (4. * pi * dl)) / dl)\n flx_stellar_att = flx_stellar_att * (wvl_o * wvl_o) / cnsts['c']\n flx_stellar_att = flx_stellar_att / 1.E-26\n\n # Extract Fritz2006 AGN SED model [W nm^-1] --> [Jy]\n index = [i for i, item in enumerate(cols.names) if 'agn.fritz2006' in item]\n index = index[0]\n lum_agn = np.array([row[index] for row in hdulist_data])\n # Convert specific luminosity to [W m^-1]\n lum_agn = lum_agn * 1.E9\n # Convert specific luminosity to flux [Jy] (note: no (1+z) correction)\n flx_agn = ((lum_agn / (4. * pi * dl)) / dl)\n flx_agn = flx_agn * (wvl_o * wvl_o) / cnsts['c']\n flx_agn = flx_agn / 1.E-26\n\n # Extract non-thermal radio SED model [W nm^-1] --> [Jy]\n index = [i for i, item in enumerate(cols.names)\n if item == 'radio_nonthermal']\n if len(index) > 0:\n index = index[0]\n L_radio = np.array([row[index] for row in hdulist_data])\n # Convert specific luminosity to [W m^-1]\n L_radio = L_radio * 1.E9\n # Convert specific luminosity to flux [Jy] (note: no (1+z) correction)\n flx_radio = ((L_radio / (4. 
* pi * dl)) / dl)\n flx_radio = flx_radio * (wvl_o * wvl_o) / cnsts['c']\n flx_radio = flx_radio / 1.E-26\n else:\n flx_radio = frq_o*0.\n\n # Close opened fits files\n hdulist.close()\n\n return {'z': z, 'frq_o': frq_o, 'wvl_o': wvl_o, 'flx': flx,\n 'flx_dust': flx_dust, 'flx_agn': flx_agn, 'flx_radio': flx_radio,\n 'flx_stellar_unatt': flx_stellar_unatt,\n 'flx_stellar_att': flx_stellar_att}\n except IOError:\n print >> sys.stderr, \"Exception: %s\" % str(e)\n sys.exit(1)", "def construct_get_file_by_id(file_id):\n query_template = Template(\"\"\"\n PREFIX mu: <http://mu.semte.ch/vocabularies/core/>\n PREFIX nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#>\n PREFIX nie: <http://www.semanticdesktop.org/ontologies/2007/01/19/nie#>\n SELECT (?phys_file AS ?uri)\n WHERE {\n GRAPH $graph {\n ?virt_file a nfo:FileDataObject ;\n mu:uuid $uuid .\n ?phys_file a nfo:FileDataObject ;\n nie:dataSource ?virt_file .\n }\n }\n LIMIT 1\n \"\"\")\n return query_template.substitute(\n graph=sparql_escape_uri(MU_APPLICATION_GRAPH or\"http://mu.semte.ch/application\"),\n uuid=sparql_escape_string(file_id))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the orients file for a given supercell id.
def get_orients(self,id): lines = self.mfp.get_orients(id) return lines
[ "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def path(self, id):\n return self._inode_to_path.get(id)", "def id_to_index(self, cell_id):\n\n # TODO: Need __getitem__\n raise NotImplementedError", "def get_volume(self, id):\n # If the id is actually a path\n if exists(id):\n with open(id) as file:\n size = os.lseek(file.fileno(), 0, os.SEEK_END)\n return {'path': id, 'size': size}\n return self.volume.get(id)", "def get_orientations(self, int32 dim, codim=None):\n if codim is not None:\n dim = self.tdim - codim\n\n if dim == 1:\n return self.edge_oris\n\n elif dim == 2:\n return self.face_oris\n\n else:\n raise ValueError('only edges or faces have orientations! (%d)'\n % dim)", "def import_super():\r\n super_cat = loadtxt('super.csv', delimiter=',', skiprows=1, usecols=[0, 1])\r\n id_coords = []\r\n for obj_id in range(int(super_cat.shape[0])):\r\n obj = super_cat[obj_id]\r\n id_coords.append((obj_id+1, obj[0], obj[1]))\r\n return id_coords", "def _get_data_file_from_id(self, a_dir, a_id):\r\n data_p = self.DATA_FNAME % (a_dir, a_id)\r\n \r\n # check if encrypted and compressed or not\r\n if os.path.exists('%s.crypt.gz' % (data_p)):\r\n data_fd = gzip.open('%s.crypt.gz' % (data_p), 'r')\r\n elif os.path.exists('%s.gz' % (data_p)):\r\n data_fd = gzip.open('%s.gz' % (data_p), 'r')\r\n elif os.path.exists('%s.crypt' % (data_p)):\r\n data_fd = open('%s.crypt' % (data_p), 'r')\r\n else:\r\n data_fd = open(data_p)\r\n \r\n return data_fd", "def get_one_office(id):\n res = q.get_one_office(id)\n return res", "def get_coord_file(cls, modelid):\n if not PDBMapSwiss._modelid2info:\n raise Exception(\"PDBMapSwiss.load_swiss_INDEX_JSON must be called before using this method.\")\n d = PDBMapSwiss._modelid2info[modelid]\n uniprot_directories = d['unp']\n dash_or_end = uniprot_directories.rfind('-')\n # In the SwissModel filename system\n # The Uniprot Isoform is dropped to locate the directory\n # However,the coordinate_id quasi-guid could vary per isoform\n if (dash_or_end == -1):\n dash_or_end = len(uniprot_directories)\n return \"%s/%s/%s/%s/swissmodel/%d_%d_%s_%s.pdb\" % (\n PDBMapSwiss.swiss_dir,\n d['unp'][0:2],\n d['unp'][2:4],\n d['unp'][4:dash_or_end],\n # d['unp'][4:dash_or_end],\n int(d['start']),\n int(d['end']),\n d['template'],\n d['coordinate_id'] #\n )", "def ReadIDs(self,path):\r\n # todo Odczytanie informacji z nazwy pliku: 4-znakowe ID stacji, dzień roku, numer sekwencji (lub numer godziny/minut - co 15)\r\n self.fid = os.path.splitext(os.path.basename(path))[0]\r\n self.sid = os.path.splitext(os.path.basename(path))[0][:4]", "def get_orientation_idx(self):\n return self.orientation_idx", "def get_file_by_id(id):\n return my_query(construct_get_file_by_id(id))", "def get(self, id):\n file = (\n self.drive.files()\n .get(\n fileId=id,\n fields=\"id, name\",\n supportsAllDrives=self.shared_drive[0],\n )\n .execute()\n )\n return file", "def _cell_id_to_row_col(self, cell_id):\n row = (cell_id // self.n) + 1\n col = (cell_id % self.n) + 1\n return row, col", "def orient(self):\n return self.__ph.get('orient', PH_ORIENT_HORZ)", "def get_office_by_id(self, office_id):\n\n self.curr.execute(\"\"\" SELECT * FROM offices WHERE office_id={}\"\"\".format(office_id))\n office = self.curr.fetchone()\n self.conn.commit()\n self.curr.close()\n return json.dumps(office, default=str)", "def get_i_path(self, name):\n if name is None:\n return self.ivec_path\n return os.path.join(self.path, 'ivec_%s' % name)", "def orientation(self):\n directions = 
self._directions_of_edges()[0]\n orientation = []\n for C in self.pd_code():\n if C[0] == C[1] or C[2] == C[3]:\n orientation.append(-1)\n elif C[1] == C[2] or C[0] == C[3]:\n orientation.append(1)\n elif directions[C[1]] == C:\n orientation.append(-1)\n else:\n orientation.append(1)\n return orientation", "def _get_xl_orientation_parameterisation(self, experiment_id):\n\n param_set = self._exp_to_param[experiment_id]\n xl_op = None\n if param_set.xl_ori_param is not None:\n xl_op = self._xl_orientation_parameterisations[param_set.xl_ori_param]\n\n return xl_op" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replace the stream of every file_handler in the given logger. Replacement method: change the file used by the stream, e.g. filename, to filename.<process name>. Purpose: avoid errors caused by multiple processes writing to the same file; known errors include broken log rotation, lost log entries, and interleaved/garbled logs.
def trans_logger(logger): if not isinstance(logger, logging.Logger): return for handler in logger.handlers: if isinstance(handler, logging.FileHandler): handler.baseFilename = handler.baseFilename + '.' + multiprocessing.current_process().name old_stream = handler.stream if old_stream: try: old_stream.flush() finally: if hasattr(old_stream, "close"): old_stream.close() handler.stream = handler._open()
[ "def processTempLog(file_name):", "def logger_file(self, value):\n self.__logger_file = value\n if self.__logger_file:\n # If set logging file,\n # then add file handler and remove stream handler.\n self.logger_file_handler = logging.FileHandler(self.__logger_file)\n self.logger_file_handler.setFormatter(self.logger_formatter)\n for _, logger in iteritems(self.logger):\n logger.addHandler(self.logger_file_handler)\n if self.logger_stream_handler:\n logger.removeHandler(self.logger_stream_handler)\n else:\n # If not set logging file,\n # then add stream handler and remove file handler.\n self.logger_stream_handler = logging.StreamHandler()\n self.logger_stream_handler.setFormatter(self.logger_formatter)\n for _, logger in iteritems(self.logger):\n logger.addHandler(self.logger_stream_handler)\n if self.logger_file_handler:\n logger.removeHandler(self.logger_file_handler)", "def change_logfile(logname=None):\n global LOG_FILENAME, handler, formatter, dblogger\n basename = 'dbprocessing_{0}'.format(logname if logname else 'log')\n old_filename = LOG_FILENAME\n LOG_FILENAME = os.path.expanduser(os.path.join(log_dir, '{0}.log.{1}'.format(\n basename, utctoday)))\n dblogger.info(\"Logging file switched from {0} to {1}\".format(old_filename, LOG_FILENAME))\n new_handler = logging.handlers.TimedRotatingFileHandler(\n LOG_FILENAME, when='midnight', interval=1, backupCount=0, # keep them all\n utc=True, encoding='ascii')\n new_handler.setFormatter(formatter)\n dblogger.removeHandler(handler)\n handler.close()\n dblogger.addHandler(new_handler)\n handler = new_handler\n dblogger.info(\"Switching logging file from {0} to {1}\".format(old_filename, LOG_FILENAME))", "def reopen_files(self):\r\n for log in (self.error_log, self.access_log):\r\n for h in log.handlers:\r\n if isinstance(h, logging.FileHandler):\r\n h.acquire()\r\n h.stream.close()\r\n h.stream = open(h.baseFilename, h.mode)\r\n h.release()", "def disable_logging_to_file() -> None:\n global logging_to_file_handler\n if logging_to_file_handler: # pragma: no cover\n logging_to_file_handler.close()\n logging.getLogger().removeHandler(logging_to_file_handler)\n logging_to_file_handler = None", "def logger_file(self, value):\n self.__logger_file = value\n if self.__logger_file:\n # If set logging file,\n # then add file handler and remove stream handler.\n self.logger_file_handler = logging.FileHandler(self.__logger_file)\n self.logger_file_handler.setFormatter(self.logger_formatter)\n for _, logger in self.logger.items():\n logger.addHandler(self.logger_file_handler)", "def SetLoggingFile(log_file):\n global logger\n new_logger = logging.getLogger('dragon_filehandler')\n new_logger.setLevel(logger.level)\n file_handler = logging.FileHandler(log_file, mode=\"w\", encoding=\"UTF-8\")\n new_logger.addHandler(file_handler)\n logger = new_logger", "def prepare_filenames(config: Dict[str, Any]) -> Dict[str, Any]:\n for handler_name in config[\"handlers\"].keys():\n handler_config = config[\"handlers\"][handler_name]\n if \"filename\" in handler_config:\n filename = Path(handler_config[\"filename\"]).name\n handler_config[\"filename\"] = str(LOGS_DIR.joinpath(filename))\n return config", "def set_file_handler(self):\n filename = (\n app_dir(self.app_name)\n / \"logs\"\n / f\"{datetime.now().strftime('%Y%m%d_%H-%M-%S')}.log\"\n )\n ch = logging.FileHandler(str(filename))\n ch.setLevel(logging.INFO)\n\n # create and add formatter to handle\n formatter = logging.Formatter(\n fmt=\"%(asctime)s [%(levelname)s]: %(message)s\", datefmt=\"%Y/%m/%d 
%H:%M:%S\"\n )\n ch.setFormatter(formatter)\n\n # add ch to logger\n self.logger.addHandler(ch)", "def create_file_logger(file_name: str, stream=sys.stdout, level=logging.INFO) -> logging.Logger:\n # https://stackoverflow.com/questions/15727420/using-python-logging-in-multiple-modules\n logging.basicConfig(stream=stream, level=level)\n file_name_stripped = file_name.replace('__', ' ')\n return logging.getLogger(file_name_stripped)", "def add_file_handler_to_root(file_name):\n formatter = logging.Formatter('%(asctime)-15s:' + logging.BASIC_FORMAT)\n file_handler = logging.FileHandler(file_name)\n file_handler.setFormatter(formatter)\n root_logger = logging.getLogger()\n root_logger.addHandler(file_handler)", "def update_logger_handlers(\n log_config_file: str,\n job_out: typing.Optional[str] = None,\n job_err: typing.Optional[str] = None,\n) -> None:\n if os.path.exists(log_config_file):\n conf = __read_log_config_file(log_config_file)\n if job_err:\n handler = \"error_worker_file_handler\"\n if handler in conf[\"handlers\"]:\n conf[\"handlers\"][handler][\"filename\"] = job_err\n if job_out:\n handler = \"info_worker_file_handler\"\n if handler in conf[\"handlers\"]:\n conf[\"handlers\"][handler][\"filename\"] = job_out\n handler = \"debug_worker_file_handler\"\n if handler in conf[\"handlers\"]:\n conf[\"handlers\"][handler][\"filename\"] = job_out\n CONFIG_FUNC(conf)\n else:\n logging.basicConfig(level=logging.INFO) # NOSONAR", "def fileHandlers(self, handlers):\n for handler in handlers:\n self.logger.addHandler(handler)", "def rename_log_file(filename, fb_meta):\n # original log file name - all log statements\n log_file = f'{logoutputpath}{\"flowsa.log\"}'\n # generate new log name\n new_log_name = (f'{logoutputpath}{filename}_v'\n f'{fb_meta.tool_version}'\n f'{\"_\" + fb_meta.git_hash if fb_meta.git_hash else \"\"}'\n f'.log')\n # create log directory if missing\n create_paths_if_missing(logoutputpath)\n # rename the standard log file name (os.rename throws error if file\n # already exists)\n shutil.copy(log_file, new_log_name)\n # original log file name - validation\n log_file = f'{logoutputpath}{\"validation_flowsa.log\"}'\n # generate new log name\n new_log_name = (f'{logoutputpath}{filename}_v'\n f'{fb_meta.tool_version}'\n f'{\"_\" + fb_meta.git_hash if fb_meta.git_hash else \"\"}'\n f'_validation.log')\n # create log directory if missing\n create_paths_if_missing(logoutputpath)\n # rename the standard log file name (os.rename throws error if file\n # already exists)\n shutil.copy(log_file, new_log_name)", "def add_file_handler_to_logger(logger):\n # This makes \n if AppState().log_file is None:\n return\n\n # Create file handler which logs even DEBUG messages.\n fh = logging.FileHandler(AppState().log_file)\n\n # Set logging level for this file.\n fh.setLevel(logging.DEBUG)\n\n # Create formatter and add it to the handlers.\n formatter = logging.Formatter(fmt='[%(asctime)s] - %(levelname)s - %(name)s >>> %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n fh.setFormatter(formatter)\n\n # Add the handler to the logger.\n logger.addHandler(fh)", "def remove_file_handler(self):\n self._log.removeHandler(self.fileHandler)\n self._log.info('Removed file handler')", "def start_log_file(self, file_name):\n assert type(self.log).__name__ == 'multi_out'\n log_file = open(file_name, \"w\")\n self.log.replace_stringio(\n old_label=\"log_buffer\",\n new_label=\"log\",\n new_file_object=log_file)\n return self.log", "def _rotate_log(self, handler):\n handler[\"logger\"].close()\n 
handler[\"logger\"] = self._get_logger(handler)", "def disable_stream_handler(func: Callable):\n\n def _wrapper(*args, **kwargs):\n for logger in LOGGER_TABLE.values():\n logger.removeHandler(STREAM_HANDLER)\n ret = func(*args, **kwargs)\n for logger in LOGGER_TABLE.values():\n logger.addHandler(STREAM_HANDLER)\n return ret\n\n return _wrapper" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the list of films in which a character appears
def getFilms(character): ret = [] for film in character.get('films'): number = int(film.rstrip('/').rpartition('/')[2]) if number not in cache: response = requests.get(film) response = response.json() title = response.get('title') cache[number] = title ret.append(cache.get(number)) return ret
[ "def castFilmography (movies, minAppearances):\n actors = {}\n for (k,v) in movies.items():\n for a in v[2:7]:\n actors[a] = actors.get(a, []) + [k]\n return sorted([ [k] + v for (k,v) in actors.items() if len(v) >= minAppearances ])", "def get_films():\n\n # TODO: Get films from DB\n\n return {\n \"count\": 10,\n \"max\": 100,\n \"films\": {\n \"id\": 1,\n \"name\": \"Marriage Story\",\n \"director\": \"\",\n \"movie_db_id\": 492188,\n \"image_path\": \"blah.png\",\n \"released\": \"2019-11-06\",\n \"genres\": [\"drama\"],\n \"imdb\": {\n \"link\": \"https://www.imdb.com/title/tt7653254/\",\n \"score\": 7.9\n },\n \"rotten_tomatoes\": {\n \"link\": \"https://www.rottentomatoes.com/m/marriage_story_2019\",\n \"score\": 94\n },\n \"watched\": None\n }\n }", "def search_film_by_title(self, titlu):\r\n filmList = []\r\n for film in self.get_all():\r\n if titlu in film.getTitlu():\r\n filmList.append(film)\r\n return filmList", "def filter_file_on_chars(smiles_fname, voc_fname):\n smiles = []\n with open(smiles_fname, 'r') as f:\n for line in f:\n smiles.append(line.split()[0])\n print(smiles[:10])\n chars = []\n with open(voc_fname, 'r') as f:\n for line in f:\n chars.append(line.split()[0])\n print(chars)\n valid_smiles = filter_on_chars(smiles, chars)\n with open(smiles_fname + \"_filtered\", 'w') as f:\n for smiles in valid_smiles:\n f.write(smiles + \"\\n\")", "def get_three_films():\n films = []\n three_films = list()\n my_file = open('film_names.txt', 'r')\n for line in my_file:\n line = line[:-2]\n films.append(line)\n for _ in range(3):\n result = random.choice(films)\n three_films.append(result)\n return three_films", "def get_frequent_vocabulary(cpts, vocabulary, frequency=3):\n captions_flattened = [cpt for image_captions in cpts.values() for cpt in image_captions]\n all_captions = ' '.join(captions_flattened)\n frequent_vocabulary = []\n for i,v in enumerate(vocabulary):\n if all_captions.count(v) >= frequency: frequent_vocabulary.append(v)\n return frequent_vocabulary", "def get_common_movies(your_list: List) -> Union[str, List]:\n\n if len(your_list) in [0, 1]:\n result_nochar = \"| RESULT | -> One or both characters weren't found on the database.\"\n return result_nochar\n\n elif len(your_list) > 2:\n raise SystemExit(\"ERROR: There are more than two sublists on list.\")\n\n else:\n films_char_1 = your_list[0]\n films_char_2 = your_list[1]\n set_results = sorted(list(set(films_char_1) & set(films_char_2)))\n\n if len(set_results) == 0:\n result_nomatch = \"| RESULT | -> No matches were found.\"\n return result_nomatch\n else:\n return set_results", "def title_by_actor(matches: List[str]) -> List[str]:\n \n def compare(movie):\n result = []\n for n in get_actors(movie):\n if matches[0] == n:\n result = True\n return result\n\n r = list(map(get_title,(filter(compare,movie_db))))\n\n return r", "def __generate_character_appearances_dict(self, films) -> dict:\n appearances_counter = {}\n for film in films:\n for character_link in film[\"characters\"]:\n count = appearances_counter.get(character_link, 0)\n appearances_counter.update({character_link: count + 1})\n return self.__sort_appearances_by_count(appearances_counter)", "def get_cards(self):\n return [Flashcard.from_word(word) for word in self.get_words()]", "def search_character(realm_list, PATH):\r\n dict_char = {}\r\n for realm in realm_list:\r\n char_list = os.listdir(PATH + realm)\r\n dict_char[realm] = char_list\r\n return dict_char, realm_list", "def words_apple(hist):\n x = filter(lambda hist: 'apple' in hist, hist)\n 
return list(x)", "def get_cast_filmographies_as_string(query):\n cast_filmographies = get_cast_filmographies(query)\n s = ''\n width = 40\n for cast_entry in cast_filmographies:\n s += 4*u'\\n' + u'{:>{}} -- {}\\n\\n'.format(\n cast_entry['role']['character'],\n width,\n cast_entry['role']['name'])\n for production in cast_entry['filmography'][:5]:\n if production['media_type'] == 'movie':\n s += u'{:>{}} in \"{}\"\\n'.format(\n production['character'],\n width,\n production['title'])\n elif production['character'] != '':\n s += u'{:>{}} in \"{}\"\\n'.format(\n production['character'],\n width,\n production['name'])\n else:\n s += (width-8)*u' ' + u'appeared in \"{}\"\\n'.format(\n production['name'])\n return s", "def actors_by_title(matches: List[str]) -> List[str]:\n answer = []\n result =(filter(lambda movie:\n matches[0] == get_title(movie), movie_db))\n for n in result:\n answer = answer + get_actors(n)\n return answer", "def get_documents():\n documents = []\n for category in movie_reviews.categories():\n for fileid in movie_reviews.fileids(category):\n documents.append((list(movie_reviews.words(fileid)), category))\n \n return documents", "def get_imdb_list():\n list_file = 'imdb.txt'\n name_column = 26\n f = open(list_file, 'r')\n film_list = []\n pos = 0\n\n for line in f:\n pos += 1\n words = line.split()\n name = line[name_column:-1]\n # could be problematic is there are brackets in the film name\n year = name[name.find('(') + 1:name.find(')')]\n name = name.replace('(' + year + ')', '')\n film = {\n 'pos': pos,\n 'score': Decimal(words[2]),\n 'name': name.strip(),\n 'year': year\n }\n film_list.append(film)\n f.close()\n return film_list", "def search_movie_in_filmography(criteria, filmography):\n for index in range(0, len(filmography)):\n if filmography[index].__dict__[criteria.keys()[0]] == criteria[criteria.keys()[0]]:\n return filmography[index]", "def collect_film_list(self):\n res = requests.get(self.URL)\n doc = lxml.html.fromstring(res.content)\n for tablerow in doc.xpath(\".//table[1]//tr\"):\n try:\n filmname = tablerow.xpath(\"./td[1]/i//a/@href\")[0].split(\"/\")[-1] # get the exact name by the href url (\"/wiki/film_name\")\n year = tablerow.xpath(\"./td[2]/a/text()\")[0]\n if int(year) >= 2010:\n self.film_list.append(filmname)\n except Exception as e:\n pass", "def fliter_genres():\n args = parse_arguments()\n\n song_list = []\n\n scores = get_scores()\n msd_id_list = [i for i in scores.keys()]\n # msd_id_list = list(get_scores())[:50]\n\n # Iterate through song IDs\n for msd_id in msd_id_list:\n # Read metadata for song\n metadata = h5py.File(msd_id_to_h5(msd_id), \"r\")\n\n # Iterate through top-5 \"artist_terms\" (i.e. genre tags) and check if they contain keywords.\n has_key = False\n for term in [str(i, 'utf-8') for i in metadata['metadata']['artist_terms'][:5]]:\n if np.any([i in term for i in args.keyword_list]):\n has_key = True\n\n if has_key:\n song_list.append(msd_id)\n\n return song_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers a backward hook on the embedding layer, used to save the gradients of the embeddings for use in get_gradients(). When there are multiple inputs (e.g., a passage and question), the hook will be called multiple times. We append all the embedding gradients to a list.
def _register_embedding_gradient_hooks(self, embedding_gradients): def hook_layers(module, grad_in, grad_out): embedding_gradients.append(grad_out[0]) backward_hooks = [] embedding_layer = self.get_embeddings_layer() backward_hooks.append(embedding_layer.register_backward_hook(hook_layers)) return backward_hooks
[ "def _register_post_backward_hooks(self) -> None:\n if not torch.is_grad_enabled():\n return # don't register grad hooks if grad isn't enabled\n for p in self.full_params:\n if p.requires_grad:\n if hasattr(p, \"_shard_bwd_hook\"):\n continue\n # Register a hook on the first call, empirically, autograd\n # fires it at the end for this param, which makes sense.\n p_tmp = p.expand_as(p) # Get a grad_fn on p_tmp.\n assert p_tmp.grad_fn is not None\n grad_acc = p_tmp.grad_fn.next_functions[0][\n 0] # Gets its GradAccumulation object.\n handle = grad_acc.register_hook(\n functools.partial(self._post_backward_hook, p))\n p._shard_bwd_hook = (grad_acc, handle)", "def backward_hook(grad_out):\n target = grad_out.detach()\n assert isinstance(target, torch.Tensor)\n av_grad = av_norm(target)\n av_log_grad = av_norm(target, average_logs=True)\n\n # Add to our running list (gradients must be prepended, since\n # they're added in reverse order)\n self.grad.appendleft(av_grad)\n self.log_grad.appendleft(av_log_grad)", "def _register_pre_backward_hooks(self, outputs: Any) -> Any:\n if not torch.is_grad_enabled():\n return outputs # don't register hooks if grad isn't enabled\n\n if self._is_root:\n # This actually means that only root instance has\n # _post_backward_callback_queued defined. Accidentally accessing this field\n # will assert on all other instances, giving us a nice bug checker.\n self._post_backward_callback_queued = False\n\n def _pre_backward_hook(t_grad: torch.Tensor) -> None:\n # try to queue final backward callback only once for root, so\n # that final backward callback is attached to the outer most\n # backward graph task and called after all the backward\n # calls are completed.\n if self._is_root:\n self._queue_wait_for_post_backward()\n\n if self.optimization_barrier_in_backward:\n self._try_adding_to_backward_opt_barrier_lists(t_grad)\n # All-gather full parameters or switching to the full params.\n # Note, ``self._rebuild_full_params`` is idempotent. So in case it is called\n # unnecessarily, it doesn't incur much overhead.\n if self.reshard_after_forward:\n dependency_tensors = []\n if self.optimization_barrier_in_backward:\n # Ensure that backward pass ops of feature gradients, parameter\n # gradient and sharding, and full-param freeing (which are usually\n # performed in previous modules and are registered to\n # self._backward_opt_barrier_tensors in _grad_opt_barrier_hook,\n # _pre_backward_hook, and _post_backward_hook) are finished before\n # rebuilding the full params of this FSDP module.\n dependency_tensors = self._backward_opt_barrier_tensors\n self._rebuild_full_params(\n dependency_tensors=dependency_tensors,\n apply_opt_barrier=self.optimization_barrier_in_backward)\n self._clear_backward_opt_barrier_lists()\n\n # Only run the following once per iteration (i.e. in case\n # it is multiple outputs or multiple forward passes).\n if not self._pre_backward_hook_has_run:\n self._pre_backward_hook_has_run = True\n # Start of a backward pass for the first time in an iteration.\n self.assert_state([TrainingState.IDLE, TrainingState.BACKWARD_PRE])\n # Check p.grad to make sure that it is in the right shape, device, etc.\n for p, p_shard in zip(self.full_params, self.sharded_params):\n if p.grad is not None:\n assert p.grad.device == p_shard.device\n assert p.grad.size() == p_shard._orig_size\n\n # Transition to BACKWARD_PRE state if currently IDLE. 
We can transition from BACKWARD_POST\n # to IDLE when FSDP is within activation checkpointing and called multiple times, due to the\n # extra forward pass for re-computation.\n if self.training_state == TrainingState.IDLE:\n self.training_state = TrainingState.BACKWARD_PRE\n self.assert_state(\n [TrainingState.BACKWARD_PRE, TrainingState.BACKWARD_POST])\n\n if self.optimization_barrier_in_backward:\n self._try_adding_to_backward_opt_barrier_lists(t_grad)\n self.optimization_barrier_op([t_grad])\n t_grad = t_grad.view(t_grad.size()) # a view with barrier applied\n return t_grad\n\n _registered = 0\n\n def _register_hook(t: torch.Tensor) -> torch.Tensor:\n # We don't register the pre_backward hook on the same tensor that has been\n # returned from an inner FSDP, unless it is the first one.\n nonlocal _registered\n assert self._output_pre_backward_hook_registered is not None\n if t.requires_grad and (_registered == 0 or id(t)\n not in self._output_pre_backward_hook_registered):\n t.register_hook(_pre_backward_hook)\n self._output_pre_backward_hook_registered.add(id(t))\n _registered += 1\n return t\n\n # Attach hooks to Tensor outputs.\n outputs = apply_to_tensors(_register_hook, outputs)\n\n return outputs", "def extend_backprops(storage: ModuleDict):\n\n assert global_settings.hook_handles, \"No hooks have been registered.\"\n hook_called = [False]\n\n def hook(layer: nn.Module, _input, output):\n storage.setdefault(layer, []).extend([output[0].detach()])\n hook_called[0] = True\n\n global_settings.backward_hooks.append(hook)\n yield\n assert hook_called[0], \"Backward hook was never called.\"\n global_settings.backward_hooks.pop()", "def _register_pre_backward_hooks(\n state: _State,\n outputs: Any,\n handles: List[FlatParamHandle],\n) -> None:\n # If there is no gradient computation, then there is no need for\n # pre-backward logic\n if not torch.is_grad_enabled():\n return outputs\n if state._is_root:\n state._post_backward_callback_queued = False # only defined on the root\n\n handles_key = tuple(handles)\n if handles_key:\n # Since these handles' `FlatParameter`s participated in a forward, we\n # conservatively assume that they will be used in the backward\n state._needs_pre_backward_unshard[handles_key] = False\n state._ran_pre_backward_hook[handles_key] = False\n\n def _register_hook(t: torch.Tensor) -> torch.Tensor:\n if t.requires_grad:\n t.register_hook(functools.partial(_pre_backward_hook, state, handles))\n state._needs_pre_backward_unshard[handles_key] = True\n return t\n\n return _apply_to_tensors(_register_hook, outputs)", "def on_backward(\n self,\n trainer: \"GradientDescentTrainer\",\n batch_outputs: Dict[str, torch.Tensor],\n backward_called: bool,\n **kwargs,\n ) -> bool:\n return False", "def backward(self):\n # print(f'Backward of op {self.op}.')\n\n # extract the gradients over the outputs (these have been computed already)\n goutputs_raw = [output.grad for output in self.outputs]\n\n # compute the gradients over the inputs\n ginputs_raw = self.op.backward(self.context, *goutputs_raw)\n\n if not type(ginputs_raw) == tuple:\n ginputs_raw = (ginputs_raw,)\n\n # store the computed gradients in the input nodes\n for node, grad in zip(self.inputs, ginputs_raw):\n assert node.grad.shape == grad.shape, f'node shape is {node.size()} but grad shape is {grad.shape}'\n\n node.grad += grad\n # -- Note that we add the gradient to the one already there. This means that for TensorNodes that are the\n # input to two ops, we are automatically implementing the multivariate chain rule. 
Every op adds its part\n # of the gradient to the .grad part of its inputs.\n\n for node in self.inputs:\n node.backward(start=False)", "def backward(ctx, G):\n backend = ctx.backend\n aliases = ctx.aliases\n formula = ctx.formula\n signature = ctx.signature\n sum_index = ctx.sum_index\n args = ctx.saved_tensors # Unwrap the saved variables\n\n # number of arguments (including parameters)\n nvars = 0;\n for sig in signature[1:]:\n nvars += 1\n\n # If formula takes 5 variables (numbered from 0 to 4), then the gradient\n # wrt. the output, G, should be given as a 6-th variable (numbered 5),\n # with the same dim-cat as the formula's output.\n eta = \"Var(\" + str(nvars) + \",\" + str(signature[0][0]) + \",\" + str(signature[0][1]) + \")\"\n grads = [] # list of gradients wrt. args;\n arg_ind = 5 # current arg index (4 since backend, ... are in front of the tensors); \n var_ind = 0 # current Variable index;\n\n for sig in signature[1:]: # Run through the actual parameters, given in *args in the forward.\n if not ctx.needs_input_grad[arg_ind]: # If the current gradient is to be discarded immediatly...\n grads.append(None) # Don't waste time computing it.\n else: # Otherwise, the current gradient is really needed by the user:\n # adding new aliases is waaaaay too dangerous if we want to compute\n # second derivatives, etc. So we make explicit references to Var<ind,dim,cat> instead.\n var = \"Var(\" + str(var_ind) + \",\" + str(sig[0]) + \",\" + str(sig[1]) + \")\" # V\n formula_g = \"Grad(\" + formula + \",\" + var + \",\" + eta + \")\" # Grad<F,V,G>\n args_g = args + (G,) # Don't forget the gradient to backprop !\n \n # N.B.: if I understand PyTorch's doc, we should redefine this function every time we use it?\n genconv = GenericSum().apply\n\n if sig[1] == 2: # we're referring to a parameter, so we'll have to sum both wrt 'i' and 'j'\n sumindex_g = 1 # The first sum will be done wrt 'i'\n signature_g = [ [sig[0],1] ] + signature[1:] + signature[:1]\n grad = genconv(backend, aliases, formula_g, signature_g, sumindex_g, *args_g)\n # Then, sum 'grad' wrt 'j' :\n # I think that \".sum\"'s backward introduces non-contiguous arrays,\n # and is thus non-compatible with GenericSum:\n # grad = grad.sum(0) \n # We replace it with a \"handmade hack\" :\n grad = Variable(torch.ones(1, grad.shape[0]).type_as(grad.data)) @ grad\n grad = grad.view(-1)\n else :\n # sumindex is \"the index that stays in the end\", not \"the one in the sum\"\n # (It's ambiguous, I know... But it's the convention chosen by Joan, which makes\n # sense if we were to expand our model to 3D tensors or whatever.)\n sumindex_g = sig[1] # The sum will be \"eventually indexed just like V\".\n signature_g = [sig] + signature[1:] + signature[:1]\n grad = genconv(backend, aliases, formula_g, signature_g, sumindex_g, *args_g)\n grads.append(grad)\n\n # increment the Variable counts\n arg_ind += 1 ; var_ind += 1 \n\n # Grads wrt. 
backend, aliases, formula, signature, sum_index, *args\n return (None, None, None, None, None, *grads)", "def _custom_forward_backward(layer_input, next_layer_grad, custom_layer,\n returns_params_grad=False):\n\n custom_layer_output = custom_layer.forward(layer_input)\n custom_layer_input_grad = custom_layer.backward(next_layer_grad)\n if returns_params_grad:\n dX, dW, db = custom_layer_input_grad\n return custom_layer_output, dX, dW, db\n else:\n return custom_layer_output, custom_layer_input_grad", "def layer_backward(d_output, cache):\n\n # Unpack cache values\n x, w, z, output = cache\n\n # Compute derivatives (gradients)\n d_x, d_w = None, None\n\n return d_x, d_w", "def _register_pre_backward_hooks(\n self,\n outputs: Any,\n handles: List[FlatParamHandle],\n ) -> Any:\n # If there is no gradient computation, then there is no need for\n # pre-backward logic\n if not torch.is_grad_enabled():\n return outputs\n\n if self._is_root:\n self._post_backward_callback_queued = False # only defined on the root\n\n handles_key = tuple(handles)\n if handles_key:\n # Since these handles' `FlatParameter`s participated in a forward,\n # we conservatively assume that they will be used in the backward\n self._needs_pre_backward_unshard[handles_key] = False\n self._ran_pre_backward_hook[handles_key] = False\n\n def _pre_backward_hook(_handles: List[FlatParamHandle], *unused: Any) -> None:\n \"\"\"Prepares ``_handles`` 's ``FlatParameter`` s for gradient\n computation.\"\"\"\n _handles_key = tuple(_handles) # avoid shadowing `handles_key`\n # Only run the pre-backward hook once per group of handles involved\n # in the same module forward computation\n if _handles_key and self._ran_pre_backward_hook.get(_handles_key, False):\n return\n\n with torch.autograd.profiler.record_function(\n \"FullyShardedDataParallel._pre_backward_hook\"\n ):\n # Queue the post-backward callback once for the root FSDP\n # instance to attach it to the outermost backward graph task so\n # that it is called after all backward calls complete\n if self._is_root and not self._post_backward_callback_queued:\n self._queue_wait_for_post_backward()\n elif _handles_key:\n self._assert_state([TrainingState_.IDLE])\n self.training_state = TrainingState_.BACKWARD_PRE\n # Queueing the post-backward callback is the only logic that is\n # not per-handle in the pre-backward hook, so we can return\n # early here if there are no handles.\n if not _handles_key:\n return\n for handle in _handles:\n handle._training_state = HandleTrainingState.BACKWARD_PRE\n\n # If the handles have been prefetched, this `_unshard()` simply\n # switches to using the unsharded parameter\n self._unshard(_handles)\n torch.cuda.current_stream().wait_stream(self._streams[\"all_gather\"])\n\n # Set this to `False` to ensure that a mistargeted prefetch\n # does not actually unshard these handles\n self._needs_pre_backward_unshard[_handles_key] = False\n self._prefetch_handles(_handles_key)\n for handle in _handles:\n handle.prepare_gradient()\n self._ran_pre_backward_hook[_handles_key] = True\n\n def _register_hook(t: torch.Tensor) -> torch.Tensor:\n if t.requires_grad:\n t.register_hook(functools.partial(_pre_backward_hook, handles))\n self._needs_pre_backward_unshard[handles_key] = True\n return t\n\n return _apply_to_tensors(_register_hook, outputs)", "def word_embedding_backward(dout, cache):\n dW = None\n ##############################################################################\n # TODO: Implement the backward pass for word embeddings. 
#\n # #\n # HINT: Look up the function np.add.at #\n ##############################################################################\n x, W = cache\n # create a copy since add.at changes the matrix\n W_new = W.copy()\n # it is just adding the derivates specified in dout at proper index\n # x gives the indices . dout gives the derivates that needs to be added.\n np.add.at(W_new, x, dout)\n dW = W_new - W\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return dW", "def _backward_impl(self):\n if self._grad_func is not None:\n grad = self._grad_func(self._scores, self._labels)\n if not isinstance(grad, nd.NDArray):\n grad = nd.array(grad)\n self._scores_grad = grad\n else:\n raise NotImplementedError()", "def apply_gradients(self,\n grads_and_vars,\n global_step=None,\n name=None,\n decay_var_list=None):\n self._decay_var_list = set(decay_var_list) if decay_var_list else False\n return super(DecoupledWeightDecayExtension, self).apply_gradients(\n grads_and_vars, global_step=global_step, name=name)", "def backward(self, out_tensors: List[Tensor], in_tensors: List[Tensor]):\n pass", "def _register_grad_opt_barrier_hooks(\n self, dependency_tensors: List[torch.Tensor]) -> None:\n if not torch.is_grad_enabled():\n return # don't register hooks if grad isn't enabled\n\n def _grad_opt_barrier_hook(t_grad: torch.Tensor):\n self._try_adding_to_backward_opt_barrier_lists(t_grad)\n self.optimization_barrier_op([t_grad])\n return t_grad.view(t_grad.size()) # a view with barrier applied\n\n for t in dependency_tensors:\n if t.requires_grad:\n t.register_hook(_grad_opt_barrier_hook)", "def _backward(self, outputs):\n def _backward_function(*args):\n call_op = outputs[0].op\n return self._rewrite_forward_and_call_backward(call_op, *args)\n return _backward_function, outputs", "def backward_step(optimizer, input_tensor, output_tensor,\n output_tensor_grad, timers):\n\n # NOTE: This code currently can handle at most one skip connection. 
It\n # needs to be modified slightly to support arbitrary numbers of skip\n # connections.\n args = get_args()\n\n if timers is not None:\n timers('backward-compute', log_level=2).start()\n\n # Retain the grad on the input_tensor.\n unwrap_input_tensor_grad = False\n if not isinstance(input_tensor, list):\n input_tensor = [input_tensor]\n unwrap_input_tensor_grad = True\n for x in input_tensor:\n if x is not None:\n x.retain_grad()\n\n if not isinstance(output_tensor, list):\n output_tensor = [output_tensor]\n if not isinstance(output_tensor_grad, list):\n output_tensor_grad = [output_tensor_grad]\n\n # Backward pass.\n if output_tensor_grad[0] is None:\n output_tensor = optimizer.scale_loss(output_tensor[0])\n custom_backward(output_tensor[0], output_tensor_grad[0])\n\n # Collect the grad of the input_tensor.\n input_tensor_grad = [None]\n if input_tensor is not None:\n input_tensor_grad = []\n for x in input_tensor:\n if x is None:\n input_tensor_grad.append(None)\n else:\n input_tensor_grad.append(x.grad)\n\n # Handle single skip connection if it exists (encoder_hidden_state in\n # model with encoder and decoder).\n if mpu.get_pipeline_model_parallel_world_size() > 1 and \\\n mpu.is_pipeline_stage_after_split() and \\\n args.model_type == ModelType.encoder_and_decoder:\n if output_tensor_grad[1] is not None:\n input_tensor_grad[-1].add_(output_tensor_grad[1])\n if unwrap_input_tensor_grad:\n input_tensor_grad = input_tensor_grad[0]\n\n if timers is not None:\n timers('backward-compute').stop()\n\n return input_tensor_grad", "def create_hooks(self, output_ind):\n @torch.no_grad()\n def forward_hook(rnn_cell, inputs, outputs):\n \"\"\"\n Returns a forward hook to be applied to an RNN cell\n\n These modules have type (input, hx, cx) -> (hy, cy), while the\n hook has type (module, input, output) -> None\n \"\"\"\n # Get the average and average log of the value of interest\n if not isinstance(outputs, tuple):\n assert isinstance(outputs, torch.Tensor)\n assert output_ind == 0\n outputs = (outputs,)\n target = outputs[output_ind].detach()\n assert isinstance(target, torch.Tensor)\n av_act = av_norm(target)\n av_log_act = av_norm(target, average_logs=True)\n\n # Add to our running list\n self.act.append(av_act)\n self.log_act.append(av_log_act)\n\n @torch.no_grad()\n def backward_hook(grad_out):\n \"\"\"\n Returns a backward hook to be applied to an output tensor\n\n This hook has type (grad) -> None\n \"\"\"\n target = grad_out.detach()\n assert isinstance(target, torch.Tensor)\n av_grad = av_norm(target)\n av_log_grad = av_norm(target, average_logs=True)\n\n # Add to our running list (gradients must be prepended, since\n # they're added in reverse order)\n self.grad.appendleft(av_grad)\n self.log_grad.appendleft(av_log_grad)\n\n return forward_hook, backward_hook" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Some tokenizers don't have 'eos_token' and 'bos_token' attributes. Thus, we need some trick to get them.
def special_tokens(self, ): if self.tokenizer.bos_token is None or self.tokenizer.eos_token is None: special_tokens = self.tokenizer.build_inputs_with_special_tokens([]) special_tokens_ids = self.tokenizer.convert_ids_to_tokens(special_tokens) self.tokenizer.bos_token, self.tokenizer.eos_token = special_tokens_ids special_tokens = self.tokenizer.eos_token, self.tokenizer.bos_token return special_tokens
[ "def _generate_tokenizers(self):\n self.input_tokenizer = \"\"\n self.output_tokenizer = \"\"", "def synth_tokens(self):\n if self.lliagraph:\n return self.lliagraph.synth_tokens.items()\n else:\n return []", "def _construct_tokenizer(self, model):\n return None", "def oov_token(self):\n return None", "def get_tokenizer(errors='replace',\n unk_token='<|endoftext|>',\n bos_token='<|endoftext|>',\n eos_token='<|endoftext|>',\n add_prefix_space=False,\n ckpt_dir=None):\n merges_file = utils.download(ckpt_dir, 'https://www.dropbox.com/s/7f5n1gf348sy1mt/merges.txt?dl=1')\n vocab_file = utils.download(ckpt_dir, 'https://www.dropbox.com/s/s93xkhgcac5nbmn/vocab.json?dl=1')\n\n return GPT2Tokenizer(vocab_file=vocab_file,\n merges_file=merges_file,\n errors=errors,\n unk_token=unk_token,\n bos_token=bos_token,\n eos_token=eos_token,\n add_prefix_space=add_prefix_space)", "def tokens(self):\n tokens = [k for k in self.word2idx.keys()\n if k not in {PAD_WORD, UNK_WORD, BOS_WORD, EOS_WORD, SEP_WORD}]\n return tokens", "def _generate_tokenizers(self):\n # This model uses a shared tokenizer for both input/output\n if self.num_encoder_tokens != self.num_decoder_tokens:\n # for shared embedding check input/output vocabs are equal\n raise ValueError\n print(\"Fitting tokenizers\")\n self.input_tokenizer = text.Tokenizer(\n num_words=self.num_encoder_tokens,\n lower=True,\n char_level=False,\n oov_token=\"<UNK>\"\n )\n self.input_tokenizer.fit_on_texts(\n self.encoder_texts + self.decoder_texts\n )\n self.output_tokenizer = self.input_tokenizer", "def get_tokens(self, document):\n raise NotImplementedError()", "def pre_tokenizer(self) -> Dict:\n return self.config.get(\"pre_tokenizer\", {\"tokenizer\": None})", "def tokens(self):\n \n return self._priorized(\"tokens\")", "def tokenizer(doc):\n\ttokens = word_tokenize(doc)\n\treturn tokens", "def get_token_annotations(self):\r\n return self._tokens", "def _get_tokens(self):\n return self._retrieve_tokens()", "def _find_tokens(self) -> ty.List[str]:\n return self.chunk.tokenize()", "def _gettoken(c,chars,knownsigils):\n verbose = False\n token = None\n if (c!= \"end\"):\n toktext = []\n matches = knownsigils[c][0]\n toktype = knownsigils[c][1]\n if verbose: print(\"BEF toktype:\",toktype,\" matches:\",matches)\n while (True):\n c = next(chars, \"end\")\n if verbose: print(\"c->\",c)\n if c in matches:\n toktext.append(c)\n else:\n break\n if verbose: print(\"AFT toktype:\",toktype,\" toktext:\",toktext)\n token = (''.join(toktext), toktype)\n return (c,token)", "def tokenize(self, sent):\n raise NotImplementedError(\"BPETokenizer is not implemented\")", "def init_tokens(self):\n raise NotImplementedError('Abstract method.')", "def tokenize(G, w):\n if not w:\n return [G.EOF]\n\n w = normalize(w)\n w = w[:-1].split(' ')\n \n f = G.symbDict\n\n tokens = []\n for token in w:\n if f.get(token) and f[token].IsTerminal:\n tokens.append(f[token])\n else:\n return \"token no definido: \" + token\n tokens.append(G.EOF)\n return tokens", "def process_token(self, token):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the euclidean distance between each word in the vocab and each word in the source.
def _pairwise_distance(self, src_embeds, vocab_embeds, squared=False): # compute square norm to avoid compute all the directions vocab_sq_norm = vocab_embeds.norm(p=2, dim=-1) ** 2 src_sq_norm = src_embeds.norm(p=2, dim=-1) ** 2 # dot product dot_product = self._pairwise_dot_product(src_embeds, vocab_embeds) # reshape for broadcasting vocab_sq_norm = vocab_sq_norm.unsqueeze(0).unsqueeze(0) # 1, 1, vocab size src_sq_norm = src_sq_norm.unsqueeze(2) # batch, seq length, 1 # compute squared difference sq_norm = vocab_sq_norm + src_sq_norm - 2 * dot_product if squared: return sq_norm else: # relu + epsilon for numerical stability sq_norm = F.relu(sq_norm) + 1e-20 # take the square root return sq_norm.sqrt()
[ "def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist", "def word_distance(self, word1, word2):\n \n if word1 not in self.vocab:\n raise RuntimeError('Word \"{}\" not in vocabulary.'.format(word1))\n if word2 not in self.vocab:\n raise RuntimeError('Word \"{}\" not in vocabulary.'.format(word2))\n \n idx1, idx2 = self.vocab.index(word1), self.vocab.index(word2)\n word_rep1 = self.params.word_embedding_weights[idx1, :]\n word_rep2 = self.params.word_embedding_weights[idx2, :]\n diff = word_rep1 - word_rep2\n return np.sqrt(np.sum(diff ** 2))", "def get_distance_metrics(source_embeddings, target_embeddings):\n cosine_avg, euclidean_avg = 0.0, 0.0\n for i in range(len(source_embeddings)):\n cosine_avg += cosine(source_embeddings[i], target_embeddings[i])\n euclidean_avg += euclidean(source_embeddings[i], target_embeddings[i])\n return (cosine_avg / len(source_embeddings)), (euclidean_avg / len(source_embeddings))", "def distance(self, word1, word2):\n\n return scipy.spatial.distance.cosine(self.vectors.get(word1), self.vectors.get(word2))", "def euclidean_distance(doc1, doc2):\n\n distance = 0\n v1, v2 = doc1.vector, doc2.vector\n features = list(set(v1.keys()).union(v2.keys()))\n\n for feature in features:\n distance += pow((v1[feature] - v2[feature]), 2)\n\n return math.sqrt(distance)", "def _l2_distance(queries, embeddings):\n queries = queries.unsqueeze(dim=1).unsqueeze(dim=2) # (q, 1, 1, dim)\n embeddings = embeddings.unsqueeze(dim=0) # (1, b, e, dim)\n\n return ((queries - embeddings) ** 2).sum(dim=3).sqrt() # (q, b, e)", "def _pairwise_cosine_distance(embeddings):\r\n # TODO\r\n\r\n\r\n pass", "def diff(self, word1, word2):\n v = self._vecs[self._index[word1]] - self._vecs[self._index[word2]]\n return v / np.linalg.norm(v)", "def euclidean_dist(x, y):\n n = x.size(0)\n m = y.size(0)\n d = x.size(1)\n if d != y.size(1):\n raise Exception\n x = x.unsqueeze(1).expand(n, m, d)\n y = y.unsqueeze(0).expand(n, m, d)\n return torch.pow(x - y, 2).sum(2)", "def wordDistance(word1, word2):\n assert len(word1) == len(word2)\n count = 0\n\n for c1, c2 in zip(word1, word2):\n if c1 != c2:\n count += 1\n return count", "def sentence_distance(d, words, vecs, s1, s2, sigmasq=1.0):\n pairs = itertools.product(s1,s2)\n numpairs = 0\n distance = 0\n # this for loop is annoyingly long\n for pair in pairs:\n numpairs += 1\n # get the distance\n p1, p2 = pair\n if type(p1) == int:\n i1 = p1\n elif type(p1) == str or type(p1) == unicode:\n try:\n # this is a slow step\n i1 = words.index(p1)\n except ValueError:\n # don't know this word\n pair_dist = 0\n distance += np.exp(-(pair_dist*pair_dist)/sigmasq)\n continue\n else:\n print 'WTF???'\n if type(p2) == int:\n i2 = p2\n elif type(p2) == str or type(p2) == unicode:\n try:\n i2 = words.index(p2)\n except ValueError:\n # don't know this word\n pair_dist = 0\n distance += np.exp(-(pair_dist*pair_dist)/sigmasq)\n continue\n if not d == None:\n # already have pairwise distances\n if len(d.shape) == 2:\n # d is a matrix\n # NOTE d MUST CORRESPOND TO INDEXING OF THE WORDS\n pair_dist = d[i1,i2]\n elif len(d.shape) == 1:\n # d is a distance vector (:()\n n = d.shape[0]\n pair_dist = d[square_to_condensed(i1,i2,n)]\n else:\n # no pairwise distances\n # these vectors should exist becuse we always checked 
for invalid indices\n v1 = vecs[i1, :]\n v2 = vecs[i2, :]\n # OBSERVE CHOICE OF METRIC\n # this is a slow step\n pair_dist = sps.distance.cosine(v1,v2)\n distance += np.exp(-(pair_dist*pair_dist)/sigmasq)\n return distance/numpairs", "def EuclideanDistance(self, x, c):\n dis = 0.0\n for i in range(0, len(x)):\n dis += (x[i]-c[i]) ** 2\n return dis", "def test_euclidean_distance(self):\n v1 = np.array((1, 2, 3))\n v2 = np.array((1, 1, 1))\n assert self.face_recognizer.euclidean_distance(v1, v2) == 2.23606797749979", "def euclidean(training, test, length):\n\n total = 0\n # normalize(training, test)\n\n for i in range(length):\n # calculate sum for euclidean per line in trainingSet\n\t\ttotal += pow((float(training[i]) - float(test[i])), 2)\n\n return math.sqrt(total)\n # returning sqrt from sum(i) above", "def word_distance(w1, w2):\n\n syl_pairs = itertools.izip_longest(\n w1.syllables,\n w2.syllables,\n fillvalue=EMPTY_SYLLABLE\n )\n score = sum(syllable_distance(s1, s2) for s1, s2 in syl_pairs)\n\n # finger on the scale here.\n # Penalize differences in the first sound more.\n score += (2 if w1.sounds[0] != w2.sounds[0] else 0)\n # Penalize differences in the last sound a bit more\n score += (1 if w1.sounds[-1] != w2.sounds[-1] else 0)\n # Penalize mismatches in syllable counts more.\n score += (2 if len(w1.syllables) != len(w2.syllables) else 0)\n return score", "def word_similarity(self):\n y_true = []\n y_pred = []\n for i in open(\"data/word_sim_dataset.txt\").read().split('\\n'):\n i = self.preprocessor(i)\n w1 = i.split()[-1]\n w2 = i.split()[-2] \n st = float(i.split()[-3]) / 4 #dataset has scale from 0 to 4\n \n try:\n w1 = self.embeddings_index[w1] \n w2 = self.embeddings_index[w2] \n w1 = w1 / np.linalg.norm(w1)\n w2 = w2 / np.linalg.norm(w2)\n y_pred.append(np.dot(w1,w2))\n y_true.append(st)\n except:\n pass\n if y_true == []:\n return 1.0\n return mean_squared_error(y_true, y_pred, squared=False)", "def _distance_from_weights(self, data):\n input_data = array(data)\n weights_flat = self._weights.reshape(-1, self._weights.shape[2])\n input_data_sq = power(input_data, 2).sum(axis=1, keepdims=True)\n weights_flat_sq = power(weights_flat, 2).sum(axis=1, keepdims=True)\n cross_term = dot(input_data, weights_flat.T)\n return sqrt(-2 * cross_term + input_data_sq + weights_flat_sq.T)", "def sentence_distance(sentence_a, sentence_b):\n \n sent_a = np.sum([projections[word_ids.get(word, 0)] \n if word in word_ids else [0] \n for word in sentence_a+bigrams(sentence_a)+trigrams(sentence_a)], axis=0)\n sent_b = np.sum([projections[word_ids.get(word, 0)] \n if word in word_ids else [0] \n for word in sentence_b+bigrams(sentence_b)+trigrams(sentence_b)], axis=0)\n \n \n return float(cosine(sent_a, sent_b))", "def calculate_euclidean_distance(self, *, embeddings, entitiy_to_P_URI, entitiy_to_N_URI):\n\n total_distance_from_attractives = 0\n total_distance_from_repulsives = 0\n\n for index in range(len(entitiy_to_P_URI)):\n index_of_attractive_entitites = np.array(list(entitiy_to_P_URI[index].keys()), dtype=np.int32)\n index_of_repulsive_entitites = np.array(list(entitiy_to_N_URI[index]), dtype=np.int32)\n\n total_distance_from_attractives += np.linalg.norm(\n embeddings[index_of_attractive_entitites] - embeddings[index])\n\n total_distance_from_repulsives += np.linalg.norm(\n embeddings[index_of_repulsive_entitites] - embeddings[index])\n\n print('Distance comparision d(A)/d(R) ', total_distance_from_attractives / total_distance_from_repulsives)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If TASK_USE_PATH is set, rely on PATH to look for task binaries. Otherwise ../src/ is used by default.
def task_binary_location(cmd="task"): return binary_location(cmd, TASK_USE_PATH)
[ "def test_task_dir(self):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tasks')", "def get_celery_path():\n\n return get_executable_path('celery')", "def TaskRelativeName(cls, task):\n if not task: return None\n return os.path.relpath(cls.TaskNormalizedName(task),\n PipelineConfig.Instance().pipeline_base_dir())", "def get_tasks_directory():\n return get_tasks_directory.tasks_directory", "def task(path, **kwargs):\n\n # Get model configuration\n config = None\n if isinstance(path, (list, tuple)) and hasattr(path[0], \"config\"):\n config = path[0].config\n elif isinstance(path, str):\n config = AutoConfig.from_pretrained(path, **kwargs)\n\n # Attempt to resolve task using configuration\n task = None\n if config:\n architecture = config.architectures[0] if config.architectures else None\n if architecture:\n if any(x for x in [\"LMHead\", \"CausalLM\"] if x in architecture):\n task = \"language-generation\"\n elif \"QuestionAnswering\" in architecture:\n task = \"question-answering\"\n elif \"ConditionalGeneration\" in architecture:\n task = \"sequence-sequence\"\n\n return task", "def find_taskfile(self):\n filename = self.cmdline.file\n curdir = self.cmdline.dir\n\n if \"load\" in self.cmdline.verbose:\n self.env.errorln(\"Taskrun search directory: {0}\".format(curdir))\n self.env.errorln(\"Taskrun search filename: {0}\".format(filename))\n self.env.errorln(\"Taskrun walk path: {0}\".format(str(self.cmdline.walk)))\n\n self.taskfile = None\n while True:\n taskfile = os.path.join(curdir, filename)\n if os.path.isfile(taskfile):\n if \"load\" in self.cmdline.verbose:\n self.env.errorln(\"Task file found: {0}\".format(taskfile))\n self.taskfile = taskfile\n return\n\n if not self.cmdline.walk:\n return\n\n (head, _) = os.path.split(curdir)\n if head and head != curdir:\n curdir = head\n else:\n break", "def get_python():\n return path.join(TaskCreator.bin_dir, \"python\")", "def build_task(current_target):\n\n target = current_target[\"current_config\"]\n build_path = target.get(\"dp.build_dir\")\n if not os.path.exists(build_path):\n os.makedirs(build_path)\n builder = _make_builder(\n current_target,\n lambda r: SimpleBuild(r, current_target))\n builder.configure(target.get(\"dp.src_dir\"), build_path)\n builder.build(build_path)\n if \"no_install\" not in target:\n builder.install(build_path, path=target.get(\"install_path\",\n \"install\"))", "def prepare_taskfile(taskfile):\n path = os.path.dirname(taskfile)\n taskmodulename = os.path.splitext(os.path.basename(taskfile))[0]\n logging.info(\"Loading task file %s from %s\", taskmodulename, path)\n fp, pathname, description = imp.find_module(taskmodulename, [path])\n try:\n return imp.load_module(taskmodulename, fp, pathname, description)\n finally:\n if fp: \n fp.close()", "def discover_tasks(app):\n\n task_arguments.add_argument(\n \"preload-defaults-from-site\",\n type=str,\n required=False,\n default=\"\",\n choices=preload_defaults_from_site_choices,\n help=\"Select site within environment to load defaults from, argument format is <environment_name>/<site_name>\",\n )\n\n for tasks_base_dir in app.config[\"JINJAMATOR_TASKS_BASE_DIRECTORIES\"]:\n for file_ext in [\"py\", \"j2\"]:\n for tasklet_dir in glob.glob(\n os.path.join(tasks_base_dir, \"**\", f\"*.{file_ext}\"), recursive=True\n ):\n task_dir = os.path.dirname(tasklet_dir)\n append = True\n for dir_chunk in task_dir.replace(tasks_base_dir, \"\").split(\n os.path.sep\n ): # filter out hidden directories\n if dir_chunk.startswith(\".\") or dir_chunk in 
[\"__pycache__\"]:\n append = False\n break\n\n dir_name = task_dir.replace(tasks_base_dir, \"\")[1:]\n if append and dir_name not in available_tasks_by_path:\n\n task_id = xxhash.xxh64(task_dir).hexdigest()\n\n task_info = {\n \"id\": task_id,\n \"path\": dir_name,\n \"base_dir\": tasks_base_dir,\n \"description\": get_section_from_task_doc(task_dir)\n or \"no description\",\n }\n available_tasks_by_path[dir_name] = task_info\n try:\n task = JinjamatorTask()\n log.debug(app.config[\"JINJAMATOR_FULL_CONFIGURATION\"])\n task._configuration.merge_dict(\n app.config[\"JINJAMATOR_FULL_CONFIGURATION\"]\n )\n\n task.load(\n os.path.join(task_info[\"base_dir\"], task_info[\"path\"])\n )\n with app.app_context():\n data = json.loads(\n jsonify(\n task.get_jsonform_schema()[\"schema\"]\n ).data.decode(\"utf-8\")\n )\n task_models[task_info[\"path\"]] = api.schema_model(task_id, data)\n del task\n\n log.info(f\"registered model for task {task_dir}\")\n\n dynamic_role_name = f\"task_{dir_name}\"\n new_role = JinjamatorRole(name=dynamic_role_name)\n\n with app.app_context():\n db.session.add(new_role)\n try:\n db.session.commit()\n except Exception:\n pass\n\n @ns.route(f\"/{task_info['path']}\", endpoint=task_info[\"path\"])\n class APIJinjamatorTask(Resource):\n @api.doc(\n f\"get_task_{task_info['path'].replace(os.path.sep,'_')}_schema\"\n )\n @api.expect(task_arguments)\n @api.doc(\n params={\n \"Authorization\": {\n \"in\": \"header\",\n \"description\": \"A valid access token\",\n }\n }\n )\n @require_role(\n role=or_(\n User.roles.any(\n JinjamatorRole.name == dynamic_role_name\n ),\n User.roles.any(JinjamatorRole.name == \"tasks_all\"),\n )\n )\n def get(self):\n \"\"\"\n Returns the json-schema or the whole alpacajs configuration data for the task\n \"\"\"\n\n args = task_arguments.parse_args(request)\n schema_type = args.get(\"schema-type\", \"full\")\n try:\n preload_data = json.loads(\n args.get(\"preload-data\", \"{}\")\n )\n except TypeError:\n preload_data = {}\n preload_data = remove_redacted(preload_data)[1]\n environment_site = args.get(\n \"preload-defaults-from-site\"\n )\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n inner_task = JinjamatorTask()\n\n inner_task._configuration.merge_dict(\n app.config[\"JINJAMATOR_FULL_CONFIGURATION\"]\n )\n inner_task.configuration.merge_dict(preload_data)\n\n inner_task.load(relative_task_path)\n\n if environment_site not in [None, \"None\", \"\"]:\n inner_task._configuration[\n \"jinjamator_site_path\"\n ] = site_path_by_name.get(environment_site)\n inner_task._configuration[\n \"jinjamator_site_name\"\n ] = environment_site\n env_name, site_name = environment_site.split(\"/\")\n roles = [\n role[\"name\"]\n for role in g._user.get(\"roles\", [])\n ]\n if (\n f\"environment_{env_name}|site_{site_name}\"\n in roles\n or f\"environments_all\" in roles\n or f\"administrator\" in roles\n ):\n inner_task.configuration.merge_yaml(\n \"{}/defaults.yaml\".format(\n site_path_by_name.get(environment_site)\n )\n )\n else:\n abort(\n 403,\n f\"User neither has no role environment_{env_name}|site_{site_name} nor environments_all nor administrator. 
Access denied.\",\n )\n\n full_schema = inner_task.get_jsonform_schema()\n\n if schema_type in [\"\", \"full\"]:\n response = jsonify(full_schema)\n elif schema_type in [\"schema\"]:\n response = jsonify(full_schema.get(\"schema\", {}))\n elif schema_type in [\"data\"]:\n response = jsonify(full_schema.get(\"data\", {}))\n elif schema_type in [\"options\"]:\n response = jsonify(full_schema.get(\"options\", {}))\n elif schema_type in [\"view\"]:\n response = jsonify(full_schema.get(\"view\", {}))\n del inner_task\n return response\n\n @api.doc(\n f\"create_task_instance_for_{task_info['path'].replace(os.path.sep,'_')}\"\n )\n @api.expect(task_models[task_info[\"path\"]], validate=False)\n @api.doc(\n params={\n \"Authorization\": {\n \"in\": \"header\",\n \"description\": \"A valid access token\",\n }\n }\n )\n @require_role(\n role=or_(\n User.roles.any(\n JinjamatorRole.name == dynamic_role_name\n ),\n User.roles.any(JinjamatorRole.name == \"tasks_all\"),\n )\n )\n def post(self):\n \"\"\"\n Creates an instance of the task and returns the job_id\n \"\"\"\n\n from jinjamator.task.celery import run_jinjamator_task\n from jinjamator.daemon.database import db\n\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n data = request.get_json()\n job_id = str(uuid.uuid4())\n user_id = g._user[\"id\"]\n\n job = run_jinjamator_task.apply_async(\n [\n relative_task_path,\n data,\n data.get(\"output_plugin\", \"console\"),\n user_id,\n ],\n task_id=job_id,\n created_by_user_id=user_id,\n )\n\n db_job = list(\n db.session.query(DB_Job).filter(\n DB_Job.task_id == job.id\n )\n )\n db_job = db_job and db_job[0]\n if not db_job:\n db_job = DB_Job(job.id)\n db_job.status = \"SCHEDULED\"\n db_job.configuration = data\n db_job.jinjamator_task = relative_task_path\n db_job.created_by_user_id = user_id\n db.session.add(db_job)\n db.session.flush()\n db.session.commit()\n\n return jsonify({\"job_id\": job.id})\n\n if task_info[\"description\"]:\n post.__doc__ += task_info[\"description\"]\n get.__doc__ += task_info[\"description\"]\n\n except Exception as e:\n import traceback\n\n log.error(\n f\"unable to register {task_dir}: {e} {traceback.format_exc()}\"\n )", "def app_tasks(name, path):\n @task(pre=reset_project.pre, name=\"reset_project\")\n def _reset_project(ctx):\n reset_project(ctx, path)\n\n _reset_project.__doc__ = \"Reset Mynewt project files for {}\".format(name)\n\n @task(pre=install_project.pre, name=\"install_project\")\n def _install_project(ctx):\n install_project(ctx, path)\n\n _install_project.__doc__ = \"Install Mynewt project dependencies for {}\".format(name)\n\n @task(pre=build.pre, name=\"build\")\n def _build(ctx, export_path=None, board=None):\n build(ctx, name, path, export_path, board)\n\n _build.__doc__ = \"Build {} for Pylon\".format(name)\n\n @task(pre=run.pre, name=\"run\")\n def _run(ctx, sn=None, board=None): # pylint: disable=C0103\n run(ctx, name, path, sn, board)\n\n _run.__doc__ = \"Flash and run {} on Pylon\".format(name)\n\n @task(pre=debug.pre, name=\"debug\")\n def _debug(ctx, sn=None, port=None, board=None): # pylint: disable=C0103\n debug(ctx, name, path, sn, port, board)\n\n _debug.__doc__ = \"Debug {} on Pylon\".format(name)\n\n return _install_project, _reset_project, _build, _run, _debug", "def bitbake_path_string():\n\n assert(os.environ.get('BUILDDIR', False)), \"BUILDDIR must be set\"\n\n current_dir = os.open(\".\", os.O_RDONLY)\n os.chdir(os.environ['BUILDDIR'])\n\n # See the recipe for details about this call.\n 
subprocess.check_output([\"bitbake\", \"-c\", \"prepare_recipe_sysroot\", \"mender-test-dependencies\"])\n\n os.fchdir(current_dir)\n\n bb_testing_variables = get_bitbake_variables(\"mender-test-dependencies\")\n\n return bb_testing_variables['PATH'] + \":\" + os.environ['PATH']", "def default_tasks():\n tasks = {'run': run, 'bash': bash}\n for entry_point in pkg_resources.iter_entry_points('jarbas_task'):\n tasks[entry_point.name] = entry_point.load()\n return tasks", "def _get_task(self, path):\n if '/' not in path:\n return self.root\n\n names = path.split('/')[1:]\n task = self.root\n for n in names:\n for t in task.gather_children() + [None]:\n if t is None:\n raise ValueError('No task matching the specified path')\n if t.name == n:\n task = t\n break\n\n return task", "def getTaskModule():\n if pyrafglobals._use_ecl:\n from . import irafecl\n return irafecl\n else:\n return iraftask", "def test_build_base_command_with_no_classpath(self):\n new_config = {\n 'exe_paths': {\n 'java': '/path/to/java',\n },\n }\n\n with self.override_config(new_config):\n self.assertEqual(\n self.tool.build_base_command(),\n [\n '/path/to/java',\n 'a.b.c.Main',\n ])", "def addDep(loader, config):\n\n # Linux-based workloads depend on this task\n loader.addTask({\n 'name': 'build_busybox',\n 'actions': [(buildBusybox, [config])],\n 'targets': [wlutil.getOpt('initramfs-dir') / 'disk' / 'bin' / 'busybox',\n wlutil.getOpt('initramfs-dir') / 'nodisk' / 'bin' / 'busybox'],\n 'file_dep': [wlutil.getOpt('wlutil-dir') / 'busybox-config'],\n 'uptodate': [wlutil.config_changed(wlutil.checkGitStatus(wlutil.getOpt('busybox-dir'))),\n wlutil.config_changed(wlutil.getToolVersions())]\n })\n\n hostInit = []\n # Host-init task always runs because we can't tell if its uptodate and we\n # don't know its inputs/outputs.\n if 'host-init' in config:\n loader.addTask({\n 'name': str(config['host-init']),\n 'actions': [(handleHostInit, [config])],\n })\n hostInit = [str(config['host-init'])]\n\n # Add a rule for the binary\n bin_file_deps = []\n bin_task_deps = [] + hostInit + config['base-deps']\n bin_targets = []\n if 'linux' in config:\n bin_file_deps += config['linux']['config']\n bin_task_deps.append('build_busybox')\n bin_targets.append(config['dwarf'])\n\n if config['use-parent-bin']:\n bin_task_deps.append(str(config['base-bin']))\n\n diskBin = []\n if 'bin' in config:\n if 'dwarf' in config:\n targets = [str(config['bin']), str(config['dwarf'])]\n else:\n targets = [str(config['bin'])]\n\n moddeps = []\n if 'firmware' in config:\n moddeps.append(config['firmware']['source'])\n\n bin_calc_dep_tsks = [\n submoduleDepsTask(moddeps, name=\"_submodule_deps_\"+config['name']),\n ]\n\n if 'linux' in config:\n moddeps.append(config['linux']['source'])\n bin_calc_dep_tsks.append(kmodDepsTask(config, name=\"_kmod_deps_\"+config['name']))\n\n for tsk in bin_calc_dep_tsks:\n loader.addTask(tsk)\n\n loader.addTask({\n 'name': str(config['bin']),\n 'actions': [(makeBin, [config])],\n 'targets': targets,\n 'file_dep': bin_file_deps,\n 'task_dep': bin_task_deps,\n 'calc_dep': [tsk['name'] for tsk in bin_calc_dep_tsks]\n })\n diskBin = [str(config['bin'])]\n\n # Add a rule for the nodisk version if requested\n nodiskBin = []\n if config['nodisk'] and 'bin' in config:\n nodisk_file_deps = bin_file_deps.copy()\n nodisk_task_deps = bin_task_deps.copy()\n if 'img' in config:\n nodisk_file_deps.append(config['img'])\n nodisk_task_deps.append(str(config['img']))\n\n if 'dwarf' in config:\n targets = [str(wlutil.noDiskPath(config['bin'])), 
str(wlutil.noDiskPath(config['dwarf']))]\n else:\n targets = [str(wlutil.noDiskPath(config['bin']))]\n\n uptodate = []\n if 'firmware' in config:\n uptodate.append(wlutil.config_changed(wlutil.checkGitStatus(config['firmware']['source'])))\n if 'linux' in config:\n uptodate.append(wlutil.config_changed(wlutil.checkGitStatus(config['linux']['source'])))\n\n loader.addTask({\n 'name': str(wlutil.noDiskPath(config['bin'])),\n 'actions': [(makeBin, [config], {'nodisk': True})],\n 'targets': targets,\n 'file_dep': nodisk_file_deps,\n 'task_dep': nodisk_task_deps,\n 'uptodate': uptodate\n })\n nodiskBin = [str(wlutil.noDiskPath(config['bin']))]\n\n # Add a rule for running script after binary is created (i.e. for ext. modules)\n # Similar to 'host-init' always runs if exists\n postBin = []\n post_bin_task_deps = diskBin + nodiskBin # also used to get the bin path\n if 'post-bin' in config:\n loader.addTask({\n 'name': str(config['post-bin']),\n 'actions': [(handlePostBin, [config, post_bin_task_deps[0]])],\n 'task_dep': post_bin_task_deps,\n })\n postBin = [str(config['post-bin'])]\n\n # Add a rule for the image (if any)\n img_file_deps = []\n img_task_deps = [] + hostInit + postBin + config['base-deps']\n img_calc_deps = []\n img_uptodate = []\n if 'img' in config:\n if 'base-img' in config:\n img_file_deps.append(config['base-img'])\n\n if 'files' in config or 'overlay' in config:\n # We delay calculation of files and overlay dependencies to runtime\n # in order to catch any generated inputs\n fdepsTask = fileDepsTask(config['name'], taskDeps=img_task_deps,\n overlay=config.get('overlay'),\n files=config.get('files'))\n img_calc_deps.append(fdepsTask['name'])\n loader.addTask(fdepsTask)\n if 'guest-init' in config:\n img_file_deps.append(config['guest-init'].path)\n img_task_deps.append(str(config['bin']))\n if 'runSpec' in config and config['runSpec'].path is not None:\n img_file_deps.append(config['runSpec'].path)\n if 'cfg-file' in config:\n img_file_deps.append(config['cfg-file'])\n if 'distro' in config:\n img_uptodate += config['builder'].upToDate()\n\n loader.addTask({\n 'name': str(config['img']),\n 'actions': [(makeImage, [config])],\n 'targets': [config['img']],\n 'file_dep': img_file_deps,\n 'task_dep': img_task_deps,\n 'calc_dep': img_calc_deps,\n 'uptodate': img_uptodate\n })", "def import_task_by_ref(task_strref):\n app_label, flow_path = task_strref.split('/')\n flow_path, task_name = flow_path.rsplit('.', 1)\n flow_class = import_string('{}.{}'.format(get_app_package(app_label), flow_path))\n return flow_class._meta.node(task_name)", "def _load_defined_tasks():\n task_path = Path(__file__).parent.resolve() / \"nalu_tasks\"\n py_files = glob.glob(str(task_path / \"[a-z]*.py\"))\n modset = {Path(ff).stem for ff in py_files}\n for pymod in modset:\n importlib.import_module(\".%s\"%pymod, 'exawind.nalu.nalu_tasks')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If USE_PATH is True rely on PATH to look for binaries. Otherwise ../src/ is used by default.
def binary_location(cmd, USE_PATH=False): if USE_PATH: return cmd else: return os.path.join(BIN_PREFIX, cmd)
[ "def linkpath(srcdir, pkg):\n home = os.getenv('HOME')\n if srcdir:\n rval = '{}/{}'.format(srcdir, pkg)\n else:\n rval = '{}/bin/{}'.format(home, pkg)\n return rval", "def pythonpath_init():\n # Get this file's directory path\n my_dir = os.path.dirname(os.path.abspath(__file__))\n\n # Remove some paths from \"sys.path\" to avoid unexpected import resolution.\n\n # For each path in the list\n for path in ['', '.', my_dir]:\n # If the path is in \"sys.path\"\n if path in sys.path:\n # Remove the path from \"sys.path\"\n sys.path.remove(path)\n\n # Add \"src\" directory to \"sys.path\".\n # This is the import resolution we want.\n\n # Get \"src\" directory path\n src_dir = os.path.dirname(my_dir)\n\n # If \"src\" directory path is not in \"sys.path\"\n if src_dir not in sys.path:\n # Add \"src\" directory to \"sys.path\"\n sys.path.insert(0, src_dir)", "def manipulate_paths_like_upstream(_executable, sys_path):\n bin_dir = os.path.dirname(os.path.abspath(_executable))\n root_dir = os.path.dirname(bin_dir)\n lib_dir = os.path.join(root_dir, \"lib\")\n sys_path.insert(0, lib_dir)", "def add_bin_path() -> None:\n os_name = platform.system() # 'Windows' or 'Darwin' or 'Linux'\n bin_path = Path(__file__).parent / \"bin\" / os_name\n sep = \";\" if os_name == \"Windows\" else \":\"\n env_path = f\"{bin_path}{sep}{os.environ['PATH']}\"\n os.environ[\"PATH\"] = env_path", "def bitbake_path_string():\n\n assert(os.environ.get('BUILDDIR', False)), \"BUILDDIR must be set\"\n\n current_dir = os.open(\".\", os.O_RDONLY)\n os.chdir(os.environ['BUILDDIR'])\n\n # See the recipe for details about this call.\n subprocess.check_output([\"bitbake\", \"-c\", \"prepare_recipe_sysroot\", \"mender-test-dependencies\"])\n\n os.fchdir(current_dir)\n\n bb_testing_variables = get_bitbake_variables(\"mender-test-dependencies\")\n\n return bb_testing_variables['PATH'] + \":\" + os.environ['PATH']", "def __init__(self, path=\"\", use_env=True, no_path_recurse=False,\n verbose=False):\n\n Repository.__init__(self)\n self.dirs = []\n self.no_path_recurse = no_path_recurse\n self.modules = None\n self.verbose = verbose\n\n for directory in path.split(os.pathsep):\n self._add_directory(directory)\n\n while use_env:\n use_env = False\n modpath = os.getenv('YANG_MODPATH')\n if modpath is not None:\n for directory in modpath.split(os.pathsep):\n self._add_directory(directory)\n\n home = os.getenv('HOME')\n if home is not None:\n self._add_directory(os.path.join(home, 'yang', 'modules'))\n\n inst = os.getenv('YANG_INSTALL')\n if inst is not None:\n self._add_directory(os.path.join(inst, 'yang', 'modules'))\n break # skip search if install location is indicated\n\n default_install = os.path.join(\n sys.prefix, 'share', 'yang', 'modules')\n if os.path.exists(default_install):\n self._add_directory(default_install)\n break # end search if default location exists\n\n # for some systems, sys.prefix returns `/usr`\n # but the real location is `/usr/local`\n # if the package is installed with pip\n # this information can be easily retrieved\n import pkgutil\n if not pkgutil.find_loader('pip'):\n break # abort search if pip is not installed\n\n # hack below to handle pip 10 internals\n # if someone knows pip and how to fix this, it would be great!\n location = None\n try:\n import pip.locations as locations\n location = locations.distutils_scheme('pyang')\n except:\n try:\n import pip._internal.locations as locations\n location = locations.distutils_scheme('pyang')\n except:\n pass\n if location is not None:\n self._add_directory(\n 
os.path.join(location['data'], 'share', 'yang', 'modules'))\n\n if verbose:\n sys.stderr.write('# module search path: %s\\n'\n % os.pathsep.join(self.dirs))", "def _set_spawn_exe_path():\n if sys.argv[0].endswith(\".py\"):\n\n def guess_path(package_root):\n # If all we have is a python module path, we'll need to make a guess for\n # the actual executable path.\n if \"bazel-out\" in sys.argv[0] and package_root in sys.argv[0]:\n # Guess the binary path under bazel. For target\n # //tensorflow/python/distribute:input_lib_test_multiworker_gpu, the\n # argv[0] is in the form of\n # /.../tensorflow/python/distribute/input_lib_test.py\n # and the binary is\n # /.../tensorflow/python/distribute/input_lib_test_multiworker_gpu\n package_root_base = sys.argv[0][: sys.argv[0].rfind(package_root)]\n binary = os.environ[\"TEST_TARGET\"][2:].replace(\":\", \"/\", 1)\n possible_path = os.path.join(package_root_base, package_root, binary)\n if os.access(possible_path, os.X_OK):\n return possible_path\n return None\n\n path = (\n guess_path(\"org_tensorflow\")\n or guess_path(\"org_keras\")\n or guess_path(\"org_tensorflow_privacy\")\n )\n if path is not None:\n sys.argv[0] = path\n multiprocessing.get_context().set_executable(sys.argv[0])", "def query_python_path(self, binary=\"python\"):\n if binary not in self.python_paths:\n bin_dir = 'bin'\n if self._is_windows():\n bin_dir = 'Scripts'\n virtualenv_path = self.query_virtualenv_path()\n if virtualenv_path:\n self.python_paths[binary] = os.path.abspath(os.path.join(virtualenv_path, bin_dir, binary))\n else:\n self.python_paths[binary] = self.query_exe(binary)\n return self.python_paths[binary]", "def load_libsrc():\n import sys\n ops_dir = os.path.dirname(os.path.realpath(__file__))\n fst_package = ops_dir + '/../lib_src/fst_pipeline'\n sys.path.append(fst_package)\n return", "def set_bin_folder(bf_user=None, append_to_python_path=True):\n bf_source = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'build')\n bf_home = os.path.expanduser('~')\n name = 'pykeops-{}-{}'.format(version, sys.implementation.cache_tag)\n if 'CUDA_VISIBLE_DEVICES' in os.environ:\n name += \"-gpu\" + os.environ['CUDA_VISIBLE_DEVICES'].replace(',', '-')\n\n if bf_user is not None: # user provide an explicit path\n bin_folder = os.path.expanduser(bf_user)\n elif os.path.isdir(bf_source): # assume we are loading from source\n bin_folder = bf_source\n elif os.path.isdir(bf_home): # assume we are using wheel and home is accessible\n bin_folder = os.path.join(bf_home, '.cache', name)\n else:\n import tempfile\n bin_folder = tempfile.mkdtemp(prefix=name)\n\n # Clean path name\n bin_folder = os.path.realpath(bin_folder)\n if not bin_folder.endswith(os.path.sep):\n bin_folder += os.path.sep\n\n # Save the path and append in python path\n if append_to_python_path:\n sys.path.append(bin_folder)\n\n return bin_folder", "def ensure_python_bin_dir_in_path():\n import os, sys\n python_bin_dir = os.path.dirname(sys.executable)\n if python_bin_dir not in os.environ['PATH'].split(':'):\n os.environ['PATH'] = '%s:%s' % (python_bin_dir, os.environ['PATH'])", "def _expand_paths():\n if env.has_key(\"local_install\"):\n if not exists(env.local_install):\n run(\"mkdir -p %s\" % env.local_install)\n with cd(env.local_install):\n with settings(hide('warnings', 'running', 'stdout', 'stderr'),\n warn_only=True):\n result = run(\"pwd\")\n env.local_install = result", "def EnsurePathContainsLLVM(self):\n\n llvm_path = os.path.join(self.chrome_src(), \"third_party\",\n \"llvm-build\", 
\"Release+Asserts\", \"bin\")\n if self.llvm_path() not in os.environ[\"PATH\"]:\n raise errors.UserInstructions(\n \"Please add:\\n%s\\nto the beginning of $PATH\\nExample: export PATH=%s:$PATH\" %\n (self.llvm_path(), self.llvm_path()))", "def setup_path_arg(args):\n if args['path']:\n os.environ['PATH'] = '{}:{}'.format(args['path'], os.environ['PATH'])", "def get_kernel_path():\n path = \"/\".join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])\n return path+'/src/'", "def test_file_sys_path():\n file_path, _ = os.path.split(\"tests/resources/relative_import.hy\")\n file_relative_path = os.path.realpath(file_path)\n\n output, _ = run_cmd(\"hy tests/resources/relative_import.hy\")\n assert repr(file_relative_path) in output", "def whereis(program):\n for path in os.environ.get('PATH', '').split(':'):\n if os.path.exists(os.path.join(path, program)) and not os.path.isdir(os.path.join(path, program)):\n return os.path.join(path, program)\n return None", "def test_add_srcdirs_to_syspath(root_path: Path) -> None:\n add_srcdirs_to_syspath()\n\n # Test to see if runtime_syspath's 'src' directory in now in sys.path\n src_path: Path = root_path / \"src\"\n src_path_str: str = os.fspath(src_path)\n sys_paths: List[str] = list()\n found_src_path: bool = False\n syspath_member: str\n for syspath_member in sys.path:\n sys_paths.append(syspath_member)\n if src_path_str == syspath_member:\n found_src_path = True\n break\n\n if not found_src_path:\n msg: str = f\"{src_path.as_posix()} is not in:\"\n syspath_mem: str\n for syspath_mem in sorted(sys_paths):\n msg += f\"\\n\\t{Path(syspath_mem).as_posix()}\"\n pytest.fail(msg)", "def _add_dotfiles_bin_path(self):\n\n user_home = os.path.expanduser('~')\n dotfiles_bin_path = os.path.join(user_home, '.dotfiles/bin')\n if dotfiles_bin_path not in self.paths:\n self.paths.append(\n dotfiles_bin_path\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path.
def which(cmd, mode=os.F_OK | os.X_OK, path=None): # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. def _access_check(fn, mode): return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)) # If we're given a path with a directory part, look it up directly # rather than referring to PATH directories. This includes checking # relative to the current directory, e.g. ./script if os.path.dirname(cmd): if _access_check(cmd, mode): return cmd return None if path is None: path = os.environ.get("PATH", os.defpath) if not path: return None path = path.split(os.pathsep) if sys.platform == "win32": # The current directory takes precedence on Windows. if os.curdir not in path: path.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. pathext = os.environ.get("PATHEXT", "").split(os.pathsep) # See if the given file matches any of the expected path # extensions. This will allow us to short circuit when given # "python.exe". If it does match, only test that one, otherwise we # have to try others. if any(cmd.lower().endswith(ext.lower()) for ext in pathext): files = [cmd] else: files = [cmd + ext for ext in pathext] else: # On other platforms you don't have things like PATHEXT to tell you # what file suffixes are executable, so just pass on cmd as-is. files = [cmd] seen = set() for dir in path: normdir = os.path.normcase(dir) if normdir not in seen: seen.add(normdir) for thefile in files: name = os.path.join(dir, thefile) if _access_check(name, mode): return name return None
[ "def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode) and\n not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly\n # rather than referring to PATH directories. This includes checking\n # relative to the current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if normdir not in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None", "def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)\n\n # If we're given a path with a directory part, look it up directly rather\n # than referring to PATH directories. This includes checking relative to the\n # current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n files = [cmd]\n\n seen = set()\n for dir_ in path:\n normdir = os.path.normcase(dir_)\n if not normdir in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir_, thefile)\n if _access_check(name, mode):\n return name\n return None", "def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode)\n and not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly rather\n # than referring to PATH directories. This includes checking relative to the\n # current directory, e.g. 
./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if not os.curdir in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\n # See if the given file matches any of the expected path extensions.\n # This will allow us to short circuit when given \"python.exe\".\n # If it does match, only test that one, otherwise we have to try\n # others.\n if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n files = [cmd]\n else:\n files = [cmd + ext for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if not normdir in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None", "def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode)\n and not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly rather\n # than referring to PATH directories. This includes checking relative to the\n # current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if not os.curdir in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\n # See if the given file matches any of the expected path extensions.\n # This will allow us to short circuit when given \"python.exe\".\n # If it does match, only test that one, otherwise we have to try\n # others.\n if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n files = [cmd]\n else:\n files = [cmd + ext for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if not normdir in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None", "def which(cmd):\n for path in os.environ['PATH'].split(os.pathsep):\n path = path.strip('\"')\n cmd_path = os.path.join(path, cmd)\n if os.path.isfile(cmd_path) and os.access(cmd_path, os.X_OK):\n return cmd_path\n\n return None", "def get_command_path(command):\n def excutable(command_path):\n return os.path.isfile(command_path) and os.access(command_path, os.X_OK)\n\n for path in os.environ[\"PATH\"].split(os.pathsep):\n command_path = os.path.join(path, command)\n if excutable(command_path):\n return command_path\n\n return None", "def find_command(command):\n\n if 
'PATH' not in os.environ:\n return None\n\n for path in os.environ['PATH'].split(os.pathsep):\n command_file = os.path.join(path, command)\n if gpodder.ui.win32 and not os.path.exists(command_file):\n for extension in ('.bat', '.exe'):\n cmd = command_file + extension\n if os.path.isfile(cmd):\n command_file = cmd\n break\n if os.path.isfile(command_file) and os.access(command_file, os.X_OK):\n return command_file\n\n return None", "def which(cls, cmd):\n abs_path_cmd = None\n if sys.version_info >= (3, 3):\n abs_path_cmd = shutil.which(cmd)\n else:\n abs_path_cmd = find_executable(cmd)\n return abs_path_cmd", "def find_on_path(command):\n\n if 'PATH' not in os.environ:\n return False\n\n path = os.environ['PATH']\n for element in path.split(os.pathsep):\n if not element:\n continue\n filename = os.path.join(element, command)\n if os.path.isfile(filename) and os.access(filename, os.X_OK):\n return True\n\n return False", "def cmdGetPath(self, cmd, die=True):\n rc, out, err = self.prefab.core.run(\"which %s\" % cmd, die=False, showout=False, profile=True)\n if rc > 0:\n if die:\n raise j.exceptions.RuntimeError(\"Did not find command: %s\" % cmd)\n else:\n return False\n return out.split(\"\\n\")[-1]", "def which(executable):\n # type: (str) -> Optional[str]\n paths = os.environ[\"PATH\"].split(os.pathsep)\n for path in paths:\n full_path = os.path.join(path, executable)\n\n if os.path.exists(full_path) and os.access(full_path, os.X_OK):\n return full_path\n\n return None", "def find_binary_in_path(filename):\n if 'PATH' not in os.environ:\n return None\n for directory in os.environ['PATH'].split(get_variable_separator()):\n binary = os.path.abspath(os.path.join(directory, filename))\n if os.path.isfile(binary) and os.access(binary, os.X_OK):\n return binary\n return None", "def get_path_fn(path_type):\n if path_type not in _PATH_OPTIONS:\n raise KeyError(\"Path optimizer '{}' not found, valid options are {}.\"\n .format(path_type, set(_PATH_OPTIONS.keys())))\n\n return _PATH_OPTIONS[path_type]", "def find_executable(cls, name, cmd, dry_run=False):\n if cls.PATH is None:\n cls.PATH = os.environ[\"PATH\"].split(\":\")\n for pdir in cls.PATH:\n pcmd = os.path.join(pdir, cmd)\n if os.path.exists(pcmd):\n return pcmd\n if dry_run:\n return cmd\n raise SystemExit(\"%s '%s' does not exist\" % (name, cmd))", "def try_fspath(path):\n try:\n return os.fspath(path)\n except (AttributeError, TypeError):\n return path", "def abspath(path: Path, must_exist: bool = True, proc=None):\n\n if not proc or not proc.is_remote:\n try:\n rpath = path.resolve()\n except OSError as err:\n raise Error(f\"failed to get real path for '{path}': {err}\")\n if must_exist and not rpath.exists():\n raise Error(f\"path '{rpath}' does not exist\")\n return rpath\n\n if must_exist:\n opt = \"-e\"\n else:\n opt = \"-m\"\n\n stdout, _ = proc.run_verify(f\"readlink {opt} -- {path}\")\n return Path(stdout.strip())", "def find_binary_in_path(filename: str) -> str:\n if \"PATH\" not in os.environ:\n raise PATHNotFoundError\n for directory in os.environ[\"PATH\"].split(os.pathsep):\n binary = os.path.abspath(os.path.join(directory, filename))\n if os.path.isfile(binary) and os.access(binary, os.X_OK):\n return binary\n raise BinaryNotFoundError", "def _find_operation_from_path(path, operation):\n for op in path:\n if op.type == operation:\n return op\n return None", "def getmode(path, use_stat=False):\n try:\n statresult = (os.stat if use_stat else os.lstat)(path)\n except OSError as err:\n # OSX raises EBADF on some /dev/fdfiles 
instead of ENOENT\n if err.errno in [errno.ENOENT, errno.EBADF]:\n return None\n raise\n return statresult.st_mode" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to compute value for fields debit/credit/amount_currency based on an amount and the currencies given in parameter
def compute_amount_fields(self, amount, src_currency, company_currency, invoice_currency=False): amount_currency = False currency_id = False if src_currency and src_currency != company_currency: amount_currency = amount amount = src_currency.with_context(self._context).compute(amount, company_currency) currency_id = src_currency.id debit = amount > 0 and amount or 0.0 credit = amount < 0 and -amount or 0.0 if invoice_currency and invoice_currency != company_currency and not amount_currency: amount_currency = src_currency.with_context(self._context).compute(amount, invoice_currency) currency_id = invoice_currency.id return debit, credit, amount_currency, currency_id
[ "def _compute_amount_fields(self, amount, src_currency, company_currency):\n amount_currency = False\n currency_id = False\n date = self.env.context.get('date') or fields.Date.today()\n company = self.env.context.get('company_id')\n company = self.env['res.company'].browse(company) if company else self.env.user.company_id\n if src_currency and src_currency != company_currency:\n amount_currency = amount\n amount = src_currency._convert(amount, company_currency, company, date)\n currency_id = src_currency.id\n debit = amount > 0 and amount or 0.0\n credit = amount < 0 and -amount or 0.0\n return debit, credit, amount_currency, currency_id", "def _compute_amount_fields(self, amount, src_currency, company_currency):\n # TODO - Remove method\n amount_currency = False\n currency_id = False\n date = self.env.context.get('date') or fields.Date.today()\n company = self.env.context.get('company_id')\n company = self.env['res.company'].browse(\n company) if company else self.env.user.company_id\n if src_currency and src_currency != company_currency:\n amount_currency = amount\n amount = src_currency._convert(\n amount, company_currency, company, date)\n currency_id = src_currency.id\n debit = amount if amount > 0 else 0.0\n credit = -amount if amount < 0 else 0.0\n return debit, credit, amount_currency, currency_id", "def getValue(currency=None):", "def getFactor(currency):", "def getUserCurrency():", "def getCurrencies():", "def _compute_amounts_in_user_currency(self, cr, uid, ids, field_names, args, context=None):\n if context is None:\n context={}\n currency_obj = self.pool.get('res.currency')\n currency_rate_obj = self.pool.get('res.currency.rate')\n user = self.pool.get('res.users').browse(cr, uid, uid, context=context)\n user_currency_id = user.company_id.currency_id.id\n currency_rate_id = currency_rate_obj.search(\n cr, uid, [\n ('rate', '=', 1),\n '|',\n ('currency_id.company_id', '=', user.company_id.id),\n ('currency_id.company_id', '=', False)\n ], limit=1, context=context)[0]\n base_currency_id = currency_rate_obj.browse(cr, uid, currency_rate_id, context=context).currency_id.id\n res = {}\n ctx = context.copy()\n for item in self.browse(cr, uid, ids, context=context):\n ctx['date'] = item.date\n price_total = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_total, context=ctx)\n price_average = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_average, context=ctx)\n residual = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.residual, context=ctx)\n res[item.id] = {\n 'user_currency_price_total': price_total,\n 'user_currency_price_average': price_average,\n 'user_currency_residual': residual,\n }\n return res", "def getCurrencyFactor(id=None):", "def getBaseCurrency():", "def currency_converter(currency_1, currency_2, date):\n response = requests.get(\n 'https://api.exchangeratesapi.io/' + str(date) +\n '?symbols=' + currency_1 + ',' + currency_2)\n output = json.loads(response.content)\n return Decimal(output['rates'][currency_1] / output['rates'][currency_2])", "def deposit_convert_currency(self,deposit_currency,exchange_currency,amount):\n #If currencies are same return amount\n if deposit_currency == exchange_currency:\n return amount\n rate = Get_currency_api.get_crypto_rate(deposit_currency,exchange_currency)['rate']\n converted_amount = float(amount) * float(rate)\n return float(converted_amount)", "def process_conversion(queries, query, src, dst, val, currencies, wf):\n 
####################################################################################################\n # Make the currency case insensitive\n ####################################################################################################\n if src:\n src = src.upper()\n if dst:\n dst = dst.upper()\n\n ####################################################################################################\n # Validate the currencies to check if its a currency or not\n ####################################################################################################\n if not validate_currencies(queries, query, src, dst, currencies, wf):\n return 100\n\n rate = search_rate(src, dst, wf)\n\n if rate == -1:\n wf.add_item('No exchange rate found for the especified currencies...', icon=ICON_ERROR)\n return 1\n\n ####################################################################################################\n # Gets the currency info\n ####################################################################################################\n src_currency_info = currencies[src]\n dst_currency_info = currencies[dst]\n\n cur_src_name = get_currency_name(src_currency_info)\n cur_dst_name = get_currency_name(dst_currency_info)\n\n cur_dst_symbol = str.decode(dst_currency_info['Simbol'], encoding='utf-8')\n flag_file_icon = wf.workflowfile('flags/{}'.format(dst_currency_info['Flag']))\n\n if not val:\n val = 1\n\n converted_rate = Decimal(val) * rate\n\n decimal_places = get_decimal_places_to_use(rate)\n\n fmt_converted_rate = format_result(wf, converted_rate, decimal_places)\n\n # module 1 will result in just the decimal part, if the decimal part is 0, then i'll show only 2 decimal places\n if (rate % Decimal(1)).compare(Decimal('0')) == 0:\n fmt_rate = format_result(wf, rate, 2)\n else:\n fmt_rate = format_result(wf, rate, decimal_places)\n\n title = cur_dst_symbol + ' ' + fmt_converted_rate\n sub_title = u'({}) -> ({}) with rate {} for query: {}'.format(cur_src_name, cur_dst_name, fmt_rate,\n ' '.join(query).upper())\n\n wf.add_item(title, sub_title, valid=True, arg=str(converted_rate), icon=flag_file_icon)\n\n ############################################################################################\n # Checks if an update is available, and add it to the output\n ############################################################################################\n if wf.update_available:\n handle_check_update(wf)\n\n return 0", "def convert_currency(date, value, currency1, currency2):\n if isinstance(date, str):\n date = datetime.strptime(date, \"%Y-%m-%d\")\n currencyvals = get_currency_values(date)\n return Decimal(value) * currencyvals[currency1] / currencyvals[currency2]", "def convert_total_cost(value,user_currency): \n print(\"inside convert_total\")\n try:\n c = CurrencyRates()\n result=c.convert('USD',user_currency,value)\n return round(result, 2)\n except Exception as e:\n logger.error(e)\n logger.error(\"Failed while converting currency\")", "def get_amount_in_currency(amount, input_currency, output_currency):\n get_request = 'http://xe.com/currencyconverter/convert/?Amount={}&From={}&To={}'\\\n .format(amount, input_currency, output_currency)\n session = requests.Session()\n search = session.get(get_request)\n soup = BeautifulSoup(search.content, 'html.parser')\n converted_amount = soup.find('span', attrs={'class': 'uccResultAmount'}).next\n return float(converted_amount.replace(',', ''))", "def convert_currency(amount, input_currency, output_currency):\n input_currency, output_currency = 
get_currency(input_currency, output_currency)\n result = {\n 'input': {\n 'amount': amount,\n 'currency': input_currency\n }\n }\n if output_currency:\n converted_amount = {}\n converted_amount[output_currency] = get_amount_in_currency(amount, input_currency, output_currency)\n result['output'] = converted_amount\n else:\n result['output'] = get_amount_in_all_currencies(amount, input_currency)\n return result", "def test_get_currency_using_get(self):\n pass", "def get_currency_values_if_valid(self):\n home_value_exists = False\n foreign_value_exists = False\n if self.root.ids.home_currency_input.text == '':\n self.root.ids.home_currency_input.hint_text = 'Must enter an amount before calibrating'\n else:\n home_value_exists = True\n if self.root.ids.foreign_currency_input.text == '':\n self.root.ids.foreign_currency_input.hint_text = 'Must enter an amount before converting'\n else:\n foreign_value_exists = True\n if foreign_value_exists:\n try:\n foreign_amount = float(self.root.ids.foreign_currency_input.text)\n valid_foreign_amount = True\n except ValueError:\n self.root.ids.foreign_currency_input.text = ''\n self.root.ids.foreign_currency_input.hint_text = 'Invalid amount (not a number)'\n foreign_amount = 0\n valid_foreign_amount = False\n else:\n valid_foreign_amount = False\n foreign_amount = 0\n if home_value_exists:\n try:\n home_amount = float(self.root.ids.home_currency_input.text)\n valid_home_amount = True\n except ValueError:\n self.root.ids.home_currency_input.text = ''\n self.root.ids.home_currency_input.hint_text = 'Invalid amount (not a number)'\n home_amount = 0\n valid_home_amount = False\n else:\n valid_home_amount = False\n home_amount = 0\n\n return home_value_exists is foreign_value_exists is valid_foreign_amount is valid_home_amount is True, \\\n home_amount, foreign_amount", "def course(self, currency, sum):\n if currency == \"USD\":\n url = \"https://finance.rambler.ru/currencies/USD/\"\n elif currency == \"EUR\":\n url = \"https://finance.rambler.ru/currencies/EUR/\"\n else:\n return sum * 1000\n site = requests.get(url)\n soup = bs4.BeautifulSoup(site.text, 'html.parser')\n com = float(soup.find(\"div\", attrs={\"class\": \"finance-currency-plate__currency\"}).text.split()[0])\n return com * sum * 1000" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
1. Create a bucket with no max_ttl 2. Upload 1000 docs with exp = 100s 3. Set maxTTL on bucket as 60s 4. After 60s, run expiry pager, get item count, must be 1000 5. After 40s, run expiry pager again and get item count, must be 0 6. Now load another set of docs with exp = 100s 7. Run expiry pager after 60s and get item count, must be 0
def test_set_maxttl_on_existing_bucket(self): for bucket in self.buckets: self._load_json(bucket, self.num_items, exp=100) self._update_bucket_maxTTL(maxttl=60) self.sleep(60, "waiting before running expiry pager...") self.expire_pager(self.servers) self.sleep(20, "waiting for item count to come down...") for bucket in self.buckets: items = RestConnection(self.master).get_active_key_count(bucket) self.log.info("Doc expiry set to = 100s, maxTTL = 60s" "(set after doc creation), after 60s, item count = {0}".format(items)) if items != self.num_items: self.fail("FAIL: Items with larger expiry before maxTTL updation deleted!") self.sleep(40, "waiting before running expiry pager...") self.expire_pager(self.servers) self.sleep(20, "waiting for item count to come down...") for bucket in self.buckets: items = RestConnection(self.master).get_active_key_count(bucket) self.log.info("Doc expiry set to = 100s, maxTTL = 60s" "(set after doc creation), after 100s," " item count = {0}".format(items)) if items != 0: self.fail("FAIL: Items with not greater expiry set before maxTTL " "updation not deleted after elapsed TTL!") for bucket in self.buckets: self._load_json(bucket, self.num_items, exp=100) self.sleep(60, "waiting before running expiry pager...") self.expire_pager(self.servers) self.sleep(20, "waiting for item count to come down...") for bucket in self.buckets: items = RestConnection(self.master).get_active_key_count(bucket) self.log.info("Doc expiry set to = 100s, maxTTL = 60s, after 100s," " item count = {0}".format(items)) if items != 0: self.fail("FAIL: Items with not greater expiry not " "deleted after elapsed maxTTL!")
[ "def test_maxttl_with_doc_updates(self):\n rest = RestConnection(self.master)\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=40)\n\n self.sleep(20, \"waiting to update docs with exp=60s...\")\n\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=60)\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n for bucket in self.buckets:\n items = rest.get_active_key_count(bucket)\n self.log.info(\"Items: {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Docs with updated expiry deleted unexpectedly!\")\n\n self.sleep(20, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = rest.get_active_key_count(bucket)\n self.log.info(\"Items: {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Docs with updated expiry not deleted after new exp has elapsed!\")", "def fetch_and_delete(bucket, now, size=100):", "def run_expiry_pager(self, val=10):\n for bucket in self.__buckets:\n ClusterOperationHelper.flushctl_set(\n self.__master_node,\n \"exp_pager_stime\",\n val,\n bucket)\n self.__log.info(\"wait for expiry pager to run on all these nodes\")\n time.sleep(val)", "def test_get_object_after_expiry_time(self):\n # the 10s is important, because the get calls can take 3s each\n # some times\n metadata = {'X-Delete-After': '10'}\n self._test_object_expiry(metadata)", "def test_get_object_at_expiry_time(self):\n metadata = {'X-Delete-At': str(int(time.time()) + 10)}\n self._test_object_expiry(metadata)", "def quota_reserve(self, context, resources, quotas, deltas, expire,\n until_refresh, max_age, project_id):", "def load_bucket(self, bucket, num_items, value_size=512, exp=0,\n kv_store=1, flag=0, only_store_hash=True,\n batch_size=1000, pause_secs=1, timeout_secs=30):\n task = self.async_load_bucket(bucket, num_items, value_size, exp,\n kv_store, flag, only_store_hash,\n batch_size, pause_secs, timeout_secs)\n task.result()", "def get_object_retention(Bucket=None, Key=None, VersionId=None, RequestPayer=None):\n pass", "def test_many_expired_keys(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n for i in range(20):\n self.storage.set(i, i, moe=self.now + 1)\n self.now += 2\n self.gc.expire_random()\n for i in range(20):\n self.assertRaises(StorageKeyError, self.storage.get, i)", "def test_evict_expired(self):\n\n # use an invasive technique so that we don't have to sleep for\n # the item to expire\n\n bc = TimedCache(keep_time=1, purge_frequency=0)\n\n bc[\"test\"] = \"value\"\n bc[\"test2\"] = \"value2\"\n self.assertEqual(len(bc), 2)\n\n # test that expired item i\n bc.cache[\"test\"].timestamp = bc.cache[\"test\"].timestamp - 2\n bc[\"test2\"] = \"value2\" # set value to activate purge\n self.assertEqual(len(bc), 1)\n self.assertFalse(\"test\" in bc)\n self.assertTrue(\"test2\" in bc)", "def test_get_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'}}\n moes = {'1': time.time() + 5, '4': time.time() + 10}\n for key in keys_to_set.keys():\n storage.set(key, keys_to_set[key], moes.get(key))\n # test at moment t\n self.assertEqual(keys_to_set['1'], storage.get('1'), \"Key '1' should still exist.\")\n # test at moment t+6, one key should expire\n self.now += 6\n keys_to_set.pop('1')\n 
moes.pop('1')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertEqual(keys_to_set['4'], storage.get('4'), \"Key '4' should still exist.\")\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")\n # test at moment t+11\n self.now += 5\n keys_to_set.pop('4')\n moes.pop('4')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertRaises(StorageKeyError, storage.get, '4')\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")", "def __init__(self,\n bucket_size=10,\n refresh_amount=1,\n refresh_period_ms=1000,\n start_filled=True):\n super(TokenBucket, self).__init__()\n\n self.bucket_size = bucket_size\n self.refresh_amount = refresh_amount\n self.refresh_period_ms = refresh_period_ms\n self.timer_queue = queue.Queue(self.bucket_size)\n self.stopper = threading.Event()\n\n if start_filled:\n # Fill the queue.\n for _i in range(self.bucket_size):\n self.timer_queue.put(True)\n\n # Create a thread which handles the filling of the queue.\n self.token_thread = threading.Thread(name=\"tokenbucket\", target=self._token_filler)\n self.token_thread.setDaemon(True)\n self.token_thread.start()", "def test_evict_expired(self):\n\n # use an invasive technique so that we don't have to sleep for\n # the item to expire\n\n bc = TimedCache(keep_time=1)\n\n bc[\"test\"] = \"value\"\n bc[\"test2\"] = \"value2\"\n self.assertEqual(len(bc), 2)\n\n # test that expired item i\n bc.cache[\"test\"].timestamp = bc.cache[\"test\"].timestamp - 2\n bc.purge_expired()\n self.assertEqual(len(bc), 1)\n self.assertFalse(\"test\" in bc)\n self.assertTrue(\"test2\" in bc)", "def block_ratelimit(key, secs):\n conn = redis.StrictRedis()\n while True:\n if conn.set(key, '', ex=secs, nx=True):\n return\n time.sleep(0.1)", "def rate_limit(key, actions_allowed=60, how_often=60):\n\n # We need access to the raw redis client because calling incr on\n # a django_redis RedisCache object raises an error if the key\n # doesn't exist.\n client = get_redis_client().client.get_client()\n\n # Increment the key. If they key doesn't exist (or already expired),\n # redis sets the value to 0 before incrementing.\n value = client.incr(key)\n\n if value == 1 or client.ttl(key) == -1:\n # Set the key's expiration if it's the first action we're granting.\n # As a precauation, we also check to make sure that the key actually has\n # an expiration set in case an error occurred the first time we tried to\n # set the expiration. 
If it doesn't have an expiration (ttl == -1), then\n # we'll set it here again.\n client.expire(key, how_often)\n\n return value <= actions_allowed", "def create_thumbnails():\n bucket = BASE_BUCKET + ARG.MANIFOLD\n result = S3_CLIENT.list_objects(Bucket=bucket, Prefix=PREFIX + \"/\", Delimiter=\"/\")\n lev1 = result.get('CommonPrefixes')\n for lev1pre in tqdm(lev1, desc=\"Prefixes\"):\n bpre = lev1pre.get('Prefix').split(\"/\")[-2]\n COUNT[\"Prefixes\"] += 1\n #result2 = S3_CLIENT.list_objects(Bucket=bucket, Prefix=\"/\".join([PREFIX, bpre]) + \"/\",\n # Delimiter=\"/\")\n paginator = S3_CLIENT.get_paginator(\"list_objects\")\n pages = paginator.paginate(Bucket=bucket, Prefix=\"/\".join([PREFIX, bpre]) + \"/\",\n Delimiter=\"/\")\n for page in pages:\n COUNT[\"Pages\"] += 1\n lev2 = page.get('CommonPrefixes')\n for lev2pre in lev2:\n body = lev2pre.get('Prefix').split(\"/\")[-2]\n COUNT[\"Body IDs\"] += 1\n if ARG.WRITE:\n invoke_lambda(bucket, body)\n else:\n LOGGER.debug(\"/\".join([bucket, bpre, body]))\n print(COUNT)", "def put_object_retention(Bucket=None, Key=None, Retention=None, RequestPayer=None, VersionId=None, BypassGovernanceRetention=None, ContentMD5=None):\n pass", "def auto_pg(bucket, data_type, limit=None, page=None, pager_name=None, with_page=True,\n path=None, status=None, level_start=None, level_end=None, excludes=None,\n sort_by=None, ignore_marked_id=False, prefix_to_ignore=None,\n min_limit=0, return_total_count=False, date_start=None, date_end=None):\n not_matched = False\n if not isinstance(bucket, string_types):\n not_matched = True\n if not bucket: # or not data_type\n not_matched = True\n if not data_type and level_end is None and level_start is None:\n not_matched = True\n if not_matched:\n if return_total_count:\n return 0\n else:\n return []\n\n if limit is None:\n limit = get_site_config(['%s_per_page'%data_type, '%ss_per_page'%data_type, 'per_page'], type_required=int)\n\n # default per page is 5\n limit = to_int(limit, 5, max_value=MAX_PER_PAGE) #不能超过 1000\n\n if min_limit and limit < min_limit:\n limit = min_limit\n if limit > MAX_PER_PAGE:\n limit = MAX_PER_PAGE\n\n if not with_page and not page:\n page = 1\n else:\n page = page or get_context_value_from_request(\"page\") or 1\n\n paginator = Paginator(bucket=bucket, data_type=data_type, per_page=limit, page=page,\n path=path, sort_by=sort_by, level_start=level_start, level_end=level_end,\n ignore_marked_id=ignore_marked_id, prefix_to_ignore=prefix_to_ignore,\n date_start=date_start, date_end=date_end,)\n\n if return_total_count:\n return paginator.total_count\n\n if with_page: # 启用分页\n if not hasattr(request, 'paginators'):\n request.paginators = []\n if not hasattr(request, 'paginators_dict'):\n request.paginators_dict = {}\n request.paginators.append(paginator)\n if pager_name:\n request.paginators_dict[pager_name] = paginator\n\n records = paginator.list_object # objects list\n\n if path and path.startswith('_'):\n pass\n else:\n records = filter_records(records, path_prefix=path, status=status, data_type=data_type, excludes=excludes)\n\n return records", "def async_load_bucket(self, bucket, num_items, value_size=512, exp=0,\n kv_store=1, flag=0, only_store_hash=True,\n batch_size=1000, pause_secs=1, timeout_secs=30):\n if not self.gen:\n seed = \"%s-key-\" % self.__name\n self.__kv_gen[\n OPS.CREATE] = BlobGenerator(\n seed,\n seed,\n value_size,\n end=num_items)\n self.gen = copy.deepcopy(self.__kv_gen[OPS.CREATE])\n task = self.__clusterop.async_load_gen_docs(self.__master_node, bucket.name, 
self.gen,\n bucket.kvs[kv_store],OPS.CREATE, exp,\n flag, only_store_hash, batch_size, pause_secs,\n timeout_secs, compression=self.sdk_compression)\n return task" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
1. Create a bucket with ttl = 60s
2. Upload 1000 docs with exp = 40s
3. After 20s, Update docs with exp = 60s
4. After 40s, run expiry pager again and get item count, must be 1000
5. After 20s, run expiry pager again and get item count, must be 0
def test_maxttl_with_doc_updates(self):
    rest = RestConnection(self.master)
    for bucket in self.buckets:
        self._load_json(bucket, self.num_items, exp=40)
    self.sleep(20, "waiting to update docs with exp=60s...")
    for bucket in self.buckets:
        self._load_json(bucket, self.num_items, exp=60)
    self.sleep(40, "waiting before running expiry pager...")
    self.expire_pager(self.servers)
    for bucket in self.buckets:
        items = rest.get_active_key_count(bucket)
        self.log.info("Items: {0}".format(items))
        if items != self.num_items:
            self.fail("FAIL: Docs with updated expiry deleted unexpectedly!")
    self.sleep(20, "waiting before running expiry pager...")
    self.expire_pager(self.servers)
    self.sleep(20, "waiting for item count to come down...")
    for bucket in self.buckets:
        items = rest.get_active_key_count(bucket)
        self.log.info("Items: {0}".format(items))
        if items != 0:
            self.fail("FAIL: Docs with updated expiry not deleted after new exp has elapsed!")
[ "def test_set_maxttl_on_existing_bucket(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=60)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s\"\n \"(set after doc creation), after 60s, item count = {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Items with larger expiry before maxTTL updation deleted!\")\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s\"\n \"(set after doc creation), after 100s,\"\n \" item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Items with not greater expiry set before maxTTL \"\n \"updation not deleted after elapsed TTL!\")\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s, after 100s,\"\n \" item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Items with not greater expiry not \"\n \"deleted after elapsed maxTTL!\")", "def fetch_and_delete(bucket, now, size=100):", "def test_update_bucket(self):\n pass", "def test_get_object_after_expiry_time(self):\n # the 10s is important, because the get calls can take 3s each\n # some times\n metadata = {'X-Delete-After': '10'}\n self._test_object_expiry(metadata)", "def quota_reserve(self, context, resources, quotas, deltas, expire,\n until_refresh, max_age, project_id):", "def __init__(self,\n bucket_size=10,\n refresh_amount=1,\n refresh_period_ms=1000,\n start_filled=True):\n super(TokenBucket, self).__init__()\n\n self.bucket_size = bucket_size\n self.refresh_amount = refresh_amount\n self.refresh_period_ms = refresh_period_ms\n self.timer_queue = queue.Queue(self.bucket_size)\n self.stopper = threading.Event()\n\n if start_filled:\n # Fill the queue.\n for _i in range(self.bucket_size):\n self.timer_queue.put(True)\n\n # Create a thread which handles the filling of the queue.\n self.token_thread = threading.Thread(name=\"tokenbucket\", target=self._token_filler)\n self.token_thread.setDaemon(True)\n self.token_thread.start()", "def test_get_object_at_expiry_time(self):\n metadata = {'X-Delete-At': str(int(time.time()) + 10)}\n self._test_object_expiry(metadata)", "def run_expiry_pager(self, val=10):\n for bucket in self.__buckets:\n ClusterOperationHelper.flushctl_set(\n self.__master_node,\n \"exp_pager_stime\",\n val,\n bucket)\n self.__log.info(\"wait for expiry pager to run on all these nodes\")\n time.sleep(val)", "def _put_object_perf(self, bucket):\n s3_put_obj_times = []\n bolt_put_obj_times = []\n\n # Upload objects to Bolt / S3.\n for key in self._keys:\n value = self._generate(characters=string.ascii_lowercase, length=self.OBJ_LENGTH)\n value_bytes = value.encode()\n\n # Upload object to S3.\n put_obj_start_time = 
time.time()\n self._s3_client.put_object(Bucket=bucket, Key=key, Body=value_bytes)\n put_obj_end_time = time.time()\n # calc latency\n put_obj_time = put_obj_end_time - put_obj_start_time\n s3_put_obj_times.append(put_obj_time)\n\n # Upload object to Bolt.\n put_obj_start_time = time.time()\n self._bolts3_client.put_object(Bucket=bucket, Key=key, Body=value_bytes)\n put_obj_end_time = time.time()\n # calc latency\n put_obj_time = put_obj_end_time - put_obj_start_time\n bolt_put_obj_times.append(put_obj_time)\n\n # calc s3 perf stats\n s3_put_obj_perf_stats = self._compute_perf_stats(s3_put_obj_times)\n\n # calc bolt perf stats\n bolt_put_obj_perf_stats = self._compute_perf_stats(bolt_put_obj_times)\n\n return {\n 'object_size': \"{:d} bytes\".format(self.OBJ_LENGTH),\n 's3_put_obj_perf_stats': s3_put_obj_perf_stats,\n 'bolt_put_obj_perf_stats': bolt_put_obj_perf_stats\n }", "def test_update_single_doc_n_times(self):\n count = 0\n self.assertIs(\n (self.fragmentation <= 0 or self.fragmentation >= 100),\n False, msg=\"Fragmentation value can't be <=0 or >=100\")\n update_count = int(\n math.ceil(\n float(\n self.fragmentation * self.num_items) / (\n 100 - self.fragmentation)))\n self.log.info(\"{} is the count with which doc will be updated \\\n \".format(update_count))\n self.doc_ops = \"update\"\n\n self.client = SDKClient([self.cluster.master],\n self.bucket_util.buckets[0],\n scope=CbServer.default_scope,\n collection=CbServer.default_collection)\n self.gen_update = doc_generator(\n self.key, 0, 1,\n doc_size=self.doc_size,\n doc_type=self.doc_type,\n target_vbucket=self.target_vbucket,\n vbuckets=self.cluster_util.vbuckets,\n key_size=self.key_size,\n mutate=count,\n randomize_doc_size=self.randomize_doc_size,\n randomize_value=self.randomize_value,\n mix_key_size=self.mix_key_size,\n deep_copy=self.deep_copy)\n key, val = self.gen_update.next()\n\n for node in self.cluster.nodes_in_cluster:\n shell = RemoteMachineShellConnection(node)\n shell.kill_memcached()\n shell.disconnect()\n self.assertTrue(\n self.bucket_util._wait_warmup_completed(\n [self.cluster_util.cluster.master],\n self.bucket_util.buckets[0],\n wait_time=self.wait_timeout * 10))\n\n while count < (update_count + 1):\n self.log.debug(\"Update Iteration count == {}\".format(count))\n val.put(\"mutated\", count+1)\n self.client.upsert(key, val)\n count += 1\n self.bucket_util._wait_for_stats_all_buckets()\n\n disk_usage = self.get_disk_usage(\n self.bucket_util.get_all_buckets()[0],\n self.servers)\n _res = disk_usage[0]\n self.log.info(\"After all updates disk usage is {}MB\\\n \".format(_res))\n usage_factor = (\n (float(self.num_items + update_count\n ) / self.num_items) + 0.5)\n self.log.debug(\"Disk usage factor is {}\".format(usage_factor))\n self.assertIs(\n _res > usage_factor * self.disk_usage[\n self.disk_usage.keys()[0]],\n False, \"Disk Usage {}MB After all Updates'\\n' \\\n exceeds Actual'\\n' \\\n disk usage {}MB by {}'\\n' \\\n times\".format(\n _res,\n self.disk_usage[self.disk_usage.keys()[0]],\n usage_factor))\n data_validation = self.task.async_validate_docs(\n self.cluster, self.bucket_util.buckets[0],\n self.gen_update, \"update\", 0,\n batch_size=self.batch_size,\n process_concurrency=self.process_concurrency,\n pause_secs=5, timeout_secs=self.sdk_timeout)\n self.task.jython_task_manager.get_task_result(data_validation)\n self.enable_disable_swap_space(self.servers, disable=False)\n self.log.info(\"====test_update_single_doc_n_times====\")", "def put_object_retention(Bucket=None, Key=None, 
Retention=None, RequestPayer=None, VersionId=None, BypassGovernanceRetention=None, ContentMD5=None):\n pass", "def post_bucketlist():\n pass", "def test_check_existing_enqueues_tasks(self):\n collection = handlers_endpoints_v1.DigestCollection(\n namespace=handlers_endpoints_v1.Namespace())\n collection.items.append(\n generate_digest(collection.namespace.namespace, 'some content'))\n key = model.get_entry_key(\n collection.namespace.namespace, collection.items[0].digest)\n\n # guarantee that one digest already exists in the datastore\n model.new_content_entry(key).put()\n self.call_api('preupload', self.message_to_dict(collection), 200)\n\n # find enqueued tasks\n self.assertEqual(1, self.execute_tasks())", "def testInsert_expiration(self):\n self.assertTrue(Subscription.insert(\n self.callback, self.topic, self.token, self.secret))\n sub = Subscription.all().get()\n expiration1 = sub.expiration_time\n time.sleep(0.5)\n self.assertFalse(Subscription.insert(\n self.callback, self.topic, self.token, self.secret))\n sub = db.get(sub.key())\n expiration2 = sub.expiration_time\n self.assertTrue(expiration2 > expiration1)", "def progress_update(sent, total):\n l.debug(\"%d of %d Mb uploaded to Amazon S3.\", sent / 1000000, total / 1000000)", "def test_add_bucketlist_item(self):\n self.test_store.add_bucketlist('travel', 'visit london')\n test_bucketlist = self.test_store.get_single_bucketlist(1)\n # import pdb; pdb.set_trace()\n initial_bucketlist_items = len(test_bucketlist['items'])\n self.test_store.add_bucketlist_item(1, \"Tour Big Ben\", \"12 Nov 2017\")\n final_bucketlist_items = len(test_bucketlist['items'])\n self.assertEquals(\n 1, final_bucketlist_items-initial_bucketlist_items,\n 'Bucketlist item not created properly')", "def test_many_expired_keys(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n for i in range(20):\n self.storage.set(i, i, moe=self.now + 1)\n self.now += 2\n self.gc.expire_random()\n for i in range(20):\n self.assertRaises(StorageKeyError, self.storage.get, i)", "def create_thumbnails():\n bucket = BASE_BUCKET + ARG.MANIFOLD\n result = S3_CLIENT.list_objects(Bucket=bucket, Prefix=PREFIX + \"/\", Delimiter=\"/\")\n lev1 = result.get('CommonPrefixes')\n for lev1pre in tqdm(lev1, desc=\"Prefixes\"):\n bpre = lev1pre.get('Prefix').split(\"/\")[-2]\n COUNT[\"Prefixes\"] += 1\n #result2 = S3_CLIENT.list_objects(Bucket=bucket, Prefix=\"/\".join([PREFIX, bpre]) + \"/\",\n # Delimiter=\"/\")\n paginator = S3_CLIENT.get_paginator(\"list_objects\")\n pages = paginator.paginate(Bucket=bucket, Prefix=\"/\".join([PREFIX, bpre]) + \"/\",\n Delimiter=\"/\")\n for page in pages:\n COUNT[\"Pages\"] += 1\n lev2 = page.get('CommonPrefixes')\n for lev2pre in lev2:\n body = lev2pre.get('Prefix').split(\"/\")[-2]\n COUNT[\"Body IDs\"] += 1\n if ARG.WRITE:\n invoke_lambda(bucket, body)\n else:\n LOGGER.debug(\"/\".join([bucket, bpre, body]))\n print(COUNT)", "def bucketlist_update():\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
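Each row above follows the same layout: a natural-language query, one positive document (the code that satisfies it), a list of hard-negative snippets, and a metadata dict whose objective.triplet entry names the (query, document, negatives) pairing. The following is a minimal sketch of how such a row might be consumed; the JSON-lines file name, helper names, and the idea of expanding one row into one triplet per negative are illustrative assumptions, not part of the dataset.

import json

def triplets_from_row(row):
    """Yield one (anchor, positive, negative) tuple per hard negative in a row."""
    query = row["query"]          # natural-language task description
    positive = row["document"]    # the code snippet that answers the query
    for negative in row["negatives"]:  # similar-looking but non-matching snippets
        yield query, positive, negative

def load_triplets(path="triplets.jsonl"):
    """Assumed JSON-lines layout: one dataset row per line."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            yield from triplets_from_row(json.loads(line))

if __name__ == "__main__":
    # Tiny stand-in row, just to show the expansion shape.
    example = {
        "query": "Use lead_ids from the wizard and set them to the new stage",
        "document": "def action_multi_lead_stage(...): ...",
        "negatives": ["def _onchange_stage_id_values(...): ..."],
    }
    for anchor, positive, negative in triplets_from_row(example):
        print(len(anchor), len(positive), len(negative))

How the triplets are then used (e.g. contrastive loss for a retrieval model) is outside the scope of this dump; the sketch only illustrates the row structure described by the metadata.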
Use active_ids from the context to fetch the leads
def default_get(self, cr, uid, fields, context=None):
    if context is None:
        context = {}
    record_ids = context.get('active_ids', False)
    res = super(crm_lead_stage, self).default_get(cr, uid, fields, context=context)
    if record_ids:
        opp_ids = []
        opps = self.pool.get('crm.lead').browse(cr, uid, record_ids, context=context)
        for opp in opps:
            opp_ids.append(opp.id)
        if 'lead_ids' in fields:
            res.update({'lead_ids': opp_ids})
    return res
[ "def get_list_ids():", "def get_all_activities(self):\n return map(lambda x: self.__activity_controller.find_by_id(x.activity_id),list(filter(lambda x: x.activity_id in [e.entity_id for e in self.__activity_controller.get_all()],self.get_all())))", "def _get_objs_for_report(self, docids, data):\n if docids:\n ids = docids\n elif data and 'context' in data:\n ids = data[\"context\"].get('active_ids', [])\n else:\n ids = self.env.context.get('active_ids', [])\n return self.env[self.env.context.get('active_model')].browse(ids)", "def default_get(self, cr, uid, fields, context=None):\n lead_obj = self.pool.get('crm.lead')\n\n res = super(crm_lead2opportunity_partner, self).default_get(cr, uid, fields, context=context)\n if context.get('active_id'):\n tomerge = set([int(context['active_id'])])\n\n email = False\n partner_id = res.get('partner_id')\n lead = lead_obj.browse(cr, uid, int(context['active_id']), context=context)\n\n #TOFIX: use mail.mail_message.to_mail\n email = re.findall(r'([^ ,<@]+@[^> ,]+)', lead.email_from or '')\n\n if partner_id:\n # Search for opportunities that have the same partner and that arent done or cancelled\n ids = lead_obj.search(cr, uid, [('partner_id', '=', partner_id), ('probability', '<', '100')])\n for id in ids:\n tomerge.add(id)\n if email:\n ids = lead_obj.search(cr, uid, [('email_from', 'ilike', email[0]), ('probability', '<', '100')])\n for id in ids:\n tomerge.add(id)\n\n if 'action' in fields:\n res.update({'action' : partner_id and 'exist' or 'create'})\n if 'partner_id' in fields:\n res.update({'partner_id' : partner_id})\n if 'name' in fields:\n res.update({'name' : len(tomerge) >= 2 and 'merge' or 'convert'})\n if 'opportunity_ids' in fields and len(tomerge) >= 2:\n res.update({'opportunity_ids': list(tomerge)})\n if lead.user_id:\n res.update({'user_id': lead.user_id.id})\n if lead.section_id:\n res.update({'section_id': lead.section_id.id})\n return res", "def leads(self):\n from hubspot3.leads import LeadsClient\n\n return LeadsClient(**self.auth, **self.options)", "def get_activity_list(self):\n return self._request_activity_list(self.athlete)", "def fetchById(accountIdList):\n accounts= []\n url = accountsConfig['domain']\n for accId in accountIdList:\n r = requests.get(url +'/'+ str(accId), headers=accountsConfig['headers']).json()\n accounts.append(r)\n return accounts", "def lead_list(request):\n if request.method == 'GET':\n snippets = Lead.objects.all()\n serializer = LeadSerializer(snippets, many=True)\n return JSONResponse(serializer.data)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = LeadSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data, status=201)\n return JSONResponse(serializer.errors, status=400)", "def default_get(self, cr, uid, fields, context=None):\n lead_obj = self.pool.get('crm.lead')\n\n\n res = super(crm_lead2opportunity_partner, self).default_get(cr, uid, fields, context=context)\n opportunities = res.get('opportunity_ids') or []\n\n partner_id = False\n for lead in lead_obj.browse(cr, uid, opportunities, context=context):\n partner_id = lead.partner_id and lead.partner_id.id or False\n\n if not partner_id and res.get('partner_id'):\n partner_id = res.get('partner_id')\n\n ids = []\n if partner_id:\n ids = lead_obj.search(cr, uid, [('partner_id', '=', partner_id), ('type', '=', 'opportunity')])\n opportunities += ids\n\n if 'action' in fields:\n res.update({'action' : partner_id and 'exist' or 'create'})\n if 'partner_id' 
in fields:\n res.update({'partner_id' : partner_id})\n if 'name' in fields:\n res.update({'name' : ids and 'merge' or 'convert'})\n if 'opportunity_ids' in fields:\n res.update({'opportunity_ids': opportunities})\n\n\n return res", "def _QueryActivities():\r\n tasks = []\r\n for vp_dict in request['viewpoints']:\r\n if vp_dict.get('get_activities', False):\r\n tasks.append(gen.Task(Viewpoint.QueryActivities, client, vp_dict['viewpoint_id'],\r\n excl_start_key=vp_dict.get('activity_start_key', None),\r\n limit=limit))\r\n else:\r\n tasks.append(util.GenConstant(None))\r\n\r\n activity_results = yield tasks\r\n raise gen.Return(activity_results)", "def test_get_all_contexted_activity_comments(self):\n from max.tests.mockers import user_status_context as activity\n from max.tests.mockers import create_context, subscribe_context\n username = 'sheldon'\n other = 'penny'\n self.create_user(username)\n self.create_user(other)\n self.create_context(create_context, permissions={'write': 'subscribed', 'read': 'subscribed'})\n self.admin_subscribe_user_to_context(username, subscribe_context)\n self.admin_subscribe_user_to_context(other, subscribe_context)\n activity_id = self.create_activity(username, activity).json['id']\n self.testapp.get('/activities/{}/comments'.format(activity_id), \"\", headers=oauth2Header(other), status=200)", "def get_active_links(self):\r\n return self.links.filter(status=0)", "def list(self,request,*args,**kwargs):\n response=super(ListAPIView,self).list(request,*args,**kwargs)\n #add applied_filters to the response which is set when filter_queryset method is called\n response=self.addAppliedFilters(response)\n #fetch data from the related views\n return self.fetch_related(request,response,*args,**kwargs)", "def find_by_activity_date(self,date):\n return list(filter(lambda x: x.activity_id in [e.entity_id for e in self.__activity_controller.find_by_date(date)],self.get_all()))\n #return map(lambda x: self.__activity_controller.find_by_id(x.activity_id),list(filter(lambda x: x.activity_id in [e.entity_id for e in self.__activity_controller.find_by_date(date)],self.get_all())))", "def add_list_to(context):", "def get_activities(self, **kwargs):\n kwargs.update({'api_key': self.params['api_key'], })\n if self.check_required_params(kwargs, ['route_id',\n 'limit',\n 'offset', ]):\n self.response = self.api._request_get(GET_ACTIVITIES_HOST,\n kwargs)\n return self.response.json()\n else:\n raise ParamValueException('params', 'Params are not complete')", "async def filter_search(request):\n # ads in required time\n try:\n city = request.query_params[\"city\"]\n start = request.query_params[\"start\"]\n end = request.query_params[\"end\"]\n if start > end:\n return RedirectResponse(url=\"/\")\n between = await Rent.filter(\n start_date__lte=datetime.datetime.strptime(end, \"%Y-%m-%d\").date(),\n end_date__gte=datetime.datetime.strptime(start, \"%Y-%m-%d\").date(),\n ).values_list()\n rented = list(set([i[-1] for i in between]))\n print(rented)\n if rented:\n page_query = pagination.get_page_number(url=request.url)\n count = (\n await Ad.all()\n .prefetch_related(\"user\", \"ad_image\", \"ad\", \"ad_rent\")\n .filter(city=city.title(), id__not_in=rented)\n .count()\n )\n paginator = pagination.Pagination(page_query, count)\n results = (\n await Ad.all()\n .prefetch_related(\"user\", \"ad_image\", \"ad\", \"ad_rent\")\n .filter(city=city.title(), id__not_in=rented)\n .limit(paginator.page_size)\n .offset(paginator.offset())\n )\n page_controls = pagination.get_page_controls(\n 
url=request.url,\n current_page=paginator.current_page(),\n total_pages=paginator.total_pages()\n )\n # if ad not in rented list (never rented)\n # return ads by city filter\n else:\n page_query = pagination.get_page_number(url=request.url)\n count = (\n await Ad.all()\n .prefetch_related(\"user\", \"ad_image\", \"ad\", \"ad_rent\")\n .filter(city=city.title())\n .count()\n )\n paginator = pagination.Pagination(page_query, count)\n results = (\n await Ad.all()\n .prefetch_related(\"user\", \"ad_image\", \"ad\", \"ad_rent\")\n .filter(city=city.title())\n .limit(paginator.page_size)\n .offset(paginator.offset())\n )\n page_controls = pagination.get_page_controls(\n url=request.url,\n current_page=paginator.current_page(),\n total_pages=paginator.total_pages()\n )\n # if form is empty return all ads\n except KeyError:\n page_query = pagination.get_page_number(url=request.url)\n count = (\n await Ad.all()\n .prefetch_related(\"user\", \"ad_image\", \"ad\", \"ad_rent\")\n .count()\n )\n paginator = pagination.Pagination(page_query, count)\n results = (\n await Ad.all()\n .prefetch_related(\"user\", \"ad_image\", \"ad\", \"ad_rent\")\n .limit(paginator.page_size)\n .offset(paginator.offset())\n )\n page_controls = pagination.get_page_controls(\n url=request.url,\n current_page=paginator.current_page(),\n total_pages=paginator.total_pages()\n )\n return templates.TemplateResponse(\n \"ads/filter_search.html\",\n {\n \"request\": request,\n \"results\": results,\n \"page_controls\": page_controls,\n \"count\": count\n }\n )", "def active_users(request):\n # 从数据库获取发起活动数前60名\n users1 = UserInfo.objects.order_by('-sponsor_num')\n # print('users1:', len(users1))\n if len(users1) > 60:\n set_users1 = set(users1[:10])\n else:\n set_users1 = set(users1)\n # print('set_users1:', len(set_users1))\n # 从数据库获取参与活动前60名\n users2 = UserInfo.objects.order_by('-participate_num')\n # print('users2:', len(users2))\n if len(users2) > 60:\n set_users2 = set(users2[:10])\n else:\n set_users2 = set(users2)\n set_active_users = set_users1 | set_users2 # miao!\n\n # 从集合中随机取8个用户\n if len(set_active_users) >= 8:\n active_users = random.sample(set_active_users, 8)\n else:\n active_users = set_active_users\n # print(len(active_users))\n # 将用户信息整理成列表\n data = []\n for _user in active_users:\n user_info = {}\n user_info['user_id'] = str(_user.user.id)\n user_info['nickname'] = _user.nickname\n user_info['gender'] = _user.gender\n user_info['hd_pic'] = str(_user.portrait)\n user_info['sign_words'] = _user.introduction\n user_info['sponsor_num'] = _user.sponsor_num\n user_info['participate_num'] = _user.participate_num\n user_info['tags'] = []\n # 取用户兴趣标签\n j = 0\n for tag in _user.interest.all():\n user_info['tags'].append(tag.interests)\n j += 1\n if j == 3:\n break\n data.append(user_info)\n code[200]['data'] = data\n return JsonResponse(code[200])", "def get_queryset(self):\n return Participant.active.all()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use lead_ids from the wizard and set them to the new stage
def action_multi_lead_stage(self, cr, uid, ids, context=None):
    if context is None:
        context = {}
    wizard = self.browse(cr, uid, ids[0], context=context)
    lead_ids = wizard.lead_ids
    if lead_ids:
        for lead in lead_ids:
            self.pool.get('crm.lead').write(cr, uid, [lead.id], {'stage_id': wizard.stage_id.id}, context)
    return {'type': 'ir.actions.act_window_close'}
[ "def _onchange_stage_id_values(self, stage_id):\n if not stage_id:\n return {}\n print('1111')\n\n call_attempt = len(self.env['call.attempt'].browse(self.call_attempt_ids))\n call_pitch = len(self.env['call.pitch'].browse(self.call_pitch_ids))\n contact_meeting = len(self.env['contact.meeting'].browse(self.contact_meeting_ids))\n # file_attached = len(self.env['ir.attachment'].search([('res_model','=','res.partner'),('res_id','=',self.id)]))\n msg=''\n ## file attached\n file_attached = len(\n self.env['ir.attachment'].search([('res_model', '=', 'res.partner'), ('res_id', '=', self.id)]))\n if self.stage_id.id in (8, 16) and file_attached == 0:\n msg = msg + ' - Upload at least one file \\n'\n ##\n if self.stage_id.id == 2 and call_attempt == 0:\n msg = msg + ' - Call Attempt \\n'\n\n if self.stage_id.id == 3 and call_pitch == 0:\n msg = msg + ' - Call Pitch \\n'\n\n if self.stage_id.id == 9 and self.date_call_back_one == False:\n msg = msg + ' - Date (callback) '\n\n if self.stage_id.id == 10 and self.date_meeting_set == False:\n msg = msg + ' - Date (meeting set) \\n'\n\n if self.stage_id.id == 6 and self.date_preagreement == False:\n msg = msg + ' - Date (pre_agreement) \\n'\n\n ## individual and company contact\n if self.stage_id.id in (8,16) and self.mobile == False:\n msg = msg + ' - Mobile \\n'\n if self.stage_id.id in (8,16) and self.email == False:\n msg = msg + ' - Email \\n'\n if self.stage_id.id in (8, 16) and self.street == False:\n msg = msg + ' - Street in Adress \\n'\n if self.stage_id.id in (8,16) and self.lang == False:\n msg = msg + ' - Language \\n'\n if self.stage_id.id in (8, 16) and self.business_developer_id == False:\n msg = msg + ' - Business Developer \\n'\n if self.stage_id.id in (8,16) and self.vat == False:\n msg = msg + ' - TIN \\n'\n\n ## individual contact\n if self.stage_id.id in (8,16) and self.parent_id and self.parent_id.street== False:\n msg = msg + ' - Invoicing Address (Company Adress) \\n'\n if self.stage_id.id in (8,16) and self.inami == False:\n msg = msg + ' - INAMI \\n'\n if self.stage_id.id in (8,16) and self.subscription_type == False:\n msg = msg + ' - Subscription Type \\n'\n if self.stage_id.id in (8,16) and not self.title and self.is_company != True:\n msg = msg + ' - Title \\n'\n if self.stage_id.id in (8,16) and self.specialization == False:\n msg = msg + ' - Specialization \\n'\n ### Prospection process\n if self.stage_id.id in (8,16) and self.date_signed == False:\n msg = msg + ' - Date(Signed) \\n'\n if self.stage_id.id in (8, 16) and self.bd_signed == False:\n msg = msg + ' - Business Developer (Signed) \\n'\n if self.stage_id.id in (8, 16) and self.comment_signed == False:\n msg = msg + ' - Comment (Signed) \\n'\n\n ### Subscription details\n if self.stage_id.id in (8,16) and self.subscription_month == False:\n msg = msg + ' - Monthly subscription \\n'\n if self.stage_id.id in (8,16) and self.subscription_commitment == False:\n msg = msg + ' - Commitment \\n'\n if self.stage_id.id in (8,16) and self.subscription_upfront_payment == False:\n msg = msg + ' - Upfront Payment \\n'\n if self.stage_id.id in (8,16) and self.subscription_upfront_turnover == False:\n msg = msg + ' - Upfront turnover \\n'\n if self.stage_id.id in (8,16) and self.subsciption_part_condition == False:\n msg = msg + ' - Particular Conditions \\n'\n\n ## stage activated and only individuals\n if self.stage_id.id == 16 and self.doctor_admin == False:\n msg = msg + ' - Doctor AdminID \\n'\n ### stage account managment\n if self.stage_id.id == 16 and 
self.first_email == False:\n msg = msg + ' - 1st email (activation) \\n'\n if self.stage_id.id == 16 and self.service_completed == False:\n msg = msg + ' - Services completed \\n'\n if self.stage_id.id == 16 and self.price_completed == False:\n msg = msg + ' - Prices completed \\n'\n if self.stage_id.id == 16 and self.cv_completed == False:\n msg = msg + ' - CV/experiences completed \\n'\n if self.stage_id.id == 16 and self.duration_completed == False:\n msg = msg + ' - Duration completed \\n'\n if self.stage_id.id == 16 and self.personal_message_completed == False:\n msg = msg + ' - Personal message completed \\n'\n if self.stage_id.id == 16 and self.profile_picture == False:\n msg = msg + ' - Profile picture \\n'\n if self.stage_id.id == 16 and self.photo_practice == False:\n msg = msg + ' - Photo Practice \\n'\n if self.stage_id.id == 16 and self.marketing_kit == False:\n msg = msg + ' - Marketing kit \\n'\n if self.stage_id.id == 16 and self.synchronisation_completed == False:\n msg = msg + ' - Synchronization \\n'\n if self.stage_id.id == 16 and self.backlink == False:\n msg = msg + ' - Backlink \\n'\n if self.stage_id.id == 16 and self.google_profile == False:\n msg = msg + ' - Google profile \\n'\n if self.stage_id.id == 16 and self.voicemail == False:\n msg = msg + ' - Voicemail \\n'\n if self.stage_id.id == 16 and self.mail_signature == False:\n msg = msg + ' - Mail signature \\n'\n if self.stage_id.id == 16 and self.email_to_patient == False:\n msg = msg + ' - Email to patient \\n'\n if self.stage_id.id == 16 and self.translation == False:\n msg = msg + ' - Translation \\n'\n if self.stage_id.id == 16 and self.business_card == False:\n msg = msg + ' - Manuel Sent \\n'\n if self.stage_id.id == 16 and self.manuel_sent == False:\n msg = msg + ' - Business cards \\n'\n if self.stage_id.id == 16 and self.widget == False:\n msg = msg + ' - Widget \\n'\n if self.stage_id.id == 16 and self.voice_mail == False:\n msg = msg + ' - Voicemail + email signature \\n'\n if self.stage_id.id == 16 and self.website_ok == False:\n msg = msg + ' - Website \\n'\n if self.stage_id.id == 16 and self.customer_service_number == False:\n msg = msg + ' - Customer service number on google profile \\n'\n if self.stage_id.id == 16 and self.website_backlink == False:\n msg = msg + ' - Backlink on website \\n'\n\n ## Lost paying, tab lost\n if self.stage_id.id == 17 and self.date_lost == False:\n msg = msg + ' - Lost Date \\n'\n if self.stage_id.id == 17 and self.reason_lost == False:\n msg = msg + ' - Lost Reason \\n'\n\n\n\n\n ##\n if msg:\n raise ValidationError('To move to this step you first need to fill those fields : \\n' + msg)\n\n return {}", "def devpiserver_stage_created(stage):", "def stage(self, request):\n player_id = request.data['player_id'] \n player = Player.objects.get(discord_id=player_id)\n \n game = player.get_game()\n\n if not game:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n \n # Set stage\n stage_name = request.data['stage_name']\n \n stage_obj = Stage.objects.filter(name=stage_name).first()\n game.stage = stage_obj\n game.save()\n\n return Response(status=status.HTTP_200_OK)", "def stage(self, stage):\n\n self._stage = stage", "def stage_id(self, value: int = None):\n print(\n \"Warning: `stage_id = i` is deprecated, please use \\n\",\n \" set_stage(i, placement) for easy and efficient Pipeline parallel experience.\",\n )\n\n self._is_null = False\n self._stage_id = value", "def increment_stage_in_forms(forms):\n for index, form in enumerate(forms.all(), 1):\n form.stage = 
index\n form.save(update_fields=['stage'])", "def default_get(self, cr, uid, fields, context=None):\n if context is None:\n context = {}\n record_ids = context.get('active_ids', False)\n res = super(crm_lead_stage, self).default_get(cr, uid, fields, context=context)\n\n if record_ids:\n opp_ids = []\n opps = self.pool.get('crm.lead').browse(cr, uid, record_ids, context=context)\n for opp in opps:\n opp_ids.append(opp.id)\n if 'lead_ids' in fields:\n res.update({'lead_ids': opp_ids})\n\n return res", "def renumerate_stages(pipeline):\n stages = pipeline['stages']\n\n main_index = 1\n for idx, stage in enumerate(stages):\n if stage['name'].startswith('Git Tag'):\n stage['requisiteStageRefIds'] = [str(main_index)]\n stage['refId'] = str(main_index * 100)\n elif stage['name'].startswith('Attach Scaling'):\n stage['requisiteStageRefIds'] = [str(main_index)]\n stage['refId'] = str(main_index * 101)\n elif stage['name'].startswith('ServiceNow'):\n stage['requisiteStageRefIds'] = [str(main_index)]\n stage['refId'] = str(main_index * 102)\n elif stage['type'] == 'bake' or idx == 0:\n stage['requisiteStageRefIds'] = []\n stage['refId'] = str(main_index)\n else:\n stage['requisiteStageRefIds'] = [str(main_index)]\n main_index += 1\n stage['refId'] = str(main_index)\n\n LOG.debug('step=%(name)s\\trefId=%(refId)s\\t'\n 'requisiteStageRefIds=%(requisiteStageRefIds)s', stage)\n\n return pipeline", "def switch_stage(self, next_stage_index):\n prev_stage = self.current_stage()\n self.stage_index = next_stage_index\n curr_stage = self.current_stage() # set stage timer to zero\n curr_stage.start_stage()\n if isinstance(prev_stage, GrowthStage) and isinstance(curr_stage, WaitingStage):\n curr_stage.set_stress(prev_stage.overwatered, prev_stage.underwatered, prev_stage.stress_time)\n # print(f\"Plant {self.id} moving to new stage!\")\n # print(self.current_stage())", "def _onchange_stage_id_values(self, stage_id):\n crm_stage = self.env['crm.stage']\n res = super(Lead,self)._onchange_stage_id_values(stage_id)\n for rec in self.monthly_revenue_ids:\n rec.update({'percentage':res.get('probability')})\n print (\"self.stage_id.show_when_chaing\",self.stage_id.show_when_chaing)\n print (\"self.stage_id.requirements\",self.stage_id.requirements)\n if self.stage_id.show_when_chaing:\n if self.stage_id.requirements:\n text = self.stage_id.requirements\n self.env.user.notify_info(message=text,sticky=True)\n return res", "def _addToKnowledge(stage):\n _byOid[stage.getOid()] = stage\n _bySequence.append(stage)\n _byName[stage.getName()] = stage\n publicId = stage.getPublicId()\n if publicId != None:\n _byPublicId[publicId] = stage\n\n return", "def action_apply(self, cr, uid, ids, context=None):\n record_id = context and context.get('active_ids') or False\n if not record_id:\n return {'type': 'ir.actions.act_window_close'}\n\n leads = self.pool.get('crm.lead')\n models_data = self.pool.get('ir.model.data')\n\n # Get Opportunity views\n result = models_data._get_id(\n cr, uid, 'crm', 'view_crm_case_opportunities_filter')\n opportunity_view_search = models_data.browse(\n cr, uid, result, context=context).res_id\n opportunity_view_form = models_data._get_id(\n cr, uid, 'crm', 'crm_case_form_view_oppor')\n opportunity_view_tree = models_data._get_id(\n cr, uid, 'crm', 'crm_case_tree_view_oppor')\n if opportunity_view_form:\n opportunity_view_form = models_data.browse(\n cr, uid, opportunity_view_form, context=context).res_id\n if opportunity_view_tree:\n opportunity_view_tree = models_data.browse(\n cr, uid, 
opportunity_view_tree, context=context).res_id\n\n for lead in leads.browse(cr, uid, record_id, context=context):\n if(lead.section_id):\n stage_ids = self.pool.get('crm.case.stage').search(cr, uid, [('type','=','opportunity'),('sequence','>=',1), ('section_ids','=', lead.section_id.id)])\n else:\n stage_ids = self.pool.get('crm.case.stage').search(cr, uid, [('type','=','opportunity'),('sequence','>=',1)])\n\n data = self.browse(cr, uid, ids[0], context=context)\n partner_ids = []\n if data.action == 'create':\n partner_ids = self._create_partner(cr, uid, ids, context=context)\n\n partner_id = partner_ids and partner_ids[0] or data.partner_id.id\n self._convert(cr, uid, ids, lead, partner_id, stage_ids, context=context)\n if data.name == 'merge':\n merge_obj = self.pool.get('crm.merge.opportunity')\n self.write(cr, uid, ids, {'opportunity_ids' : [(6,0, [data.opportunity_ids[0].id])]}, context=context)\n context.update({'lead_ids' : record_id})\n return merge_obj.merge(cr, uid, data.opportunity_ids, context=context)\n\n return {\n 'name': _('Opportunity'),\n 'view_type': 'form',\n 'view_mode': 'form,tree',\n 'res_model': 'crm.lead',\n 'domain': [('type', '=', 'opportunity')],\n 'res_id': int(lead.id),\n 'view_id': False,\n 'views': [(opportunity_view_form, 'form'),\n (opportunity_view_tree, 'tree'),\n (False, 'calendar'), (False, 'graph')],\n 'type': 'ir.actions.act_window',\n 'search_view_id': opportunity_view_search\n }", "def stage(self, stage: osbuild.Stage):", "def stage_next(self):\n STAGE = self.env['anytracker.stage']\n for ticket in self:\n method = ticket.project_id.method_id\n if not method:\n raise except_orm(_('Warning !'),\n _('No method defined in the project.'))\n stage = ticket.stage_id\n stages = STAGE.search([('method_id', '=', method.id)])\n if stage == stages[-1]: # last stage\n raise except_orm(_('Warning !'),\n _(\"You're already in the last stage\"))\n elif stage not in stages: # no stage\n next_stage = stages[0]\n else:\n next_stage = stages[list(stages).index(stage) + 1]\n ticket.write({'stage_id': next_stage.id})", "def _read_group_stage_ids(self, stages, domain, order):\n stage_ids = self.env['salon.stage'].search([])\n return stage_ids", "def assign_stage_index(self):\n if self.pre_part_head is not None:\n stage_index = 0\n cur_nf = self.pre_part_head\n while cur_nf != self.pre_part_tail:\n cur_nf.stage_index = stage_index\n stage_index = stage_index + 1\n cur_nf = cur_nf.next_nf\n\n if self.post_part_head is not None:\n stage_index = 0\n cur_nf = self.pre_part_head\n while cur_nf is not None:\n cur_nf.stage_index = stage_index\n stage_index = stage_index + 1\n cur_nf = cur_nf.next_nf", "def prepare_staging_area(sr_path, staging_path, vdi_uuids, seq_num=0):\n for vdi_uuid in vdi_uuids:\n source = os.path.join(sr_path, \"%s.vhd\" % vdi_uuid)\n link_name = os.path.join(staging_path, \"%d.vhd\" % seq_num)\n _link(source, link_name)\n seq_num += 1", "def update_tracking_tool(self, new):\n print(f\"Update tracking_tool in preproc/reg stages to {new}\")\n self.stages[\"Preprocessing\"].config.tracking_tool = new\n self.stages[\"Registration\"].config.tracking_tool = new", "def create(self, vals):\n result = super(CrmLeadProductTemplate, self).create(vals)\n active_lead = self.env['crm.lead'].browse(self._context.get('active_id'))\n \n if active_lead:\n active_lead.inquiry_product = result.id\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }