query (string, 9–9.05k chars) | document (string, 10–222k chars) | negatives (list, 19–20 items) | metadata (dict)
---|---|---|---|
Bust our cache for a given type; a single call can bust multiple caches
|
def bust_cache(type, user_pk):
    bust_keys = BUST_CACHES[type]
    keys = [CACHE_TYPES[k] % user_pk for k in bust_keys]
    cache.delete_many(keys)
|
[
"def applyCache(self, path, type):\n\t\t\n\t\treturn None",
"def _clear_type_cache():\n\tpass",
"def dynCache():\n pass",
"def gen_cache(self, key, value=None, type=Cache):\n if type == MultiHeadAttention.StaticCache: # static_kv\n k, v = self.compute_kv(key, value)\n return self.StaticCache(k, v)\n elif value is None: # incremental_state\n fill_shape = [-1, self.num_heads, 0, self.head_dim]\n fill_shape[0] = paddle.shape(key)[0].item()\n k = paddle.full(fill_shape, 0, key.dtype)\n v = paddle.full(fill_shape, 0, key.dtype)\n return self.Cache(k, v)\n else:\n # incremental_state with initial value, mainly for usage like UniLM\n return self.Cache(key, value)",
"def add_to_cache(self, data_type: str, data_name: str, data: Any) -> None:\n if data_type not in self.cache:\n self.cache[data_type] = {}\n\n self.cache[data_type][data_name] = data",
"def flush_from_cache(self):\r\n self.__class__.flush_cached_instance(self)",
"def invalidate_caches():",
"def cachetype(self) :\n try :\n return self._cachetype\n except Exception as e:\n raise e",
"def _writeTmpCacheToCache(self, tmpCache, type_):\n cursor = self._conn.cursor()\n for index in tmpCache:\n data = tmpCache[index]\n values = index + tuple(data)\n cursor.execute(\"\"\"INSERT INTO %ss_cache\n VALUES(%s)\"\"\" % (type_, ('?,'*len(values))[0:-1]), values)\n cursor.close()",
"def set_cacheable(cacheable):",
"def clear_caches(self):",
"def cache_clear():\n\n cache.clear()\n wrapper.hits = wrapper.misses = 0",
"def put(self, key, item):\n if key is None or item is None:\n return\n if key in self.key_tracker.keys():\n y = self.key_tracker.get(key) + 1\n self.key_tracker.pop(key)\n else:\n y = 1\n if len(self.key_tracker) >= BaseCaching.MAX_ITEMS:\n x = self.least_frequent_use_key()\n print(\"DISCARD: {}\".format(x))\n self.key_tracker.pop(x)\n self.cache_data.pop(x)\n self.cache_data.update({key: item})\n self.key_tracker.update({key: y + self.count / 1000})\n self.count += 1",
"def insert_into_cache(self, cache, key, value):\n if cache == self.t1:\n evicted = self.t1.write(key, value)\n if evicted != None:\n return self.insert_into_cache(self.b1, evicted['key'], evicted['value'])\n\n if cache == self.b1:\n return self.b1.write(key, value)\n\n if cache == self.t2:\n evicted = self.t2.write(key, value)\n if evicted != None:\n return self.insert_into_cache(self.b2, evicted['key'], evicted['value'])\n\n if cache == self.b2:\n return self.b2.write(key, value)",
"def _register_cache(key, cache):\n _all_caches_[key] = cache",
"def _cache_merchant_items(self,items,merchantid):\n\n\t\t#check if we should cache\n\t\tif \"cache.write\" not in self.task.meta:\n\t\t\treturn\n\n\t\t#get section to cache to\n\t\tsection=self.task.meta[\"cache.write\"]\n\t\t\t\n\t\t#write to cache\n\t\tCache(section).set( \"matcher.legacy.items.\"+str(merchantid),\n\t\t\t\t json.dumps([item.to_dict() for item in items]) )",
"def cache(tag = \"*\", design = \"*\", store = \"*\"):\r\n\r\n job = {\r\n \"type\" : \"cache\",\r\n \"tag\" : tag,\r\n \"design\" : design,\r\n \"store\" : store\r\n }\r\n return job",
"def cache_get_and_put(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any,\n key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult':\n return __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint)",
"def cachePage(self, page):\n pass",
"def _invalidate_caches(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
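For context, the `bust_cache` document above depends on module-level `CACHE_TYPES` and `BUST_CACHES` mappings and a `cache` backend that are not shown in this row. Below is a minimal sketch of what they might look like, assuming Django's cache framework; the key templates and type names are illustrative, not taken from the dataset.

```python
from django.core.cache import cache  # assumed backend providing cache.delete_many()

# Hypothetical key templates, formatted with a user's primary key.
CACHE_TYPES = {
    'followers': 'fl-%d',
    'following': 'fo-%d',
    'favorites': 'fav-%d',
}

# Hypothetical mapping from an event type to the cache keys it invalidates.
BUST_CACHES = {
    'followers': ['followers'],
    'following': ['following', 'favorites'],
}

def bust_cache(type, user_pk):
    bust_keys = BUST_CACHES[type]
    keys = [CACHE_TYPES[k] % user_pk for k in bust_keys]
    cache.delete_many(keys)

# Example: drop both the 'following' and 'favorites' keys for user 42.
bust_cache('following', 42)
```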
Return a list of all users who favour the given recipe
|
def favorers(self, recipe):
    #key = cache_key('following', user.pk)
    #following = cache.get(key)

    #if following is None:
    qs = Favorite.objects.filter(recipe=recipe).all()
    favorers = [u.favorer for u in qs]
    #cache.set(key, following)

    return favorers
|
[
"def get_queryset(self):\n queryset = super().get_queryset()\n all_favorites = self.request.user.favorites.all().values('recipe')\n return queryset.filter(id__in=all_favorites)",
"def get_favorited_recyclers(user_id):\n\n return FavRecycler.query.filter(\n FavRecycler.user_id == user_id).all()",
"def get_web_fav_users(ref, getter, scraper):\r\n fav_tree = getter.get_legacy_part(ref, \"favs\")\r\n fav_recs = scraper.get_legacy_part(fav_tree)\r\n ffset = set()\r\n ffdict = dict()\r\n if fav_recs:\r\n ffset = {int(x.id) for x in fav_recs}\r\n ffdict = {int(x.id): x.alias for x in fav_recs}\r\n return ffset, ffdict",
"def get(self, user):\n query = request.args.get(\"search\")\n ret = {}\n ret[\"recipes\"] = search_own_recipes(query, user)",
"def show_recipes_count_per_user(value):\n return Recipe.objects.filter(author=value).count()",
"def circuit_favoriting_users(self, circuit_id):\n key = ':'.join(\n [CIRCUIT_FAV_USRS_1, \n str(circuit_id), \n CIRCUIT_FAV_USRS_2]\n )\n return self.RS.smembers(key)",
"def favorites(request):\n\n # Find favorites in DB according to user id\n favorites = Favorite.objects.filter(user_id=request.user.id)\n\n context = {\n \"favorites\": favorites,\n }\n\n return render(request, \"search/favorites.html\", context)",
"def favorites():\n\n # Select the current user's favorited fish\n rows = db.execute(\"SELECT fishname FROM favorites WHERE user_id = :user_id\",\n user_id=session[\"user_id\"])\n\n # If GET, show the users favotited fish\n if request.method == \"GET\":\n\n return render_template(\"favorites.html\", rows=rows)\n\n # If POST, render the selected fish's HTML page\n else:\n\n fish = request.form.get(\"fishname\")\n return render_template(\"fish.html\", fish=fish, verify=1)",
"def get_user_favorites(self):\n\n return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',\n headers=self.__get_header_with_auth()))",
"def users_for_exam(cls, exam):\n users = cls.objects.filter(\n scenario__task__exam=exam\n ).values('user').distinct()\n user_list = [u.get('user') for u in users]\n return user_list",
"def friends_in_listing(user, listing):\n\n friends = []\n\n for roommate in listing.users: \n if roommate in user.friends: \n friends.append(roommate)\n primary = User.query.get(listing.primary_lister)\n\n if primary in user.friends: \n friends.append(primary)\n\n return set(friends)",
"def check_favorite(user, obj):\n return get_fav(obj, user)",
"def get_recommendations(users_to_recommend):\n\tdata = get_all_data()\n\tprint \"loaded data\"\n\tprecomputed_predictions = loadReccFile('ratings')\n\tprint \"loaded precomputed predictions\"\n\tcf = collaborative_filtering(data, precomputed_predictions)\n\tprint \"initialized collaborative filter model\"\n\tfor user_id in users_to_recommend:\n\t\trecommendation = cf.recommendation(user_id)\n\t\tprint \"Recommendations for user : \" + str(user_id)\n\t\tprint [recc[0] for recc in recommendation]",
"def items_for_you(df, similar_users, main_user):\n set_main = set(df[df['Customer'] == main_user]['tags'])\n for user in similar_users:\n items = []\n if user != main_user:\n set_user = set(df[df['Customer'] == user]['tags'])\n items = set_main - set_user\n if len(items) > 0:\n return items\n else:\n return 'There is nothing to recommend'",
"def favourite(self, request, *args, **kwargs):\n\n is_favourite = bool(int(request.data.get('is_favourite')))\n id_foodrecipe = request.data.get('id_foodrecipe')\n user = request.user\n foodrecipe = FoodRecipe.objects.get(pk=id_foodrecipe)\n if is_favourite:\n user.foodrecipe.add(foodrecipe)\n foodrecipe.lovers = foodrecipe.user_set.all().count()\n else:\n user.foodrecipe.remove(foodrecipe)\n foodrecipe.lovers = foodrecipe.user_set.all().count()\n foodrecipe.save()\n return Response({'is_favourite': is_favourite})",
"def showFavorites(request):\n\ttemplate = 'pages/favorites.html'\n\n\tfavoritesList = []\n\n\tif request.method == 'GET':\n\t\t\"\"\"collecte the user profil\"\"\"\n\t\tuser = request.user\n\t\tprofil = search_profil(user.username)\n\n\t\tprofil = profil[0]\n\n\t\t\"\"\"collect user favorite in a list\"\"\"\n\t\tfavoritesList = profil.favorites.all()\n\n\treturn render(request, template, {'detailForm': DetailForm(),\n\t\t'searchForm': SearchForm(),\n\t\t'favoritesList': favoritesList})",
"def view_favorites():\n\n favorite_items = User.objects(id = session['user']['id']).get().favorites_list\n \n items = []\n\n for i in range(0, len(favorite_items)):\n\n item = Item.objects(id = favorite_items[i]).first()\n items.append(item)\n \n return render_template(\"user/favorites_list.html\", items = items)",
"def ingredient_search(self, roles):\n\n ingredients = []\n for ingredient in self.ingredients:\n if (ingredient.role in roles\n and ingredient not in ingredients):\n ingredients.append(ingredient)\n\n return ingredients",
"def recommend_per_user(\n self, user, n_recommendations, filter_out_interacted_items=True\n ):\n\n u_recommended_items = []\n if self.user_sequences.get(user) is not None:\n u_items = self.user_sequences.get(user)\n u_recommended_items = list(\n list(\n zip(\n *self.model.wv.most_similar(\n u_items,\n topn=n_recommendations\n + len(u_items) * filter_out_interacted_items,\n )\n )\n )[0]\n )\n if filter_out_interacted_items:\n u_recommended_items = [\n i for i in u_recommended_items if i not in u_items\n ][:n_recommendations]\n return (\n [user]\n + u_recommended_items\n + [None] * (n_recommendations - len(u_recommended_items))\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
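The `favorers` and `add_favorite` documents in this dataset assume a `Favorite` model with `favorer` and `recipe` foreign keys, and a `Recipe` with an `author`. A minimal model sketch under that assumption; the field names follow the queries, while field types and `related_name`s are guesses, not part of the dataset:

```python
# Hypothetical models inferred from the favorers()/add_favorite() documents.
from django.conf import settings
from django.db import models

class Recipe(models.Model):
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

class Favorite(models.Model):
    favorer = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
                                related_name='favorites')
    recipe = models.ForeignKey(Recipe, on_delete=models.CASCADE,
                               related_name='favorited_by')

    class Meta:
        # One favorite per (user, recipe) pair, matching the AlreadyExistsError check.
        unique_together = ('favorer', 'recipe')
```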
Create 'favorer' favorites 'recipe' relationship
|
def add_favorite(self, favorer, recipe):
    relation, created = Favorite.objects.get_or_create(favorer=favorer, recipe=recipe)
    if created is False:
        raise AlreadyExistsError("User '%s' already favors '%s'" % (favorer, recipe))
    recipient = User.objects.get(id=recipe.author_id)
    favorite_created.send(sender=self, favorer=favorer)
    favorer_created.send(sender=self, recipe=recipe)
    favorite_recipe_created.send(sender=self, favorers=relation)
    notify.send(favorer, actor=favorer, recipient=recipient, verb='added your recipe to his favorites', target=recipe)
    print("sent notification - has followed your recipe to: ", recipient)
    return relation
|
[
"def create_favorite(user_id, fish_id):\n favorite = Favorite(user_id = user_id, \n fish_id = fish_id)\n\n db.session.add(favorite)\n db.session.commit()\n return favorite",
"def post(self, request, format=None):\n recipe_id = request.data['id']\n user = self.request.user,\n serializer = FavoriteSerializer(\n data={\n 'recipe': recipe_id,\n 'user': user[0].id\n })\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)",
"def favorers(self, recipe):\n #key = cache_key('following', user.pk)\n #following = cache.get(key)\n\n #if following is None:\n qs = Favorite.objects.filter(recipe=recipe).all()\n favorers = [u.favorer for u in qs]\n #cache.set(key, following)\n\n return favorers",
"def create_recipe_ingredient(recipe, ingredient):\n\n recipe_ingredient = RecipeIngredient(recipe=recipe, ingredient=ingredient)\n\n db.session.add(recipe_ingredient)\n db.session.commit()\n\n return recipe_ingredient",
"def favourite(self, request, *args, **kwargs):\n\n is_favourite = bool(int(request.data.get('is_favourite')))\n id_foodrecipe = request.data.get('id_foodrecipe')\n user = request.user\n foodrecipe = FoodRecipe.objects.get(pk=id_foodrecipe)\n if is_favourite:\n user.foodrecipe.add(foodrecipe)\n foodrecipe.lovers = foodrecipe.user_set.all().count()\n else:\n user.foodrecipe.remove(foodrecipe)\n foodrecipe.lovers = foodrecipe.user_set.all().count()\n foodrecipe.save()\n return Response({'is_favourite': is_favourite})",
"def add_rest_to_db():\n\n for restaurant in restaurants:\n info = restaurants[restaurant]\n address = ', '.join(info['address'])\n\n category = Restaurant(rest_id=info['rest_id'],\n rest_title=info['rest_title'],\n rest_alias=info['rest_alias'],\n rating=info['rating'],\n num_reviews=info['num_reviews'],\n address=address,\n phone=info['phone']\n )\n\n db.session.add(category)\n db.session.commit()",
"def get_queryset(self):\n queryset = super().get_queryset()\n all_favorites = self.request.user.favorites.all().values('recipe')\n return queryset.filter(id__in=all_favorites)",
"def fav_a_recycler(user_id, location_id):\n\n fav_recycler = FavRecycler(user_id=user_id, location_id=location_id)\n\n db.session.add(fav_recycler)\n db.session.commit()\n\n return fav_recycler",
"def create_favorite(self, _type: str, id):\n\n if _type == \"dashboard\":\n url = f\"api/dashboards/{id}/favorite\"\n elif _type == \"query\":\n url = f\"api/queries/{id}/favorite\"\n else:\n return\n\n return self._post(url, json={})",
"def save_to_favorites(drink_id):\n if session['username']:\n new_favorite = Favorites(\n username=session['username'], drink_id=drink_id)\n db.session.add(new_favorite)\n db.session.commit()\n session['favorites'] = {new_favorite.drink_id: new_favorite.id}\n flash(\"Recipe saved! Click My Mixology to see all saved recipes.\")\n return redirect(f\"/display-recipe/{drink_id}\")\n else:\n flash(\"You must be logged in to save recipes!\")\n return redirect(f\"/display-recipe/{drink_id}\")",
"def favorite( self, favorite ) :\n return self.client.ticketfav( self.project, self, favorite )",
"def add_recipe():\n\n recipe_id = db.recipes.insert(\n name=request.json.get('name'),\n steps=request.json.get('steps'),\n cook_time=request.json.get('cook_time'),\n shared=request.json.get('shared'),\n image_url=request.json.get('image_url'),\n )\n\n recipe_ingredients = request.json.get('ingredients')\n \n for recipe_ingredient in recipe_ingredients:\n # get ingredient id if the ingredient is already known to the app\n ingredient_id = db(\n db.ingredients.name == recipe_ingredient[\"ingredient\"]\n ).select().first()\n\n # if not found, insert new ingredient (visible to every user and\n # reusable in every future recipe)\n if ingredient_id is None:\n ingredient_id = db.ingredients.insert(\n name = recipe_ingredient[\"ingredient\"]\n )\n\n # link the ingredient and recipe with a new recipe-ingredient entry\n recipe_ingredient[\"id\"] = db.recipe_ingredients.insert(\n recipe = recipe_id, \n ingredient = ingredient_id, \n quantity = recipe_ingredient[\"amount\"],\n )\n \n return dict(id=recipe_id, myingredients=recipe_ingredients)",
"def new_favourite():\n\n user_id = int(request.args['user_id'])\n photo_id = int(request.args['photo_id'])\n\n # check if photo is already in favourites\n for post in get_favourites(user_id):\n if post[\"photo_id\"] == photo_id:\n return \"NoSucces\"\n\n # add favourite into database\n add_favourite(user_id, photo_id)\n\n return \"Succes\"",
"def remove_favorite(self, favorer, recipe):\n try:\n rel = Favorite.objects.get(favorer=favorer, recipe=recipe)\n favorite_removed.send(sender=rel, favorer=rel.favorer)\n favorer_removed.send(sender=rel, recipee=rel.recipe)\n favorite_recipe_removed.send(sender=rel, favorers=rel)\n recipient = User.objects.get(id=recipe.author_id)\n notify.send(rel.favorer, actor=favorer, recipient=recipient, verb='removed form his favorites your recipe', target=recipe)\n print(\"sent notification - has UNfollowed your recipe to: \", recipient)\n rel.delete()\n return True\n except Favorite.DoesNotExist:\n return False",
"def makeDrink(ingredients, preferences):\n import random\n drinkName = random.choice(names[\"adjective\"]) + \" \" + random.choice(names[\"noun\"])\n drink = {\n \"name\": drinkName,\n \"ingredients\": [],\n }\n \n for preference in preferences:\n if preferences[preference] == True:\n drink[\"ingredients\"].append(random.choice(ingredients[preference]))\n \n return drink",
"def create_review(review, stars, reviewer_email, restaurant_id):\n new_review = Review()\n new_review.restaurant_id = restaurant_id\n new_review.review = review\n new_review.stars = stars\n new_review.reviewer_email = reviewer_email\n\n db_session = current_app.config[\"DB_SESSION\"]\n db_session.add(new_review)\n db_session.commit()\n return db_session.query(Review).filter(Review.id == new_review.id).first()",
"def favorite_review():\n\n review_id = request.form.get('reviewID')\n asin = request.form.get('asin')\n user_id = session['user']['id']\n\n user = User.query.get(user_id)\n\n # Adds or removes a product from a user's favorites\n favorite_status = user.update_favorite_review(review_id)\n\n # If the user favorites a review, automatically favorite the product\n if favorite_status == \"Favorited\":\n user.add_favorite_product_from_review(asin)\n\n return favorite_status",
"def create_relationship(client: Client, indicator: str, threats: List, entity_a_type: str) -> List:\n relationships = []\n if client.create_relationships:\n for threat in threats:\n for block in threat.get('blockSet', {}):\n relationships.append(\n EntityRelationship(name='related-to',\n entity_a=indicator,\n entity_a_type=entity_a_type,\n entity_b=block.get('data'),\n entity_b_type=check_indicator_type(block.get('data')),\n brand=BRAND))\n for exec_set in threat.get('executableSet', {}):\n relationships.append(\n EntityRelationship(name='related-to',\n entity_a=indicator,\n entity_a_type=entity_a_type,\n entity_b=exec_set.get('md5Hex'),\n entity_b_type=FeedIndicatorType.File,\n brand=BRAND))\n return relationships",
"def add_to_favorite(drink_id):\n username1 = session['user_id']\n \n user = User.query.filter_by(username=username1).first()\n if user:\n user_num = user.id\n else:\n print(\"didnt find user\")\n print(user)\n \n new_drink = [Drink(id=drink_id, user_id=user_num)]\n db.session.add_all(new_drink)\n db.session.commit()\n return redirect(\"/search\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
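The `add_favorite` and `remove_favorite` documents send several custom signals (`favorite_created`, `favorer_created`, and so on) alongside django-notifications' `notify.send`. A minimal sketch of how such signals could be declared and consumed, assuming plain `django.dispatch` signals; the declarations and the example receiver are illustrative, not taken from the dataset:

```python
# Hypothetical signal declarations matching the names used in add_favorite()
# and remove_favorite(); the real project may define or connect them differently.
from django.dispatch import Signal, receiver

favorite_created = Signal()         # sent with favorer=<user>
favorer_created = Signal()          # sent with recipe=<recipe>
favorite_recipe_created = Signal()  # sent with favorers=<Favorite instance>

favorite_removed = Signal()
favorer_removed = Signal()
favorite_recipe_removed = Signal()

@receiver(favorite_created)
def on_favorite_created(sender, favorer, **kwargs):
    # Example receiver: invalidate this user's per-user caches on a new favorite,
    # reusing the bust_cache() sketch shown after the first row.
    bust_cache('favorites', favorer.pk)
```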
Remove 'favorer' favorites 'recipe' relationship
|
def remove_favorite(self, favorer, recipe):
    try:
        rel = Favorite.objects.get(favorer=favorer, recipe=recipe)
        favorite_removed.send(sender=rel, favorer=rel.favorer)
        favorer_removed.send(sender=rel, recipe=rel.recipe)
        favorite_recipe_removed.send(sender=rel, favorers=rel)
        recipient = User.objects.get(id=recipe.author_id)
        notify.send(rel.favorer, actor=favorer, recipient=recipient, verb='removed your recipe from his favorites', target=recipe)
        print("sent notification - has UNfollowed your recipe to: ", recipient)
        rel.delete()
        return True
    except Favorite.DoesNotExist:
        return False
|
[
"def delete_relationship(self, rel_id) -> Relationship:",
"def recipe_no_id():\n return remove_id(recipe())",
"def remove_pizza_from_menu(self, pizza: Pizza):\n if pizza in self.recipes:\n self.recipes.remove(pizza)\n return",
"def remove_relationship(self, relationship):\n a = (relationship.sensor_x.get_uuid(),\n relationship.sensor_y.get_uuid())\n self.relationships.pop(frozenset(a), None)\n return",
"def _remove_from_relation(self, query, relationname, toremove=None):\n submodel = get_related_model(self.model, relationname)\n for dictionary in toremove or []:\n remove = dictionary.pop('__delete__', False)\n if 'id' in dictionary:\n subinst = get_by(self.session, submodel, dictionary['id'])\n else:\n subinst = self.query(submodel).filter_by(**dictionary).first()\n for instance in query:\n getattr(instance, relationname).remove(subinst)\n if remove:\n self.session.delete(subinst)",
"def clear_relationship_terms(self):\n pass",
"def _remove_from_relation(self, query, relationname, toremove=None):\r\n submodel = get_related_model(self.model, relationname)\r\n for dictionary in toremove or []:\r\n remove = dictionary.pop('__delete__', False)\r\n if 'id' in dictionary:\r\n subinst = get_by(self.session, submodel, dictionary['id'])\r\n else:\r\n subinst = self.query(submodel).filter_by(**dictionary).first()\r\n for instance in query:\r\n getattr(instance, relationname).remove(subinst)\r\n if remove:\r\n self.session.delete(subinst)",
"def test_manage_remove_favorite(self):\n\n service.manage_add_or_remove_favorite(\n self.mock_product2, self.mock_user)\n for value in self.mock_product.favorites.values():\n self.assertEqual(value, None)",
"def delete_old_entries(model, recipe_id):\n model.delete().where(model.recipe == recipe_id).execute()",
"def remove_uptake_and_secretion_reactions(reactions):\n\n toremove = set()\n for r in reactions:\n if r.startswith('upsr_'):\n toremove.add(r)\n\n for r in toremove:\n reactions.pop(r)\n return reactions",
"def get_queryset(self):\n queryset = super().get_queryset()\n all_favorites = self.request.user.favorites.all().values('recipe')\n return queryset.filter(id__in=all_favorites)",
"def remove_recipe(recipe_id, owned):\n if(owned == \"False\"):\n if(update_recipes_array(recipe_id, remove = True)):\n mongo.db.recipes.update_one({\"_id\": ObjectId(recipe_id)},\n {'$inc': {\"upvotes\" : -1}})\n flash(\"Recipe has been unpinned!\")\n return redirect(url_for('show_recipe', recipe_id=recipe_id))\n else:\n remove_image(recipe_id)\n mongo.db.recipes.delete_one({\"_id\": recipe_id})\n print('Removing!')\n update_recipes_array(recipe_id, type_of_array=\"recipes_owned\", remove = True)\n flash(\"Recipe has been removed from database!\")\n return redirect(url_for('get_recipes'))",
"def rm_favourite():\n\n user_id = request.args['user_id']\n photo_id = request.args['photo_id']\n\n remove_favourite(user_id, photo_id)\n\n flash(\"Picture was deleted from your favourites!\")\n return redirect(url_for(\"favourites\"))",
"def favorers(self, recipe):\n #key = cache_key('following', user.pk)\n #following = cache.get(key)\n\n #if following is None:\n qs = Favorite.objects.filter(recipe=recipe).all()\n favorers = [u.favorer for u in qs]\n #cache.set(key, following)\n\n return favorers",
"def test_remove(self):\n\n Favourite.objects.create(user=self.user, product=self.product)\n\n response = self.client.get('/remove/{}'.format(self.product.id),\n HTTP_REFERER='/myfood')\n\n f = Favourite.objects.filter(user=self.user, product=self.product)\n\n self.assertEquals(f.count(), 0)\n\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, '/myfood')",
"def remove_favorite(self, ticker):\n company_obj = NasdaqCompanies.objects.get(symbol=ticker)\n fav_list = self.favorites.split(',')\n fav_list.remove(str(company_obj.companyid))\n if len(fav_list) == 0:\n self.favorites = None\n else:\n self.favorites = ','.join(fav_list)\n self.save()",
"def remove_favorite(self):\n if request.method == 'POST':\n try:\n userID = get_jwt_identity()\n info = request.json\n id_post = info[\"post_id\"]\n\n favorite = User_has_Post_as_favorite.query.filter_by(user_id=userID, post_id=id_post).first()\n\n if not favorite:\n return Response(dumps({\"message\": \"IT IS NOT FAVORITE\"}), status=422, mimetype=\"application/json\")\n \n db.session.delete(favorite)\n db.session.commit()\n\n return Response(dumps({\"message\": \"SUCCESS\"}), status=200, mimetype=\"application/json\")\n\n except HTTPException as e:\n return Response(dumps({\"message\": str(e)}), status=500, mimetype=\"application/json\")\n\n return Response(dumps({\"message\": \"NOT POST\"}), status=403, mimetype=\"application/json\")",
"def delete_favourite():\n if request.method == \"POST\":\n user_id = mongo.db.users.find_one({\"username\": session[\"user\"]})[\"_id\"]\n favourite = request.form.get(\"wine_id\")\n\n mongo.db.users.update({\"_id\": ObjectId(user_id)}, {\"$pull\":\n {'favourites': {\"wine_id\": favourite}}})\n\n flash(\"Wine has now been removed from your favourites\")\n return redirect(url_for('profile'))",
"def delete_ride():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a like for a specific user
|
def add_like(self, liker, recipe):
    like, created = Like.objects.get_or_create(liker=liker, recipe=recipe)
    if created is False:
        raise AlreadyExistsError("User '%s' already likes '%s'" % (liker, recipe))
    recipient = User.objects.get(id=recipe.author_id)
    like_created.send(sender=self, liker=liker)
    like_recipe_created.send(sender=self, recipe=recipe)
    notify.send(liker, actor=liker, recipient=recipient, verb='liked your recipe', target=recipe)
    return like
|
[
"def add_like(obj, user):\n obj_type = ContentType.objects.get_for_model(obj)\n with atomic():\n like, is_created = Like.objects.get_or_create(\n content_type=obj_type, object_id=obj.id, user=user\n )\n\n return like",
"def test_user_add_like_message(self):\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.testuser.id\n\n testuser2 = User.signup(username=\"testuser2\",\n email=\"test2@test.com\",\n password=\"testuser\",\n image_url=None)\n \n db.session.add(testuser2)\n db.session.commit()\n\n user2Message = Message(text=\"Test Test\", timestamp=None, user_id=testuser2.id)\n\n db.session.add(user2Message)\n db.session.commit()\n\n resp = c.post(f\"/users/add_like/{user2Message.id}\")\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(len(Likes.query.all()), 1)",
"def sendLike(self, like, user_fbid):\n return self.__send(user_fbid, \"\", like, None, False)",
"def like(request, content_type_id, object_id):\n\n content_type = get_object_or_404(ContentType, pk=content_type_id)\n obj = get_object_or_404(content_type.model_class(), pk=object_id)\n\n # generate a like by this user for the content object\n like = Like.objects.create(user=request.user, liked=obj)",
"def add_like(cls, user_id, melody_id):\n\n try:\n like = Like.query.filter_by(user_id=user_id,\n melody_id=melody_id,\n ).one()\n\n except NoResultFound:\n like = Like(user_id=user_id,\n melody_id=melody_id,\n )\n\n db.session.add(like)\n db.session.commit()\n print \"Added new like object to the db.\"",
"def like_article():\n # if request.method == \"POST\":\n data = request.get_json(force=True)\n current_user = get_jwt_identity()\n if data[\"action\"] == \"add\":\n database_client.push_new_like(current_user[\"user_id\"], data[\"id\"])\n if data[\"action\"] == \"delete\":\n database_client.delete_like(current_user[\"user_id\"], data[\"id\"])\n return {\"msg\": \"success\"}, 200",
"def trackLikes(mentions, item, likes=False):\n if (mentions is None or not likes or (not item.get('likes', None) and\n not item.get('comments', None))):\n return\n users = []\n likes = item.get('likes', None)\n if likes:\n users.extend([like.split(';', 1)[0] for like in likes.split('|')])\n comments = item.get('comments', None)\n if comments:\n users.extend([like.split(';', 1)[0] for like in comments.split('|')])\n if not len(users):\n return\n user = item['user_name'].lower()\n mentions[user] = mentions.get(user, {})\n for mention in users:\n name = mention.lower()\n mentions[user][name] = mentions[user].get(name, 0) + 1",
"def do_like(self, with_user_id):\n logger.info(f\">>>>>>>>>>>>>>>>>> begin liking algo <<<<<<<<<<<<<<<<<<<<<<<<\")\n # select user\n user: User = self[with_user_id]\n logger.info(f\"{user} wants to like a post\")\n\n posts_this_user_already_liked = user.my_likes\n\n # select all users which still have posts with zero likes and not of this user\n users_with_posts_with_zero_likes = [\n i for i in self if i.has_posts_with_zero_likes() and i != user\n ]\n\n if len(users_with_posts_with_zero_likes) == 0:\n logger.info(f\"{user} cannot do anything since there are no other users with posts with zero likes\")\n return\n else:\n logger.info(f\"available users with posts that have zero likes\\n{users_with_posts_with_zero_likes}\")\n # select random user\n random_user = random.choice(users_with_posts_with_zero_likes)\n logger.info(f\"{user} will like posts if {random_user}\")\n # try liking any random post from \"random user\"\n random_post = random.choice(random_user.posts)\n logger.info(f\"{user} wants to like {random_post}\")\n # if this user already liked the post start over\n if random_post.id in posts_this_user_already_liked:\n logger.warning(f\"{user} cannot like {random_post}, since he already liked it\")\n return\n\n # if all is well, like the posts\n random_user_index = self.index(random_user)\n random_post_index = random_user.posts.index(random_post)\n\n self[random_user_index].posts[random_post_index].like_my_random_post(user, self.api)\n\n self[with_user_id].my_likes.append(random_post.id)\n logger.success(f\"{user} successfully liked the post\")\n return",
"def test__Channel__get_user_like__1():\n channel_id = 202209200027\n guild_id = 202209200028\n user_name = 'Cross World'\n user_discriminator = 69\n user_display_name = 'Far East'\n \n user = User.precreate(\n 202209200029,\n name = user_name,\n discriminator = user_discriminator,\n display_name = user_display_name,\n )\n user.guild_profiles[guild_id] = GuildProfile()\n channel = Channel.precreate(channel_id, channel_type = ChannelType.guild_text, guild_id = guild_id)\n role = Role.precreate(guild_id, guild_id = guild_id, permissions = Permission().update_by_keys(view_channel = True))\n guild = Guild.precreate(guild_id)\n guild.roles[guild_id] = role\n guild.users[user.id] = user\n \n for input_value, expected_output in (\n ('hello', None),\n (user.name, user),\n (user.name[:-2], user),\n (user.full_name, user),\n (user.display_name, user),\n (user.display_name[:-2], user),\n ):\n output = channel.get_user_like(input_value)\n vampytest.assert_is(output, expected_output)",
"def like_by_users(self, usernames, amount=10, randomize=False, media=None):\n if self.aborting:\n return self\n\n total_liked_img = 0\n already_liked = 0\n inap_img = 0\n commented = 0\n followed = 0\n usernames = usernames or []\n upper_follower_limit = None\n lower_follower_limit = None\n\n for index, username in enumerate(usernames):\n self.logger.info(\n 'Username [{}/{}]'.format(index + 1, len(usernames)))\n self.logger.info('--> {}'.format(str(username.encode('utf-8'))[self.bye_b]))\n following = random.randint(0, 100) <= self.follow_percentage\n\n valid_user = validate_username(self.browser,\n username,\n self.ignore_users,\n self.blacklist,\n upper_follower_limit,\n lower_follower_limit)\n if valid_user is not True:\n self.logger.info(valid_user)\n continue\n\n try:\n links = get_links_for_username(\n self.browser,\n username,\n amount,\n self.logger,\n randomize,\n media)\n except NoSuchElementException:\n self.logger.error('Element not found, skipping this username')\n continue\n\n if (self.do_follow and\n username not in self.dont_include and\n following and\n self.follow_restrict.get(username, 0) < self.follow_times):\n followed += follow_user(self.browser,\n self.follow_restrict,\n self.username,\n username,\n self.blacklist,\n self.logger,\n self.logfolder)\n else:\n self.logger.info('--> Not following')\n sleep(1)\n\n if links is False:\n continue\n\n # Reset like counter for every username\n liked_img = 0\n jumped = 0\n\n for i, link in enumerate(links):\n # Check if target has reached\n if liked_img >= amount:\n self.logger.info('-------------')\n self.logger.info(\"--> Total liked image reached it's \"\n \"amount given: {}\".format(liked_img))\n break\n\n elif jumped >= 1:\n self.logger.info('-------------')\n self.logger.info(\"--> Like quotient reached! 
Total liked images: {}\".format(liked_img))\n break\n\n self.logger.info('Post [{}/{}]'.format(liked_img + 1, amount))\n self.logger.info(link)\n\n try:\n inappropriate, user_name, is_video, reason = (\n check_link(self.browser,\n link,\n self.dont_like,\n self.ignore_if_contains,\n self.ignore_users,\n self.username,\n upper_follower_limit,\n lower_follower_limit,\n self.logger,\n self.bye_b)\n )\n\n if not inappropriate:\n liked = like_image(self.browser,\n user_name,\n self.blacklist,\n self.logger,\n self.logfolder)\n\n if liked == True:\n total_liked_img += 1\n liked_img += 1\n checked_img = True\n temp_comments = []\n commenting = random.randint(\n 0, 100) <= self.comment_percentage\n\n if self.use_clarifai and (following or commenting):\n try:\n checked_img, temp_comments = (\n check_image(self.browser,\n self.clarifai_api_key,\n self.clarifai_img_tags,\n self.logger,\n self.clarifai_full_match)\n )\n except Exception as err:\n self.logger.error(\n 'Image check error: {}'.format(err))\n if (self.do_comment and\n user_name not in self.dont_include and\n checked_img and\n commenting):\n\n if temp_comments:\n # use clarifai related comments only!\n comments = temp_comments\n elif is_video:\n comments = (self.comments +\n self.video_comments)\n else:\n comments = (self.comments +\n self.photo_comments)\n commented += comment_image(self.browser,\n user_name,\n comments,\n self.blacklist,\n self.logger,\n self.logfolder,\n self.bye_b)\n else:\n self.logger.info('--> Not commented')\n sleep(1)\n\n elif liked == False:\n already_liked += 1\n elif liked == 'jumped':\n jumped += 1\n\n else:\n self.logger.info(\n '--> Image not liked: {}'.format(str(reason.encode('utf-8'))[self.bye_b]))\n inap_img += 1\n except NoSuchElementException as err:\n self.logger.error('Invalid Page: {}'.format(err))\n\n if liked_img < amount:\n self.logger.info('-------------')\n self.logger.info(\"--> Given amount not fullfilled, \"\n \"image pool reached its end\\n\")\n\n self.logger.info('Liked: {}'.format(total_liked_img))\n self.logger.info('Already Liked: {}'.format(already_liked))\n self.logger.info('Inappropriate: {}'.format(inap_img))\n self.logger.info('Commented: {}'.format(commented))\n\n self.liked_img += liked_img\n self.already_liked += already_liked\n self.inap_img += inap_img\n self.commented += commented\n\n return self",
"def test__Channel__get_user_like__0():\n channel_id = 202209200025\n user_name = 'Cross World'\n user_discriminator = 69\n user_display_name = 'Far East'\n \n user = User.precreate(\n 202209200026,\n name = user_name,\n discriminator = user_discriminator,\n display_name = user_display_name,\n )\n channel = Channel.precreate(channel_id, channel_type = ChannelType.private, users = [user])\n \n for input_value, expected_output in (\n ('hello', None),\n (user.name, user),\n (user.name[:-2], user),\n (user.full_name, user),\n (user.display_name, user),\n (user.display_name[:-2], user),\n ):\n output = channel.get_user_like(input_value)\n vampytest.assert_is(output, expected_output)",
"def like():\n post = mongo.db.Posts\n user = mongo.db.Users\n _id = request.json['_id']\n uuid = request.json['uuid']\n post.update({'_id': ObjectId(_id)}, {\"$addToSet\": {'likes': uuid}}, True)\n user.update({'uuid': uuid}, {\"$addToSet\": {'likes': _id}}, True)\n\n return jsonify({'result': 'like it!'})",
"def interact_by_users(self,\n usernames,\n amount=10,\n randomize=False,\n media=None):\n if self.aborting:\n return self\n\n total_liked_img = 0\n already_liked = 0\n inap_img = 0\n commented = 0\n followed = 0\n upper_follower_limit = None\n lower_follower_limit = None\n\n usernames = usernames or []\n\n for index, username in enumerate(usernames):\n self.logger.info(\n 'Username [{}/{}]'.format(index + 1, len(usernames)))\n self.logger.info('--> {}'.format(str(username.encode('utf-8'))[self.bye_b]))\n\n try:\n links = get_links_for_username(self.browser,\n username,\n amount,\n self.logger,\n randomize,\n media)\n except NoSuchElementException:\n self.logger.error('Element not found, skipping this username')\n continue\n\n if links is False:\n continue\n\n # Reset like counter for every username\n liked_img = 0\n jumped = 0 #if it is not alowed to like due to like quota, but at least, allow one interaction in case of follow\n\n for i, link in enumerate(links):\n # Check if target has reached\n if liked_img >= amount:\n self.logger.info('-------------')\n self.logger.info(\"--> Total liked image reached it's \"\n \"amount given: {}\".format(liked_img))\n break\n\n elif jumped >= 1:\n self.logger.info('-------------')\n self.logger.info(\"--> Like quotient reached! Total liked images: {}\".format(liked_img))\n break\n\n self.logger.info('Post [{}/{}]'.format(liked_img + 1, amount))\n self.logger.info(link)\n\n try:\n inappropriate, user_name, is_video, reason = (\n check_link(self.browser,\n link,\n self.dont_like,\n self.ignore_if_contains,\n self.ignore_users,\n self.username,\n upper_follower_limit,\n lower_follower_limit,\n self.logger,\n self.bye_b)\n )\n\n if not inappropriate:\n\n following = (\n random.randint(0, 100) <= self.follow_percentage)\n if (self.do_follow and\n username not in self.dont_include and\n following and\n self.follow_restrict.get(\n username, 0) < self.follow_times):\n\n followed += follow_user(\n self.browser,\n self.follow_restrict,\n self.username,\n username,\n self.blacklist,\n self.logger,\n self.logfolder)\n else:\n self.logger.info('--> Not following')\n sleep(1)\n\n liking = random.randint(0, 100) <= self.like_percentage\n if self.do_like and liking:\n liked = like_image(self.browser,\n user_name,\n self.blacklist,\n self.logger,\n self.logfolder)\n else:\n liked = True\n\n if liked == True:\n total_liked_img += 1\n liked_img += 1\n checked_img = True\n temp_comments = []\n commenting = random.randint(\n 0, 100) <= self.comment_percentage\n\n if self.use_clarifai and (following or commenting):\n try:\n checked_img, temp_comments = (\n check_image(self.browser,\n self.clarifai_api_key,\n self.clarifai_img_tags,\n self.logger,\n self.clarifai_full_match)\n )\n except Exception as err:\n self.logger.error(\n 'Image check error: {}'.format(err))\n if (self.do_comment and\n user_name not in self.dont_include and\n checked_img and\n commenting):\n\n if temp_comments:\n # use clarifai related comments only!\n comments = temp_comments\n elif is_video:\n comments = (self.comments +\n self.video_comments)\n else:\n comments = (self.comments +\n self.photo_comments)\n commented += comment_image(self.browser,\n user_name,\n comments,\n self.blacklist,\n self.logger,\n self.logfolder,\n self.bye_b)\n else:\n self.logger.info('--> Not commented')\n sleep(1)\n elif liked == False:\n already_liked += 1\n elif liked == 'jumped':\n jumped += 1\n\n else:\n self.logger.info(\n '--> Image not liked: {}'.format(str(reason.encode('utf-8'))[self.bye_b]))\n inap_img += 1\n 
except NoSuchElementException as err:\n self.logger.info('Invalid Page: {}'.format(err))\n\n if liked_img < amount:\n self.logger.info('-------------')\n self.logger.info(\"--> Given amount not fullfilled, image pool \"\n \"reached its end\\n\")\n\n self.logger.info('Liked: {}'.format(total_liked_img))\n self.logger.info('Already Liked: {}'.format(already_liked))\n self.logger.info('Inappropriate: {}'.format(inap_img))\n self.logger.info('Commented: {}'.format(commented))\n\n self.liked_img += liked_img\n self.already_liked += already_liked\n self.inap_img += inap_img\n self.commented += commented\n\n return self",
"def test_message_likes(self):\r\n m = Message(\r\n text='message',\r\n user_id=self.u.id\r\n )\r\n m.id = 1\r\n m2 = Message(\r\n text='message',\r\n user_id=self.u.id\r\n )\r\n m2.id = 2\r\n db.session.add(m, m2)\r\n db.session.commit()\r\n like = Likes(user_id=1, message_id=1)\r\n db.session.add(like)\r\n db.session.commit()\r\n # Test User liking messages works\r\n self.assertEqual(like.user_id, m.id)\r\n # Test message not liked are not shown\r\n self.assertNotEqual(like.user_id, m2.id)",
"def test_message_likes(self):\n\n m_to_like = Message(\n text=\"test message 1\",\n user_id=self.uid\n )\n\n unliked_message = Message(\n text=\"message not liked\",\n user_id=self.uid\n )\n\n user = User.signup('likestestuser', 'likes@likes.com', 'password', None)\n uid = 999\n user.id = uid\n db.session.add_all([m_to_like, unliked_message, user])\n db.session.commit()\n\n # Add user likes message m\n user.likes.append(m_to_like)\n\n db.session.commit()\n\n likes = Likes.query.filter(Likes.user_id == uid).all()\n self.assertEqual(len(likes), 1)\n self.assertEqual(likes[0].message_id, m_to_like.id)",
"def toggle_like(user_id, message_id):\n # liked_by = User.query.get(user_id)\n current_message = Message.query.get(message_id)\n # get likers (ids of users in message.liked_by)\n likers = list(current_message.liked_by)\n\n if current_user in likers:\n # else add them to likers\n current_message.liked_by.remove(current_user)\n db.session.add(current_message)\n db.session.commit()\n else:\n current_message.liked_by.append(current_user)\n db.session.add(current_message)\n db.session.commit()\n return redirect(\n url_for('messages_show', message_id=message_id, user_id=user_id))",
"def test_user_can_like(self):\n\n post_to_like = {\n 'post': self.post.id\n }\n\n response = self.client.post(\n self.url,\n data=post_to_like,\n format='json',\n HTTP_AUTHORIZATION=f'Bearer {self.auth_token}'\n )\n\n serialized_post = serializers.PostModelSerializer(\n Post.objects.first(),\n context={'request': self.request}\n )\n self.assertEqual(1, serialized_post.data['likes'])\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def toggle_like(request):\n if request.method != \"PUT\":\n return JsonResponse({\"error\": \"PUT request required.\"}, status=400)\n\n # Access currently logged-in user and the target post\n user = request.user\n data = json.loads(request.body)\n post = Post.objects.get(id=data.get(\"post\", \"\"))\n\n # If the user already likes this post, unlike. Otherwise, like.\n if Post.objects.filter(pk=post.pk, likes__pk=user.pk):\n post.likes.remove(user)\n return JsonResponse({\"message\": \"Unliked\"}, status=201)\n post.likes.add(user)\n return JsonResponse({\"message\": \"Liked\"}, status=201)",
"def show_likes(user_id):\n\n user = User.query.get_or_404(user_id)\n messages = user.likes\n\n return render_template('users/show.html', user=user, messages=messages)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Did the user rate the recipe? Smartly uses the cache if it exists
|
def rated(self, rater, recipe):
    try:
        Rating.objects.get(rater=rater, recipe=recipe)
        return True
    except Rating.DoesNotExist:
        return False
|
[
"def rateThisMethod(request):\n try:\n profileId = ndb.Key(urlsafe=getattr(request, 'profileId'))\n except Exception:\n print \"Invalid profileId\"\n return Response(response=1, description=\"Invalid profileId\")\n try:\n noteBookId = ndb.Key(urlsafe=getattr(request, 'noteBookId'))\n noteBook = noteBookId.get()\n if noteBook is None:\n raise Exception(\"Invalid noteBookId\")\n except Exception, E:\n print str(E)\n return Response(response=1, description=str(E))\n rating = getattr(request, 'rating')\n if profileId in noteBook.ratedUserIds:\n idx = noteBook.ratedUserIds.index(profileId)\n del(noteBook.ratingList[idx])\n noteBook.ratedUserIds.remove(profileId)\n noteBook.ratedUserIds.append(profileId)\n noteBook.ratingList.append(rating)\n\n # setting the totalRating to avg\n noteBook.totalRating = str(sum(noteBook.ratingList) / len(noteBook.ratingList))\n\n # updating the memccache\n cacheVal = memcache.get(noteBookId.urlsafe())\n if cacheVal is not None:\n cacheVal[9] = noteBook.ratedUserIds\n cacheVal[10] = noteBook.ratingList\n cacheVal[5] = noteBook.totalRating\n memcache.set(noteBookId.urlsafe(), cacheVal)\n noteBook.put()\n\n # creating notification\n notifText = 'Someone rated your notebook!!! Suspense!'\n createNotification([noteBook.uploaderId], 'Campus Connect', notifText,\n 'rated', noteBookId.urlsafe(), noteBook.courseId.urlsafe())\n\n return Response(response=0, description=\"OK\")",
"def use(self):\n\t\tself.last_used = time()",
"def discount_available():",
"def update_cooling_demand(self, action: float):\n\n raise NotImplementedError",
"def setMashEfficiency(self,username,recipeName,efficiency,doRecalculate=\"1\"):\n\t\tsys.stderr.write(\"\\nSTART: setMashEfficiency() -> %s/%s\\n\" %(recipeName,efficiency))\n\t\tstatus=0\n\t\ttry:\n\n\t\t\tsys.stderr.write(\"updated efficiency to %s\\n\" %(efficiency))\n\t\t\tourRecipe = self.dbWrapper.GqlQuery(\"SELECT * FROM gRecipes WHERE owner = :1 AND recipename = :2\", username,recipeName)\n\t\t\tfor recipe in ourRecipe.fetch(500):\n\t\t\t\trecipe.mash_efficiency=float(efficiency)\n\t\t\t\tif doRecalculate == \"0\":\trecipe.calculationOutstanding=True\n\t\t\t\trecipe.put()\n\n\t\t\tif doRecalculate == \"1\":\n\t\t\t\tself.calculateRecipe(username,recipeName)\n\t\t\t\tself.compile(username,recipeName,None)\n\n\t\t\tstatus=1\n\t\t\tresult={}\n\t\t\tresult['stats']={}\n\t\t\tresult['stats']['mash_efficiency']=recipe.mash_efficiency\n\t\t\tsys.stderr.write(\"END: setMashEfficiency() -> %s/%s\\n\" %(recipeName,efficiency))\n\t\t\treturn {'operation':'setMashEfficiency','status':status,'json':json.dumps(result) }\n\t\texcept ImportError:\n\t\t\tsys.stderr.write(\"EXCEPTION: setMashEfficiency() -> %s/%s\\n\" %(recipeName,efficiency))\n\t\t\tsys.stderr.write(\"setMashEfficiency() Exception\\n\")\n\t\t\texc_type, exc_value, exc_traceback = sys.exc_info()\n\t\t\tfor e in traceback.format_tb(exc_traceback):\tsys.stderr.write(\"\\t%s\\n\" %(e))\n\t\t\tpass\n\t\t\n\t\treturn {'operation':'setMashEfficiency','status':status}",
"def rate_recs(recommendations):\n interest = []\n print('If interested in visiting enter 1, else 0')\n for rec in recommendations:\n interest.append(int(input(str(rec[0]) + ': ')))\n satisfaction = {'satisfaction_score': sum(interest) / 5}\n user_satisfaction.insert_one(satisfaction)",
"def _rating(kind):\n result = []\n cuser = User.current()\n cuid = None if cuser is None else cuser.id()\n\n # Generate a list of challenges issued by this user\n challenges = set()\n if cuid:\n challenges.update([ch[0] # Identifier of challenged user\n for ch in iter(ChallengeModel.list_issued(cuid, max_len = 20))])\n\n rating = memcache.get(kind, namespace=\"rating\")\n if rating is None:\n # Not found: do a query\n rating = list(RatingModel.list_rating(kind))\n # Store the result in the cache with a lifetime of 1 hour\n memcache.set(kind, rating, time=1 * 60 * 60, namespace=\"rating\")\n\n for ru in rating:\n\n uid = ru[\"userid\"]\n if not uid:\n # Hit the end of the list\n break\n is_robot = False\n usr = None\n inactive = False\n if uid.startswith(u\"robot-\"):\n is_robot = True\n nick = Game.autoplayer_name(int(uid[6:]))\n fullname = nick\n chall = False\n fairplay = False\n else:\n usr = User.load(uid)\n if usr is None:\n # Something wrong with this one: don't bother\n continue\n nick = usr.nickname()\n if not User.is_valid_nick(nick):\n nick = u\"--\"\n fullname = usr.full_name()\n chall = uid in challenges\n fairplay = usr.fairplay()\n inactive = usr.is_inactive()\n\n games = ru[\"games\"]\n if games == 0:\n ratio = 0\n avgpts = 0\n else:\n ratio = int(round(100.0 * float(ru[\"wins\"]) / games))\n avgpts = int(round(float(ru[\"score\"]) / games))\n\n result.append({\n \"rank\": ru[\"rank\"],\n \"rank_yesterday\": ru[\"rank_yesterday\"],\n \"rank_week_ago\": ru[\"rank_week_ago\"],\n \"rank_month_ago\": ru[\"rank_month_ago\"],\n\n \"userid\": uid,\n \"nick\": nick,\n \"fullname\": fullname,\n \"chall\": chall,\n \"fairplay\": fairplay,\n \"inactive\": inactive,\n\n \"elo\": ru[\"elo\"],\n \"elo_yesterday\": ru[\"elo_yesterday\"],\n \"elo_week_ago\": ru[\"elo_week_ago\"],\n \"elo_month_ago\": ru[\"elo_month_ago\"],\n\n \"games\": games,\n \"games_yesterday\": ru[\"games_yesterday\"],\n \"games_week_ago\": ru[\"games_week_ago\"],\n \"games_month_ago\": ru[\"games_month_ago\"],\n\n \"ratio\": ratio,\n \"avgpts\": avgpts\n })\n\n return result",
"def consumptionRate(self, recipe: Recipe, itemName: str,\n count: int=1) -> float:\n ingredient = recipe.getInputByName(itemName)\n return count * ingredient.count / self.craft(recipe)['duration']",
"def calculate_hitrate_theo(self, distribution='absolute order', params=None):\n self.req_count += 50\n if distribution == 'absolute order':\n if not self.surface_level_freq:\n miss = 0\n else:\n miss = len([k for k, v in self.period_freq_dict.items() if v == self.surface_level_freq])\n self.history_hit_rate = 0.5 * (self.history_hit_rate + (1 - miss / 50)) # serious bug\n return self.history_hit_rate\n elif distribution == 'sim':\n if not self.surface_level_freq:\n miss = 0 # should be the prob that the requested item is in cache * 50\n else:\n # miss = len([k for k, v in self.period_freq_dict.items() if v <= self.surface_level_freq])\n losers = [k for k, v in self.period_freq_dict.items() if v <= self.surface_level_freq]\n miss = sum([int(i in losers) for i in period_trace])\n self.history_miss += miss\n self.history_hit_rate = 1 - self.history_miss / self.req_count\n return self.history_hit_rate\n elif distribution == 'exp':\n pass\n elif distribution == 'zipf':\n pass",
"def _rate_of_spread(self):\n pass",
"def viewRecipe(self,username,recipeName,category,dontRecompile=1):\n\n\n\t\tsys.stderr.write(\"viewRecipe-> %s/%s....\\n\" %(recipeName,category))\n\t\tstatus = 0\n\n\t\tourRecipe = self.dbWrapper.GqlQuery(\"SELECT * FROM gRecipes WHERE owner = :1 AND recipename = :2\", username,recipeName)\n\t\trecipe=ourRecipe.fetch(2000)[0]\n\n\n\t\t# if we don't have a recipe stats yet we must recalculate\n\t\tsys.stderr.write(\"viewRecipe\\n ------ we have a process of %s\\n\" %(recipe.process))\n\t\tourRecipeStats = self.dbWrapper.GqlQuery(\"SELECT * FROM gRecipeStats WHERE owner = :1 AND recipe = :2 AND process = :3 AND brewlog = :4\",username,recipeName, recipe.process,\"\")\n\t\tif len(ourRecipeStats.fetch(1)) == 0:\n\t\t\tsys.stderr.write(\"MUST RECOMPILE NOW!\")\n\t\t\tself.calculateRecipe(username,recipeName)\n\t\t\tself.compile(username,recipeName,None)\n#\t\tstatus = 1\n#\t\tresult={}\n#\t\treturn {'operation' : 'viewRecipe', 'status' : status ,'json':json.dumps( result ) }\n\t\n\n\t\t# think we need to be recompiling... although do we really need to.\n\t\tif not dontRecompile:\n\t\t\ttmp = self.calculateRecipe(username,recipeName)\n\t\t\tself.compile(username,recipeName,None)\n\n\n\t\ttry:\n\t\t\tresult={}\n\t\t\tresult['stats']={}\n\n\n\n\n\n\t\t\tHOPMAP={}\n\t\t\tFERMMAP={}\t\t\n\t\t\t# this is in viewRecipe()\n\t\t\tourContributions = self.dbWrapper.GqlQuery(\"SELECT * FROM gContributions WHERE owner = :1 AND recipeName = :2 AND srm < :3\", username,recipeName,1.00)\n\t\t\tfor contribution in ourContributions.fetch(4000):\n\t\t\t\tif contribution.ingredientType==\"hops\":\n\t\t\t\t\tHOPMAP[ (contribution.ingredient,contribution.hopAddAt) ] = contribution\n\t\t\t\tif contribution.ingredientType==\"fermentables\":\n\t\t\t\t\tFERMMAP[ contribution.ingredient]=contribution \n\n\n\n\n\t\t\t\n\t\t\t\t# this will serve as what the recipe wants us to have\n\t\t\t\t# in the future we should have a \"adopt these wroking values as the real values\"\n\n\t\t\tresult['stats']['calculationOutstanding']=recipe.calculationOutstanding\n\t\t\tresult['stats']['estimated_abv'] = recipe.estimated_abv\t\n\t\t\tresult['stats']['estimated_ebc'] = recipe.estimated_ebc\n\t\t\tresult['stats']['estimated_fg'] = recipe.estimated_fg\t\n\t\t\tresult['stats']['estimated_og'] = recipe.estimated_og\t\n\t\t\tresult['stats']['estimated_ibu'] = recipe.estimated_ibu\n\t\t\tresult['stats']['postBoilTopup'] = recipe.postBoilTopup\n\t\t\tresult['stats']['process']=recipe.process\n\t\t\tresult['stats']['mash_efficiency']=recipe.mash_efficiency\n\t\t\tresult['stats']['batch_size_required']=recipe.batch_size_required\n\n\t\t\tsys.stderr.write(\"viewRecipe\\n ------ we have a process of %s\\n\" %(recipe.process))\n\t\t\tourRecipeStats = self.dbWrapper.GqlQuery(\"SELECT * FROM gRecipeStats WHERE owner = :1 AND recipe = :2 AND process = :3 AND brewlog = :4\",username,recipeName, recipe.process,\"\")\n\n\t\t\ttry:\n\t\t\t\tstats=ourRecipeStats.fetch(2000)[0]\n\t\t\t\t\t# these are our current wroking values\n\t\t\t\tresult['stats']['this_estimated_abv'] = stats.estimated_abv\t\n\t#\t\t\tresult['stats']['this_estimated_ebc'] = stats.estimated_ebc\n\t\t\t\tresult['stats']['this_estimated_fg'] = stats.estimated_fg\t\n\t\t\t\tresult['stats']['this_estimated_og'] = stats.estimated_og\t\n\t\t\t\tresult['stats']['this_estimated_ibu'] = stats.estimated_ibu\n\t\t\t\tresult['stats']['spargeWater'] = stats.sparge_water\n\t\t\t\tresult['stats']['mashWater'] = stats.mash_liquid\n\t\t\t\tresult['stats']['boilVolume'] = 
stats.boil_vol\n\t\t\t\tresult['stats']['totalWater'] = stats.total_water\n\t\t\t\tresult['stats']['totalGrain'] = stats.grain_weight\n\t\t\t\tresult['stats']['totalAdjuncts']=stats.nongrain_weight\n\t\t\t\tresult['stats']['totalHops']=stats.hops_weight\n\t\t\t\tresult['stats']['this_batch_size'] = stats.batchsize\n\t\t\texcept ImportError:\n\t\t\t\tresult['stats']['this_estimated_abv'] = 0\t\n\t\t\t\tresult['stats']['this_estimated_fg'] = 0 \n\t\t\t\tresult['stats']['this_estimated_og'] = 0\n\t\t\t\tresult['stats']['this_estimated_ibu'] = 0\n\t\t\t\tresult['stats']['spargeWater'] = 0\n\t\t\t\tresult['stats']['mashWater'] = 0 \n\t\t\t\tresult['stats']['boilVolume'] = 0\n\t\t\t\tresult['stats']['totalWater'] = 0 \n\t\t\t\tresult['stats']['totalGrain'] = 0\n\t\t\t\tresult['stats']['totalAdjuncts']= 0\n\t\t\t\tresult['stats']['this_batch_size'] = 0\n\n\t\t\ttmp={}\n\t\t\tresult['category'] = category\n\t\t\tresult['fermentableitems'] = []\n\t\t\tourItems = self.dbWrapper.GqlQuery(\"SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3 AND processIngredient = :4 ORDER BY ingredient\",username,recipeName,\"fermentables\",0)\n\t\t\titems=ourItems.fetch(20000)\n\t\t\tfor item in items:\n\t\t\t\tresult['fermentableitems'].append({})\n\t\t\t\tresult['fermentableitems'][-1]['name']=item.ingredient\n\t\t\t\tresult['fermentableitems'][-1]['qty']=\"%.2f\" %(item.qty)\n\t\t\t\tresult['fermentableitems'][-1]['originalqty']=\"%.2f\" %(item.originalqty)\n\t\t\t\tresult['fermentableitems'][-1]['unit']=item.unit\n\n\t\t\t\tif FERMMAP.has_key( item.ingredient ):\n\t\t\t\t\tresult['fermentableitems'][-1]['gravity'] = \"%.3f\" %(1+((FERMMAP[item.ingredient].gravity/1000)))\n\t\t\t\telse:\t\n\t\t\t\t\tresult['fermentableitems'][-1]['gravity'] = \"?\"\n\n\t\t\t\ttmp[item.ingredient]=\"\"\n\n\n\t\t\tresult['hopitems'] = []\n\t\t\t# it should be safe to only look at hopAddAt of >0\n\t\t\tourItems = self.dbWrapper.GqlQuery(\"SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3 AND processIngredient = :4 ORDER BY ingredient\",username,recipeName,\"hops\",0)\n\t\t\titems=ourItems.fetch(20000)\n\t\t\tfor item in items:\n\n\t\t\t\tif item.hopAddAt > 0:\n\t\t\t\t\tresult['hopitems'].append({})\n\t\t\t\t\tresult['hopitems'][-1]['name']=item.ingredient\n\t\t\t\t\tresult['hopitems'][-1]['hopaddat'] = item.hopAddAt\n\t\t\t\t\tresult['hopitems'][-1]['qty']=\"%.2f\" %(item.qty)\n\t\t\t\t\tresult['hopitems'][-1]['originalqty']=\"%.2f\" %(item.originalqty)\n\t\t\t\t\tresult['hopitems'][-1]['unit']=item.unit\n\n\t\t\t\t\tif HOPMAP.has_key( (item.ingredient,item.hopAddAt) ):\n\t\t\t\t\t\tresult['hopitems'][-1]['ibu'] = \"%.1f IBU\" %(HOPMAP[ (item.ingredient,item.hopAddAt)].ibu)\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult['hopitems'][-1]['ibu'] = \"? 
IBU\"\n\t\t\t\t\ttmp[item.ingredient]=\"\"\n\n\n\n\t\t\tresult['yeastitems'] = []\n\t\t\tourItems = self.dbWrapper.GqlQuery(\"SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3 AND processIngredient = :4 ORDER BY ingredient\",username,recipeName,\"yeast\",0)\n\t\t\titems=ourItems.fetch(20000)\n\t\t\tfor item in items:\n\t\t\t\tresult['yeastitems'].append({})\n\t\t\t\tresult['yeastitems'][-1]['name']=item.ingredient\n\t\t\t\tresult['yeastitems'][-1]['qty']= \"%.2f\" %(item.qty)\n\t\t\t\tresult['yeastitems'][-1]['originalqty']= \"%.2f\" %(item.originalqty)\n\t\t\t\tresult['yeastitems'][-1]['unit']=item.unit\n\t\t\t\ttmp[item.ingredient]=\"\"\n\n\n\n\t\t\tresult['otheritems'] = []\n\t\t\tourItems = self.dbWrapper.GqlQuery(\"SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3 AND processIngredient = :4 ORDER BY ingredient\",username,recipeName,\"misc\",0)\n\t\t\titems=ourItems.fetch(20000)\n\t\t\tfor item in items:\n\t\t\t\tresult['otheritems'].append({})\n\t\t\t\tresult['otheritems'][-1]['name']=item.ingredient\n\t\t\t\tresult['otheritems'][-1]['qty']=\"%.2f\" %(item.qty)\n\t\t\t\tresult['otheritems'][-1]['originalqty']=\"%.2f\" %(item.originalqty)\n\t\t\t\tresult['otheritems'][-1]['unit']=item.unit\n\t\t\t\ttmp[item.ingredient]=\"\"\n\n\n\t\t\tresult['miscitems'] = []\n\t\t\tourItems = self.dbWrapper.GqlQuery(\"SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3 AND processIngredient = :4 ORDER BY ingredient\",username,recipeName,\"consumable\",0)\n\t\t\titems=ourItems.fetch(20000)\n\t\t\tfor item in items:\n\t\t\t\tresult['miscitems'].append({})\n\t\t\t\tresult['miscitems'][-1]['name']=item.ingredient\n\t\t\t\tresult['miscitems'][-1]['qty']=\"%.2f\" %(item.qty)\n\t\t\t\tresult['miscitems'][-1]['originalqty']=\"%.2f\" %(item.originalqty)\n\t\t\t\tresult['miscitems'][-1]['unit']=item.unit\n\t\t\t\ttmp[item.ingredient]=\"\"\n\n\n\t\t\tresult['ingredients'] = []\n\n\t\t\tif category==\"Consumables\":\tcategory=\"consumable\"\n\t\t\tif category==\"Other\":\tcategory=\"misc\"\n\t\t\tourIngredients = self.dbWrapper.GqlQuery(\"SELECT * FROM gItems WHERE owner = :1 AND majorcategory = :2 ORDER BY name\",username,category.lower())\n#\t\t\tsys.stderr.write(\"MAJOR CATEGOR %s \\n\" %(category.lower()))\n\n\t\t\tingredients=ourIngredients.fetch(20000)\n\t\t\tfor ingredient in ingredients:\n\t\t\t\tif not tmp.has_key(ingredient.name) or category == \"Hops\":\n\t\t\t\t\tresult['ingredients'].append({})\n\t\t\t\t\tresult['ingredients'][-1]['name']=ingredient.name\n\n\n#:w\n#\t\t\tsys.stderr.write(result)\n\t\t\tstatus = 1\n\t\t\treturn {'operation' : 'viewRecipe', 'status' : status ,'json':json.dumps( result ) }\n\n\t\t\n\t\texcept ImportError:\n\t\t\tsys.stderr.write(\"EXCEPTION in viewRecipe\\n\")\n\t\t\texc_type, exc_value, exc_traceback = sys.exc_info()\n\t\t\tfor e in traceback.format_tb(exc_traceback):\tsys.stderr.write(\"\\t%s\" %( e))\n\t\treturn {'operation' : 'viewRecipe', 'status' : status }",
"def _update_stats_of_resource(self, resource, language, user):\r\n invalidate_stats_cache(resource, language, user=user)",
"def dynCache():\n pass",
"def reward(self):\n\n return self.r_time()",
"def _getWorthless(self):\r\n n = 0\r\n success = True\r\n while n == 0 and success:\r\n if self._numChewingGum() == 0:\r\n success &= self._doRequest(StoreRequest, StoreRequest.MARKET, \r\n 23)\r\n if success:\r\n self._meat -= 50\r\n success &= self._doRequest(UseItemRequest, 23)\r\n n = self._numWorthless()\r\n return n",
"def review(self, rated, recommended):",
"def refresh_cache_if_needed(self) -> None:\n latest_swatch = Swatch.objects.exclude(published=False).latest(\"date_added\")\n if latest_swatch.date_added > self.last_cache_update:\n library = Swatch.objects.exclude(published=False)\n self.update_all_color_matches(library)\n self.last_cache_update = timezone.now()\n self.save()",
"def _fill_ratelimit_cache(self) -> dict:\n response = self.get(self.build_url('rate_limit'))\n if response.status_code == 200 and response.content:\n json = response.json()\n if 'resources' in json:\n self._ratelimit_cache = json['resources']\n else:\n __log__.critical('Cannot fill ratelimit cache')",
"def reward(self):\n return self._r_sum"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A view that renders the cart contents page
|
def view_cart(request):
return render(request, 'cart/cart.html')
|
[
"def cart():\r\n cart_objects = current_user.cart_objects\r\n return render_template(\"cart.html\" , cart_objects = cart_objects)",
"def cart(request):\n\n return {'cart': Cart(request)}",
"def show_cart():\n session = connect()\n try:\n user_id = current_user.id\n address = get_address(current_user.address_id)\n except AttributeError:\n return \"Error getting user data\"\n items = session.query(CartView).filter_by(user_id=user_id).all()\n # Calculate totals\n subtotal = 0.0\n for item in items:\n subtotal += float(item.price) * item.quantity\n if subtotal > 0:\n fee = DELIVERY_FEE\n else:\n fee = 0\n tax = (subtotal + fee) * 0.07\n total = subtotal + fee + tax\n subtotal = \"{0:.2f}\".format(subtotal)\n fee = \"{0:.2f}\".format(fee)\n tax = \"{0:.2f}\".format(tax)\n total = \"{0:.2f}\".format(total)\n if address is None:\n delivery_time = 'Please enter an address to '\n delivery_time += 'calculate estimated delivery time.'\n address_string = 'No address on file.'\n else:\n delivery_time = 'Your estimated delivery time is currently '\n delivery_time += '{0:.0f}'.format(get_delivery_time()/60) + ' minutes.'\n address_string = get_address_string(address)\n return render_template('cart.html', items=items, subtotal=subtotal,\n fee=fee, tax=tax, total=total, user=current_user,\n address_string=address_string, delivery_time=delivery_time,\n edit_address=False, title=\"Checkout\")",
"def get(self):\r\n products = self.request.get_all(\"product\")\r\n self.render(\"shoppingcart.html\", products = products)",
"def basket_view(request):\n\n template = 'basket/basket.html'\n return render(request, template)",
"def view_shopping_bag(request):\n return render(request, 'shopping_bag/shopping_bag.html')",
"def cart_summary(request):\n template = \"_cart_summary.html\"\n context = {\n 'cart_item_count': cartutils.cart_distinct_item_count(request)\n }\n return render_to_response(template, context, context_instance=RequestContext(request))",
"def show_cart(request):\n bound_form = None\n bound_form_id = None\n checkout_errors = None\n\n if request.method == \"POST\":\n postdata = request.POST.copy()\n if 'Remove' in postdata:\n cartutils.remove_from_cart(request)\n if 'Update' in postdata:\n bound_form, bound_form_id = _process_update_cart_form(request, postdata)\n if 'Checkout' in postdata:\n # do a stock check before allowing the user to start the checkout process\n checkout_errors = _get_cart_errors(request)\n if not checkout_errors:\n checkout_url = reverse('checkout')\n return HttpResponseRedirect(checkout_url)\n\n try:\n cart_items = cartutils.get_cart_items(request)\n\n if checkout_errors == None:\n checkout_errors = _get_cart_errors(request)\n\n for cart_item in cart_items:\n if bound_form_id == cart_item.id:\n form = bound_form\n else:\n # create an unbound form\n form = UpdateCartItemForm(request)\n form.fields['item_id'].widget.attrs['value'] = cart_item.id\n setattr(cart_item, 'update_form', form)\n setattr(cart_item, 'wishlists', wishutils.get_wishlists_for_item(cart_item))\n\n cart_subtotal = cartutils.cart_subtotal(request)\n continue_shopping_url = get_continue_shopping_url(request)\n\n context = {\n 'cart_subtotal': cart_subtotal,\n 'continue_shopping_url': continue_shopping_url,\n 'cart_items': cart_items,\n 'checkout_errors': checkout_errors,\n 'wishlists': wishutils.get_wishlists(request),\n }\n\n return render_to_response('cart.html', context, context_instance=RequestContext(request))\n except ProductInstance.DoesNotExist:\n # something funny is happened - the user has an item in his cart that for some reason no longer exists in our\n # system. This should never happen, but I guess you never know.\n logger.exception(\"Unknown product instance in cart\")\n cartutils.clear_cart(request)\n return HttpResponseRedirect(reverse('show_cart'))",
"def catalog_page():\n categories = db_helper.get_categories()\n lastest_items_view = db_helper.get_lastest_items_view(10)\n return render_template('catalog.html', items_view=lastest_items_view, \n categories=categories, category_name = None)",
"def get_cart_contents():\n session = request.environ.get('beaker.session')\n return session.get('cart', [])",
"def checkout():\r\n furnitures = current_user.cart_objects\r\n amount= 100*sum(furniture.price for furniture in furnitures) # Amount in 1/10 kr\r\n return render_template('checkout.html', \r\n key = stripe_keys[\"publishable_key\"], \r\n amount = amount , cart_objects = furnitures)",
"def shop():\n return render_template('shop/shop.html')",
"def lfs_cart_portlet(context, title=None):\n if title is None:\n title = _(u\"Cart\")\n\n portlet = CartPortlet()\n portlet.title = title\n\n return {\n \"html\": portlet.render(context)\n }",
"def cart_articles():",
"def checkout_view(request):\n if request.method == \"POST\":\n cart_operations(request)\n context = before_checkout_context(request)\n if context.get(\"item_count\") == 0:\n return redirect('/before_checkout')\n return render(request, 'front/checkout.html', context)",
"def new_basket():\n return render_template('new_basket.html')",
"def products(request):\n products = Product.objects.all()\n return render(request, 'purchasing/products.html', {'products': products})",
"def order():\n return render_template('order.html', purchases={})",
"def shop(request):\n\n # 'user_products' key [context dictionary]\n user_products = []\n try:\n for CART_PRODUCT in request.session['CART']:\n user_products.append(CART_PRODUCT)\n except KeyError:\n request.session['CART'] = []\n\n # 'user' key [context dictionary]\n user = User.objects.get(pk=request.user.id) if request.user.is_authenticated else None\n \n # context dictionary\n context = { 'page' : 'shop', 'products' : Product.objects.all(), 'user_products' : user_products, 'user': user }\n\n # returning http request --> shop.html page\n return render(request, 'shop/shop.html', context)",
"def product(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/product.html',\n {\n 'title':'product Page',\n 'year':datetime.now().year,\n }\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Finalises the compressed version of the spreadsheet. If you aren't using the context manager (the 'with' statement), you must call this manually; it is not triggered automatically as it is on a file object.
|
def close(self):
if self.default_sheet is not None:
self.default_sheet.finalizeFormat(1)
sheet_index = 2
for finalSheet in self.sheets:
finalSheet.finalizeFormat(sheet_index)
sheet_index += 1
self.zipf.writestr("content.xml", self.dom.toxml().encode("utf-8"))
self.zipf.close()
|
[
"def close(self):\n self.compressed_file.close()",
"def _finalize_zip(self):\n del self.zip_file\n if self.buffer:\n self.buffer.flush()",
"def __decompress_archive(self):\n self.decompress_path = self.cwd.joinpath(PathVariables.SRC__DECOMPRESSED)\n self.log.debug(\"decompress tar to %s: \" % self.decompress_path)\n\n self.tar_archive.extractall(self.cwd.joinpath(PathVariables.SRC__DECOMPRESSED))\n self.tar_archive.close()",
"def GwArchiveDone(self):\r\n\r\n # API Call - Free resources held by the archive manager library\r\n self.gwArchiveLibrary.GwArchiveDone()",
"def Finalize(self) -> None:\n pass",
"def close(self):\n self.tar.close()\n # Close the gzip file object if necessary.\n if self.fileobj:\n self.fileobj.close()",
"def compress_storage(self):\n curr_time = dt.datetime.utcnow().strftime(self.time_format)\n zip_archive = '{}.zip'.format(curr_time)\n\n with zipfile.ZipFile(os.path.join(self.storage_path, zip_archive), 'w') as zf:\n for file in glob.iglob(os.path.join(self.storage_path, '*.jpg')):\n zf.write(filename=file)",
"def _repackage(self):\n file_suffix = f'_{APP_NAME}{self._file.extension}'\n filename = self._file.name.replace(self._file.extension, file_suffix)\n unlocked_filepath = os.path.join(APP_SAVE_DIR, filename)\n\n filepaths = self._get_file_listing(self._temp_processing_dir)\n with zipfile.ZipFile(unlocked_filepath,'w') as repackaged_zip:\n for filepath in filepaths:\n rel_filepath = filepath.replace(self._temp_processing_dir,'')\n repackaged_zip.write(filepath,arcname=rel_filepath)\n \n print('File repackaged...')",
"def finish_exporting(self):\n pass",
"def do_compression(filename):\n query=\"laszip -i \"+filename+\" -olaz\"\n subprocess.run(query)\n os.remove(filename)",
"def finalize(self):\n self._finalized = True",
"def rezip(self):\n #TODO need special handling for .gz files\n fzip = zipfile.ZipFile(self.filepath, 'w', zipfile.ZIP_DEFLATED)\n if not os.path.isdir(self.zipdir):\n raise IOError('No \"{}\" folder to rezip'.format(self.trunc))\n for root, dirs, files in os.walk(self.zipdir):\n dirname = root.replace(self.zipdir, '')\n for the_file in files:\n fzip.write(root + '/' + the_file, dirname + '/' + the_file)\n fzip.close()\n self.delete_zip_folder()",
"def _finalize_sheet(self, sheet, final_col):\n \n # Adds week number to the top of the sheet\n sheet.write('A1', 'Number of Weeks:', self.regformat)\n sheet.write('B1', final_col, self.regformat)\n \n # Resizes the widths of the columns\n sheet.set_column(1, final_col, 4)\n \n # Obtains the row of the total count and writes Total on the right\n row = self.top_write_row + (2 * len(self.series_names))\n sheet.write(row, final_col + 2, TOTAL, self.totalnameformat)\n \n # Writes the description of each item on the right of the counts\n for _, desc in reversed(self.series_names):\n row -= 2\n sheet.write(row, final_col + 2, desc, self.itemnameformat)",
"def end_final_file(self, resulting_file):\n pass",
"def _save_and_compress(self, filename = None, data = None):\n if os.path.exists(filename):\n os.remove(filename)\n \n fileContents = gzip.open(filename, 'wb', compresslevel = 3)\n pickle.dump(data, fileContents, protocol = pickle.HIGHEST_PROTOCOL)\n fileContents.close()",
"def DoneWritingArchive(self):\n self.EndFile()\n for file_name in sorted(self._files):\n print('=== begin: %s' % file_name, file=self._final_output_stream)\n self._final_output_stream.write(self._files[file_name])\n print('=== end: %s' % file_name, file=self._final_output_stream)\n self._final_output_stream.flush()",
"def Finalize(self):\n if self._TraceFile:\n simV2.VisItCloseTraceFile()\n\n simV2.VisItFinalize()\n\n # rm this file as if its left around\n # it will cause next cli to fail\n if self._CommRank == 0:\n if os.path.isfile(self._SimFile):\n os.unlink(self._SimFile)\n\n return",
"def compress(self, handle, fs):\n pass",
"def compress_download(self):\n log.debug(f\"Creating archive: {self.output_filename}\")\n\n # .tar.gz and .tar.bz2 files\n if self.compress_type in [\"tar.gz\", \"tar.bz2\"]:\n ctype = self.compress_type.split(\".\")[1]\n with tarfile.open(self.output_filename, f\"w:{ctype}\") as tar:\n tar.add(self.outdir, arcname=os.path.basename(self.outdir))\n tar_flags = \"xzf\" if ctype == \"gz\" else \"xjf\"\n log.info(f\"Command to extract files: [bright_magenta]tar -{tar_flags} {self.output_filename}[/]\")\n\n # .zip files\n if self.compress_type == \"zip\":\n with ZipFile(self.output_filename, \"w\") as zip_file:\n # Iterate over all the files in directory\n for folder_name, _, filenames in os.walk(self.outdir):\n for filename in filenames:\n # create complete filepath of file in directory\n file_path = os.path.join(folder_name, filename)\n # Add file to zip\n zip_file.write(file_path)\n log.info(f\"Command to extract files: [bright_magenta]unzip {self.output_filename}[/]\")\n\n # Delete original files\n log.debug(f\"Deleting uncompressed files: '{self.outdir}'\")\n shutil.rmtree(self.outdir)\n\n # Calculate md5sum for output file\n log.info(f\"MD5 checksum for '{self.output_filename}': [blue]{nf_core.utils.file_md5(self.output_filename)}[/]\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write a row of cells into the default sheet of the spreadsheet.
|
def writerow(self, cells):
if self.default_sheet is None:
self.default_sheet = self.new_sheet(first_row_bold = self.first_row_bold)
self.default_sheet.writerow(cells)
|
[
"def write(self, x, y, data, format=None):\n if format:\n self.sheet.write(y, x, data, self._formats[format])\n else:\n self.sheet.write(y, x, data)",
"def write_to_sheet1(data: list):\r\n range = config.get(\"sheet1_range\")\r\n print(\"\\n\\nDo not worry if program appears frozen\")\r\n values = check_for_duplicates(range, data)\r\n\r\n if values == None:\r\n print(\"\\n\\nNo new rows to add to sheet1\")\r\n return\r\n \r\n print(\"\\n\\nRows being written to sheet 1:\\n\")\r\n print(tabulate(values, headers=[\"sale data\", \"item title\", \"transaction id\"], showindex=\"always\", tablefmt=\"github\"))\r\n\r\n body = {\"values\": values}\r\n result = append_to_sheet(body, range)\r\n print(\"\\n\\nWriting to sheet1 results:\", result)",
"def update_worksheet(data, worksheet):\n print(f'Updating {worksheet} worksheet...\\n')\n worksheet_to_update = SHEET.worksheet(worksheet)\n worksheet_to_update.append_row(data)\n print(f'{worksheet} worksheet update successfully\\n')",
"def save(self, worksheet):\n pass",
"def _write_row(self, row):\n return",
"def write_data(data):\n\n spreadsheet = SpreadSheet()\n sheet_name = os.getenv('SPREADSHEET_NAME')\n if not sheet_name:\n sheet_name = 'New spreadsheet'\n spreadsheet.write(sheet_name, data)",
"def append_to_sheet(body: dict, range: str):\r\n return (\r\n service.spreadsheets()\r\n .values()\r\n .append(\r\n spreadsheetId=SPREADSHEET_ID,\r\n valueInputOption=\"USER_ENTERED\",\r\n insertDataOption=\"INSERT_ROWS\",\r\n range=range,\r\n body=body,\r\n )\r\n .execute()\r\n )",
"def rows_to_excel(self, rows, top=1, left=0):\n n_rows = len(rows)\n n_cells = len(rows[0])\n for i in range(n_rows):\n row = rows[i]\n for j in range(n_cells):\n self.sheet.write(top+i, left+j, row[j])\n return self.sheet",
"def write( \n self, \n sheet: \"sheet\", \n offset: xy = xy( 0, 0 ),\n ink: bool | color = True\n ) -> None:\n for x in range( self.size.x ): \n for y in range( self.size.y ):\n if self.read( xy( x, y ) ):\n sheet.write_pixel( offset + xy( x, y ), ink )",
"def update_sales_worksheet(data):\n print('Updating sales worksheet...\\n')\n sales_worksheet = SHEET.worksheet('sales')\n sales_worksheet.append_row(data)\n pprint('Sales worksheet updated succesfully.\\n')",
"def writeCellValue(self, ws, row, column, value):\n self.logger.info(\"Writing cell(%s, %s) value %s\" %(row, column, value))\n ws.Cells(row, column).Value = value",
"def _initialize_sheet(self, sheet_name):\n \n # Creates the sheet\n write_name = sheet_name[:31] if (len(sheet_name) > 31) else sheet_name\n self.sheets[sheet_name] = self.wb.add_worksheet(write_name)\n \n # Widens the first column\n self.sheets[sheet_name].set_column('A:A', 19)\n \n # Sets the date row format\n self.sheets[sheet_name].set_row(self.date_row, cell_format=self.dateformat)\n \n # Sets the week number row format\n self.sheets[sheet_name].set_row(self.week_row, cell_format=self.weeknumformat)\n \n # Sets the series header and row format\n row = self.top_write_row\n for series_name, _ in self.series_names:\n self.sheets[sheet_name].set_row(row, cell_format=self.itemrowformat)\n self.sheets[sheet_name].write(row, 0, series_name, self.itemnameformat)\n row += 2\n \n # Sets the total header and row format\n self.sheets[sheet_name].write(row, 0, TOTAL, self.totalnameformat)\n self.sheets[sheet_name].set_row(row, cell_format=self.totalrowformat)\n \n return self.sheets[sheet_name]",
"def editGoogleSheet(client, data, timeStamp):\r\n\r\n #get the current worksheet\r\n worksheet_feed = client.GetWorksheetsFeed(config.speedsheet_id)\r\n\r\n for d in data:\r\n\r\n #find the sheet name we care about\r\n for entry in worksheet_feed.entry:\r\n if entry.title.text == d.sheet_name:\r\n worksheet_entry = entry\r\n break\r\n else: # no-break\r\n print \"finding worksheet\"\r\n\r\n worksheet_key = worksheet_entry.id.text.split('/')[-1]\r\n\r\n #print str(d.sheet_name)\r\n #print str(d.location[0])\r\n #print str(d.location[1])\r\n #print str(d.value)\r\n\r\n row = d.location[0]\r\n col = d.location[1]\r\n value = d.value\r\n\r\n client.UpdateCell(row, col, value, config.speedsheet_id, worksheet_key)\r\n\r\n\r\n date_row = config.cell_for_date[0]\r\n date_col = config.cell_for_date[1]\r\n\r\n #find the sheet name we care about for date\r\n for entry in worksheet_feed.entry:\r\n if entry.title.text == config.cell_for_date_worksheet:\r\n worksheet_key = entry.id.text.split('/')[-1]\r\n\r\n #time stamp a cell plz\r\n client.UpdateCell(date_row, date_col, timeStamp, config.speedsheet_id, worksheet_key)",
"def insertData(price,typee,title):\n length = len(list(ws.rows))\n ws.cell(row=length+1,column=1,value=title)\n ws.cell(row=length+1,column=2,value=typee)\n ws.cell(row=length+1,column=3,value=price)\n ws.cell(row=length+1,column=4,value=return_today())\n workb.save(\"database.xlsx\")",
"def add_sheet(self, df, sheet_name=\"Sheet1\", zoom=85, freeze_row=1, freeze_col=0, cols_to_print=None,\n depth_col_name='', cols_to_indent=None, highlight_depth=False, highlight_col_limit=0,\n group_rows=False, print_index=True, col_formats={}, col_style={}):\n\n # Create output DF with only cols to print and replace N/A with empty string\n if cols_to_print:\n output_df = df[cols_to_print] # .where((pd.notnull(df)), '')\n else:\n output_df = df # .where((pd.notnull(df)), '')\n\n # If index column exists, need offset to shift all other columns\n index_col_offset = 1 if print_index else 0\n\n # Write data to Excel\n worksheet = self.workbook.add_worksheet(sheet_name)\n\n # Set zoom and freeze panes location\n worksheet.set_zoom(zoom)\n worksheet.freeze_panes(freeze_row, freeze_col)\n\n # UGLY!! Add custom format\n if 'custom' in col_formats.values():\n custom_format={}\n for col_name, style in col_style.items():\n custom_format[col_name] = self.workbook.add_format(style)\n\n\n # Write the column headers with the defined format.\n if print_index:\n worksheet.write(0, 0, 'Index', self.header_format)\n for col_num, value in enumerate(output_df.columns.values):\n worksheet.write(0, col_num + index_col_offset, value, self.header_format)\n\n # Iterate through DF rows and write to Excel file\n for row_num in range(len(output_df)):\n\n # Get the row depth (if needed for highlight, indent or grouping)\n if highlight_depth or cols_to_indent or group_rows:\n depth = int(df[depth_col_name].iloc[row_num])\n else:\n depth = None\n\n format_option = 'highlight' if highlight_depth else None\n\n # Write optional index first using highlighted or plain index format\n print_format = self.cell_format[('index', depth, format_option)]\n if print_index:\n worksheet.write(row_num + 1, 0, output_df.index[row_num], print_format)\n\n # Write rest of the row\n for col_num in range(len(output_df.columns)):\n\n col_name = output_df.columns[col_num]\n\n # Check if column should be highlighted and/or indented\n indent_col = cols_to_indent is not None and col_name in cols_to_indent\n highlight_col = highlight_depth and \\\n (highlight_col_limit == 0 or col_num < highlight_col_limit - index_col_offset)\n\n # Choose the correct format option to use\n if indent_col and highlight_col:\n format_option = 'indent_highlight'\n elif indent_col:\n format_option = 'indent'\n elif highlight_col:\n format_option = 'highlight'\n else:\n format_option = None\n\n # Get value from DF\n df_value = output_df.iloc[row_num, col_num]\n\n # Set as empty string if null - values could be lists also, hence the .any()\n value = df_value if pd.notnull([df_value]).any() else ''\n value_type = output_df.dtypes[col_num] if pd.notnull([df_value]).any() else None\n\n # Write data as number or string\n if col_formats.get(col_name)=='custom':\n worksheet.write(row_num + 1, col_num + index_col_offset, value,\n custom_format[col_name])\n\n elif value_type in ['float64'] or col_formats.get(col_name)=='float':\n worksheet.write_number(row_num + 1, col_num + index_col_offset, value,\n self.cell_format[('float', depth, format_option)])\n\n elif value_type in ['int64', 'Int64'] or col_formats.get(col_name)=='int':\n worksheet.write_number(row_num + 1, col_num + index_col_offset, value,\n self.cell_format[('default', depth, format_option)])\n\n elif value_type in ['datetime64[ns]', '<M8[ns]'] or col_formats.get(col_name)=='date':\n worksheet.write_datetime(row_num + 1, col_num + index_col_offset, value,\n self.cell_format[('date', depth, format_option)])\n\n elif 
col_formats.get(col_name)=='string':\n worksheet.write_string(row_num + 1, col_num + index_col_offset, str(value),\n self.cell_format[('default', depth, format_option)])\n\n else:\n worksheet.write(row_num + 1, col_num + index_col_offset, str(value),\n self.cell_format[('default', depth, format_option)])\n\n # Set optional grouping of rows\n if group_rows:\n if depth > 0:\n worksheet.set_row(row_num + 1, None, None, {'level': depth})\n\n # Autofit column width\n for col_num, width in enumerate(self.__get_col_widths(output_df)):\n\n # After the index column, check type and override width if necessary\n if col_num > 0:\n if output_df.dtypes[col_num - 1] in ['float64']:\n width = 8\n elif output_df.dtypes[col_num - 1] in ['datetime64[ns]']:\n width = 8\n elif width>80:\n width = 8\n\n # If not printing index, skip to the first column and offset\n if not print_index:\n if col_num == 0: continue\n col_num -= 1\n\n worksheet.set_column(col_num, col_num, width + 2)",
"def create_sheet(self, name, rows=1, cols=1):\n self.spread.add_worksheet(name, rows, cols)\n self._refresh_sheets()\n self.open_sheet(name)",
"def save_to_worksheet(spreadsheet_id: str,\n ws_title: str,\n data: List[List[str]],\n keep_header_row: bool) -> Dict[str, Union[str, int]]:\n\n gc = gspread.service_account(filename=constants.FILEPATH_GSHEET_CREDS)\n sheet = gc.open_by_key(spreadsheet_id)\n ws = sheet.worksheet(ws_title)\n\n start_row_idx = 2 if keep_header_row else 1 \n\n # 1. Add a new row to the end.\n ws.add_rows(1)\n # 2. Delete rows from start_row_idx till the 2nd-last row.\n ws.delete_rows(start_row_idx, ws.row_count - 1)\n # 3. Insert new data from start_row_idx onwards.\n resp = ws.insert_rows(data, start_row_idx)\n\n return {\n constants.UPDATED_RANGE: resp[constants.UPDATES][constants.UPDATED_RANGE],\n constants.UPDATED_ROWS: resp[constants.UPDATES][constants.UPDATED_ROWS],\n }",
"def write_rows(self, rows):\n for row in rows:\n self.write_row(row)",
"def edit_xlsx_file(self):\n\n try:\n file = openpyxl.load_workbook('Result/Autheurs.xlsx')\n except IOError:\n workbook = xlsxwriter.Workbook('Result/Autheurs.xlsx')\n workbook.close()\n file = openpyxl.load_workbook('Result/Autheurs.xlsx')\n worksheet = file.active\n worksheet.cell(row=1, column=1).value = \"Nom\"\n worksheet.cell(row=1, column=2).value = \"Date de naissance\"\n worksheet.cell(row=1, column=3).value = \"Description\"\n i = 2\n for quote in self.quote_lst:\n # Insert Bdd\n self.firebase = firebase.FirebaseApplication('https://pythoncnam-default-rtdb.europe-west1.firebasedatabase.app/', None)\n self.data = {'Name': quote.author.name,\n 'Naissance': quote.author.birthdate,\n 'Description' : quote.author.description }\n self.result = self.firebase.post('pythoncnam/Auteur',self.data) \n\n worksheet.cell(row=i, column=1).value = quote.author.name\n worksheet.cell(row=i, column=2).value = quote.author.birthdate\n worksheet.cell(row=i, column=3).value = quote.author.description\n i += 1\n file.save('Result/Autheurs.xlsx')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns an ODSWriter object.
|
def writer(odsfile, *args, **kwargs):
return ODSWriter(odsfile, *args, **kwargs)
|
[
"def writer_object(cls):\n # Assume this writer is a built-in.\n return writers.get_writer_class(cls.name)()",
"def create_simple_writer(outputDef, defaultOutput, outputFormat, fieldNames, compress=True, valueClassMappings=None, datasetMetaProps=None, fieldMetaProps=None):\n\n if not outputDef:\n outputBase = defaultOutput\n else:\n outputBase = outputDef\n\n if outputFormat == 'json':\n\n write_squonk_datasetmetadata(outputBase, True, valueClassMappings, datasetMetaProps, fieldMetaProps)\n\n return BasicObjectWriter(open_output(outputDef, 'data', compress)), outputBase\n\n elif outputFormat == 'tsv':\n return TsvWriter(open_output(outputDef, 'tsv', compress), fieldNames), outputBase\n\n else:\n raise ValueError(\"Unsupported format: \" + outputFormat)",
"def make_iostring_writer(cls, translatedict={}):\n strbuf = cStringIO.StringIO()\n outputhandler = XMLWriter(strbuf)\n return cls(outputhandler, None, translatedict), strbuf",
"def get_writer(self, name=None):\n self._create_working_folder()\n name = self.clean_name(name)\n if name not in self.writers:\n self.writers[name] = open(os.path.join(self.working_folder, name), 'wb')\n return self.writers[name]",
"def GetPackageWriter(output_dir=None, output_file=None, output_format='zip'):\n\n if not (output_dir or output_file):\n raise ValueError(\n 'GetPackageWriter requires either output_dir or output_file')\n if output_dir and output_file:\n raise ValueError(\n 'GetPackageWriter requires only one of output_dir or output_file')\n\n if output_dir:\n package_writer = filesystem_library_package.FilesystemLibraryPackage(\n output_dir)\n else:\n out = open(output_file, 'w')\n if output_format == 'tgz':\n package_writer = tar_library_package.TarLibraryPackage(out)\n elif output_format == 'tar':\n package_writer = tar_library_package.TarLibraryPackage(out,\n compress=False)\n else:\n package_writer = zip_library_package.ZipLibraryPackage(out)\n return package_writer",
"def get_table_writer(self) -> TableWriter:\n raise NotImplementedError",
"def build(self):\n self.writer = self.CarbonWriterBuilder.build()\n return self",
"def initialize_writer():\n output_path = os.path.join(get_output_path(), output_file_name)\n return pd.ExcelWriter(output_path, engine='xlsxwriter')",
"def _CreateStorageWriter(self, event_data, base_year=None):\n storage_writer = fake_writer.FakeStorageWriter()\n storage_writer.Open()\n\n event_data_stream = events.EventDataStream()\n storage_writer.AddAttributeContainer(event_data_stream)\n\n event_data_stream_identifier = event_data_stream.GetIdentifier()\n\n if base_year:\n year_less_log_helper = events.YearLessLogHelper()\n year_less_log_helper.earliest_year = base_year\n year_less_log_helper.last_relative_year = 0\n\n year_less_log_helper.SetEventDataStreamIdentifier(\n event_data_stream_identifier)\n storage_writer.AddAttributeContainer(year_less_log_helper)\n\n event_data.SetEventDataStreamIdentifier(event_data_stream_identifier)\n storage_writer.AddAttributeContainer(event_data)\n\n return storage_writer",
"def sync_report_writer_factory(\n output_file: Union[str, bytes, os.PathLike], append: bool = False\n) -> ReportWriterABC:\n output_type = os.path.splitext(output_file)[1]\n output_type = output_type.lower()[1:]\n if output_type == \"csv\":\n return SyncReportWriterCSV(output_file, append)\n elif output_type == \"json\":\n return SyncReportWriterJSON(output_file, append)\n elif output_type in [\"sqlite\", \"db\"]:\n return SyncReportWriterSQLite(output_file, append)\n else:\n raise ValueError(f\"Unknown report file type: {output_file}\")",
"def get_writer(self):\n\t\tself.wlock.acquire()\n\t\ttry:\n\t\t\tconn = self.writers[0]\n\t\t\tself.writers.remove(conn)\n\t\t\tself.writers.append(conn)\n\t\tfinally:\n\t\t\tself.wlock.release()\n\t\treturn conn",
"def make_file_writer(cls, fname, translatedict={}):\n filehandle = open(fname, 'w')\n outputhandler = XMLWriter(filehandle)\n return cls(outputhandler, None, translatedict)",
"def to_wkt(\n self,\n ogr_compliant=False,\n reducer=None,\n ) -> Union[xr.DataArray, xr.Dataset]:\n # ogr compliant naming and attrs\n if ogr_compliant:\n obj = self.ogr_compliant(reducer=reducer)\n else:\n obj = self.update_geometry(geom_format=\"wkt\", geom_name=\"ogc_wkt\")\n return obj",
"def export_report_writer_factory(\n output_file: Union[str, bytes, os.PathLike], append: bool = False\n) -> ReportWriterABC:\n output_type = os.path.splitext(output_file)[1]\n output_type = output_type.lower()[1:]\n if output_type == \"csv\":\n return ExportReportWriterCSV(output_file, append)\n elif output_type == \"json\":\n return ExportReportWriterJSON(output_file, append)\n elif output_type in [\"sqlite\", \"db\"]:\n return ExportReportWriterSQLite(output_file, append)\n else:\n raise ValueError(f\"Unknown report file type: {output_file}\")",
"def GetConsoleWriterStream(self):\n return self.__stream_wrapper.stream",
"def writeObject(self, *args):\r\n return _osgDB.DeprecatedDotOsgWrapperManager_writeObject(self, *args)",
"def newDocument():\n return Document(HopperLowLevel.newDocument())",
"def _exportNode(self):\n node = self._getObjectNode('object')\n node.appendChild(self._extractProperties())\n self._logger.info(\"CPSTypeMaker exported.\")\n return node",
"def batch_writer(self, overwrite_by_pkeys=None):\n return BatchWriter(\n self.name, self.meta.client, overwrite_by_pkeys=overwrite_by_pkeys\n )",
"def _exportNode(self):\n output = self._doc.createElement(\"object\")\n for nodename in (\"order\", \"hidden\"):\n skins = getattr(self.context, \"_\" + nodename)\n for skin in sorted(skins):\n for name in sorted(skins[skin]):\n node = self._doc.createElement(nodename)\n node.setAttribute(\"skinname\", skin)\n node.setAttribute(\"manager\", name)\n for viewlet in skins[skin][name]:\n child = self._doc.createElement(\"viewlet\")\n child.setAttribute(\"name\", viewlet)\n node.appendChild(child)\n output.appendChild(node)\n return output"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Client should raise an exception if it is missing arguments.
|
def test_client_missing_args(self):
self.assertRaises(InvalidUsage, Client, instance="test")
self.assertRaises(InvalidUsage, Client, instance="test", user="foo")
self.assertRaises(InvalidUsage, Client, instance="test", password="foo")
|
[
"def test_client_incompatible_args(self):\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"bar\",\n session=\"foobar\",\n )",
"def test_client_invalid_raise_on_empty(self):\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n raise_on_empty=0,\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n raise_on_empty=\"test\",\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n raise_on_empty={\"a\": \"b\"},\n )",
"def test_activity_map_get_command_invalid_arguments(args, message, requests_mock):\n client = init_mock_client(requests_mock, on_cloud=False)\n with pytest.raises(Exception) as error:\n ExtraHop_v2.activity_map_get_command(client, args, False)\n assert str(error.value) == message",
"def test_client_with_host_and_instance(self):\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n host=\"test\",\n user=\"foo\",\n password=\"bar\",\n )",
"def _validate_init_args(self):\r\n\r\n birdseed_args = {\r\n 'access_key': self.access_key,\r\n 'access_secret': self.access_secret,\r\n 'consumer_key': self.consumer_key,\r\n 'consumer_secret': self.consumer_secret,\r\n 'query': self.query\r\n }\r\n\r\n # iterate through the keys of the dict\r\n # check that the value it represents is \"truthy\" (in this case, not None)\r\n # if it IS None, raise a ValueError telling the caller it must provide that argument\r\n for key in birdseed_args:\r\n if not birdseed_args[key]:\r\n raise ValueError('Please provide `{}`'.format(key))",
"def test_validate_arguments_for_get_peer_command_failure(\n args: dict, error_message: str, requests_mock\n) -> None:\n mock_client = init_mock_client(on_cloud=False, requests_mock=requests_mock)\n with pytest.raises(ExtraHop_v2.InvalidValueError) as error:\n _ = ExtraHop_v2.peers_get_command(mock_client, args, False)\n assert error_message == str(error.value)",
"def test_extrahop_devices_search_command_with_invalid_arguments(\n args, message, requests_mock\n):\n client = init_mock_client(requests_mock, on_cloud=False)\n with pytest.raises(ExtraHop_v2.InvalidValueError) as error:\n ExtraHop_v2.devices_search_command(client, args, False)\n\n assert str(error.value) == message",
"def test_require_arg(self):\n self.layer.require_arg('bobofet')\n self.assertRaises(outline.layer.LayerException, self.layer.check_required_args)\n self.layer.set_arg('bobofet', 1)\n self.layer.check_required_args()",
"def testNoParamsNeeded(self):\n req = {\n 'id': 1,\n 'jsonrpc': '2.0',\n 'method': 'greet',\n }\n res = self.send_json(req)\n self.assertEqual(res['error']['code'], -32602)\n self.assertEqual(res['error']['message'], 'Invalid params')",
"def test_gcb_get_retrohunt_command_when_empty_args_provided(client, args, err_msg):\n from GoogleChronicleBackstory import gcb_get_retrohunt_command\n\n with pytest.raises(ValueError) as e:\n gcb_get_retrohunt_command(client, args=args)\n\n assert str(e.value) == err_msg",
"def test_gcb_start_retrohunt_when_invalid_arguments_provided(client, args, error_msg):\n from GoogleChronicleBackstory import gcb_start_retrohunt_command\n with pytest.raises(ValueError) as e:\n gcb_start_retrohunt_command(client, args)\n assert str(e.value) == error_msg",
"def no_more_args( args):\n\tif len(args) != 0:\n\t\tdie( \"No more than one argument is expected.\")",
"def test_client_invalid_request_params(self):\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=\"a string\",\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=[\"item0\", \"item1\"],\n )\n\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=3,\n )\n\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=0,\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=(1, \"2\"),\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=True,\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=2.89,\n )",
"def test_gcb_create_rule_version_command_when_empty_args_provided(client, args, error_msg):\n from GoogleChronicleBackstory import gcb_create_rule_version_command\n with pytest.raises(ValueError) as e:\n gcb_create_rule_version_command(client, args)\n assert str(e.value) == error_msg",
"def test_detections_list_command_invalid_args(requests_mock, args, error_msg):\n requests_mock.get(\n f\"{BASE_URL}/api/v1/extrahop/version\", json={\"version\": \"9.3.0.1319\"}\n )\n client = init_mock_client(requests_mock, on_cloud=False)\n with pytest.raises(Exception) as error:\n ExtraHop_v2.detections_list_command(client, args)\n assert str(error.value) == error_msg",
"def test_missing_arg(self):\n parser, config_dict = set_args()\n with self.assertRaises(SystemExit):\n args = parser.parse_args(self.cmd_args[9])",
"def test_ignorearg(self):\n self.assertEqual(check_args(self.ignorearg), {})",
"def test_gcb_get_rule_command_when_empty_args_given(client):\n from GoogleChronicleBackstory import gcb_get_rule_command\n with pytest.raises(ValueError) as e:\n gcb_get_rule_command(client, args={'id': ''})\n assert str(e.value) == 'Missing argument id.'",
"def require_args(args, min, msg):\n if len(args) < min:\n raise optparse.OptParseError(msg)",
"def test_gcb_create_reference_list_command_when_empty_args_provided(client, args, error_msg):\n from GoogleChronicleBackstory import gcb_create_reference_list_command\n with pytest.raises(ValueError) as e:\n gcb_create_reference_list_command(client, args)\n assert str(e.value) == error_msg"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Client should raise an exception if it receives incompatible args.
|
def test_client_incompatible_args(self):
self.assertRaises(
InvalidUsage,
Client,
instance="test",
user="foo",
password="bar",
session="foobar",
)
|
[
"def _handle_args(self, *args):\n pass",
"def check_args(self, *args):\n if self._nb_args >= 0 and len(args) != self._nb_args:\n raise ValueError(\n \"Incorrect number of parameters specified. \"\n \"Got {}, expected {}.\".format(len(args), self._nb_args)\n )",
"def test_activity_map_get_command_invalid_arguments(args, message, requests_mock):\n client = init_mock_client(requests_mock, on_cloud=False)\n with pytest.raises(Exception) as error:\n ExtraHop_v2.activity_map_get_command(client, args, False)\n assert str(error.value) == message",
"def test_validate_arguments_for_get_peer_command_failure(\n args: dict, error_message: str, requests_mock\n) -> None:\n mock_client = init_mock_client(on_cloud=False, requests_mock=requests_mock)\n with pytest.raises(ExtraHop_v2.InvalidValueError) as error:\n _ = ExtraHop_v2.peers_get_command(mock_client, args, False)\n assert error_message == str(error.value)",
"def no_more_args( args):\n\tif len(args) != 0:\n\t\tdie( \"No more than one argument is expected.\")",
"def _test_parse_args_fails(self, args: str) -> None:\n with self.assertRaises(OatmealParseError):\n OatmealMsg._parse_args(args.encode('ascii'))",
"def test_ignorearg(self):\n self.assertEqual(check_args(self.ignorearg), {})",
"def test_gcb_start_retrohunt_when_invalid_arguments_provided(client, args, error_msg):\n from GoogleChronicleBackstory import gcb_start_retrohunt_command\n with pytest.raises(ValueError) as e:\n gcb_start_retrohunt_command(client, args)\n assert str(e.value) == error_msg",
"def test_extrahop_devices_search_command_with_invalid_arguments(\n args, message, requests_mock\n):\n client = init_mock_client(requests_mock, on_cloud=False)\n with pytest.raises(ExtraHop_v2.InvalidValueError) as error:\n ExtraHop_v2.devices_search_command(client, args, False)\n\n assert str(error.value) == message",
"def require_args(args, min, msg):\n if len(args) < min:\n raise optparse.OptParseError(msg)",
"def test_bluetoothctl_with_invalid_args(self):\n\n output='Too many arguments: 2 > 1'\n self.assertEqual(parse(output, quiet=True), [])",
"def test_gcb_create_rule_version_command_when_empty_args_provided(client, args, error_msg):\n from GoogleChronicleBackstory import gcb_create_rule_version_command\n with pytest.raises(ValueError) as e:\n gcb_create_rule_version_command(client, args)\n assert str(e.value) == error_msg",
"def test_client_invalid_raise_on_empty(self):\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n raise_on_empty=0,\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n raise_on_empty=\"test\",\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n raise_on_empty={\"a\": \"b\"},\n )",
"def test_gcb_get_retrohunt_command_when_empty_args_provided(client, args, err_msg):\n from GoogleChronicleBackstory import gcb_get_retrohunt_command\n\n with pytest.raises(ValueError) as e:\n gcb_get_retrohunt_command(client, args=args)\n\n assert str(e.value) == err_msg",
"def test_detections_list_command_invalid_args(requests_mock, args, error_msg):\n requests_mock.get(\n f\"{BASE_URL}/api/v1/extrahop/version\", json={\"version\": \"9.3.0.1319\"}\n )\n client = init_mock_client(requests_mock, on_cloud=False)\n with pytest.raises(Exception) as error:\n ExtraHop_v2.detections_list_command(client, args)\n assert str(error.value) == error_msg",
"def test_require_arg(self):\n self.layer.require_arg('bobofet')\n self.assertRaises(outline.layer.LayerException, self.layer.check_required_args)\n self.layer.set_arg('bobofet', 1)\n self.layer.check_required_args()",
"def test_client_invalid_request_params(self):\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=\"a string\",\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=[\"item0\", \"item1\"],\n )\n\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=3,\n )\n\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=0,\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=(1, \"2\"),\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=True,\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=2.89,\n )",
"def test_gcb_list_retrohunts_command_when_invalid_args_provided(client, args, error_msg):\n from GoogleChronicleBackstory import gcb_list_retrohunts_command\n with pytest.raises(ValueError) as e:\n gcb_list_retrohunts_command(client, args)\n assert str(e.value) == error_msg",
"def test_metrics_list_command_invalid_args(requests_mock, args, error_msg):\n client = init_mock_client(requests_mock, on_cloud=False)\n with pytest.raises(Exception) as error:\n ExtraHop_v2.metrics_list_command(client, args)\n assert str(error.value) == error_msg"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Should be able to create a client given a requests session object.
|
def test_client_with_session(self):
session = requests.Session()
Client("snow.example.com", session=session)
|
[
"def __sessionmaker():\n\tsession = requests.ClientSession()\n\treturn session",
"def get_client(self, args):\n try:\n # Load existing session, so as to keep current dir etc.\n with open(self.session_path, \"rb\") as fhandle:\n client = pickle.load(fhandle)\n except (IOError, pickle.PickleError):\n # Init a new RadonClient\n client = self.create_client(args)\n \n if args[\"--url\"]:\n if client.url != args[\"--url\"]:\n # Init a fresh RadonClient\n client = self.create_client(args)\n client.session = requests.Session()\n return client",
"def init_client(self):\n self._transport = RequestsHTTPTransport(url=self._url,\n use_json=True,\n headers={\n \"Content-type\":\n \"application/json\",\n \"Authorization\":\n \"bearer \" +\n str(self._token).strip()\n },\n verify=False)\n self._client = Client(retries=3,\n transport=self._transport,\n fetch_schema_from_transport=False)",
"def client(\n username: Optional[str] = None,\n api_key: Optional[str] = None,\n session: Optional[sessions.Session] = None,\n):\n has_login = (username is not None) and (api_key is not None)\n has_session = session is None\n\n if not has_session:\n if has_login:\n session = sessions.Session(\n credentials.Credentials(username=username, api_key=api_key)\n )\n else:\n raise MissingAuthentication()\n return client_mod.Client(session)",
"def _create_client(self):\r\n self.association_refresh_time = {}\r\n auth_plugin = k_loading.load_auth_from_conf_options(\r\n cfg.CONF, 'placement')\r\n client = k_loading.load_session_from_conf_options(\r\n cfg.CONF, 'placement', auth=auth_plugin)\r\n client.additional_headers = {'accept': 'application/json'}\r\n return client",
"async def get_client(self) -> aiohttp.ClientSession:\n if (not self.client) or self.client.closed:\n self.client = aiohttp.ClientSession(headers=self.headers)\n return self.client",
"def _build_session(self, auth_class, *args, **kwargs):\n session = requests.session()\n if auth_class:\n session.auth = auth_class(*args, **kwargs)\n session.headers.update({requests.utils.to_native_string('CB-VERSION'): self.API_VERSION})\n session.headers.update({'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'User-Agent': 'coinbase/python/2.0'})\n return session",
"def create_session():\n session = requests.Session()\n headers = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Authorization': 'Basic NzJkNTBhZDctNjk4MC00OTQxLWFiNGQtNThkYzM0NjVmMDY5OjczMGUyNzgwMDMxNTkwNWMwYThiYzE0ODRmYTUzM2I2NWM0YWI5Mjc4NzdjZTdiZDYyMzUxODcwMWQ0MDY1ODA=',\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0'\n }\n session.headers.update(headers)\n return session",
"def __make_request__(self, *args, **kwargs):\n\t\tif self.session:\n\t\t\tresponse = self.session.request(*args, **kwargs)\n\t\telse:\n\t\t\tresponse = requests.request(*args, **kwargs)\n\n\t\tif response.status_code == 401:\n\t\t\traise AuthenticationError(\n\t\t\t\tstatus_code=response.status_code,\n\t\t\t\tresponse_message=response.text\n\t\t\t)\n\n\t\treturn response",
"def create_session() -> requests.Session:\n\n agent = user_agent.generate_user_agent(os=OPERATING_SYSTEMS)\n \n session = requests.Session()\n session.headers['User-Agent'] = agent\n\n return session",
"def _get_client(self, server):\n return Client(\n server=server[0],\n authport=server[1],\n secret=server[2],\n dict=self._get_dictionary(),\n )",
"def _init_client():\n return _Client(_ARM_WS_URL)",
"def client():\n return TestClient()",
"def initiate_client(url: str):\n return Client(url)",
"def new_test_client(cls, **kwargs):\r\n\r\n client = cls(debug_logging=True)\r\n client.login(**kwargs)\r\n\r\n return client",
"def _client(self) -> hvac.Client:\n if \"session\" not in self.kwargs:\n # If no session object provide one with retry as per hvac documentation:\n # https://hvac.readthedocs.io/en/stable/advanced_usage.html#retrying-failed-requests\n adapter = HTTPAdapter(\n max_retries=Retry(\n total=3,\n backoff_factor=0.1,\n status_forcelist=[412, 500, 502, 503],\n raise_on_status=False,\n )\n )\n session = Session()\n session.mount(\"http://\", adapter)\n session.mount(\"https://\", adapter)\n self.kwargs[\"session\"] = session\n\n _client = hvac.Client(url=self.url, **self.kwargs)\n if self.auth_type == \"approle\":\n self._auth_approle(_client)\n elif self.auth_type == \"aws_iam\":\n self._auth_aws_iam(_client)\n elif self.auth_type == \"azure\":\n self._auth_azure(_client)\n elif self.auth_type == \"gcp\":\n self._auth_gcp(_client)\n elif self.auth_type == \"github\":\n self._auth_github(_client)\n elif self.auth_type == \"kubernetes\":\n self._auth_kubernetes(_client)\n elif self.auth_type == \"ldap\":\n self._auth_ldap(_client)\n elif self.auth_type == \"radius\":\n self._auth_radius(_client)\n elif self.auth_type == \"token\":\n self._set_token(_client)\n elif self.auth_type == \"userpass\":\n self._auth_userpass(_client)\n else:\n raise VaultError(f\"Authentication type '{self.auth_type}' not supported\")\n\n if _client.is_authenticated():\n return _client\n else:\n raise VaultError(\"Vault Authentication Error!\")",
"def redshift_client_create(self):\n redshift = boto3.client(\"redshift\", region_name=\"us-west-2\",\n aws_access_key_id=self.key, aws_secret_access_key=self.secret)\n self.redshift_client = redshift",
"def test_client_as_context_manager_has_default_requests_session():\n with neverbounce_sdk.client() as client:\n assert client.session is not None\n assert isinstance(client.session, Session)",
"async def _get_client(loop=None):\n api_id, api_hash, phone_number, session_name = storage.get_telegram_secrets()\n if loop:\n client = TelegramClient(session_name, api_id, api_hash, loop=loop)\n else:\n client = TelegramClient(session_name, api_id, api_hash)\n await client.connect()\n\n if not await client.is_user_authorized():\n await client.send_code_request(phone_number)\n await client.sign_in(phone_number, input(\"Please enter the code you received: \"))\n return client",
"def authorized_client_factory(client: Client, settings, organisation_pk):\n\n def _inner(user):\n session = client.session\n session[\"first_name\"] = user[\"first_name\"]\n session[\"last_name\"] = user[\"last_name\"]\n session[\"user_token\"] = user[\"token\"]\n session[\"lite_api_user_id\"] = user[\"lite_api_user_id\"]\n session[\"email\"] = user[\"email\"]\n session[\"organisation\"] = organisation_pk\n session[settings.TOKEN_SESSION_KEY] = {\n \"access_token\": \"mock_access_token\",\n \"expires_in\": 36000,\n \"token_type\": \"Bearer\",\n \"scope\": [\"read\", \"write\"],\n \"refresh_token\": \"mock_refresh_token\",\n }\n session.save()\n client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key\n return client\n\n yield _inner"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Client should raise an exception if it receives both host and instance arguments.
|
def test_client_with_host_and_instance(self):
self.assertRaises(
InvalidUsage,
Client,
instance="test",
host="test",
user="foo",
password="bar",
)
|
[
"def test_thaw_host_with_invalid_host(self):\n self.assertRaises(lib_exc.BadRequest,\n self.admin_volume_services_client.thaw_host,\n host='invalid_host')",
"def test_freeze_host_with_invalid_host(self):\n self.assertRaises(lib_exc.BadRequest,\n self.admin_volume_services_client.freeze_host,\n host='invalid_host')",
"def test_client_host(self):\n host = \"123.123.123.123\"\n c = Client(user=\"foo\", password=\"foo\", host=host)\n self.assertEqual(c.host, host)",
"def _check_host(self):\n if not self.available:\n _LOGGER.error(\"No HassOS availabe\")\n raise HassioNotSupportedError()",
"def test_no_duplicate_servers(self):\n with pytest.raises(corenlp.PermanentlyFailedException):\n with corenlp.CoreNLPClient(annotators=\"tokenize,ssplit\") as duplicate_server:\n raise RuntimeError(\"This should have failed\")",
"def test_invalidHostname(self):\n cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(\n u\"wrong-host.example.com\",\n u\"correct-host.example.com\",\n )\n self.assertEqual(cWrapped.data, b'')\n self.assertEqual(sWrapped.data, b'')\n\n cErr = cWrapped.lostReason.value\n sErr = sWrapped.lostReason.value\n\n self.assertIsInstance(cErr, VerificationError)\n self.assertIsInstance(sErr, ConnectionClosed)",
"def test_unknown_host_sends_message_to_master(self):\n\n class TestUser(User):\n @task\n def my_task(self):\n pass\n\n with mock.patch(\"locust.rpc.rpc.Server\", mocked_rpc()) as server:\n master = self.get_runner(user_classes=[TestUser])\n server.mocked_send(Message(\"client_ready\", __version__, \"zeh_fake_client1\"))\n self.assertEqual(1, len(master.clients))\n self.assertTrue(\n \"zeh_fake_client1\" in master.clients, \"Could not find fake client in master instance's clients dict\"\n )\n\n master.start(10, 10)\n sleep(0.1)\n server.mocked_send(Message(\"stats\", UNRECOGNIZED_HOST_MESSAGE, \"unknown_host\"))\n self.assertEqual(2, len(server.outbox))",
"def test_create_same_host_two_times(self):\n h1 = self.plugin.createAndAddHost(\"pepito\", \"linux\")\n h2 = self.plugin.createAndAddHost(\"pepito\", \"linux\")\n self._plugin_controller.setLastCommandInformation(\"mock\")\n self._plugin_controller.onCommandFinished()\n self._model_controller.processAllPendingActions()\n \n self.assertTrue(len(self._model_controller.getAllHosts()) == 1, \"The controller should have just one host\")\n self.assertTrue(self._model_controller.getHost(h1) == self._model_controller.getHost(h2), \"The host should be the same\")",
"def test_bad_hostname():\n pytest.xfail(\"Bad hostname.\")\n connect_to_dremio_flight_server_endpoint(\"badHostNamE\",\n \"32010\", \"dremio\", \"dremio123\", False, False, False)",
"def test_serverHost(self):\n return self._hostpeertest(\"getHost\", True)",
"def test_remotehosts_get(self):\n pass",
"def test_validate_ip_for_get_peer_command_failure(requests_mock) -> None:\n args = {\"ip_or_id\": \"1:1:1\"}\n mock_client = init_mock_client(on_cloud=False, requests_mock=requests_mock)\n with pytest.raises(ExtraHop_v2.DemistoException) as error:\n _ = ExtraHop_v2.peers_get_command(mock_client, args, False)\n assert \"Error parsing IP Address 1:1:1\" == str(error.value)",
"def test_clientHost(self, get=\"getHost\"):\n return self._hostpeertest(\"getHost\", False)",
"def test_client_incompatible_args(self):\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"bar\",\n session=\"foobar\",\n )",
"def validate_host(self):\n\n # Input Validation - Rock my regex ;-)\n re_hostname = re.compile(\"^[a-zA-Z0-9]+[a-zA-Z0-9-]*((([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?\\.)+[a-zA-Z]{2,6})?$\")\n re_ipaddr = re.compile(\"^((25[0-5]|2[0-4]\\d|[01]\\d\\d|\\d?\\d)\\.){3}(25[0-5]|2[0-4]\\d|[01]\\d\\d|\\d?\\d)$\")\n\n if self.server == None:\n end(UNKNOWN, \"You must supply a server hostname or ip address. \" \\\n + \"See --help for details\")\n\n if not re_hostname.match(self.server) and \\\n not re_ipaddr.match(self.server):\n end(UNKNOWN, \"Server given does not appear to be a valid \" \\\n + \"hostname or ip address\")",
"def test_09_expunge_instance_in_network(self):\n\n # Validate the following\n # 1. Recover the virtual machines.\n # 2. Vm should be in stopped state. State both the instances\n # 3. Make sure that all the PF,LB and Static NAT rules on this VM\n # works as expected.\n # 3. Make sure that we are able to access google.com from this user Vm\n\n self.debug(\"Validating if the network rules work properly or not?\")\n self.validate_network_rules()\n\n self.debug(\"Delete virtual machines in account: %s\" %\n self.account.name)\n try:\n self.vm_1.delete(self.apiclient)\n self.vm_2.delete(self.apiclient)\n self.vm_3.delete(self.apiclient)\n except Exception as e:\n self.fail(\"Failed to destroy the virtual instances, %s\" % e)\n\n # Check if the network rules still exists after Vm stop\n self.debug(\"Checking if NAT rules existed\")\n with self.assertRaises(Exception):\n NATRule.list(\n self.apiclient,\n id=self.nat_rule.id,\n listall=True\n )\n\n LoadBalancerRule.list(\n self.apiclient,\n id=self.lb_rule.id,\n listall=True\n )\n return",
"def test_PluggableTransport_runChecks_invalid_ip(self):\n pt = bridges.PluggableTransport()\n self.assertRaises(\n bridges.InvalidPluggableTransportIP,\n pt.updateFromStemTransport,\n self.fingerprint, 'obfs4', ('34.230.223', 37341, [\n ('iat-mode=0,'\n 'node-id=2a79f14120945873482b7823caabe2fcde848722,')]))",
"def test_multi_host(self):\n with DockerHost('host1') as host1, DockerHost('host2') as host2:\n # TODO work IPv6 into this test too\n\n # Create the network on host1, but it should be usable from all\n # hosts.\n network = host1.create_network(str(uuid.uuid4()))\n # TODO Assert that the network can be seen on host2\n\n # Check that autocreating a service for the existing network, when\n # starting a container works. Create a container on each host and\n # check that pings work.\n # TODO To make things harder, we should be able to create a\n # network using the UUID, but that doesn't work...\n # docker run --tty --interactive --detach --name workload2 --publish-service=a5accd88-869e-4149-8031-87af7c20318a.966204b315e55324148888e3808f6b4bf079a15f572142a69d4dab745bac7783 busybox\n # Error response from daemon: Cannot start container 11e8089573d188399487b1b490c1a786260dbd7cb33ec3b7817ea87528935b3f: Interface name 966204b315e55324148888e3808f6b4bf079a15f572142a69d4dab745bac7783 too long\n\n workload_host1 = host1.create_workload(\"workload1\",\n service=\"workload1\",\n network=network)\n # Precreate the service name on host1, before attaching it on\n # host 2.\n host1.execute(\"docker service publish workload2.%s\" % network.name)\n workload_host2 = host2.create_workload(\"workload2\",\n service=\"workload2\",\n network=network)\n # TODO - assert on output of endpoint show and endpoint profile\n # show commands.\n workload_host1.assert_can_ping(workload_host2.ip, retries=5)\n\n # Ping using IP addresses\n self.assert_connectivity(pass_list=[workload_host1,\n workload_host2])\n # Ping using service names\n workload_host1.execute(\"ping -c 1 -W 1 workload2\")\n workload_host2.execute(\"ping -c 1 -W 1 workload1\")\n\n # TODO - detach (\"leave\") the endpoints - (assert can't ping and\n # endpoints are removed from calicoctl)\n\n # Test deleting the network. It will fail if there are any\n # endpoints connected still.\n self.assertRaises(CommandExecError, network.delete)\n\n # Remove the workloads, so the endpoints can be unpublished, then\n # the delete should succeed.\n host1.remove_workloads()\n host2.remove_workloads()\n\n # TODO - unpublish the endpoints - (assert IPs are released)\n host1.execute(\"docker service unpublish workload1.%s\" % network)\n host1.execute(\"docker service unpublish workload2.%s\" % network)\n\n # TODO - remove the network - (assert profile is removed)\n network.delete()\n\n # TODO - Remove this calico node\n\n # TODO Would like to assert that there are no errors in the logs...",
"def test_add_remote_host(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Client host property should match host passed to constructor
|
def test_client_host(self):
host = "123.123.123.123"
c = Client(user="foo", password="foo", host=host)
self.assertEqual(c.host, host)
|
[
"def test_clientHost(self, get=\"getHost\"):\n return self._hostpeertest(\"getHost\", False)",
"def test_get_remote_host_properties(self):\n pass",
"def test_add_remote_host(self):\n pass",
"def SetHost(self, host):\n self._host = host",
"def test_serverHost(self):\n return self._hostpeertest(\"getHost\", True)",
"def test_client_with_host_and_instance(self):\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n host=\"test\",\n user=\"foo\",\n password=\"bar\",\n )",
"def __init__(self, host, port):\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((\"%s\" % host, port))\n self.client_socket = client_socket",
"def __init__(self, hosts = None, **kwargs):\n \n # Properly initialize hosts.\n if hosts == None:\n hosts = [\"localhost:6379\"]\n elif isinstance(hosts, str):\n hosts = [hosts]\n \n assert isinstance(hosts, list), \"hosts must be a string or a list of urls os the hosts.\"\n \n self.__alive_hosts = []\n while True:\n try:\n host, port = hosts.pop(0).split(\":\")\n super(RedisReplicaClient, self).__init__(host = host, port = port, **kwargs)\n info = self.info() # If this pass, hosts is alive.\n self.__alive_hosts.append(\":\".join([host, port]))\n if info[\"role\"] == \"master\":\n self._host = host\n self._port = port\n break\n except:\n if len(hosts) == 0 and len(self.__alive_hosts) == 0:\n raise ConnectionError(\"No host available.\")\n elif len(hosts) == 0 and len(self.__alive_hosts) > 0:\n host, port = self.__alive_hosts.pop(0).split(\":\")\n super(RedisReplicaClient, self).__init__(host = host, port = port, **kwargs)\n self._host, self._port = host, port\n break\n \n continue",
"def test_host():\n parser = create_parser()\n parsed_arguments = parser.parse_args([\"--host\", \"test\"])\n assert parsed_arguments.host == \"test\", \"Wrong host\"",
"def test_construct_from_properties_with_host_scheme_port(self):\n self.config.set(\"ConnectSDK\", \"connect.api.endpoint.scheme\", \"http\")\n self.config.set(\"ConnectSDK\", \"connect.api.endpoint.port\", \"8080\")\n\n communicator_config = CommunicatorConfiguration(self.config)\n\n self.assertEqual(\"http://eu.sandbox.api-ingenico.com:8080\", communicator_config.api_endpoint.geturl())",
"def host(self, value):\n if self._host:\n raise RuntimeError(\"HostManager already set!\")\n self._host = value",
"def uses_host(f):\n f.uses_host = True\n return f",
"def set_canonical_host(self, canonical_host):\n parts = canonical_host.lower().split(\":\")\n self.host = parts[0]\n if len(parts) > 1 and parts[1]:\n self.port = int(parts[1])\n else:\n self.port = None",
"def test_construct_from_properties_with_host_and_port(self):\n\n self.config.set(\"ConnectSDK\", \"connect.api.endpoint.port\", \"8443\")\n\n communicator_config = CommunicatorConfiguration(self.config)\n\n self.assertEqual(\"https://eu.sandbox.api-ingenico.com:8443\", communicator_config.api_endpoint.geturl())",
"def __init__(self):\n self.options = {\n \"host\": \"app.keysms.no\",\n \"scheme\": \"http\"\n }",
"def __init__(self, host, verify_ssl=True):\n self.host = host.rstrip(\"/\")\n if not self.host.startswith(\n \"https://\") and not self.host.startswith(\"http://\"):\n raise GitLabException(\"host should start with https:// or http://\")\n\n self.api_url = self.host + \"/api/v3\"\n self.verify_ssl = verify_ssl",
"def create_host(self, host, **kwargs):\n self.clientobj = Host(\n host = host,\n user = kwargs.pop(\"user\", \"\"),\n server = kwargs.pop(\"server\", self.server),\n nfsversion = kwargs.pop(\"nfsversion\", self.nfsversion),\n proto = kwargs.pop(\"proto\", self.proto),\n port = kwargs.pop(\"port\", self.port),\n sec = kwargs.pop(\"sec\", self.sec),\n export = kwargs.pop(\"export\", self.export),\n mtpoint = kwargs.pop(\"mtpoint\", self.mtpoint),\n datadir = kwargs.pop(\"datadir\", self.datadir),\n mtopts = kwargs.pop(\"mtopts\", self.mtopts),\n nomount = kwargs.pop(\"nomount\", self.nomount),\n sudo = kwargs.pop(\"sudo\", self.sudo),\n )\n\n self.clients.append(self.clientobj)\n return self.clientobj",
"def test_remotehosts_get(self):\n pass",
"def test_default_host():\n parser = create_parser()\n parsed_arguments = parser.parse_args([])\n assert parsed_arguments.host == \"127.0.0.1\", \"Wrong host\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Client instance property should match instance passed to constructor
|
def test_client_instance(self):
instance = "foo"
c = Client(user="foo", password="foo", instance=instance)
self.assertEqual(c.instance, instance)
|
[
"def test_client_with_host_and_instance(self):\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n host=\"test\",\n user=\"foo\",\n password=\"bar\",\n )",
"def example_property(self):",
"def __init__(self, client_type):\n self._io_service = _mxclient.Asio_IoService()\n self._client_type = client_type\n self.__instance_id = None\n super(Client, self).__init__(self._io_service, client_type)",
"def __property_init__(self, document_instance, value):\n if value is not None:\n value = self.to_json(self.validate(value, required=False))\n document_instance._doc[self.name] = value",
"def test_client_incompatible_args(self):\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"bar\",\n session=\"foobar\",\n )",
"def __init__(self, client, pool, config):\n self.is_primitive = False\n self.client = client\n self.pool = pool\n self.config = {}\n self.config.update(self.DEFAULT_CONFIG)\n self.config.update(config)",
"def instance_endpoint(self) -> \"Endpoint\":\n ...",
"def test_is_instance(self):\n self.assertIsInstance(self.obj, Square, \"created obj is not an \" +\n \"instance of Square class.\")",
"def __init__(self, resource_tester_class, service_under_test_class, log):\n ResourceImplMetatest.__init__(self, resource_tester_class, service_under_test_class, log)\n\n self.all_in_one = False",
"def test_to_check_instance_variables(self):\n self.assertEquals(self.new_source.id, 'newsbyelkwal')\n self.assertEquals(self.new_source.name, 'My News')\n self.assertEquals(self.new_source.description, 'get the latest updates')\n self.assertEquals(self.new_source.url, 'https://google.com')\n self.assertEquals(self.new_source.category, 'general')\n self.assertEquals(self.new_source.country, 'kenya') (edited)",
"def __init__(self):\n self.clients = {}",
"def test_settings_match(self):\n with mock.patch(\"bluebottle.clients.settings\", foo=1):\n p = TenantProperties()\n\n self.assertEqual(p.foo, 1)\n self.assertTrue(hasattr(p, 'foo'))",
"def test_client_valid_request_params(self):\n params = {\"foo\": \"bar\"}\n c = Client(instance=\"test\", user=\"foo\", password=\"foo\", request_params=params)\n self.assertEqual(c.request_params, params)",
"def __init__(self, api_key, api_secret):\n self.API_KEY = api_key\n self.API_SECRET = api_secret\n self.client = Client(api_key , api_secret)",
"def model_instance(self) -> any:\n pass",
"def test_instance(self):\n self.assertIsInstance(self.newtest, Amenity)",
"def is_property_instance(cls, elem):\n # We can't use ExtendedFieldURI.from_xml(). It clears the XML element, but we may not want to consume it here.\n kwargs = {\n f.name: f.from_xml(elem=elem.find(ExtendedFieldURI.response_tag()), account=None)\n for f in ExtendedFieldURI.FIELDS\n }\n xml_obj = ExtendedFieldURI(**kwargs)\n cls_obj = cls.as_object()\n return cls._normalize_obj(cls_obj) == cls._normalize_obj(xml_obj)",
"def __init__(self, client=None):\n self.client = client or boto3.client('kms')",
"def test_client_invalid_use_ssl(self):\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n use_ssl=\"a string\",\n )\n self.assertRaises(\n InvalidUsage, Client, instance=\"test\", user=\"foo\", password=\"foo\", use_ssl=1\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Client should raise an exception if `request_params` is of an invalid type
|
def test_client_invalid_request_params(self):
self.assertRaises(
InvalidUsage,
Client,
instance="test",
user="foo",
password="foo",
request_params="a string",
)
self.assertRaises(
InvalidUsage,
Client,
instance="test",
user="foo",
password="foo",
request_params=["item0", "item1"],
)
self.assertRaises(
InvalidUsage,
Client,
instance="test",
user="foo",
password="foo",
request_params=3,
)
self.assertRaises(
InvalidUsage,
Client,
instance="test",
user="foo",
password="foo",
request_params=0,
)
self.assertRaises(
InvalidUsage,
Client,
instance="test",
user="foo",
password="foo",
request_params=(1, "2"),
)
self.assertRaises(
InvalidUsage,
Client,
instance="test",
user="foo",
password="foo",
request_params=True,
)
self.assertRaises(
InvalidUsage,
Client,
instance="test",
user="foo",
password="foo",
request_params=2.89,
)
|
[
"def test_client_valid_request_params(self):\n params = {\"foo\": \"bar\"}\n c = Client(instance=\"test\", user=\"foo\", password=\"foo\", request_params=params)\n self.assertEqual(c.request_params, params)",
"def test_params_type_check(test_endpoint):\n\n with pytest.raises(ValueError):\n test_endpoint.params = False",
"def test_params_value_type_check(test_endpoint):\n\n with pytest.raises(ValueError):\n test_endpoint.params = badparams(test_endpoint._Endpoint__allowed_params)",
"def _validate(self, req_dict):\n if self._required_params:\n for param in self._required_params:\n if param not in req_dict:\n raise Exception(\n \"Required parameter not specified: '{0}'\".format(param))",
"def _params_check(record_type, version, **kwargs):\n if record_type == STATUS_CHANGES and version >= Version._040_() and \"event_time\" not in kwargs:\n raise TypeError(\"The 'event_time' query parameter is required for status_changes requests.\")\n\n elif record_type == TRIPS and version >= Version._040_() and \"end_time\" not in kwargs:\n raise TypeError(\"The 'end_time' query parameter is required for trips requests.\")\n\n elif record_type == EVENTS:\n if \"start_time\" not in kwargs and \"end_time\" not in kwargs:\n raise TypeError(\"The 'start_time' and 'end_time' query paramters are required for events requests.\")\n\n two_weeks = Client._date_format(datetime.datetime.utcnow() - datetime.timedelta(days=14), version, EVENTS)\n start = Client._date_format(kwargs[\"start_time\"], version, EVENTS)\n end = Client._date_format(kwargs[\"end_time\"], version, EVENTS)\n\n # less than --> earlier in time\n if start < two_weeks or end < two_weeks:\n raise ValueError(\"The 'start_time' and 'end_time' query parameters must be within two weeks from now.\")\n\n elif record_type == VEHICLES:\n # currently no vehicles specific param checks\n pass",
"def load_params(self, req, resp):\n if not req.content_length:\n resp.status = falcon.HTTP_400\n resp.body = json.dumps({'error': 'No request length.'})\n raise MissingParameterException()\n\n request = req.stream.read()\n log.debug(request)\n req_params = json.loads(request)\n\n for req_param, req_type in self.required_params.items():\n if not req_params.get(req_param):\n resp.status = falcon.HTTP_400\n resp.body = json.dumps({\n 'error': '\"{}\" required parameter is missing.'.format(\n req_param)})\n\n raise MissingParameterException()\n\n if not isinstance(req_params.get(req_param), req_type):\n resp.status = falcon.HTTP_400\n resp.body = json.dumps({\n 'error': (\n '{} required parameter is not of type {}, '\n 'it is of type {}'.format(\n req_param, req_type, type(req_param)))})\n\n raise ParameterTypeException()\n\n return req_params",
"def test_request_parameter_validation():\n from django.core.exceptions import ValidationError\n\n schema = SchemaFactory(\n paths={\n '/get/{id}/': {\n 'parameters': [\n {\n 'name': 'id',\n 'in': PATH,\n 'description': 'id',\n 'required': True,\n 'type': STRING,\n 'format': 'uuid',\n },\n {\n 'name': 'page',\n 'in': QUERY,\n 'type': INTEGER,\n },\n ],\n 'get': {\n 'responses': {200: {'description': \"Success\"}},\n },\n },\n },\n )\n\n request = RequestFactory(url='http://www.example.com/get/32/?page=abcd')\n\n with pytest.raises(ValidationError) as err:\n validate_request(\n request,\n paths=schema['paths'],\n base_path=schema.get('base_path', ''),\n context=schema,\n inner=True,\n )\n\n assert 'method' in err.value.messages[0]\n assert 'parameters' in err.value.messages[0]['method'][0][0]\n assert 'path' in err.value.messages[0]['method'][0][0]['parameters'][0]\n assert 'id' in err.value.messages[0]['method'][0][0]['parameters'][0]['path'][0]\n assert 'format' in err.value.messages[0]['method'][0][0]['parameters'][0]['path'][0]['id'][0]\n assert_error_message_equal(\n err.value.messages[0]['method'][0][0]['parameters'][0]['path'][0]['id'][0]['format'][0],\n MESSAGES['format']['invalid_uuid'],\n )\n\n assert 'query' in err.value.messages[0]['method'][0][0]['parameters'][0]\n assert 'page' in err.value.messages[0]['method'][0][0]['parameters'][0]['query'][0]\n assert 'type' in err.value.messages[0]['method'][0][0]['parameters'][0]['query'][0]['page'][0]\n assert_error_message_equal(\n err.value.messages[0]['method'][0][0]['parameters'][0]['query'][0]['page'][0]['type'][0],\n MESSAGES['type']['invalid'],\n )",
"def test_request_raises_for_unrecognised_prog_type(self):\n self.register(text=self.RESPONSE)\n with self.assertRaises(ValueError) as e:\n invalid_prog_type = \"invalidProgType\"\n self.mock_request(\n prog_type=invalid_prog_type,\n customer_id=self.CUSTOMER_ID,\n login_id=self.LOGIN_ID,\n amount=self.AMOUNT,\n bank_account_id=self.BANK_ACCOUNT_ID,\n invoice_id=self.INVOICE_ID,\n channel_id=self.CHANNEL_ID,\n )\n self.assertIn(e, invalid_prog_type)\n self.assertIn(e, \"not recognised\")\n self.assertIn(e, \"CustomerAccounts request\")",
"def ensure_good_request(required_parameters, accepted_parameters=None, allow_json_none=False):\n\n if accepted_parameters is None:\n accepted_parameters = required_parameters\n\n def decorator(f):\n @wraps(f)\n def fn(*args, **kwargs):\n if request.json:\n if len(request.json) > len(accepted_parameters) or len(request.json) < len(required_parameters):\n return abort(400)\n\n parameters_provided = set(request.json.keys())\n if not (parameters_provided >= required_parameters) or not (parameters_provided <= accepted_parameters):\n return abort(400)\n else:\n if not allow_json_none:\n return abort(400)\n\n try:\n return f(*args, **kwargs)\n except ValidationError:\n return abort(400)\n return fn\n\n return decorator",
"def test_immutability_of_sample_request_and_params():\n with pytest.raises(FrozenInstanceError):\n params = FrozenParams()\n params.allowed_intents = []\n\n with pytest.raises(TypeError):\n params = FrozenParams()\n params.dynamic_resource[\"a\"] = \"b\"\n\n with pytest.raises(FrozenInstanceError):\n request = Request()\n request.params = Params()\n\n with pytest.raises(TypeError):\n request = Request()\n request.frame[\"a\"] = \"b\"",
"def test_bad_requests_give_400(self) -> None:\n self.assertEqual(self._request({}), 400)",
"def test_endpoint_requires_parameter_and_expects_types(\n self, parameters, expected_status, transaction_service_client, rpc_client_id\n ):\n has_client_id = parameters.pop(\"rpc_client_id\", False)\n if has_client_id is False:\n rpc_client_id = None\n resp = transaction_service_client.post(\n f\"/rpc/client/{rpc_client_id}/transactions\", data=parameters\n )\n\n assert expected_status in resp.status",
"def test_incorrect_query_params(self):\n tester = app.test_client(self)\n response = tester.get(DUMMY_ROUTE_INCORRECT)\n self.assertEqual(response.status_code, 400)\n self.assertTrue(b'error' in response.data)",
"def test_invalid_query_params(self):\n for param in ((\"\", \"\"), (\"stringparam\", \"str\")):\n res = self.client.get(DOMAINS_URL, {'from': param[0], 'to': param[1]})\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_init_params_list_bad(self):\n with self.assertRaises(ValueError):\n insightiq_api.Parameters(['one', 1, 'two', 2])",
"def test_query_params_invalid_fields(self):\n query_params = {\n \"group_by\": {\"account\": [\"account1\"]},\n \"filter\": {\n \"resolution\": \"daily\",\n \"time_scope_value\": \"-10\",\n \"time_scope_units\": \"day\",\n \"resource_scope\": [],\n },\n \"invalid\": \"param\",\n }\n serializer = OCPQueryParamSerializer(data=query_params)\n with self.assertRaises(serializers.ValidationError):\n serializer.is_valid(raise_exception=True)",
"def _check_params(self, params):\n params_keys = params.keys()\n assert \"bandwidth\" in params_keys\n assert \"count\" in params_keys\n assert params[\"bandwidth\"] > 0.0\n assert params[\"count\"] > 0\n if not \"enforce_no_matrix\" in params_keys:\n params[\"enforce_no_matrix\"] = False\n if not \"max_memory_usage\" in params_keys:\n params[\"max_memory_usage\"] = 512\n if not \"normalize\" in params_keys:\n params[\"normalize\"] = False\n return params",
"def test_mining_hashrate_resale_details_with_missing_field(params):\n client = Client(key, secret)\n client.mining_hashrate_resale_request.when.called_with(**params).should.throw(\n ParameterRequiredError\n )",
"def testNoParamsNeeded(self):\n req = {\n 'id': 1,\n 'jsonrpc': '2.0',\n 'method': 'greet',\n }\n res = self.send_json(req)\n self.assertEqual(res['error']['code'], -32602)\n self.assertEqual(res['error']['message'], 'Invalid params')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Invalid use_ssl type should raise InvalidUsage
|
def test_client_invalid_use_ssl(self):
self.assertRaises(
InvalidUsage,
Client,
instance="test",
user="foo",
password="foo",
use_ssl="a string",
)
self.assertRaises(
InvalidUsage, Client, instance="test", user="foo", password="foo", use_ssl=1
)
|
[
"def test_tlsProtocolsNoMethodWithMaximum(self):\n with self.assertRaises(TypeError) as e:\n sslverify.OpenSSLCertificateOptions(\n privateKey=self.sKey,\n certificate=self.sCert,\n method=SSL.SSLv23_METHOD,\n lowerMaximumSecurityTo=sslverify.TLSVersion.TLSv1_2,\n )\n\n self.assertIn('method', e.exception.args[0])\n self.assertIn('lowerMaximumSecurityTo', e.exception.args[0])\n self.assertIn('exclusive', e.exception.args[0])",
"def test_tlsProtocolsNoMethodWithMinimum(self):\n with self.assertRaises(TypeError) as e:\n sslverify.OpenSSLCertificateOptions(\n privateKey=self.sKey,\n certificate=self.sCert,\n method=SSL.SSLv23_METHOD,\n insecurelyLowerMinimumTo=sslverify.TLSVersion.TLSv1_2,\n )\n\n self.assertIn('method', e.exception.args[0])\n self.assertIn('insecurelyLowerMinimumTo', e.exception.args[0])\n self.assertIn('exclusive', e.exception.args[0])",
"def test_tlsProtocolsNoMethodWithAtLeast(self):\n with self.assertRaises(TypeError) as e:\n sslverify.OpenSSLCertificateOptions(\n privateKey=self.sKey,\n certificate=self.sCert,\n method=SSL.SSLv23_METHOD,\n raiseMinimumTo=sslverify.TLSVersion.TLSv1_2,\n )\n\n self.assertIn('method', e.exception.args[0])\n self.assertIn('raiseMinimumTo', e.exception.args[0])\n self.assertIn('exclusive', e.exception.args[0])",
"def resolve_ssl_validation(verify_ssl):\n return verify_ssl if verify_ssl is not None else False",
"def ssl():\n pass",
"def test_protocol_sslv23(self):\n if support.verbose:\n sys.stdout.write(\"\\n\")\n if hasattr(ssl, 'PROTOCOL_SSLv2'):\n try:\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)\n except OSError as x:\n # this fails on some older versions of OpenSSL (0.9.7l, for instance)\n if support.verbose:\n sys.stdout.write(\n \" SSL2 client to SSL23 server test unexpectedly failed:\\n %s\\n\"\n % str(x))\n if hasattr(ssl, 'PROTOCOL_SSLv3'):\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')\n\n if hasattr(ssl, 'PROTOCOL_SSLv3'):\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)\n\n if hasattr(ssl, 'PROTOCOL_SSLv3'):\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)\n\n # Server with specific SSL options\n if hasattr(ssl, 'PROTOCOL_SSLv3'):\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,\n server_options=ssl.OP_NO_SSLv3)\n # Will choose TLSv1\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,\n server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,\n server_options=ssl.OP_NO_TLSv1)",
"def test_type_incorrect(self):\n with self.assertRaisesRegex(\n CertManagerBuilderException,\n 'Invalid cert type \"any\"'\n ):\n self.__builder.type('any')",
"def is_ssl(self):\n\t\treturn self.ssl",
"def test_invalid_tls_ver(self):\n args = {\"connect\": {\"host\": \"localhost\"}, \"tls\": {\"tls_version\": \"custom_tls\"}}\n\n with pytest.raises(exceptions.MQTTTLSError):\n MQTTClient(**args)",
"def test_doesNotSwallowOtherSSLErrors(self):\n def raiser(_):\n # Unfortunately, there seems to be no way to trigger a real SSL\n # error artificially.\n raise SSL.Error([['', '', '']])\n ctx = FakeContext(SSL.SSLv23_METHOD)\n ctx.set_cipher_list = raiser\n self.patch(sslverify.SSL, 'Context', lambda _: ctx)\n self.assertRaises(\n SSL.Error,\n sslverify._expandCipherString, u'ALL', SSL.SSLv23_METHOD, 0\n )",
"def ssl_check():\n return \"All ok, mm'kay.\"",
"def test_protocol_sslv2(self):\n if support.verbose:\n sys.stdout.write(\"\\n\")\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)\n if hasattr(ssl, 'PROTOCOL_SSLv3'):\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)\n # SSLv23 client with specific SSL options\n if no_sslv2_implies_sslv3_hello():\n # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,\n client_options=ssl.OP_NO_SSLv2)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,\n client_options=ssl.OP_NO_SSLv3)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,\n client_options=ssl.OP_NO_TLSv1)",
"def get_ssl_context(*dummy): # type: ignore\n raise ConfigurationError(\"The ssl module is not available.\")",
"def ssl(self):\n\t\tif 'with_openssl' in self.configure_options:\n\t\t\treturn True\n\t\t# Parameterized form in newer versions.\n\t\tfor x in self.configure_options:\n\t\t\tif 'with_ssl' in x:\n\t\t\t\treturn True\n\t\treturn False",
"def _handle_ssl_exception(self, err):\n if err.args[0] == ssl.SSL_ERROR_WANT_READ:\n logger.debug(\"SSL client {0} want read\".format(self._address))\n return False\n elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:\n logger.debug(\"SSL client {0} want write\".format(self._address))\n self._write_watcher.start()\n return False\n elif err.args[0] == ssl.SSL_ERROR_EOF:\n self.stop(msg=\"SSL EOF for peer {0}, connection closed\")\n return False\n else:\n return True",
"def uses_tls_for_glance(audit_options):\n section = audit_options['cinder-conf']['DEFAULT']\n nova_api_insecure = section.get(\"glance_api_insecure\")\n assert \"False\" == nova_api_insecure, \\\n \"nova_api_insecure should be False\"\n glance_api_servers = section.get(\"glance_api_servers\")\n assert glance_api_servers.startswith(\"https://\"), \\\n \"glance_api_servers should use https\"",
"def test_http_ssl_error(mock_base_http_request, client):\n # Configure\n mock_base_http_request.side_effect = DemistoException('SSLError')\n # Execute\n with pytest.raises(SSLError) as e:\n client.http_request('GET', MOCK_TEST_URL_SUFFIX)\n\n # Assert\n assert (\n str(e.value)\n == \"SSL Certificate Verification Failed - try selecting 'Trust any certificate' checkbox \"\n 'in the integration configuration.'\n )",
"def test_disabled_tls(self):\n args = {\n \"connect\": {\"host\": \"localhost\"},\n \"tls\": {\"certfile\": \"/lcliueurhug/ropko3kork32\"},\n }\n\n with pytest.raises(exceptions.MQTTTLSError):\n MQTTClient(**args)\n\n args[\"tls\"][\"enable\"] = False\n\n c = MQTTClient(**args)\n assert not c._enable_tls",
"def test_init_invalid_url_protocol(self):\n # noinspection PyBroadException\n try:\n setup_config(self.writer, CONFIG_INVALID_URL_PROTOCOL)\n self.assertTrue(False)\n except Exception as e:\n self.assertEqual(str(e), 'Invalid protocol specified. Must be either \"http\", \"https\" or \"telnet\"')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Non-bool type passed to raise_on_empty should raise InvalidUsage
|
def test_client_invalid_raise_on_empty(self):
self.assertRaises(
InvalidUsage,
Client,
instance="test",
user="foo",
password="foo",
raise_on_empty=0,
)
self.assertRaises(
InvalidUsage,
Client,
instance="test",
user="foo",
password="foo",
raise_on_empty="test",
)
self.assertRaises(
InvalidUsage,
Client,
instance="test",
user="foo",
password="foo",
raise_on_empty={"a": "b"},
)
|
[
"def validate(self, *args):\n pass",
"def ensure_valid(self):\n self.op_info.ensure_valid()",
"def validate_extended(self):",
"def test_value_if_not_asked__raises_exception_without_should_ask():\n with pytest.raises(\n ValueError,\n match=\"You should either remove value_if_not_asked or add should_ask.\",\n ):\n BasicQuestion(\n SOME_NAME,\n SOME_STRING,\n SOME_DEFAULT,\n value_if_not_asked=\"a\",\n )",
"def throws_an_error_if_buzz_has_no_input():",
"def __is_valid__(self, state):\n return False",
"def test_ensure_user_nonuser(self):\n self.assertRaises(ValueError, self.messagetools.ensure_user, {})",
"def test_person_valueerror_not_mutate():\n person = Person()\n data = {\n 'is_organization': True\n }\n assert_raises(ValueError, person.format_data_set, data)",
"def test_empty_args_list(self):\n item_args = []\n operation = \"dummy\"\n with pytest.raises(MWSError):\n parse_item_args(item_args, operation)",
"def test_no_user(self):\n with self.assertRaises(TypeError):\n limited_infection()",
"def test_totally_empty(self):\n reporter = SimpleReporter(\n pkgs=[PackageAPI(PACKAGE_EMPTY), PackageAPI(PACKAGE_EMPTY2)],\n errors_allowed=0,\n )\n reporter._check_function_args()\n self.assertTrue(reporter.errors == [])",
"def test_nothing_to_validate(self):\n with mn.model() as m:\n mn.constant('X7Allowed', False)\n mn.constant('X5Allowed', False)\n mn.constant('X4Allowed', False)\n\n self.assertEqual(m.validate_all(), {'success': True})",
"def invalid(self):\n return not self.valid",
"def test_validate_no_data(self):\n with pytest.raises(fields.ValidationError):\n assert self.field.validate()",
"def test_excepts_if_empty_input(self):\n\t\tself.assertRaises(ValueError, self.string_manipulation.format)",
"def _check_empty(key, value, empty):\n if not empty and not value:\n raise Exception(\"{} is empty, expecting a value\".format(key))\n elif empty and value:\n raise Exception(\n \"{} is suppose to be empty. value: {} exists\".format(key, value)\n )",
"def allowempty(self, allowempty):\n\n self._allowempty = allowempty",
"def validate_input(self, definition):\n pass",
"def validate(self, context, vbbefore, activity, usage):\r\n raise Exception(\"Unimplemented\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Client `request_params` property should match what was passed as an argument
|
def test_client_valid_request_params(self):
params = {"foo": "bar"}
c = Client(instance="test", user="foo", password="foo", request_params=params)
self.assertEqual(c.request_params, params)
|
[
"def get_params_from_request(self):\n self._create_moe_log_line(\n type='request',\n content=self.request.json_body,\n )\n\n return self.request_schema.deserialize(self.request.json_body)",
"def test_client_invalid_request_params(self):\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=\"a string\",\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=[\"item0\", \"item1\"],\n )\n\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=3,\n )\n\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=0,\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=(1, \"2\"),\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=True,\n )\n self.assertRaises(\n InvalidUsage,\n Client,\n instance=\"test\",\n user=\"foo\",\n password=\"foo\",\n request_params=2.89,\n )",
"def test_immutability_of_sample_request_and_params():\n with pytest.raises(FrozenInstanceError):\n params = FrozenParams()\n params.allowed_intents = []\n\n with pytest.raises(TypeError):\n params = FrozenParams()\n params.dynamic_resource[\"a\"] = \"b\"\n\n with pytest.raises(FrozenInstanceError):\n request = Request()\n request.params = Params()\n\n with pytest.raises(TypeError):\n request = Request()\n request.frame[\"a\"] = \"b\"",
"def _get_params(self, request):\n params = request.GET.copy()\n return params",
"def test_URL_kwargs(self):\n self.request_method_test('matchdict')",
"def w_positional_request(foo, bar, REQUEST):\n return 42",
"def test_returns_true_if_request_has_get_parameter(self):\n self.request_mock.GET = {self.parameter_name: 'foobar'}\n self.assertTrue(self.has_parameter(self.get_response_mock, self.request_mock))",
"def updateRequest(self, paramsFromResponse, extraParams):\r\n\r\n # https://portswigger.net/burp/extender/api/constant-values.html\r\n PARAM_BODY = 0x01\r\n PARAM_URL = 0x00\r\n\r\n request = self._requestResponse.getRequest()\r\n\r\n # loop over all the unique parameters that we scraped from the response\r\n for param in extraParams:\r\n\r\n # create a corresponding burp IParameter\r\n # weird stuff happens if there are spaces. not sure if other\r\n # characters will cause problems, but I think URL encoding could\r\n # interfere with the scanner so I'm trying to avoid it\r\n value = paramsFromResponse[param].replace(' ', '+')\r\n burpParam = helpers.buildParameter(param,\r\n value,\r\n PARAM_URL)\r\n # add it to the request\r\n request = helpers.addParameter(request, burpParam)\r\n\r\n return request",
"def _data_or_params_cs_request(self, mock_request):\n call = [call for call in mock_request.call_args_list if call[0][1].endswith(self.cs_endpoint)][0]\n if call[0][0] == \"get\":\n return call[1][\"params\"]\n elif call[0][0] == \"post\":\n return call[1][\"data\"]",
"def _get_query_params(self, request):\n return getattr(request, 'query_params') or getattr(request, 'QUERY_PARAMS', request.GET)",
"def test_accept_make_query_param_to_filter(self):",
"def test_request_sends_parameters(self):\n self.register(json=self.RESPONSE)\n self.mock_request(self.RANGE_ID)\n self.assertDataSent(self.request_class.PRODUCT_RANGE_ID, self.RANGE_ID)\n self.assertDataSent(\n self.request_class.SALES_CHANNEL_ID,\n self.request_class.SALES_CHANNEL_ID_VALUE,\n )",
"def test_returns_false_if_request_hasnt_get_parameter(self):\n self.request_mock.GET = dict()\n self.assertFalse(self.has_parameter(self.get_response_mock, self.request_mock))",
"def load_params(self, req, resp):\n if not req.content_length:\n resp.status = falcon.HTTP_400\n resp.body = json.dumps({'error': 'No request length.'})\n raise MissingParameterException()\n\n request = req.stream.read()\n log.debug(request)\n req_params = json.loads(request)\n\n for req_param, req_type in self.required_params.items():\n if not req_params.get(req_param):\n resp.status = falcon.HTTP_400\n resp.body = json.dumps({\n 'error': '\"{}\" required parameter is missing.'.format(\n req_param)})\n\n raise MissingParameterException()\n\n if not isinstance(req_params.get(req_param), req_type):\n resp.status = falcon.HTTP_400\n resp.body = json.dumps({\n 'error': (\n '{} required parameter is not of type {}, '\n 'it is of type {}'.format(\n req_param, req_type, type(req_param)))})\n\n raise ParameterTypeException()\n\n return req_params",
"def _validate_params(self):\n assert all(key in self.params for key in self.required_params), set(self.required_params) - set(\n self.params.keys()\n )",
"def is_request(req: Request) -> bool:\n return \\\n req and \\\n is_dict(req) and \\\n is_str(req.get(\"api\", None))",
"def _validate(self, req_dict):\n if self._required_params:\n for param in self._required_params:\n if param not in req_dict:\n raise Exception(\n \"Required parameter not specified: '{0}'\".format(param))",
"def make_query_params(**request_params):\n\n params = {}\n for key, value in six.iteritems(request_params):\n if value is not None:\n params[key] = value\n\n return params",
"def get_query_params(self, request):\n try:\n query_params = request.query_params\n except AttributeError:\n # DRF 2\n query_params = getattr(request, \"QUERY_PARAMS\", request.GET)\n return query_params"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reads in the text file f which contains one sentence per line.
|
def readFileToCorpus(f):
if os.path.isfile(f):
file = open(f, "r") # open the input file in read-only mode
i = 0 # this is just a counter to keep track of the sentence numbers
corpus = [] # this will become a list of sentences
print("Reading file ", f)
for line in file:
i += 1
sentence = line.split() # split the line into a list of words
        #append this list as an element to the list of sentences
corpus.append(sentence)
if i % 1000 == 0:
#print a status message: str(i) turns int i into a string
#so we can concatenate it
sys.stderr.write("Reading sentence " + str(i) + "\n")
#endif
#endfor
return corpus
else:
#ideally we would throw an exception here, but this will suffice
print("Error: corpus file ", f, " does not exist")
sys.exit() # exit the script
#endif
|
[
"def load_into_sentence(file_name):\n if not os.path.exists(file_name):\n raise ValueError(\"The file {0} does not exist!\".format(file_name))\n with open(file_name,'r') as fp:\n lines = fp.readlines()\n return ''.join(lines).replace(\"\\n\",' ').decode('utf-8')",
"def read(self):\n\n if self.path.endswith('.json'):\n sentences = json.load(open(self.path, 'r'))\n else:\n sentences = []\n with open(self.path, 'r', encoding='latin-1') as f:\n for line in f:\n print(line[:20])\n # first strip away newline symbols and the like, then replace ' and \" with the empty\n # string and get rid of possible remaining trailing spaces\n line = line.strip().translate({ord(i): None for i in '\"\\'\\\\'}).strip(' ')\n # lowercase and split at the white space (the corpus has ben previously tokenized)\n sentences.append(line.lower().split(' '))\n\n return sentences",
"def tokenize_txt(infile, configfile):\n # initialize output\n sentences = []\n sentence = []\n \n # open file and extract lines\n with open(infile, 'r', encoding = 'utf-8') as fileread:\n lines = fileread.readlines()\n\n # initialize tokenizer\n tokenizer = ucto.Tokenizer(configfile)\n\n # for each line\n for line in lines:\n # tokenize \n tokenizer.process(line)\n # add each token to the sentence...\n for token in tokenizer:\n sentence.append(token.text)\n # ...until the sentence ends\n if token.isendofsentence():\n sentences.append(sentence)\n # initialize a new sentence\n sentence = []\n\n if len(sentence) > 0:\n sentences.append(sentence)\n\n return sentences",
"def add_file(self, filename):\n for line in [line.rstrip().lower() for line in open(filename, errors='ignore').readlines()]:\n self.add_sentence(line)",
"def load_text_file(self):\n with open(self.file_name, \"r\") as filino:\n data = filino.readlines()\n\n return data",
"def read_file(self):\n\n\t\twith open(self.filename, 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tline = ' '.join(word[0].upper() + word[1:] for word in line.split())\n\t\t\t\tprint line",
"def read_text():\n # Load the txt\n quotes = open(\"movie_quotes.txt\")\n\n # Convert the quotes into list\n contents_of_file = quotes.read()\n\n # Print the converted quotes\n print(contents_of_file)\n\n # Close the open file\n quotes.close()\n\n # Run the check profanity\n check_profanity(contents_of_file)",
"def read_spice(self, f):\n spice.Reader(self).read(f)",
"def read(f):\n\tp = HMMParser()\n\treturn p.read(f)",
"def get_sentences(input_file):\n \n with open(input_file) as f:\n \treturn [line.rstrip('\\n') for line in f if line.rstrip('\\n')]",
"def get_alt_textfile(f, func):\r\n while 1:\r\n line = f.readline()\r\n if not line:\r\n print '(Unexpected EOF from server)'\r\n break\r\n if line[-2:] == CRLF:\r\n line = line[:-2]\r\n elif line[-1:] in CRLF:\r\n line = line[:-1]\r\n if line == '.':\r\n break\r\n if line[:2] == '..':\r\n line = line[1:]\r\n func(line)",
"def read_sentiment():\n with open('sentiment.txt', 'r') as f:\n for line in f:\n line = line.strip().split()\n if line[1]<0:\n neg_words.add(line[1])\n elif line[1]>0:\n pos_words.add(line[1])",
"def readf(self, file_path):\n if self._empty_file_check(file_path):\n with open(file=file_path, mode='r') as text_file:\n for word in text_file:\n self._word_container.append(self._format_word(word))\n else:\n raise IOError\n return self._word_container",
"def load_sentences(self):\n if self.print_only:\n infile = 'data/sentences_clean.txt'\n with open(infile) as infile:\n lines = infile.readlines()\n sentences = [l.lower().strip() for l in lines]\n else:\n infile = resource_filename('typer_game', 'data/audio_lookup_subset.txt')\n sentences = pickle.load(open(infile, 'rb'))\n return sentences",
"def run_single_file(input_fn, output_fn, prop_ex):\n logging.info('Reading sentences from {}'.format(input_fn))\n ex_counter = 0\n line_counter = 0\n\n with codecs.open(output_fn, 'w', 'utf-8') as f_out:\n for line in open(input_fn):\n line_counter += 1\n data = line.strip().split('\\t')\n tweet_id = None\n if len(data) == 2:\n tweet_id, sent = data\n elif len(data) == 4:\n date, tweet_id, user, sent = data\n else:\n # Not at tweet, just fill in the id with a place holder\n tweet_id = 'NONE'\n sent = data[0]\n logging.info('Read: {}'.format(sent))\n for ex in prop_ex.get_extractions(sent):\n # Encode our chunking via double spaces\n spacy_tokenized_sent = \" \".join([prop_ex.parser.get_text(i)\n for i in range(prop_ex.parser.get_len())])\n to_print = '\\t'.join(map(str,\n ([tweet_id] if prop_ex.include_id else [])\\\n + [spacy_tokenized_sent, ex])).decode('ascii',\n errors = 'ignore')\n logging.debug(to_print)\n f_out.write(to_print + \"\\n\")\n ex_counter += 1\n\n logging.info('Done! Wrote {} extractions to {}'.format(ex_counter, output_fn))\n return line_counter, ex_counter",
"def read_names(f):\n return (line.strip() for line in io.open(f, 'r', encoding='utf-8'))",
"def read(self, f):\n\n\t\t#if f is a string, try to open the file\n\t\tif isinstance(f, basestring):\n\t\t\tf = open(f, 'r')\n\t\t\n\t\tret = []\n\t\tself.errors = []\n\t\tself.warnings = []\n\n\t\tdef _cleanLine(l):\n\t\t\t\"\"\"remove any comments and strip whitespace\"\"\"\n\t\t\ti = l.find('#')\n\t\t\tif i:\n\t\t\t\tl = l[:i]\n\t\t\treturn l.strip()\n\n\t\tdef consume(iterator, n):\n\t\t\t'''Advance the iterator n-steps ahead. If n is none, consume\n\t\t\t\t\tentirely.'''\n\t\t\tcollections.deque(itertools.islice(iterator, n), maxlen=0)\n\n\t\t#read in all the lines, cleaning each one\n\t\tlines = [_cleanLine(l) for l in f]\n\n\t\tstate = self.S_default\n\n\t\titerator = enumerate(lines).__iter__()\n\t\tfor self.lineNum, line in iterator:\n\t\t\t#ignore blank lines\n\t\t\tif not line:\n\t\t\t\tcontinue\n\n\t\t\t#find the start of an HMM definition\n\t\t\tif state == self.S_default:\n\t\t\t\tif not re.match(r\"^HMMER3/b\", line):\n\t\t\t\t\tself._addError(\"Invalid File: file must start with \\'HMMER3/b\\'\")\n\t\t\t\t\t#can't continue\n\t\t\t\t\tself._raiseErrors()\n\t\t\t\telse:\n\t\t\t\t\t#make a new model\n\t\t\t\t\thmm = HMM()\n\t\t\t\t\tstate = self.S_header\n\t\t\t\t\tcontinue\n\n\t\t\t#have we found the end of the HMM\n\t\t\tif re.match(r'^//', line):\n\t\t\t\t#if this isn't where it's supposed to be\n\t\t\t\tif state != self.S_model_me:\n\t\t\t\t\t#provide a helpful error\n\t\t\t\t\tltype = 'None'\n\t\t\t\t\tif state == self.S_header:\n\t\t\t\t\t\tltype = 'header'\n\t\t\t\t\telif state == self.S_model_ie:\n\t\t\t\t\t\tltype = 'insert emission'\n\t\t\t\t\telif state == self.S_model_st:\n\t\t\t\t\t\tltype = 'state transition'\n\t\t\t\t\tself._addError('Unexpected \\'//\\', expecting {} line'.format(ltype))\n\n\t\t\t\t#check if we've found the advertised number of states \n\t\t\t\t#\t\t\tnb. 
hmm.LENG doesn't include the begin state \"\"\"\n\t\t\t\tif len(hmm.states)-1 < len(hmm):\n\t\t\t\t\tself._addError('Too few states in model ({:d} < {:d})'\n\t\t\t\t\t\t\t.format(len(hmm.states)-1, len(hmm)))\n\t\t\t\tif len(hmm.states)-1 > len(hmm):\n\t\t\t\t\tself._addError('Too many states in model ({:d} > {:d})'\n\t\t\t\t\t\t\t.format(len(hmm.states)-1, len(hmm)))\n\t\t\t\t#add the HMM to the list\n\t\t\t\tret.append(hmm)\n\t\t\t\tstate = self.S_default\n\t\t\t\tcontinue\n\n\t\t\t#parse a header line\n\t\t\tif state == self.S_header:\n\t\t\t\tif re.match(r'^HMM\\s', line):\n\t\t\t\t\t#Parsed each header line\n\t\t\t\t\t#check all the required options were present\n\t\t\t\t\tfor o in REQUIRED:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tif not getattr(hmm, o.lower()):\n\t\t\t\t\t\t\t\tself._addError('Option \\'{}\\' is required'.format(o), False)\n\t\t\t\t\t\texcept AttributeError:\n\t\t\t\t\t\t\tself._addError('Option \\'{}\\' is required'.format(o), False)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif o == 'LENG':\n\t\t\t\t\t\t\t\tsetattr(hmm, o.lower(), 0)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tsetattr(hmm, o.lower(), '')\t\n\t\t\t\t\t\n\t\t\t\t\t#check if the line after next is a COMPO line\n\t\t\t\t\tif re.match(r'^COMPO\\s', lines[self.lineNum+2]):\n\t\t\t\t\t\thmm.compo = self._parse_prob(lines[self.lineNum+2].split()[1:], hmm.K)\n\t\t\t\t\t\t#drop the next two lines\n\t\t\t\t\t\tconsume(iterator, 2)\n\t\t\t\t\telse:\n\t\t\t\t\t\t#otherwise drop only one line\n\t\t\t\t\t\tconsume(iterator, 1)\n\t\t\t\t\t\n\t\t\t\t\tmodel_state = State()\n\t\t\t\t\tstate = self.S_model_ie\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\tm = self.hdr_re.match(line)\n\t\t\t\tif not m:\n\t\t\t\t\tself._addError('Invalid header line')\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\tkey = m.group('key').upper()\n\t\t\t\tval = m.group('value')\n\t\t\t\t#check if the key is valid\n\t\t\t\tif not (key in OPTIONS):\n\t\t\t\t\tself._addWarning('Ignoring unknown option \\'%s\\'' % key)\n\t\t\t\t\tcontinue\n\n\t\t\t\t#simple strings\n\t\t\t\tif key in ['NAME', 'ACC', 'DESC', 'ALPH', 'DATE',]:\n\t\t\t\t\tsetattr(hmm, key.lower(), val)\n\t\t\t\t\tif key == 'ALPH':\n\t\t\t\t\t\tif hmm.alph.upper() in ALPHABETS:\n\t\t\t\t\t\t\thmm.symbols = ALPHABETS[hmm.alph.upper()]\n\t\t\t\t\t\t\thmm.K = len(hmm.symbols)\n\t\t\t\t\t\t\thmm.alpha = hmm.alph.upper()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself._addError('ALPH must be \\'DNA\\', \\'RNA\\' or \\'AMINO\\'')\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t#integers\n\t\t\t\telif key in ['LENG', 'NSEQ', 'CKSUM',]:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tval = int(val)\n\t\t\t\t\t\tif val < 0:\n\t\t\t\t\t\t\traise ValueError\n\t\t\t\t\t\tsetattr(hmm, key.lower(), val)\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\tself._addError('{} must be a positive integer'.format(key))\n\t\t\t\t\t\tcontinue\n\t\t\t\t#bools\n\t\t\t\telif key in ['RF', 'CS', 'MAP',]:\n\t\t\t\t\tb = {'yes': True, 'no': False,}\n\t\t\t\t\tval = val.lower()\n\t\t\t\t\tif val in b:\n\t\t\t\t\t\tsetattr(hmm, key.lower(), b[val])\n\t\t\t\t\telse:\n\t\t\t\t\t\tself._addError('{} must be \\'yes\\' or \\'no\\''.format(key))\n\t\t\t\t\t\tcontinue\n\t\t\t\t#COM\n\t\t\t\telif key == 'COM':\n\t\t\t\t\tm2 = re.match(r'(\\d+)\\s+(\\S+)$', val)\n\t\t\t\t\tif m2:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\thmm.com.append( (int(m2.group(1)), m2.group(2)))\n\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\thmm.com.append( (len(hmm.com)+1, m2.group(0)))\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\thmm.com.append( (len(hmm.com)+1, val))\n\t\t\t\telif key == 'EFFN':\n\t\t\t\t\ttry:\n\t\t\t\t\t\tval = 
float(val)\n\t\t\t\t\t\tif val < 0:\n\t\t\t\t\t\t\traise ValueError\n\t\t\t\t\t\thmm.effn = val\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\tself._addError('EFFN must be a positive real')\n\t\t\t\t\t\tcontinue\n\t\t\t\t#pairs of floats\n\t\t\t\telif key in ['GA', 'TC', 'NC']:\n\t\t\t\t\tm2 = re.match(r'^([\\d\\.]+)\\s+([\\d\\.]+);?$', val)\n\t\t\t\t\tif m2:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tv1 = float(m2.group(1))\n\t\t\t\t\t\t\tv2 = float(m2.group(2))\n\t\t\t\t\t\t\tif v1 < 0 or v2 < 0:\n\t\t\t\t\t\t\t\traise ValueError\n\t\t\t\t\t\t\tsetattr(hmm, key.lower(), (v1, v2))\n\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\tself._addError('{} must be two positive reals'.format(key))\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tself._addError('{} must be two positive reals'.format(key))\n\t\t\t\t\t\tcontinue\n\t\t\t\t#STATS\n\t\t\t\telif key == 'STATS':\n\t\t\t\t\tm2 = re.match(r'^(\\w+)\\s+(\\w+)\\s+([\\d\\.-]+)\\s+([\\d\\.-]+)$', val)\n\t\t\t\t\tif m2:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\ts1 = m2.group(1).upper()\n\t\t\t\t\t\t\ts2 = m2.group(2).upper()\n\t\t\t\t\t\t\tf1 = float(m2.group(3))\n\t\t\t\t\t\t\tf2 = float(m2.group(4))\n\t\t\t\t\t\t\tif s1 == 'LOCAL' and s2 in ['MSV', 'VITERBI', 'FORWARD']:\n\t\t\t\t\t\t\t\thmm.stats.append((s1, s2, f1, f2))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tif s1 != 'LOCAL':\n\t\t\t\t\t\t\t\t\tself._addError('s1 must equal \\'LOCAL\\'')\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tself._addError('s2 must be \\'MSV\\', \\'VITERBI\\' or \\'FORWARD\\'')\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\tself._addError('STATS <s1> <s2> <f1> <f2>')\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tself._addError('STATS <s1> <s2> <f1> <f2>')\n\t\t\t\t\t\tcontinue\n\t\t\t\n\t\t\tif state == self.S_model_me:\n\t\t\t\t#parse the ME line\n\t\t\t\tl = line.split()\n\t\t\t\t\n\t\t\t\tmodel_state = State()\n\n\t\t\t\ttry:\n\t\t\t\t\tmodel_state.num = int(l[0])\n\t\t\t\t\tif model_state.num != len(hmm.states):\n\t\t\t\t\t\tself._addError('Expected state number %s' % len(hmm.states))\n\t\t\t\texcept ValueError:\n\t\t\t\t\tself._addError('Node number must be a positive integer')\n\n\t\t\t\tmodel_state.me = self._parse_prob(l[1:hmm.K+1], hmm.K)\n\t\t\t\t\n\t\t\t\t#MAP number\n\t\t\t\ttry:\n\t\t\t\t\tif(l[hmm.K+1] != '-'):\n\t\t\t\t\t\tmodel_state.map = int(l[hmm.K+1])\n\t\t\t\t\telif hmm.map:\n\t\t\t\t\t\tself._addWarning('Map annotation is \\'-\\', even though MAP is \\'yes\\'')\n\t\t\t\texcept ValueError:\n\t\t\t\t\tself._addError('Map Annotation must be an integer or \\'-\\'')\n\t\t\t\texcept IndexError:\n\t\t\t\t\tself._addError('No Map annotation provided')\n\t\t\t\t#RF annotation\n\t\t\t\ttry:\n\t\t\t\t\tmodel_state.rf = l[hmm.K+2]\n\t\t\t\t\tif len(model_state.rf) != 1:\n\t\t\t\t\t\tself._addError('RF annotation must be a single character')\n\t\t\t\texcept IndexError:\n\t\t\t\t\tself._addError('No RF annotation provided')\n\t\t\t\t#CS annotation\n\t\t\t\ttry:\n\t\t\t\t\tmodel_state.cs = l[hmm.K+3]\n\t\t\t\t\tif len(model_state.cs) != 1:\n\t\t\t\t\t\tself._addError('CS annotation must be a single character')\n\t\t\t\texcept IndexError:\n\t\t\t\t\tself._addError('No CS annotation provided')\n\t\t\t\t#we're now expecting an IE line\n\t\t\t\tstate = self.S_model_ie\n\t\t\t\tcontinue\n\n\t\t\tif state == self.S_model_ie:\n\t\t\t\tmodel_state.ie = self._parse_prob(line.split(), hmm.K)\n\t\t\t\tstate = self.S_model_st\n\t\t\t\tcontinue\n\n\t\t\tif state == self.S_model_st:\n\t\t\t\tmodel_state.tr = self._parse_prob(line.split(), 7)\n\t\t\t\t#add the state 
to the current hmm\n\t\t\t\thmm.states.append(model_state)\n\t\t\t\tmodel_state = State()\n\t\t\t\tstate = self.S_model_me\n\t\t\t\tcontinue\n\n\t\t#parsed every line in the file\n\t\t#if there were errors, raise them. Otherwise keep quiet\n\t\tif len(self.errors):\n\t\t\tself._raiseErrors()\n\n\t\treturn ret",
"def _load_tex_file(self):\n with open(self.input_file.path, 'r', encoding='utf8') as file:\n return [line.strip() for line in file]",
"def read(self, file_name, path=TXTS_DIRECTORY_PATH):\n final_path = os.path.join(path, file_name)\n for fr in read_tokenize(final_path):\n self.kb.kb_assert(fr)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Pull data for monitoring.
|
def pull_data(self):
|
[
"def _download_data(self):\n self.raw_data = requests.get(self.api_address).json()\n self.age = datetime.now()",
"def poll(self):\n data = self.get_data()\n if data:\n self.add_metrics(data)",
"def load_new_data(self):\n r = requests.get(self.STATUS_URL)\n raw_data = self._received_data_processor(r.text)\n soup = BeautifulSoup(raw_data, 'lxml')\n self.status_data = soup.find(\"service\").find(\"subway\").findAll(\"line\")",
"def _get_data(self):\n url = f\"http://{self._ip_addr}/status\"\n\n r = requests.get(url)\n if r:\n data = r.json()\n\n self._target_temperature = data[\"targetState\"][\"temperature\"]\n self._current_temperature = data[\"currentState\"][\"temperature\"]\n \n heater_1 = data[\"currentState\"][\"heater_1\"]\n heater_2 = data[\"currentState\"][\"heater_2\"]\n\n if heater_1 == 1 or heater_2 == 1:\n self._current_hvac_action = CURRENT_HVAC_HEAT\n else:\n self._current_hvac_action = CURRENT_HVAC_IDLE\n\n _LOGGER.debug(\"Hot tub status updated\")\n else:\n _LOGGER.error(\"Could not get data from Hot Tub.\")",
"def run(self, **kwargs):\n pull_and_parse_logs()",
"def pull_data(self, pulling_done):\n logging.getLogger(\"moler_threads\").debug(\"ENTER {}\".format(self))\n heartbeat = tracked_thread.report_alive()\n reads = []\n\n while not pulling_done.is_set():\n if next(heartbeat):\n logging.getLogger(\"moler_threads\").debug(\"ALIVE {}\".format(self))\n try:\n reads, _, _ = select.select([self._terminal.fd], [], [], self._select_timeout)\n except ValueError as exc:\n self.logger.warning(\"'{}: {}'\".format(exc.__class__, exc))\n self._notify_on_disconnect()\n pulling_done.set()\n\n if self._terminal.fd in reads:\n try:\n data = self._terminal.read(self._read_buffer_size)\n if self.debug_hex_on_all_chars:\n self.logger.debug(\"incoming data: '{}'.\".format(all_chars_to_hex(data)))\n if self.debug_hex_on_non_printable_chars:\n self.logger.debug(\"incoming data: '{}'.\".format(non_printable_chars_to_hex(data)))\n\n if self._shell_operable.is_set():\n self.data_received(data=data, recv_time=datetime.datetime.now())\n else:\n self._verify_shell_is_operable(data)\n except EOFError:\n self._notify_on_disconnect()\n pulling_done.set()\n logging.getLogger(\"moler_threads\").debug(\"EXIT {}\".format(self))",
"def pull(self):\n # Request a pull from the parameter server.\n self.socket.sendall(b'p')\n # Fetch the dictionary from the parameter server.\n data = recv_data(self.socket)\n self.center_variable = np.asarray(data['model'])\n self.last_update = data['update']",
"def read(self):\n self.data = None\n\n response = requests.get(url=self.report_url)\n\n log.info(\"ReportReaderJSON: Response\", extra={'http_status_code': response.status_code})\n\n self.data = json.loads(response.text)\n self.count = len(self.data)",
"def fetch_data(self):\r\n print(\"Fetching Data from USGS Water Services API\")\r\n self.response = requests.get(self.complete_url)\r\n self.response.raise_for_status()",
"def fetch_statistics(conf):\n return fetch_json(\"http://%s:%d/monitor/statistics.json\" % (conf[\"host\"], conf[\"port\"]), timeout=30)",
"def get_readings_task():\n\n print('Getting readings')\n\n try:\n humidity, temperature = \\\n Adafruit_DHT.read_retry(SENSOR_TYPE, SENSOR_PIN_BCM)\n\n print('Humidity {0}, temperature {1}'.format(humidity,\n temperature))\n\n if USE_CLOUD:\n send_meas_cloud(temperature=float(temperature)\n , humidity=float(humidity))\n\n if USE_FILE:\n send_meas_filesystem(temperature=float(temperature)\n , humidity=float(humidity))\n\n except Exception as err:\n print('Exception while reading/sending measurements: {0}'.format(\n err\n ))\n\n # Reschedule.\n scheduler.enter(INTERVAL_SECONDS, 1, get_readings_task)",
"def getHarvestData(self):\n if self.stopped:\n return\n try:\n self.setStatus('HARVESTING')\n getRequest = Request(self.harvestInfo['uri'])\n self.data = getRequest.getData()\n del getRequest\n except Exception as e:\n self.logger.logMessage(\"ERROR RECEIVING DATA, %s,\" % str(repr(e)), \"ERROR\")\n self.handleExceptions(e, terminate=True)",
"def api_fetch():\n headers = {\n 'Authorization': 'apikey {}'.format(settings.DEMOCRACY_WORKS_API_KEY),\n 'Accept': 'application/json'\n }\n\n response = requests.get(\n settings.DEMOCRACY_WORKS_API_URL,\n headers=headers)\n\n logger.info(u'Sync: API Pull - URL: %s Status Code: %s Time: %s',\n settings.DEMOCRACY_WORKS_API_URL, response.status_code,\n response.elapsed.total_seconds())\n\n if response.status_code != 200:\n raise Exception(\n 'Bad Response from Democracy Works {}'.format(\n response.status_code))\n\n return response.json()",
"def _pullMessage(self):\n\n data = {\n \"msgs_recv\": 0,\n \"sticky_token\": self.sticky,\n \"sticky_pool\": self.pool,\n \"clientid\": self.client_id,\n \"state\": \"active\" if self._markAlive else \"offline\",\n }\n\n j = self._get(self.req_url.STICKY, data, fix_request=True, as_json=True)\n\n self.seq = j.get(\"seq\", \"0\")\n return j",
"def test_getSleepData(self):\n method = 'getSleepData'\n resp = requests.get(self.url + method, self.get_data)\n self.assertEqual(resp.status_code, 200)",
"def extract(self):\n try:\n self._logger.info(\"GitHubPullRequest2Db started\")\n start_time = datetime.now()\n self._get_pull_requests()\n\n end_time = datetime.now()\n minutes_and_seconds = self._logging_util.calculate_execution_time(end_time, start_time)\n self._logger.info(\"GitHubPullRequest2Db finished after \" + str(minutes_and_seconds[0])\n + \" minutes and \" + str(round(minutes_and_seconds[1], 1)) + \" secs\")\n self._logging_util.remove_file_handler_logger(self._logger, self._fileHandler)\n except Exception:\n self._logger.error(\"GitHubPullRequest2Db failed\", exc_info=True)\n finally:\n if self._dao:\n self._dao.close_connection()",
"def __pull_tick(self):\n self.loop.run_until_complete(self.__do_pull_tick())",
"def load_pull(self):\n file_path = os.path.join(self.script_dir,'pull list.json') \n if not os.path.isfile(file_path)or os.path.getsize(file_path) == 0 :\n with open(file_path,'w') as out:\n json.dump({},out)\n\n with open(file_path) as infile:\n self.pull_list = json.load(infile)",
"def fetch(cls):\n cls.fetch_bus_stops()\n cls.fetch_bus_routes()\n cls.fetch_bus_timetables()",
"def get_data_from_reaper(self):\n url = 'http://reaper:3300'\n source = requests.get(url)\n self.all_rate = source.json()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks the next available space in a column and returns its tuple
|
def next_avail_space(column):
for row in range (1, 7):
if board_config[(row, column)] == ' ':
return (row, column)
else:
pass
return None #User tries to put chip in a full column
|
[
"def _next(self, cell):\n row, col = cell\n if col == self.size - 1:\n row, col = row + 1, 0\n else:\n col += 1\n return row, col",
"def first_free_position(self):\n\n for row in self._table:\n for col in row:\n if col == -1:\n return self._table.index(row), row.index(col)\n return [0, 0]",
"def next_cell(board: List[List[int]]) -> Union[tuple[int, int], tuple[str, str]]:\r\n for x in range(0, 9):\r\n for y in range(0, 9):\r\n if board[x][y] == \"\":\r\n return x, y\r\n return \"\", \"\"",
"def __find_prime_in_row(marked,row):\n\n marked_col = tf.squeeze(tf.gather(marked, col))\n idx_find = tf.where(tf.equal(marked_col, 2))\n\n try:\n col = tf.segment_min(idx_find)\n return col\n except Exception as e :\n return -1 # return col = -1 when we find now row containing a \"1\"",
"def lookup_range(self, row, column):\n # width should be factor1_size + factor2_size - 1\n if column > self.width: # check too far to left\n return None\n if row > self.factor2_size - 1: # check too far down\n return None\n low = self._offset(row, column)\n return (low, low + 9)",
"def check_column(board, col):\n symbol = board[0][col]\n for row in range(1, SIZE):\n if board[row][col] != symbol:\n return None\n return symbol # Will only get here is all symbols in column match",
"def next_position(self, pos: Tuple[int, int], room: Room) -> Tuple[int, int]:\n #do it stupidly: just walk from the bottom left to the top right, and return\n #the position of the first empty spot:\n seen_pos: bool = False\n for k in range(0, room.width+room.height+1):\n for j in range(k+1):\n i = k-j\n if i < room.height and j < room.width:\n if pos == (i, j):\n seen_pos = True\n if seen_pos and room.is_empty_spot((i, j)):\n return (i, j)\n\n #otherwise return nonexisting value:\n return (-1, -1)",
"def next_pos_in_col(self, col):\n h = self.col_heights[col]\n row = BOARD_H - h - 1\n return self.coord_to_pos((row, col))",
"def check_column(cell, j):\n\n if cell[0] == -1:\n return j\n else:\n for k in range(j + 1, sample_line.__len__()):\n if cell[3] < sample_line[k][1] + 5:\n return j\n else:\n j += 1\n return j",
"def _find_next_position_right(board, row, col):\n moving = True\n if board[row][col] == 0:\n return col\n if col == 3: # rightmost column\n return col\n\n else:\n while moving:\n if board[row][col + 1] == 0: # If the position to the right is empty\n col += 1\n if col == 3: # Can not move right anymore\n return col\n else: # col + 1 would hit an occupied tile so return row\n moving = False\n return col",
"def find_grid_col(long):\n for i in range(10):\n if long >= longs[i] and long < longs[i+1]:\n return i",
"def retrieve_next_empty_row_index(self, col_index):\n for pair in reversed(list(enumerate(self.board_rows_of_columns))):\n print('pair', pair)\n row_index = pair[0]\n row = pair[1]\n if row[col_index] == '':\n print('row index', row_index)\n return row_index",
"def _find_next_position_down(board, row, col):\n moving = True\n if board[row][col] == 0:\n return row\n if row == 3: # bottom row\n return row\n\n else:\n while moving:\n if board[row + 1][col] == 0: # If the position below is empty\n row += 1\n if row == 3: # Can not move down anymore\n return row\n else: # row + 1 would hit an occupied tile so return row\n moving = False\n return row",
"def _find_next_position_left(board, row, col):\n moving = True\n if board[row][col] == 0:\n return col\n if col == 0: # leftmost column\n return col\n\n else:\n while moving:\n if board[row][col-1] == 0: # If the position to the left is empty\n col -= 1\n if col == 0: # Can not move left anymore\n return col\n else: # col - 1 would hit an occupied tile so return row\n moving = False\n return col",
"def nextCell(r, c):\n\n if r == 8 and c == 8: # at SE corner -- all done\n return 9, 9\n\n if r <= 8 and c < 8: # move one cell to the right\n c = c + 1\n return r, c\n\n if r < 8 and c == 8: # move to the left-most cell of the next row\n c = 0\n r = r + 1\n return r, c\n\n return r, c # this should never happen",
"def find_nearest_column(self, w, k):\n while (w, k) not in self.columns.keys() and w != 0:\n w -= 1\n if w == 0 and (w, k) not in self.columns.keys():\n return False\n return w, k",
"def find_next_tile(board: List[List[int]]) -> tuple[int, int]:\n for i in range(N):\n for j in range(N):\n if board[i][j] == 0:\n return (i, j)\n \n return None",
"def find_next_empty(board):\n for y in range(board_len):\n for x in range(board_len):\n if not board[y][x]:\n return (y, x)\n\n return None",
"def isLegal(prevCol, currentCol,index,g):\r\n height = len(prevCol) # n+1\r\n legalColumns = True\r\n for h in range(1,height):\r\n if g[h-1][index] == 1:\r\n if prevCol[h-1] + currentCol[h-1] + prevCol[h] + currentCol[h] != 1:\r\n legalColumns=False\r\n break\r\n else:\r\n if prevCol[h - 1] + currentCol[h - 1] + prevCol[h] + currentCol[h] == 1:\r\n legalColumns=False\r\n break\r\n\r\n return legalColumns"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
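A minimal usage sketch for the next_avail_space entry above; the board_config dict here is a hypothetical 6x7 Connect-Four-style grid keyed by (row, column) tuples, which the original snippet assumes exists elsewhere, and next_avail_space is taken to be defined in the same scope.

# Hypothetical board: rows 1-6 (bottom to top), columns 1-7, ' ' marks an empty cell.
board_config = {(row, col): ' ' for row in range(1, 7) for col in range(1, 8)}

board_config[(1, 3)] = 'X'       # one chip already sits at the bottom of column 3
print(next_avail_space(3))       # (2, 3) -- next free cell above the existing chip
print(next_avail_space(4))       # (1, 4) -- empty column, so the bottom cell is free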
Get interest by id
|
def get_by_id(interest_id: int):
interest = Interest.query.get(interest_id)
if interest is None:
raise NotFound(f"Interest id {interest_id} not found")
return interest
|
[
"def get_by_name(name: str):\n interest = Interest.query.filter(Interest.name == name).first()\n if interest is None:\n raise NotFound(f\"Interest name {name} not found\")\n\n return interest",
"def retrieve(self, id):\n _, _, invoice = self.http_client.get(\"/invoices/{id}\".format(id=id))\n return invoice",
"def get_example(self, id):\n return self.examples.get(id, None)",
"def get(self, id): \n student = get(id)\n return student",
"def get(self, id):\n logged_user = h.default_user() \n incident = model.get_specific_incident(id)\n if incident[7] == logged_user:\n if incident:\n incidents = h.incident_serializer(incident)\n return({\"incident\":incidents})\n return({\"message\":\"incident not found\"}) \n return({\"message\":\"Access denied\"})",
"def find_observation_by_observation_id(cls, id):\n\n result = Observations.query.filter(Observations.id == id).first()\n\n return result",
"def find_by_id(self, id):\n\n raise NotImplementedError",
"def loan_interest_payment(self, firm_id, bank_id):\n bank_instance = self.bank_list[bank_id - 1]\n interest_payment = bank_instance.get_interest_for_loans(firm_id)\n self.payment_firm_to_bank(firm_id, bank_id, interest_payment)",
"async def get_incomes(income_id: Optional[str] = None):\n if income_id:\n query = {\"_id\": AppObjectId.validate(income_id)}\n income = await CRUDManager.find_one(collection=COLLECTION,\n model=Income,\n query=query)\n return income_schema(income)\n incomes = await CRUDManager.find_all(collection=COLLECTION, model=Income)\n return incomes_schema(incomes)",
"def get_institution(self, id):\n return(self.get(self.webapi + \"/browse/institutions/{:d}\".format(id)))",
"async def create_interest(\n self,\n user_id: str,\n id: Optional[str] = None,\n allowed_audiences: Optional[Union[str, \"models.MicrosoftGraphAllowedAudiences\"]] = None,\n created_date_time: Optional[datetime.datetime] = None,\n inference: Optional[\"models.MicrosoftGraphInferenceData\"] = None,\n last_modified_date_time: Optional[datetime.datetime] = None,\n source: Optional[\"models.MicrosoftGraphPersonDataSources\"] = None,\n application: Optional[\"models.MicrosoftGraphIdentity\"] = None,\n device: Optional[\"models.MicrosoftGraphIdentity\"] = None,\n user: Optional[\"models.MicrosoftGraphIdentity\"] = None,\n microsoft_graph_identity_application: Optional[\"models.MicrosoftGraphIdentity\"] = None,\n microsoft_graph_identity_device: Optional[\"models.MicrosoftGraphIdentity\"] = None,\n microsoft_graph_identity_user: Optional[\"models.MicrosoftGraphIdentity\"] = None,\n categories: Optional[List[str]] = None,\n collaboration_tags: Optional[List[str]] = None,\n description: Optional[str] = None,\n display_name: Optional[str] = None,\n web_url: Optional[str] = None,\n **kwargs\n ) -> \"models.MicrosoftGraphPersonInterest\":\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphPersonInterest\"]\n error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}\n error_map.update(kwargs.pop('error_map', {}))\n\n _body = models.MicrosoftGraphPersonInterest(id=id, allowed_audiences=allowed_audiences, created_date_time=created_date_time, inference=inference, last_modified_date_time=last_modified_date_time, source=source, application_last_modified_by_application=application, device_last_modified_by_device=device, user_last_modified_by_user=user, application_created_by_application=microsoft_graph_identity_application, device_created_by_device=microsoft_graph_identity_device, user_created_by_user=microsoft_graph_identity_user, categories=categories, collaboration_tags=collaboration_tags, description=description, display_name=display_name, web_url=web_url)\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create_interest.metadata['url'] # type: ignore\n path_format_arguments = {\n 'user-id': self._serialize.url(\"user_id\", user_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n header_parameters['Accept'] = 'application/json'\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(_body, 'MicrosoftGraphPersonInterest')\n body_content_kwargs['content'] = body_content\n request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)\n\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphPersonInterest', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n 
return deserialized",
"def fetch( self, obj, id ):\n\t\treturn obj.ById( id )",
"def economists_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=ECONOMIST_TYPE_URI,\n rdf_type_name=ECONOMIST_TYPE_NAME, \n kls=Economist)",
"def get_by_id(self, id):\n # type: (int) -> BoundIso\n response = self._client.request(url=\"/isos/{iso_id}\".format(iso_id=id), method=\"GET\")\n return BoundIso(self, response['iso'])",
"def read(self, id):",
"def get_invitation(self, id):\n response = requests.get(self.invitations_url, params = {'id': id}, headers = self.headers)\n response = self.__handle_response(response)\n i = response.json()['invitations'][0]\n return Invitation.from_json(i)",
"def interestOnInterest(self):\n\t\tself.int_on_int = ((self.coupon)*((((1+self.results['ytm'])**(self.mat)-1))/(self.results['ytm'])))-(self.tot_coup_paym)\n\t\treturn round(self.int_on_int, 2)",
"def computeInterest(self):\n interest=self._balance * SavingsAccount.RATE\n self.deposit(interest)\n return interest",
"def by_id(cls, like_id):\n return cls.get_by_id(like_id)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
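A hedged call-site sketch for the get_by_id document above; Interest and NotFound are assumed to be the Flask-SQLAlchemy model and the not-found exception implied by the snippet, neither of which is shown here.

# Illustrative only -- assumes an application context with the Interest model available.
try:
    interest = get_by_id(42)
    print(interest.name)
except NotFound as err:
    print(err)  # raised with "Interest id 42 not found" when no row matches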
Get interest by name
|
def get_by_name(name: str):
interest = Interest.query.filter(Interest.name == name).first()
if interest is None:
raise NotFound(f"Interest name {name} not found")
return interest
|
[
"def get_by_id(interest_id: int):\n interest = Interest.query.get(interest_id)\n if interest is None:\n raise NotFound(f\"Interest id {interest_id} not found\")\n\n return interest",
"def investigation_by_name(self, name):\n endpoint = \"investigationsearch/\"\n r = self._make_post(endpoint)\n if r:\n res = list(filter(lambda x: x[\"name\"] == name, r))\n if res:\n return res[0]\n else:\n logging.error(\"Error to list investigation %s\" % self.yeti_url + endpoint)",
"def computeInterest(self):\n interest=self._balance * SavingsAccount.RATE\n self.deposit(interest)\n return interest",
"async def name_to_inchi(self, name):\n args = f'name/{name}/JSON'\n response = await self.query_the_service('PubChem', args)\n if response:\n response_json = json.loads(response)\n for prop in response_json['PC_Compounds'][0]['props']:\n if prop['urn']['label'] == 'InChI':\n return prop['value']['sval']",
"def calc_interest(self): # METHOD\n # TODO: If no 0 balance in 30-day period, add interest; else, no interest\n interest = self._balance * self._interest_rate\n return interest",
"def get_recipe_by_name(self, name):\n for _, recipe in self.recipe_list.items():\n if recipe.name == name:\n print(recipe)\n return recipe",
"def find_rate_by_name(cls, rate_name):\n for rate_ex in cls.instances:\n if rate_ex.rate_name == rate_name:\n return rate_ex\n raise KeyError('No rate named \"{}\" defined'.format(rate_name))",
"def get_person_by_name(self, name):\n print(name)\n urlpath = '/people?where={{\"name\":\"{}\"}}'.format(name)\n resd = self.getdict(urlpath)\n res = resd['_items']\n if len(res) == 1:\n return res[0]\n elif len(res) == 0:\n print('Not Found')\n return None\n else:\n print('Found multiple', len(res))\n return res",
"def determine_interest(outstanding_balance: float, interest_rate:\r\n float) -> float:\r\n return outstanding_balance * interest_rate / 12",
"def get_interest_rate():\n try:\n if conf.exchange == 'bitmex':\n today = datetime.date.today().isoformat()\n result = exchange.public_get_funding({'symbol': conf.symbol, 'startTime': today, 'count': 1})\n if result is not None:\n return result[0]['fundingRateDaily'] * -100\n return None\n log.error(\"get_interest_rate() not yet implemented for %s\", conf.exchange)\n\n except (ccxt.ExchangeError, ccxt.AuthenticationError, ccxt.ExchangeNotAvailable, ccxt.RequestTimeout) as error:\n log.error('Got an error %s %s, retrying in about 5 seconds...', type(error).__name__, str(error.args))\n sleep_for(4, 6)\n return get_interest_rate()",
"def get_recipe_by_name(self, name):\n pass",
"def computeInterest(self):\n total=0.0\n for account in self._account.values():\n total+= account.computeInterest()\n return total",
"def interestOnInterest(self):\n\t\tself.int_on_int = ((self.coupon)*((((1+self.results['ytm'])**(self.mat)-1))/(self.results['ytm'])))-(self.tot_coup_paym)\n\t\treturn round(self.int_on_int, 2)",
"def load_interest_data():\n return load(\"user_interests.csv\")",
"def get_account_by_name(self, name: str): \r\n return self.accounts[name] if name in self.accounts else None",
"def get_by_name(self, name):\n # type: (str) -> BoundIso\n return super(IsosClient, self).get_by_name(name)",
"def calculate_yearly_interest_rate(self, year) -> dict:\n return self.interest_rates.get(\n year, self.interest_rates.get(\n list(self.interest_rates.keys())[-1] # take last year's interest rate\n )\n )",
"def test_interests():\r\n\r\n interests = s.get_interests(image)\r\n\r\n correct_interests = ['dig']\r\n\r\n assert interests == correct_interests",
"def futures_loan_interest_history(self, **kwargs):\n\n return self.sign_request(\"GET\", \"/sapi/v1/futures/loan/interestHistory\", kwargs)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
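A matching sketch for the get_by_name document above, under the same assumptions about the Interest model and the NotFound exception.

# Illustrative only -- mirrors the by-id lookup but filters on the name column.
try:
    interest = get_by_name("hiking")
    print(interest.id)
except NotFound:
    print("no interest named 'hiking' yet")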
Save changes to db
|
def save(self):
db.session.commit()
|
[
"def save_and_flush(self):\n self.save()\n self.flush()",
"def save(self):\n self.session.add(self)\n self.commit_session()",
"def commit(self):\n\t\tself.dbConnection.commit()",
"def save(self):\n\n # Iterate over each new attribute (check if changed) and validate using Field class. Then save to DB\n self.__new_values = {}\n\n for key in self.__fields_keys:\n if self.__dict__[key] is not None:\n self.__new_values[key] = self.__dict__[key]\n\n self._database_values = {k: v for k, v in self._database_values.items() if v is not None}\n\n self.__db.multiple_where(self._database_values).update(self.__new_values)",
"def save(self, context=None):\n updates = self.obj_get_changes()\n self.dbapi.update_webservice(self.uuid, updates)\n self.obj_reset_changes()",
"def save(self):\n if self.sw_update_obj is not None:\n self.sw_update_obj.save()",
"def save(self, context=None):\n updates = self.obj_get_changes()\n self.dbapi.update_bay(self.uuid, updates)\n\n self.obj_reset_changes()",
"def dbSave(self, env):\n\t\traise NotImplementedError, 'Flat File Saving Not Implemented'",
"def commit(self, session):\n session.commit()",
"def savedb(self, dbcurs):\n donesave = 0\n for oe in self.editlist:\n oe.savedb(dbcurs)\n donesave += 1\n if donesave != 0:\n dbcurs.connection.commit()",
"def save(self):\n for workunit in self.workunits.values():\n workunit.save()",
"def save_changes(_id, data):\n\n query, values = Vote.add_vote(data, user=_id)\n db().commit_changes(query, values)",
"def _do_commit(self):\n self.backend.commit()",
"def commit_db(self):\n\t\tself.conn.commit()\n\t\tself.conn.close()",
"def save(self):\r\n\t\tfilename = self.generate_filename()\r\n\t\twrite_to_file(self.table, filename)",
"def save_model(self, request, obj, form, change):\n obj.save(request=request)",
"def save(self):\r\n values = [str(prop)+\" = \"+str(sqlRender(getattr(self,prop))) for prop in self._column_list]\r\n \r\n if getattr(self,self.pk):\r\n Base.cursor.execute(f\"UPDATE {self._table_name} SET {', '.join(values)} WHERE {self.pk}={sqlRender(self.pk)}\")\r\n Base.cursor.commit()\r\n Base.cursor.fetchall()\r\n else:\r\n list_without_pk = self._column_list.copy()\r\n list_without_pk.remove(self.pk)\r\n values = [getattr(self,item) if getattr(self,item) else \"NULL\" for item in list_without_pk]\r\n Base.cursor.execute(f\"INSERT INTO {self._table_name} ({', '.join(list_without_pk)}) VALUES ( { ', '.join([str(sqlRender(value)) for value in values])}\")\r\n Base.cursor.commit()\r\n Base.cursor.fetchall()",
"def save(self, instance):\n pass",
"def save_tag(self):\n self.save()",
"def save(self, really=True):\n if really and not self.delay_save:\n self.db.session.commit()\n return self"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
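A short sketch of how the save document above is typically used when it is defined as a model method; Interest and db.session are assumed to come from the surrounding Flask-SQLAlchemy setup.

# Illustrative only -- stage a new row on the session, then persist it.
interest = Interest(name="cycling")
db.session.add(interest)
interest.save()  # simply calls db.session.commit(), flushing the pending INSERT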
Models star as an array of uniformly distributed point sources
|
from itertools import product
from math import hypot

import numpy as np

def generatePoints(starR):
    if starR == 0:  # model as point source
        return np.array([(0, 0)])
    n = 5  # number of points to model 1D radius of star
    pairs = np.array([item for item in product(np.linspace(-starR, starR, 2*n - 1), repeat=2) if hypot(item[0], item[1]) <= starR])
    return pairs
|
[
"def starmodel(self,star=None,pars=None):\n\n psf = self.psf.copy()\n if pars is not None:\n psf._params = pars\n \n model = []\n if star is None:\n star = np.arange(self.nstars)\n else:\n star = [star]\n\n for i in star:\n image = self.imdata[i]\n amp = self.staramp[i]\n xcen = self.starxcen[i] \n ycen = self.starycen[i]\n bbox = self.bboxdata[i]\n model1 = psf(pars=[amp,xcen,ycen],bbox=bbox)\n model.append(model1)\n return model",
"def reg_noise(pts,num_of_points):\n x,y,z = pts.min(axis=0)\n x_max,y_max,z_max = pts.max(axis=0)\n noise = []\n for i in range(num_of_points):\n x_noise = random.uniform( x, x_max )\n y_noise = random.uniform( y, y_max )\n z_noise = random.uniform( z, z_max )\n noise+=[[x_noise,y_noise,z_noise]]\n \n return np.concatenate((pts,np.asarray(noise)))",
"def _sample_inputs(cls):\n a = - 2 + 2 * 2 * np.random.rand()\n v_steering = - 0.2 + 2 * 0.2 * np.random.rand()\n return [v_steering, a]",
"def _create_star(center_id, center_pos, nb_points):\n import numpy as np\n pos = np.zeros((2, nb_points+1))\n pos[:, 0] = center_pos\n redensify.G[center_id] = set()\n sangle = cexp.r.random()*1.57+.5\n for i in range(nb_points):\n cexp.add_signed_edge(center_id, center_id+i+1, True)\n pos[:, i+1] = pos[:, 0] + [np.cos(i*2*np.pi/nb_points+sangle),\n np.sin(i*2*np.pi/nb_points+sangle)]\n return pos",
"def compute_scattering_source(self,x) :\n\n self.scattering_src = np.zeros((4*self.param.n_mom*self.param.n_cells))\n for cell in xrange(0,int(self.param.n_cells)) :\n# Get i,j pair from a cell\n [i,j] = cell_mapping(cell,self.param.n_x)\n i_mat = self.param.mat_id[i,j]\n sca = self.param.sig_s[:,i_mat]\n# Get location in the matrix\n ii = mapping(i,j,self.param.n_x)\n# Block diagonal term\n for k in xrange(0,self.param.n_mom) :\n kk = k*4*self.param.n_cells + ii\n tmp = x[kk[0]:kk[3]+1]\n dot_product = np.dot(self.fe.mass_matrix,tmp)\n pos = 0\n for i_kk in xrange(int(kk[0]),int(kk[3]+1)) :\n self.scattering_src[i_kk] += self.scattering_src[i_kk] + sca[k]*\\\n dot_product[pos]\n pos += 1",
"def draw(self, star):\n # Start by getting all interpolation coefficients for all observed points\n data, weight, u, v = star.data.getDataVector(include_zero_weight=True)\n # Subtract star.fit.center from u, v\n u -= star.fit.center[0]\n v -= star.fit.center[1]\n\n coeffs, psfx, psfy = self.interp(u/self.du, v/self.du)\n # Turn the (psfy,psfx) coordinates into an index into 1d parameter vector.\n index1d = self._indexFromPsfxy(psfx, psfy)\n # All invalid pixel references now have negative index; record and set to zero\n nopsf = index1d < 0\n index1d = np.where(nopsf, 0, index1d)\n # And null the coefficients for such pixels\n coeffs = np.where(nopsf, 0., coeffs)\n\n pvals = self._fullPsf1d(star)[index1d]\n model = star.fit.flux * np.sum(coeffs*pvals, axis=1)\n if not star.data.values_are_sb:\n # Change data from surface brightness into flux\n model *= star.data.pixel_area\n\n return Star(star.data.setData(model,include_zero_weight=True), star.fit)",
"def flat(self):\n i=0\n self.x=4\n self.volume=self.x**(self.dim)\n while(i<self.npoints):\n j=0\n while (j<self.dim):\n self.data[i,j]=np.random.uniform(0,self.x)\n j+=1\n self.like[i]=1/self.volume\n i+=1 \n self.maindensity=self.npoints/self.volume\n print(\"main density is\",self.maindensity)",
"def generate_random_points_on_sphere():\n \n def normalize(v):\n norm = numpy.linalg.norm(v)\n if norm == 0: \n return v\n else:\n return v/norm\n \n v = numpy.zeros(3)\n for i in range(3):\n k = 15 # We use the central limit theorem for our samples\n k_sample = numpy.random.standard_normal(size=k)\n ran_coord = k_sample.sum()\n v[i] = ran_coord\n \n v = normalize(v)\n\n return v",
"def data_generator_simulation1():\n # Target : 1 nuage de point\n nt = 1000\n mu_t = np.array([50, 50])\n cov_t = np.array([[60, 40], \n [40, 60]])\n xt = ot.datasets.make_2D_samples_gauss(nt, mu_t, cov_t)\n\n # Source : 3 nuages de points\n ns1 = 700\n mu_s = np.array([25, 60])\n cov_s = np.array([[30, 10], \n [10, 30]])\n xs = ot.datasets.make_2D_samples_gauss(ns1, mu_s, cov_s)\n\n ns2 = 400\n mu_s = np.array([55, 80])\n cov_s = np.array([[30, 10], \n [10, 30]])\n xs=np.append(xs,ot.datasets.make_2D_samples_gauss(ns2, mu_s, cov_s),axis=0)\n\n\n # Compute the distribution laws associate with the clouds of dots.\n ns=ns1+ns2\n a, b = ot.unif(ns), ot.unif(nt) # uniform distribution on samples\n return (xs,a),(xt,b)",
"def produce_random_points(self):\n if self.vectorize:\n if self.dimension > 0: # the user did not specify any input points\n dimensionality = self.dimension\n sample_produced = np.random.multivariate_normal(np.zeros(dimensionality), np.eye(dimensionality))\n else:\n n_args = len(inspect.getargspec(self.init_function).args)\n test_dim_input = np.ones(n_args).reshape(n_args, 1)\n dimensionality = len(test_dim_input)\n sample_produced = np.random.multivariate_normal(np.zeros(dimensionality), np.eye(dimensionality))\n return sample_produced",
"def tracklet_smoothing_linear_regr(points_3d):\n # TODO: add depth confidence into consideration\n # confidence can be used to adjust alpha\n _, n_points = points_3d.shape\n # print(points_3d.shape)\n t = np.reshape(np.arange(n_points), (n_points, 1))\n points_3d_new = []\n for coor in points_3d:\n lr = LinearRegression().fit(t, coor)\n # print(huber.score(t, coor))\n coor_new = lr.predict(t)\n coor_final = coor_new\n points_3d_new.append(coor_final)\n\n points_3d_new = np.array(points_3d_new)\n return points_3d_new",
"def sample(self, point_lons, point_lats, order=0, method='scipy'):\n LonGrid, LatGrid = np.meshgrid(self.gridX,self.gridY)\n d,l = utils.sphere.sampleOnSphere(LonGrid.flatten(),\n LatGrid.flatten(),\n self.gridZ.flatten(),\n np.array(point_lons),\n np.array(point_lats),\n k=4)\n\n #print d,l\n # based on http://earthpy.org/interpolation_between_grids_with_ckdtree.html\n # note also that where d is zero, we get a divide by zero error - hence, these\n # values are (currently) set to one\n w = np.divide(1.,d**2, out=np.ones_like(d), where=d!=0)\n point_z = np.sum(w * self.gridZ.flatten().ravel()[l],axis=1) / np.sum(w,axis=1)\n\n return point_z",
"def create_star(**kwargs):\n if type(kwargs['x']) == int:\n x = kwargs['x']\n else:\n x_range = kwargs['x']\n x = random.randint(x_range[0], x_range[1])\n\n if type(kwargs['y']) == int:\n y = kwargs['y']\n else:\n y_range = kwargs['y']\n y = random.randint(y_range[0], y_range[1])\n if type(kwargs['s']) == int:\n s = kwargs['s']\n else:\n s = random.choice(kwargs['s'])\n\n # print(\"coord x = {}. Type: {}\".format(x, type(x)))\n # print(\"coord y = {}. Type: {}\".format(y, type(y)))\n # print(\"speed s = {}. Type: {}\".format(s, type(y)))\n\n return {'x': x, 'y': y, 's': s}",
"def point_source_foregrounds(\n nu,\n n_sources=1000,\n Smin=0.3,\n Smax=300.0,\n chromatic=False,\n return_beta=True,\n beta=None,\n seed=42,\n alpha_low=-1.5,\n alpha_high=-1.25,\n mfreq=150,\n return_sources=False,\n):\n np.random.seed(seed)\n theta = np.random.uniform(0, np.pi / 2.0, n_sources)\n phi = np.random.uniform(0, 2 * np.pi, n_sources)\n\n if chromatic:\n alpha = np.random.uniform(alpha_low, alpha_high, size=n_sources)\n beta = (nu.value / mfreq) ** alpha[:, None]\n sources = (np.random.uniform(Smin, Smax, size=n_sources)[:, None] * beta).T\n\n else:\n alpha = alpha_low\n if beta is None:\n beta = (nu.value / mfreq) ** alpha\n\n flux = np.random.uniform(Smin, Smax, size=n_sources)\n sources = flux * beta[:, None]\n\n if return_beta:\n if return_sources:\n return sources, theta, phi, beta, flux, mfreq, alpha\n\n return sources, theta, phi, beta\n\n else:\n return sources, theta, phi",
"def generate_non_linear(num_train_samples=200, num_test_samples=32 * 32, noise=False):\n # Generate the dataset\n # Initialize two 2D point sets with num_train_samples and num_test_samples resp.\n train_samples = np.random.uniform(0.0, 128.0, (num_train_samples, 2))\n num_test_samples = np.sqrt(num_test_samples)\n test_samples = list(itertools.product(np.linspace(0.5, 127.5, num_test_samples),\n np.linspace(0.5, 127.5, num_test_samples)))\n\n # compute train and test labels\n labels = [[], []]\n\n for k, samples in enumerate((train_samples, test_samples)):\n for i in range(0, len(samples)):\n sample = samples[i]\n x = np.random.poisson()\n if 16 <= sample[0] <= 112 and 16 <= sample[1] <= 112:\n if sample[0] < 40 and sample[1] < 40:\n if np.sqrt((40 - sample[0]) ** 2 + (40 - sample[1]) ** 2) <= 24:\n if x > 3 and k == 0 and noise:\n labels[k] = np.append(labels[k], [0])\n else:\n labels[k] = np.append(labels[k], [1])\n else:\n if x > 3 and k == 0 and noise:\n labels[k] = np.append(labels[k], [1])\n else:\n labels[k] = np.append(labels[k], [0])\n elif sample[0] > 88 and sample[1] < 40:\n if np.sqrt((88 - sample[0]) ** 2 + (40 - sample[1]) ** 2) <= 24:\n if x > 3 and k == 0 and noise:\n labels[k] = np.append(labels[k], [0])\n else:\n labels[k] = np.append(labels[k], [1])\n else:\n if x > 3 and k == 0 and noise:\n labels[k] = np.append(labels[k], [1])\n else:\n labels[k] = np.append(labels[k], [0])\n elif sample[0] > 88 and sample[1] > 88:\n if np.sqrt((88 - sample[0]) ** 2 + (88 - sample[1]) ** 2) <= 24:\n if x > 3 and k == 0 and noise:\n labels[k] = np.append(labels[k], [0])\n else:\n labels[k] = np.append(labels[k], [1])\n else:\n if x > 3 and k == 0 and noise:\n labels[k] = np.append(labels[k], [1])\n else:\n labels[k] = np.append(labels[k], [0])\n elif sample[0] < 40 and sample[1] > 88:\n if np.sqrt((40 - sample[0]) ** 2 + (88 - sample[1]) ** 2) <= 24:\n if x > 3 and k == 0 and noise:\n labels[k] = np.append(labels[k], [0])\n else:\n labels[k] = np.append(labels[k], [1])\n else:\n if x > 3 and k == 0 and noise:\n labels[k] = np.append(labels[k], [1])\n else:\n labels[k] = np.append(labels[k], [0])\n else:\n if (sample[0] - 8 * x < 0 or 128 < sample[0] + 8 * x \\\n or sample[1] - 8 * x < 0 or 128 < sample[1] + 8 * x) \\\n and k == 0 and noise:\n labels[k] = np.append(labels[k], [0])\n else:\n labels[k] = np.append(labels[k], [1])\n else:\n if (32 < sample[0] + 8 * x and sample[0] - 8 * x < 96 \\\n and 32 < sample[1] + 8 * x and sample[1] - 8 * x < 96) \\\n and k == 0 and noise:\n labels[k] = np.append(labels[k], [1])\n else:\n labels[k] = np.append(labels[k], [0])\n\n # Convert data type\n train_samples = np.asarray(train_samples, dtype=np.float32)\n train_labels = np.asarray(labels[0], dtype=np.float32)\n test_samples = np.asarray(test_samples, dtype=np.float32)\n test_labels = np.asarray(labels[1], dtype=np.float32)\n\n return (train_samples, train_labels), (test_samples, test_labels)",
"def generate_linear(num_train_samples=200, num_test_samples=32 * 32, noise=False):\n # Generate the dataset\n # Initialize two 2D point sets with num_train_samples and num_test_samples resp.\n train_samples = np.random.uniform(0.0, 128.0, (num_train_samples, 2))\n num_test_samples = np.sqrt(num_test_samples)\n test_samples = list(itertools.product(np.linspace(0.5, 127.5, num_test_samples),\n np.linspace(0.5, 127.5, num_test_samples)))\n\n # compute train and test labels\n labels = [[], []]\n\n for k, samples in enumerate((train_samples, test_samples)):\n for i in range(0, len(samples)):\n sample = samples[i]\n x = 8 * np.random.poisson()\n if sample[0] < 64:\n if sample[0] + x > 70 and k == 0 and noise:\n labels[k] = np.append(labels[k], [1])\n else:\n labels[k] = np.append(labels[k], [0])\n else:\n if sample[0] - x < 58 and k == 0 and noise:\n labels[k] = np.append(labels[k], [0])\n else:\n labels[k] = np.append(labels[k], [1])\n\n # Convert data type\n train_samples = np.asarray(train_samples, dtype=np.float32)\n train_labels = np.asarray(labels[0], dtype=np.float32)\n test_samples = np.asarray(test_samples, dtype=np.float32)\n test_labels = np.asarray(labels[1], dtype=np.float32)\n\n return (train_samples, train_labels), (test_samples, test_labels)",
"def star_sim(catalog=None, name=None, psf=None, pixel_scale=None, pad_image=1.5, x_size=None, y_size=None,\n sky_noise=0.0, instrument_noise=0.0, photon_noise=False,\n dcr_flag=False, band_name='g', sed_list=None,\n astrometric_error=None, edge_dist=None, **kwargs):\n \"\"\"\n if psf is None:\n psf = galsim.Kolmogorov(fwhm=1)\n \"\"\"\n # I think most PSF classes have a getFWHM method. The math converts to a sigma for a gaussian.\n fwhm_to_sigma = 1.0 / (2.0 * np.sqrt(2. * np.log(2)))\n if pixel_scale is None:\n pixel_scale = psf.getFWHM() * fwhm_to_sigma\n if edge_dist is None:\n if pad_image > 1:\n edge_dist = 0\n else:\n edge_dist = 5 * psf.getFWHM() * fwhm_to_sigma / pixel_scale\n kernel_radius = np.ceil(5 * psf.getFWHM() * fwhm_to_sigma / pixel_scale)\n bright_sigma_threshold = 3.0\n bright_flux_threshold = 0.1\n # print(\"Kernel radius used: \", kernel_radius)\n if catalog is None:\n catalog = cat_sim(x_size=x_size, y_size=y_size, name=name, edge_distance=edge_dist,\n pixel_scale=pixel_scale, **kwargs)\n schema = catalog.getSchema()\n n_star = len(catalog)\n bandpass = load_bandpass(band_name=band_name, **kwargs)\n if name is None:\n # If no name is supplied, find the first entry in the schema in the format *_flux\n schema_entry = schema.extract(\"*_flux\", ordered='true')\n fluxName = schema_entry.iterkeys().next()\n else:\n fluxName = name + '_flux'\n\n if sed_list is None:\n # Load in model SEDs\n matchStarObj = matchStar()\n sed_list = matchStarObj.loadKuruczSEDs()\n\n fluxKey = schema.find(fluxName).key\n temperatureKey = schema.find(\"temperature\").key\n metalKey = schema.find(\"metallicity\").key\n gravityKey = schema.find(\"gravity\").key\n # if catalog.isContiguous()\n flux = catalog[fluxKey] / psf.getFlux()\n temperatures = catalog[temperatureKey]\n metallicities = catalog[metalKey]\n gravities = catalog[gravityKey]\n flux_arr = np.zeros((n_star, bandpass_nstep(bandpass)))\n\n for _i in range(n_star):\n f_star = flux[_i]\n t_star = temperatures[_i]\n z_star = metallicities[_i]\n g_star = gravities[_i]\n star_spectrum = star_gen(sed_list=sed_list, temperature=t_star, flux=f_star, bandpass=bandpass,\n metallicity=z_star, surface_gravity=g_star)\n flux_arr[_i, :] = np.array([flux_val for flux_val in star_spectrum])\n flux_tot = np.sum(flux_arr, axis=1)\n if n_star > 3:\n cat_sigma = np.std(flux_tot[flux_tot - np.median(flux_tot)\n < bright_sigma_threshold * np.std(flux_tot)])\n i_bright = (np.where(flux_tot - np.median(flux_tot) > bright_sigma_threshold * cat_sigma))[0]\n if len(i_bright) > 0:\n flux_faint = np.sum(flux_arr) - np.sum(flux_tot[i_bright])\n i_bright = [i_b for i_b in i_bright if flux_tot[i_b] > bright_flux_threshold * flux_faint]\n n_bright = len(i_bright)\n i_faint = [_i for _i in range(n_star) if _i not in i_bright]\n n_faint = len(i_faint)\n else:\n i_bright = np.arange(n_star)\n i_faint = np.arange(0)\n n_bright = n_star\n n_faint = 0\n if not dcr_flag:\n flux_arr = flux_tot\n flux_bright = flux_arr[i_bright]\n flux_arr = flux_arr[i_faint]\n else:\n flux_bright = flux_arr[i_bright, :]\n flux_arr = flux_arr[i_faint, :]\n\n xv = catalog.getX()\n yv = catalog.getY()\n\n return_image = np.zeros((y_size, x_size))\n if dcr_flag:\n if n_faint > 0:\n return_image += convolve_dcr_image(flux_arr, xv[i_faint], yv[i_faint],\n bandpass=bandpass, x_size=x_size, y_size=y_size,\n kernel_radius=kernel_radius,\n psf=psf, pad_image=pad_image, pixel_scale=pixel_scale,\n photon_noise=photon_noise, sky_noise=sky_noise, **kwargs)\n if n_bright > 0:\n return_image += 
convolve_dcr_image(flux_bright, xv[i_bright], yv[i_bright],\n bandpass=bandpass, x_size=x_size, y_size=y_size,\n kernel_radius=x_size, oversample_image=2.0,\n psf=psf, pad_image=pad_image, pixel_scale=pixel_scale,\n photon_noise=photon_noise, sky_noise=0.0, **kwargs)\n\n else:\n if n_faint > 0:\n return_image += convolve_image(flux_arr, xv[i_faint], yv[i_faint],\n x_size=x_size, y_size=y_size, kernel_radius=kernel_radius,\n psf=psf, pad_image=pad_image, pixel_scale=pixel_scale,\n photon_noise=photon_noise, sky_noise=sky_noise, **kwargs)\n if n_bright > 0:\n return_image += convolve_image(flux_bright, xv[i_bright], yv[i_bright],\n x_size=x_size, y_size=y_size,\n kernel_radius=x_size, oversample_image=2.0,\n psf=psf, pad_image=pad_image, pixel_scale=pixel_scale,\n photon_noise=photon_noise, sky_noise=0.0, **kwargs)\n if instrument_noise > 0:\n return_image += np.random.normal(scale=instrument_noise, size=(y_size, x_size))\n return(return_image)",
"def _local_sources(self, src_sel):\n ps = self.skymodel.get_point_sources(src_sel)\n return np.asarray(ps)",
"def _randomSamples(self, n):\n # we want to return points in unit sphere, could do using spherical coords\n # but rejection method is easier and arguably faster :)\n points = np.array([])\n while points.shape[0] < n:\n remainingPoints = n - points.shape[0]\n p = (np.random.rand(remainingPoints,3) - 0.5)*2\n #p = p[np.linalg.norm(p, axis=1) <= SAMPLE_SPHERE_RADIUS]\n\n if points.size == 0:\n points = p \n else:\n points = np.concatenate((points, p))\n return points",
"def sgd(self):\n import math\n for i, j, r in self.samples:\n # Computer prediction and error\n prediction = self.get_rating(i, j)\n e = (r - prediction)\n #print(e)\n # Update biases\n self.b_u[i] += self.alpha * (e - self.beta * self.b_u[i])\n self.b_i[j] += self.alpha * (e - self.beta * self.b_i[j])\n\n # Update user and item latent feature matrices\n #self.P[i, :] += self.alpha * (e * self.Q[j, :] - self.beta * self.P[i,:])\n #self.Q[j, :] += self.alpha * (e * self.P[i, :] - self.beta * self.Q[j,:])\n\n P_i = np.copy(self.P[i,:])\n for k in range(self.K):\n self.P[i,k] += self.alpha * (e * self.Q[j,k] - self.beta * self.P[i,k])\n self.Q[j,k] += self.alpha * (e * P_i[k] - self.beta * self.Q[j,k])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
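A quick sanity check for the generatePoints document above; it assumes numpy, itertools.product and math.hypot are imported as in the snippet.

import numpy as np

pts = generatePoints(1.0)   # star of radius 1 sampled on a 9x9 grid clipped to the disc
print(pts.shape)            # (N, 2): only the grid points with hypot(x, y) <= 1 survive
print(np.all(np.hypot(pts[:, 0], pts[:, 1]) <= 1.0))   # True -- every point lies inside the star
print(generatePoints(0))    # [[0 0]] -- the degenerate point-source case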
Calculates transverse velocity of KBO
|
def vT(a, vE):
# a is distance to KBO, in AU
# vE is Earth's orbital speed, in m/s
# returns vT, transverse KBO velocity, in m/s
return vE * ( 1 - (1./a)**(1/2.))
|
[
"def velocity(slowness):\n return 0.3048 / ((slowness * (10**(-6))))",
"def v(self):\n return self.velocity + self.dv()",
"def velocity(self, t):\n pass",
"def velocity(obs0, obs1, r0, r1):\n\tsigma = G/(np.linalg.norm(r0)**3)\n\tv0 = (r1 - vel_f(obs1.JD, obs0.JD, sigma, 0)*r0)/vel_g(obs1.JD, obs0.JD, sigma)\n\tfor _ in range(4): # Iterate to get tau\n\t\ttau = r0.dot(v0)/r0.dot(r0)\n\t\tv0 = (r1 - vel_f(obs1.JD, obs0.JD, sigma, tau)*r0)/vel_g(obs1.JD, obs0.JD, sigma)\n\treturn v0",
"def update_velocity(self, msg):\n\t\tself.ekf.vel = enu_to_ned(np.array([[msg.twist.linear.x], [msg.twist.linear.y], [msg.twist.linear.z]]))",
"def orbital_velocity(height): #in meters\n #height *= m\n v = (G*mars.mass/height)**(1/2)\n return v",
"def E2V(E):\r\n# for energy in mev returns velocity in m/s\r\n return sqrt(E/5.227e-6)",
"def get_velocity( b ):\n v = []\n for i in range(1,len(b)-1):\n D2 = b[i+1] - 2.0*b[i] + b[i-1]\n D1 = (b[i+1] - b[i-1])/2.0\n D1norm2 = D1[0]**2.0 + D1[1]**2.0\n v.append( D2/D1norm2 )\n return np.array(v)",
"def velocity(self):\r\n if self.sprint:\r\n return self._absDirection * self.sprintSpeed\r\n else:\r\n return self._absDirection * self.baseSpeed",
"def climb_velocity(T,sigma,omega,Q_cl,b):\n v_climb = 2.0*omega*np.exp(-Q_cl/(constants.k*T))*(np.exp(sigma*b**3/(constants.k*T)) - 1.0)\n \n return v_climb",
"def ConvectiveVelocity(self):\n\n T = self.T[:,:,75]; P = self.P[:,:,75]; rho = self.rho[:,:,75]\n uy = self.vy[:,:,75]; ux = self.vx[:,:,75]\n d = 1.0 # delta\n self.vel_conv = np.nan_to_num(np.sqrt(self.F*d*self.dTdy(P,T,rho)/T)*self.dx)\n\n xx,yy = np.meshgrid(self.y, self.x)\n fig = plt.figure('conv vel')\n ax = fig.gca(projection='3d')\n ax.plot_surface(xx, yy, rho, cmap=cm.plasma)\n ax.set_xlabel('x [m]')\n ax.set_ylabel('y [m]')\n ax.set_zlabel('z [m/s]')\n plt.tight_layout()\n plt.savefig('Density.png')\n\n print '------'\n print 'The convective velocity is,'\n print self.vel_conv\n print '------'\n print 'Difference between convective velocity and vertical velocity'\n print self.vel_conv - uy\n\n \"\"\"\n Mass fraction moving with convective velocity +/- 10%. For each cell, the\n mass moving up with the given velocity range needs to be summed up. This\n gives the mass fraction moving with the given velocity range.\n \"\"\"\n\n mass_y = []; mass_x = []\n for i in range(self.nx):\n for j in range(self.ny):\n if uy[i,j] >= self.vel_conv[i,j]*0.9 and uy[i,j] <= self.vel_conv[i,j]*1.1:\n mass_y.append(rho[i,j])\n if ux[i,j] >= self.vel_conv[i,j]*0.9 and ux[i,j] <= self.vel_conv[i,j]*1.1:\n mass_x.append(rho[i,j])\n\n MassFraction_y = np.sum(mass_y)/np.sum(rho)\n MassFraction_x = np.sum(mass_x)/np.sum(rho)\n print 'Fraction of mass moving with velocities v_conv +/- 10% in x direction:',MassFraction_x\n print 'Fraction of mass moving with velocities v_conv +/- 10% in y direction:',MassFraction_y\n\n print '-----------'\n return self.vel_conv",
"def velocity_damping(self, kpar):\n return (1.0 + (kpar * self.sigma_v(self.ps_redshift))**2.)**-1.",
"def update_velocity_body(self, msg):\n\t\tself.ekf.vel_body = enu_to_ned(np.array([[msg.twist.linear.x], [msg.twist.linear.y], [msg.twist.linear.z]]))",
"def veq(self):\n return self._veq / self._velocity_factor",
"def _updateVelocity(self):\n\t\t# Find difference between two vectors\n\t\tdifferenceVector = [0, 0]\n\t\tdifferenceVector[0] = self.targetVelocity[0] - self.currentVelocity[0]\n\t\tdifferenceVector[1] = self.targetVelocity[1] - self.currentVelocity[1]\n\n\t\t# Exit if there's nothing to update to avoid extra calculations\n\t\tif(differenceVector[0] == 0 and differenceVector[1] == 0):\n\t\t\treturn\n\n\t\t# Find the hypotenuse of the difference vector\n\t\tdifferenceMagnitude = math.sqrt((differenceVector[0] ** 2) + (differenceVector[1] ** 2))\n\n\t\t# If hypotenuse <= maxAcceleration, set currentVelocity = targetVelocity\n\t\tif(differenceMagnitude <= self.maxAcceleration):\n\t\t\tself.currentVelocity[0] = self.targetVelocity[0]\n\t\t\tself.currentVelocity[1] = self.targetVelocity[1]\n\t\t\treturn\n\n\t\t# Else, divide the distance vector by the hypotenuse (to make unit vector), multiply by maxAcceleration, and add to currentVelocity\n\t\tdifferenceVector[0] = self.maxAcceleration * (differenceVector[0] / differenceMagnitude)\n\t\tdifferenceVector[1] = self.maxAcceleration * (differenceVector[1] / differenceMagnitude)\n\n\t\tself.currentVelocity[0] += differenceVector[0]\n\t\tself.currentVelocity[1] += differenceVector[1]\n\n\t\treturn",
"def circular_velocity(k, a):\n return np.sqrt(k / a)",
"def velocity(estimate, actual, times=60):\n return (estimate*times)/(actual*1.)",
"def _update_intermediate_vel_bc_(self, u, w, mask, time, _bc):\n\n # Interior boundaries\n # Apply no-slip boundary conditions to obstacles.\n # Setup masks that are 0 where velocities need to be updated,\n # and 1 where they stay unmodified.\n # Note that (mask & 1) has 1 in the ghost cells.\n u_mask = ( mask[:-1,:] | mask[1:,:] ) & 1\n w_mask = ( mask[:,:-1] | mask[:,1:] ) & 1\n\n # zero velocity inside and on the boundary of obstacles\n u[:,:] *= ( mask[:-1,:] & mask[1:,:] & 1 )\n # negate velocities inside obstacles\n u[:,1:-2] -= ( 1 - u_mask[:,1:-2] ) * u[:,2:-1]\n u[:,2:-1] -= ( 1 - u_mask[:,2:-1] ) * u[:,1:-2]\n\n # zero velocity inside and on the boundary of obstacles\n w[:,:] *= ( mask[:,:-1] & mask[:,1:] & 1 )\n # nullify velocities inside obstacles\n w[1:-2,:] -= ( 1 - w_mask[1:-2,:] ) * w[2:-1,:]\n w[2:-1,:] -= ( 1 - w_mask[2:-1,:] ) * w[1:-2,:] \n\n # top boundary\n _bc_ = _bc[self.UP]\n if 'w' in _bc_:\n fun_ = _bc_['w']\n if callable(fun_):\n for i in range(w.shape[0]):\n node = self.grid[i-0.5, w.shape[1]-1]\n w[i,-1] = fun_(node[0], node[1], time) * (mask[i,-2] & 1)\n else:\n w[:,-1] = fun_ \n\n # bottom boundary\n _bc_ = _bc[self.DOWN]\n if 'w' in _bc_:\n fun_ = _bc_['w']\n if callable(fun_):\n for i in range(w.shape[0]):\n node = self.grid[i-0.5, 0]\n w[i,0] = fun_(node[0], node[1], time) * (mask[i,1] & 1)\n else:\n w[:,0] = fun_ \n\n # left boundary\n _bc_ = _bc[self.LEFT]\n if 'u' in _bc_:\n fun_ = _bc_['u']\n if callable(fun_):\n for i in range(u.shape[1]):\n node = self.grid[u.shape[0]-1, i-0.5]\n u[-1,i] = fun_(node[0], node[1], time) * (mask[-2,i] & 1)\n else:\n u[-1,:] = fun_\n\n # west boundary\n _bc_ = _bc[self.RIGHT]\n if 'u' in _bc_:\n fun_ = _bc_['u']\n if callable(fun_):\n for i in range(u.shape[1]):\n node = self.grid[0, i-0.5]\n u[0,i] = fun_(node[0], node[1], time) * (mask[1,i] & 1)\n else:\n u[0,:] = fun_",
"def rotationalVelocity(self, t):\n pass",
"def angular_velocity(self):\n return 0.0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
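A worked number for the vT document above, using Earth's mean orbital speed of roughly 29.8 km/s and a classical Kuiper belt distance of 40 AU (both illustrative values).

vE = 29.8e3                 # Earth's orbital speed in m/s (approximate)
print(vT(40.0, vE))         # ~2.51e4 m/s: 1 - sqrt(1/40) = 0.842 of Earth's speed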
Rounds x to the nearest odd integer
|
from math import ceil

def roundOdd(x):
    x = ceil(x)
    if x % 2 == 0:
        return int(x - 1)
    return int(x)
|
[
"def iround(x):\n return int(round(x))",
"def round_even(number):\n rounded = int(round(number, 0))\n return rounded+1 if number % .5 == 0 and rounded % 2 != 0 else rounded",
"def round_to_half(num):\n return round(num * 2) / 2.0",
"def intround(n):\r\n return int(round(n))",
"def succ_of_even_odd(x, n):\r\n if x%2 == 0:\r\n return x/2\r\n return n/2 + x/2",
"def floor(x) -> int:\n pass",
"def _round(self, number):\n \n sign = 1 if number >= 0 else -1\n \n rounded = int(round(number))\n nextRounded = int(round(number + 1 * sign))\n \n if nextRounded == rounded:\n # We rounded X.5 to even, and it was also away from 0.\n return rounded\n elif nextRounded == rounded + 1 * sign:\n # We rounded normally (we are in Python 2)\n return rounded\n elif nextRounded == rounded + 2 * sign:\n # We rounded X.5 to even, but it was towards 0.\n # Go away from 0 instead.\n return rounded + 1 * sign\n else:\n # If we get here, something has gone wrong.\n raise RuntimeError(\"Could not round {}\".format(number))",
"def round_base(x, base=8):\n return int(base * round(float(x)/base))",
"def mod2pi(x):\n\n (f, i) = math.modf(x / (2.*math.pi))\n if f < 0.:\n f += 1.\n return f * 2. * math.pi",
"def wrap(x):\n if not 0 <= x <= 9:\n x %= 9\n if x == 0:\n x = 9\n return x",
"def round_to(x, d):\n return round(x, d - int(floor(log10(abs(x)))))",
"def mod_switch(x, q, rq): \n return int(round(1.* rq * x / q) % rq)",
"def roundrnd(x: float) -> float:\n return int(x) + int(_random.random() > (1 - (x % 1)))",
"def test_round(self):\n self.assertEqual(m.round_to_even(0), 0)\n self.assertEqual(m.round_to_even(0.9), 0)\n self.assertEqual(m.round_to_even(1), 2)\n self.assertEqual(m.round_to_even(1.1), 2)\n \n self.assertEqual(m.round_to_odd(0), 1)\n self.assertEqual(m.round_to_odd(0.9), 1)\n self.assertEqual(m.round_to_odd(1), 1)\n self.assertEqual(m.round_to_odd(1.9), 1)\n self.assertEqual(m.round_to_odd(2), 3)",
"def RoundSF(num, sigfigs):\n\n if num == 0:\n return (0)\n\n rc = round(num, -int(math.floor(math.log(abs(num), 10)) - (sigfigs - 1)))\n\n return (rc)",
"def nextRoundNumber(x):\n\n #guess to nearest order of magnitude\n if x in (0, 1):\n return x\n\n if x < 0:\n return -1.0 * nextRoundNumber(-x)\n else:\n lg = int(log10(x))\n\n if lg == 0:\n if x < 1:\n base = 0.1\n else:\n base = 1.0\n elif lg < 0:\n base = 10.0 ** (lg - 1)\n else:\n base = 10.0 ** lg # e.g. base(153) = 100\n # base will always be lower than x\n\n if base >= x:\n return base * 1.0\n elif (base * 2) >= x:\n return base * 2.0\n elif (base * 5) >= x:\n return base * 5.0\n else:\n return base * 10.0",
"def iround(x):\n\n return np.round(x).astype(int)",
"def pred_of_even_odd(x, n):\r\n if x < n/2:\r\n return 2 * x\r\n return 2 * (x - n/2) + 1",
"def hundreds(self, v):\n return int(v / 100) % 10",
"def roundup(x, base=4):\n return base * math.ceil(x/base)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
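A few spot checks for the roundOdd document above (ceil comes from the math module, as in the snippet).

print(roundOdd(2.3))   # 3 -- ceil gives 3, already odd
print(roundOdd(4.0))   # 3 -- ceil gives 4, even, so step down by one
print(roundOdd(5))     # 5 -- odd integers pass through unchanged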
Return the chromosome where the given path lies.
|
def get_path_chromosome(pathid, coord_dir='tileid_hg19_split_by_path/'):
with open(coord_dir + pathid + '.csv') as f:
first_line = f.readline()
# Example line:
# 000.00.000.000,hg19 chr1 0-24 10534
# Entry 1 is chromosome.
chromosome = first_line.split(' ')[1]
return chromosome
|
[
"def getChromosomePath(individual):\n path = [start_cell]\n for Move in [MOVES[gene] for gene in individual]:\n path.append(Move.apply(path[-1])) #append each move to the current Position\n if path[-1] == end_cell : return path #current Position = end cell\n return path",
"def get_player_pos(player):\n for v, p in path[::-1]:\n if p == player:\n return v",
"def chromosome(self):\n return self.attrs[CHROM].lower()",
"def get_path(self, path):\n return path[len(self.base)+2:]",
"def find_chromosome(self, chr_name):\n for chromosome in self.chromosomes:\n if chromosome.name == chr_name:\n return chromosome\n return None",
"def find_path(grid):\n n = len(grid)\n m = len(grid[0])\n\n def helper(row,col,path):\n if row == n:\n return path\n for i in range(col-1,col+2):\n if 0 <= i < m and grid[row][i]:\n result = helper(row+1,i,path + [(row,i)])\n if result is not None:\n return result\n return None\n\n for c in range(0,m):\n if grid[0][c]:\n result = helper(1,c,[(0,c)])\n if result is not None:\n return result\n return None",
"def findPath(self, path: 'SoPath') -> \"int\":\n return _coin.SoPathList_findPath(self, path)",
"def getCell(self, steps):\r\n\t#print \"passedpath = \", self.passedpath, \"length of path = \", len(self.path)\r\n\tif (self.passedpath + steps) < len(self.path) and (self.passedpath + steps) >= 0:\r\n\t return self.path[self.passedpath + steps]",
"def exonCoordinateToChromosome(self, p):\n if p is None: return None\n if p < 0:\n return None\n if p >= sum([(e.stop - e.start) for e in self.exons]):\n return None\n assert(len(self.exons))\n c = 0 # cumulative position through exon space\n if not self.chromosomeInterval.strand:\n p = sum([(e.stop - e.start) for e in self.exons]) - 1 - p\n e_start = self.exons[0].start\n for e in self.exons:\n if p < c + e.stop - e.start:\n # the position is within this exon\n return p - c + e.start\n else:\n # sorry mario, your position is in another exon\n c += e.stop - e.start\n assert(False) # we should never get here",
"def path_coordinates(self, typ, branch_or_cusp):\n idx = self._path_idx(typ, branch_or_cusp)\n return self._path_coordinates[idx]",
"def get_chrom_from_filename(fn):\n\n match = '(?:chr|sites\\.)([0-9XYMT]+)'\n\n try:\n chrom = re.search(match, fn).group(1)\n except AttributeError:\n print(f'Unable to interpret chromosome from file name ({fn}), '\n 'using -1 as a placeholder.')\n chrom = '-1'\n \n return(chrom)",
"def mRnaCoordinateToChromosome(self, p):\n assert(len(self.exons))\n if p is None: return None\n if p < 0: return None\n limit = sum([(e.stop - e.start) for e in self.exons])\n if p >= limit: return None\n p = self.mRnaCoordinateToExon(p)\n if p >= limit: return None\n return self.exonCoordinateToChromosome(p)",
"def get_path_ref_high(path=None):\n if path == None:\n path = my.pwd()\n\n #print \"2\",path\n ### check ob wir in ti sind\n get.from_path_string_job_is(path=path,job=\"ti\")\n\n #print \"3 \"\n ## get sc for refpath\n sc = get.from_path_string_details_supercell(path=path)\n\n\n ## get /home/glensk/v/PAW_PBE/Al/ti_divak_fcc4 oder so, der pfad in welchem\n pathout = get.get_path_job_type_cell(path=path)\n\n #print \"4\",path\n ref = my.checkdir(pathout+\"/ref_high_\"+sc,create=True)\n return ref",
"def gi_from_path(path):\n\n fname = path.split(os.sep)[-1]\n gi = fname.split('.')[0]\n return gi",
"def co_loc(sample,bedfile):\n s = bedfile[bedfile['sample']==sample]\n locs=[]\n parents = s['donor'].unique()\n for index,row in s.iterrows():\n locs.append([row['chr'],int(row['start']),int(row['end']),row['donor']])\n return locs,parents",
"def find(self, gene):\n for chromosome in self.chromosomes:\n if gene in chromosome.plus or gene in chromosome.minus:\n return chromosome\n return None",
"def find_path(self, start_vertex, end_vertex, path=[]):\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return path\n if start_vertex not in graph:\n return None\n for vertex, _ in graph[start_vertex]:\n if vertex not in path:\n extended_path = self.find_path(vertex, end_vertex, path)\n if extended_path:\n return extended_path\n return None",
"def find_path(self, start_vertex, end_vertex, path=None):\n if path == None:\n path = []\n \n graph = self.graph\n \n path = path + [start_vertex]\n \n if start_vertex == end_vertex:\n return path\n \n if start_vertex not in graph:\n return None\n \n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_path = self.find_path(vertex, \n end_vertex, \n path)\n if extended_path: \n return extended_path\n return None",
"def codonCoordinateToChromosome(self, p):\n m = self.codonCoordinateToMRna(p)\n return self.mRnaCoordinateToChromosome(m)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
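A hedged call sketch for the get_path_chromosome document above; the path id below is hypothetical, and coord_dir must point at the per-path CSV layout the snippet describes.

# Illustrative only -- '000' stands in for a real tile path id.
chrom = get_path_chromosome('000', coord_dir='tileid_hg19_split_by_path/')
print(chrom)   # e.g. 'chr1', taken from the second field of the file's first line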
Also, use GPIO functions to set the row pins as outputs and the column pins as inputs.
|
def setup(self):
for pin in self.row_pins:
GPIO.setup(pin, GPIO.OUT)
for pin in self.col_pins:
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
|
[
"def setup_pins():\n\n # Use Board Pin numbers\n gpio.setmode(gpio.BOARD)\n\n # All pins are pulled down as we take to GND on close.\n gpio.setup(LEFT_A, gpio.IN, pull_up_down=gpio.PUD_UP)\n gpio.setup(LEFT_B, gpio.IN, pull_up_down=gpio.PUD_UP)\n gpio.setup(LEFT_PUSH, gpio.IN, pull_up_down=gpio.PUD_UP)\n gpio.setup(RIGHT_A, gpio.IN, pull_up_down=gpio.PUD_UP)\n gpio.setup(RIGHT_B, gpio.IN, pull_up_down=gpio.PUD_UP)\n gpio.setup(RIGHT_PUSH, gpio.IN, pull_up_down=gpio.PUD_UP)",
"def config(self):\n\t\t# Setup for GPIO\n\t\tgpio.setwarnings(False)\n\t\tgpio.setmode(gpio.BCM)\n\t\tself.gpioPins = [24, 23, 18]\t# [S3, S2, S1]\n\t\tfor pin in self.gpioPins:\n\t\t\tgpio.setup(pin, gpio.OUT)\n\n\t\t# Turn off ACK messages and continuous reading for all devices\n\t\tfor port in self.expanderAddr:\n\t\t\tself.ser.flush()\n\t\t\tself.send_cmd(\"*OK,0\", port)\t# Disable OK messages\n\t\t\ttime.sleep(0.01)\t\t\t\t# Wait 10 ms before next instruction\n\t\t\tself.ser.flush()\n\t\t\tself.send_cmd(\"C,0\", port)\t\t# Disable continuous reading mode\n\t\t\ttime.sleep(0.01)\t\t\t\t# Wait 10 ms before next instruction\n\n\t\t# Return to default port \"0,0,0\" (or \"P1\")\n\t\tself.select_SE_port(\"P1\")",
"def __init__(self, pin, numbering=gpio.BCM, _gpio=gpio):\n BasicLogic.BasicToggleOutput.__init__(self, pin, numbering, _gpio)",
"def main(self):\n\n GPIO.add_event_detect(self.pin_clk, GPIO.BOTH, callback=self._change_system_state, bouncetime=50)\n GPIO.add_event_detect(self.pin_button, GPIO.FALLING, callback=self._deactivate_alert, bouncetime=50)\n \n try:\n while True:\n time.sleep(10)\n except KeyboardInterrupt:\n pass\n finally:\n self._logger.info(\"Räume GPIO auf\")\n GPIO.cleanup()",
"def setup_pin(self, pin):\n # TODO add some extra checks here. Maybe verify BCM?\n GPIO.setup(pin, GPIO.OUT)",
"def __init__(self, address=0x20, busnum=Base.I2C.get_default_bus(), cols=16, lines=2):\n # Configure MCP23017 device.\n self._mcp = Base.MCP.MCP23017(address=address, busnum=busnum)\n # Make sure that LEDs are off\n self._mcp.setup(Base.LCD_PLATE_RED, Base.GPIO.OUT)\n self._mcp.setup(Base.LCD_PLATE_GREEN, Base.GPIO.OUT)\n self._mcp.setup(Base.LCD_PLATE_BLUE, Base.GPIO.OUT)\n val = Base.GPIO.HIGH\n self._mcp.output_pins({Base.LCD_PLATE_RED: val, Base.LCD_PLATE_GREEN: val, Base.LCD_PLATE_BLUE: val})\n # Set LCD R/W pin to low for writing only.\n self._mcp.setup(Base.LCD_PLATE_RW, Base.GPIO.OUT)\n self._mcp.output(Base.LCD_PLATE_RW, Base.GPIO.LOW)\n # Set buttons as inputs with pull-ups enabled.\n for button in (self.SELECT, self.RIGHT, self.DOWN, self.UP, self.LEFT):\n self._mcp.setup(button, Base.GPIO.IN)\n self._mcp.pullup(button, True)\n # Initialize LCD (with no PWM support).\n super(LCD, self).__init__(Base.LCD_PLATE_RS, Base.LCD_PLATE_EN,\n Base.LCD_PLATE_D4, Base.LCD_PLATE_D5, Base.LCD_PLATE_D6, Base.LCD_PLATE_D7, cols, lines,\n Base.LCD_PLATE_RED, Base.LCD_PLATE_GREEN, Base.LCD_PLATE_BLUE, enable_pwm=False, \n gpio=self._mcp,initial_color=colors['black'])\n self.clear()\n\n self.create_char(self.SYM_RIGHT, [0x0,0x8,0xc,0xe,0xc,0x8,0x0,0x0])\n self.create_char(self.SYM_LEFT, [0x0,0x2,0x6,0xe,0x6,0x2,0x0,0x0])\n self.create_char(self.SYM_UP, [0x0,0x0,0x4,0xe,0x1f,0x0,0x0,0x0])\n self.create_char(self.SYM_DOWN, [0x0,0x0,0x1f,0xe,0x4,0x0,0x0,0x0])\n self.create_char(self.SYM_CLOCK, [0x0,0xe,0x15,0x17,0x11,0xe,0x0,0x0])\n self.create_char(self.SYM_PLAY, [0x8,0xc,0xe,0xf,0xe,0xc,0x8,0x0])\n self.create_char(self.SYM_PAUSE, [0x1b,0x1b,0x1b,0x1b,0x1b,0x1b,0x1b,0x0])\n self.create_char(self.SYM_STOP, [0x0,0x1f,0x1f,0x1f,0x1f,0x1f,0x0,0x0])",
"def __init__(self, outA, outB=-1):\n GPIO.setmode(GPIO.BCM)\n if outA > 0:\n GPIO.setup(outA, GPIO.IN)\n if outB > 0:\n GPIO.setup(outB, GPIO.IN)\n\n self.outA = outA\n self.outB = outB",
"def led(self,pos,flash,quick):\n self.ledAllOff()\n #Get row\n aRow = pos[1]\n #Get column\n aCol = pos[0]\n #Set col to LOW\n if aCol == 'a':\n theLed = self.LED1\n if aCol == 'b':\n theLed = self.LED2\n if aCol == 'c':\n theLed = self.LED3\n if aCol == 'd':\n theLed = self.LED4\n if aCol == 'e':\n theLed = self.LED5\n if aCol == 'f':\n theLed = self.LED6\n if aCol == 'g':\n theLed = self.LED7\n if aCol == 'h':\n theLed = self.LED8\n\n #Set row to HIGH\n if aRow == '1':\n theRow = self.ROW1\n if aRow == '2':\n theRow = self.ROW2\n if aRow == '3':\n theRow = self.ROW3\n if aRow == '4':\n theRow = self.ROW4\n if aRow == '5':\n theRow = self.ROW5\n if aRow == '6':\n theRow = self.ROW6\n if aRow == '7':\n theRow = self.ROW7\n if aRow == '8':\n theRow = self.ROW8\n self.wiringpi.digitalWrite(theRow,self.HIGH)\n self.wiringpi.digitalWrite(theLed,self.LOW)\n if quick == True:\n flashTime = 0.2\n else:\n flashTime = 0.5\n if flash == True:\n sleep(flashTime)\n self.wiringpi.digitalWrite(theLed,self.HIGH)\n sleep(flashTime)\n self.wiringpi.digitalWrite(theLed,self.LOW)\n sleep(flashTime)\n self.wiringpi.digitalWrite(theLed,self.HIGH)",
"def add_inputs_outputs(self, port):\n self.vf.write(\" input clk{0}; // clock\\n\".format(port))\n self.vf.write(\" input csb{0}; // active low chip select\\n\".format(port))\n if port in self.readwrite_ports:\n self.vf.write(\" input web{0}; // active low write control\\n\".format(port))\n\n self.vf.write(\" input [ADDR_WIDTH-1:0] addr{0};\\n\".format(port))\n if port in self.write_ports:\n if self.write_size:\n self.vf.write(\" input [NUM_WMASKS-1:0] wmask{0}; // write mask\\n\".format(port))\n if self.num_spare_cols == 1:\n self.vf.write(\" input spare_wen{0}; // spare mask\\n\".format(port))\n elif self.num_spare_cols > 1:\n self.vf.write(\" input [{1}:0] spare_wen{0}; // spare mask\\n\".format(port, self.num_spare_cols-1))\n self.vf.write(\" input [DATA_WIDTH-1:0] din{0};\\n\".format(port))\n if port in self.read_ports:\n self.vf.write(\" output [DATA_WIDTH-1:0] dout{0};\\n\".format(port))",
"def analog_output_setup(self, conditions):",
"def init_rpi():\n import RPi.GPIO as GPIO\n\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(config.FAN_PIN, GPIO.OUT)\n GPIO.setup(config.HEAT_PIN, GPIO.OUT)\n GPIO.setup(config.COOL_PIN, GPIO.OUT)\n GPIO.output(config.FAN_PIN, config.RELAY_OFF)\n GPIO.output(config.HEAT_PIN, config.RELAY_OFF)\n GPIO.output(config.COOL_PIN, config.RELAY_OFF)",
"def add_layout_pins(self):\n return\n row_list = self.cell.get_all_wl_names()\n\n for row in range(1, self.row_size - 1):\n for cell_row in row_list:\n wl_pin = self.cell_inst[row, 0].get_pin(cell_row)\n self.add_layout_pin(text=cell_row + \"_{0}\".format(row),\n layer=wl_pin.layer,\n offset=wl_pin.ll().scale(0, 1),\n width=self.width,\n height=wl_pin.height())\n\n # Add vdd/gnd via stacks\n for row in range(1, self.row_size - 1):\n for col in range(self.column_size):\n inst = self.cell_inst[row, col]\n for pin_name in [\"vdd\", \"gnd\"]:\n for pin in inst.get_pins(pin_name):\n self.add_power_pin(name=pin.name,\n loc=pin.center(),\n start_layer=pin.layer)",
"def draw_grid():\n for y in range(num_rows):\n for x in range(num_cols):\n led_matrix.point(x, y, curr_gen[y][x])",
"def init_binary_pins(self, array):\n for pin in array:\n GPIO.setup(pin, GPIO.OUT)\n self.led_off(pin)",
"def __init__(self, pi,\n pin_rs=None, pin_rw=None, pin_e=None, pin_e2=None,\n pins_data=None,\n pin_backlight=None, backlight_mode='active_low',\n backlight_pwm=False, backlight_enabled=True,\n pin_contrast=None, contrast_mode='active_high',\n contrast_pwm=None, contrast=0.5,\n cols=20, rows=4, dotsize=8,\n charmap='A02',\n auto_linebreaks=True):\n\n # Save the pigpio.pi object\n self.pi = pi\n\n # Set attributes\n if pin_rs is None:\n raise ValueError('pin_rs is not defined.')\n if pin_e is None:\n raise ValueError('pin_e is not defined.')\n\n if len(pins_data) == 4: # 4 bit mode\n self.data_bus_mode = c.LCD_4BITMODE\n block1 = [None] * 4\n elif len(pins_data) == 8: # 8 bit mode\n self.data_bus_mode = c.LCD_8BITMODE\n block1 = pins_data[:4]\n else:\n raise ValueError('There should be exactly 4 or 8 data pins.')\n block2 = pins_data[-4:]\n self.pins = PinConfig(rs=pin_rs, rw=pin_rw, e=pin_e, e2=pin_e2,\n d0=block1[0], d1=block1[1], d2=block1[2], d3=block1[3],\n d4=block2[0], d5=block2[1], d6=block2[2], d7=block2[3],\n backlight=pin_backlight, contrast=pin_contrast)\n self.backlight_mode = backlight_mode\n self.backlight_pwm = backlight_pwm\n self.contrast_mode = contrast_mode\n self.contrast_pwm = contrast_pwm\n\n # Call superclass\n super(CharLCD, self).__init__(cols, rows, dotsize,\n charmap=charmap,\n auto_linebreaks=auto_linebreaks)\n\n # Set backlight status\n if pin_backlight is not None:\n self.backlight_enabled = backlight_enabled\n\n # Set contrast\n if pin_contrast is not None:\n self.contrast = contrast",
"def setup_lights(self):\n\t\tGPIO.setup(Light.RED, GPIO.OUT)\n\t\tGPIO.setup(Light.ORANGE, GPIO.OUT)\n\t\tGPIO.setup(Light.GREEN, GPIO.OUT)",
"def togglePin(self,pin):\n if -2 == pin:\n self.stData = 1 - self.stData\n GPIO.output(self.pinDATA, self.dataLevel[self.stData and 1 or 0])\n elif -3 == pin:\n self.stClock = 1 - self.stClock\n GPIO.output(self.pinCLOCK, self.stClock and GPIO.HIGH or GPIO.LOW)\n elif -4 == pin:\n self.stLatch = 1 - self.stLatch\n GPIO.output(self.pinLATCH, self.stLatch and GPIO.HIGH or GPIO.LOW)",
"def toggle_pins(self, pins, seq):\n # set pins, sleep for pulse time, then clear\n # we assume that setting pins takes negligible amount of time\n # fixme: use self.pins instead of arg?\n for i in zip(pins, seq):\n GPIO.output(i[0], i[1])\n #\n time.sleep(float(self.pulse_time))\n GPIO.output(pins, GPIO.LOW)",
"def HC595_input(bits):\n\tfor i in bits:\n\t\tGPIO.output(SER, i) #set state of current pin\n\t\tGPIO.output(SRCLK, GPIO.HIGH)\n\t\ttime.sleep(0.001)\n\t\tGPIO.output(SRCLK, GPIO.LOW)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
trawl through a list of claims and return a width and height of fabric big enough to fit all of them
|
def find_fabric_dimensions(claimlist):
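    # Track the right-most and bottom-most extent reached by any claim.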
cur_width = cur_height = 0
for claim in claimlist:
cur_width = max(cur_width, claim.x + claim.width)
cur_height = max(cur_height, claim.y + claim.height)
return cur_width, cur_height
|
[
"def count_square_claims( input_list ):\n fabric_claims = defaultdict( int )\n\n for line in input_list:\n ( _, x_coord, y_coord, width, height ) = parse_line( line )\n\n for xindex in range( x_coord, x_coord + width ):\n for yindex in range( y_coord, y_coord + height ):\n fabric_claims[(xindex,yindex)] += 1\n\n return fabric_claims",
"def get_annot_chipsizes(ibs, aid_list, ensure=True):\n cid_list = ibs.get_annot_cids(aid_list, ensure=ensure)\n chipsz_list = ibs.get_chip_sizes(cid_list)\n return chipsz_list",
"def populate_fabric_array(fabric, claimlist, overlap_char):\n overlap_count = 0\n good_claims = set()\n for claim in claimlist:\n good_claims.add(claim.id)\n\n for claim in claimlist:\n for offset_x in range(claim.width):\n for offset_y in range(claim.height):\n x = claim.x + offset_x\n y = claim.y + offset_y\n\n if fabric[x][y] is None: # free space, all cool\n fabric[x][y] = claim.id\n else: # not free!\n if fabric[x][y] in good_claims: # invalidate the claim already there\n good_claims.remove(fabric[x][y])\n if claim.id in good_claims: # invalidate this claim\n good_claims.remove(claim.id)\n if fabric[x][y] != overlap_char: # needs to be marked and counted\n fabric[x][y] = overlap_char\n overlap_count += 1\n\n return fabric, overlap_count, good_claims",
"def entSize(ent):\n box = ent['box']\n width, height = max(box[1]), min(box[1])\n return width, height",
"def testSizes(self, aspect_ratio : tuple, sizes : tuple, limit=1000) -> list:\n rx, ry = aspect_ratio\n w, h = sizes\n\n closer = [0, 0]\n for i in range(limit):\n x, y = i * rx, i * ry\n\n conditions = {\n \"width\": abs(w-x) < abs(w-closer[0]),\n \"height\": abs(h-y) < abs(h-closer[1]),\n \"ishigher\": (x > w and y > h)\n }\n if conditions[\"width\"] and conditions[\"height\"] and conditions[\"ishigher\"]:\n closer = [x, y]\n return closer",
"def get_report_height_width(self):\n max_width = 0\n max_height = 0\n for box in self.boxes:\n tmp = box.x_cm + box.width\n if tmp > max_width:\n max_width = tmp\n tmp = box.y_cm + box.height\n if tmp > max_height:\n max_height = tmp\n max_width += self.report_opts.box_shadow\n max_width += self.report_opts.littleoffset\n max_height += self.report_opts.box_shadow\n max_height += self.report_opts.littleoffset\n return (max_width, max_height)",
"def _get_sizes(self) -> int:\n pass",
"def perimRect(length,width):\n return (length+length+width+width)",
"def get_rectangle_lengths(self) -> List[Tuple[float, int]]:\n rects = []\n # Top Deck\n rects.append((float2dec(self.top_width, 2), self.top_sheets))\n # Bottom Deck\n rects.append((float2dec(self.bottom_width, 2), self.bottom_sheets))\n # Webbing\n rects.append((float2dec(self.web_height + (self.top_glue_width - 1.27) + (self.bottom_glue_width - 1.27), 2), 2))\n return rects",
"def _size(self):\n xpixels = ctypes.c_int32()\n ypixels = ctypes.c_int32()\n sdk.GetDetector(ctypes.byref(xpixels), ctypes.byref(ypixels))\n self.width = xpixels.value\n self.height = ypixels.value\n self.pixels = xpixels.value * ypixels.value\n return (xpixels.value, ypixels.value)",
"def get_ccd_widths():\n with open(BIN_PATH_ABS + '/ccd_defs.json') as jsonFile:\n return json.load(jsonFile)",
"def get_number_of_shapes(self) -> int:\n card_map = {1: Count.ONE, 2: Count.TWO, 3: Count.THREE}\n img = self.preprocess_card()\n min_area = img.size * 0.06\n max_area = img.size * 0.9\n threshold = cv2.threshold(\n img, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY\n )[1]\n cnt, hier = cv2.findContours(\n threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n )\n new_cnt: List[Any] = []\n central_coordinates: List[List[int]] = []\n for i, c in enumerate(cnt):\n if max_area > cv2.contourArea(c) > min_area:\n moments = cv2.moments(c)\n center_of_shape_x = int(moments[\"m10\"] / moments[\"m00\"])\n center_of_shape_y = int(moments[\"m01\"] / moments[\"m00\"])\n # if there are recorded found shapes\n all_far = True\n for coord in central_coordinates:\n dist = distance.euclidean(\n (coord[0], coord[1]),\n (center_of_shape_x, center_of_shape_y),\n )\n if dist < 50:\n all_far = False\n break\n if all_far:\n new_cnt.append(c)\n central_coordinates.append(\n [center_of_shape_x, center_of_shape_y]\n )\n # cv2.drawContours(threshold, new_cnt, -1, (255, 255, 255), 3)\n self.shape_contours = new_cnt\n self.card_info[\"count\"] = card_map.get(len(new_cnt))\n return len(new_cnt)",
"def compute_face_size(self, pts):\r\n mm = pts.mean(axis=0).reshape((1, -1))\r\n dis = np.sqrt(np.sum((pts - mm)**2, axis=1))\r\n return np.median(dis)",
"def enclosing_size(sizes, positions):\n rectangles = [R(*size, *pos) for size, pos in zip(sizes, positions)]\n width = max(r.width + r.x for r in rectangles)\n height = max(r.height + r.y for r in rectangles)\n return (width, height), rectangles",
"def get_bounding_box_size(images):\n\n for image in images:\n image = ndimage.median_filter(image,3)\n\n height = max(image.shape[0] for image in images)\n width = max(image.shape[1] for image in images)\n return height, width",
"def find_base_size(self):\n\n# Find longitudinal locations of first two points\n first_UTM = self.shapes[0].points[0][0]\n second_UTM = self.shapes[1].points[0][0]\n\n# Find the difference. This difference in meters is the size of the grid\n grid_size = second_UTM - first_UTM\n\n return grid_size",
"def get_sizes(events, discrete_width, prob, pred=None, num_classes=2):",
"def GetShrunkInceptionShapes(shrink=10):\n input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],\n [4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],\n [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],\n [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],\n [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],\n [4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],\n [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],\n [4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],\n [4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],\n [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],\n [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],\n [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],\n [4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],\n [4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],\n [4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],\n [4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],\n [4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],\n [4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64],\n [4, 147, 147, 24]]\n \n filter_sizes = [[1, 1, 1248, 128], [1, 1, 384, 384], [1, 1, 384, 384],\n [1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],\n [1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],\n [1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],\n [3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],\n [3, 3, 128, 320], [1, 1, 1248, 128], [1, 1, 224, 224],\n [3, 3, 192, 256], [3, 3, 192, 256], [1, 1, 1216, 192],\n [1, 1, 1216, 96], [1, 1, 224, 224], [3, 3, 192, 224],\n [3, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],\n [3, 3, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],\n [1, 1, 1024, 128], [3, 3, 128, 192], [1, 1, 1024, 160],\n [3, 3, 128, 192], [1, 1, 1024, 256], [3, 3, 128, 128],\n [1, 1, 768, 192], [3, 3, 128, 128], [3, 3, 128, 128],\n [1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],\n [3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],\n [1, 1, 256, 64], [3, 3, 48, 64], [1, 1, 256, 48],\n [3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],\n [1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],\n [1, 1, 24, 64]]\n out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],\n [4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],\n [4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384],\n [4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],\n [4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],\n [4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],\n [4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],\n [4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],\n [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],\n [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],\n [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],\n [4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],\n [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],\n [4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],\n [4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],\n [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],\n [4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],\n [4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],\n [4, 147, 147, 64]]\n strides = [\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1\n ]\n\n # Shrink sizes to make the test faster\n for i in input_sizes:\n i[3] //= shrink\n for f in filter_sizes:\n f[2] //= shrink\n f[3] //= shrink\n for o in out_sizes:\n o[3] //= shrink\n # pylint: disable=invalid-name\n VALID = \"VALID\"\n 
SAME = \"SAME\"\n # pylint: enable=invalid-name\n paddings = [\n SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,\n VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,\n SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,\n SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,\n SAME, SAME, SAME, SAME, VALID, VALID, VALID\n ]\n for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,\n paddings):\n yield i, f, o, s, p",
"def __get_group_sizes(self, num_people):\n if num_people <= 5:\n return [num_people]\n else:\n div = 0\n while num_people%4 != 0:\n div+=1\n num_people-=3\n return [3]*div + [4]*(num_people/4)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
return an empty 2d array width x height filled with blank char, with some extra padding
|
def build_empty_array(width, height, blank):
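    # Build a width x height grid of blank cells, addressed as array[x][y].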
array = []
for _ in range(width):
array.append([blank] * height)
return array
|
[
"def get_empty_cell(self):\n return ' ' * self.width",
"def padded_shapes(self):\n return ([None], [None])",
"def _get_empty(self):\n empty_cells = []\n row_i = 0\n column_i = 0\n\n for row in self._grid:\n column_i = 0\n for column in row:\n if column == 0:\n empty_cells.append([row_i, column_i])\n column_i += 1\n row_i += 1\n\n return empty_cells",
"def extend_array(array):\n for row in array:\n while len(row) < 6:\n row.append('')\n while len(array) < 4:\n array.append(['', '', '', '', '', ''])\n return array",
"def get_np_filled(input_data):\n separated = []\n for line in input_data:\n separated.append(list(line))\n\n # Make the data into a uniform square with 0's as filler\n data = np.array([np.array(line) for line in separated])\n lens = np.array([len(i) for i in data])\n mask = np.arange(lens.max()) < lens[:, None]\n out = np.zeros(mask.shape, dtype=data.dtype)\n out[mask] = np.concatenate(data)\n return out",
"def empty_room(size2d):\n \n sizex, sizey = size2d\n room = []\n\n # top wall section\n room.append('w' * sizex)\n # rows with empty space in between\n room += ['w' + ' ' * (sizex - 2) + 'w' for i in range(sizey - 2)]\n # bottom wall section\n room.append('w' * sizex)\n\n return Place(room, w='wall')",
"def makeBoard():\n grid = [[0 for j in range(GRID_SIZE)] for i in range(GRID_SIZE)]\n for row in range(0, GRID_SIZE):\n for col in range(0, GRID_SIZE):\n grid[row][col] = \"#\"\n return grid",
"def create_empty_board(n):\n grid = [] # Create empty grid\n for y in range(n): # Create rows one at a time\n row = []\n for x in range(n): # Build up each row by appending to a list\n row.append(None)\n grid.append(row) # Append the row (list) onto grid\n return grid",
"def get_empty_array(n):\n r = ['' for i in range(n)]\n \n return r",
"def get_blank_matrix(utterance):\r\n length = len(utterance)\r\n matrix = []\r\n for i in range(0,length):\r\n matrix.append([[\"-\"]])\r\n for j in range(0, length-i-1):\r\n matrix[i].append([\"-\"])\r\n return matrix",
"def __create_matrix(self):\n self.matrix = []\n for _ in range(self.size):\n line = []\n for __ in range(self.size):\n line.append(' ')\n self.matrix.append(line)",
"def create_empty_scores():\n return [[0 for dummycol in range(DIM)] \n for dummyrow in range(DIM)]",
"def pad_board(board):\n return np.pad(board, (1,), 'wrap')",
"def empty_image(request):\n channels = request.param\n data_shape = (4, 8, 12, channels)\n return np.zeros(data_shape).astype(ART_NUMPY_DTYPE)",
"def create_board():\n return [None] * 9",
"def empty_square(self):\n size = len(self.grid)\n return [(x, y) for y in range(size) for x in range(size) if self.grid[x][y] == None]",
"def generate_checkerboard_dummy(board_shape,region_size,region_intensities):\n dummy = np.array(region_intensities).reshape(board_shape)\n for axis, size in enumerate(region_size):\n dummy = np.repeat(dummy, size, axis=axis)\n label = np.array(range(len(region_intensities))).reshape(board_shape)\n for axis, size in enumerate(region_size):\n label = np.repeat(label, size, axis=axis)\n return dummy, label",
"def _pad_with_nulls(data, len_):\n return data + (b'\\x00' * (len_ - len(data)))",
"def pad_matrix(M):\n m, n = len(M), len(M[0])\n b = 1\n while b < max(m, n):\n b <<= 1\n M += [[0] * n for _ in range(b - m)]\n for i in range(b):\n M[i] += [0] * (b - n)\n return M"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
map each claim in claimlist to fabric array, with claim id in claimed space and overlap_char for overlapping claims
|
def populate_fabric_array(fabric, claimlist, overlap_char):
overlap_count = 0
good_claims = set()
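    # Assume every claim is overlap-free to start; ids are removed below as conflicts are found.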
for claim in claimlist:
good_claims.add(claim.id)
for claim in claimlist:
for offset_x in range(claim.width):
for offset_y in range(claim.height):
x = claim.x + offset_x
y = claim.y + offset_y
if fabric[x][y] is None: # free space, all cool
fabric[x][y] = claim.id
else: # not free!
if fabric[x][y] in good_claims: # invalidate the claim already there
good_claims.remove(fabric[x][y])
if claim.id in good_claims: # invalidate this claim
good_claims.remove(claim.id)
if fabric[x][y] != overlap_char: # needs to be marked and counted
fabric[x][y] = overlap_char
overlap_count += 1
return fabric, overlap_count, good_claims
|
[
"def count_square_claims( input_list ):\n fabric_claims = defaultdict( int )\n\n for line in input_list:\n ( _, x_coord, y_coord, width, height ) = parse_line( line )\n\n for xindex in range( x_coord, x_coord + width ):\n for yindex in range( y_coord, y_coord + height ):\n fabric_claims[(xindex,yindex)] += 1\n\n return fabric_claims",
"def get_claim_overlaps(claims):\n non_overlapping_id = 0\n overlaps = []\n\n for i, claim1 in enumerate(claims):\n overlapped = False\n for j, claim2 in enumerate(claims):\n if claim1.id != claim2.id:\n overlap = calc_overlap(claim1, claim2)\n if overlap:\n overlapped = True\n overlaps.extend(overlap)\n if not overlapped:\n non_overlapping_id = claims[i].id\n return set(itertools.chain(overlaps)), non_overlapping_id",
"def calc_overlap(claim1, claim2):\n x1 = max(claim1.x, claim2.x)\n x2 = min((claim1.x + claim1.width), (claim2.x + claim2.width))\n y1 = max(claim1.y, claim2.y)\n y2 = min((claim1.y + claim1.height), (claim2.y + claim2.height))\n return [(x, y) for x in xrange(x1, x2) for y in xrange(y1, y2)]",
"def test_claim_fabric_piece(self):\n claimList = [[1,1,1,3,3],[2,2,2,2,2]]\n fabric = pd.DataFrame(int(0), index=np.arange(1, 6), columns=np.arange(6))\n result = pysolve1.claim_fabric_piece(fabric,claimList)\n target = [[0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0], [0, 1, 2, 2, 0, 0], [0, 1, 2, 2, 0, 0], [0, 0, 0, 0, 0, 0]]\n self.assertEqual(result.values.tolist(), target)",
"def find_fabric_dimensions(claimlist):\n cur_width = cur_height = 0\n for claim in claimlist:\n cur_width = max(cur_width, claim.x + claim.width)\n cur_height = max(cur_height, claim.y + claim.height)\n return cur_width, cur_height",
"def assign_party_to_names(party_membership_list_path, namelist):\n \n if not isinstance(namelist, pd.DataFrame):\n raise TypeError(\"Namelist must be a pd.DataFrame\")\n return None\n if not 'CouncillorName' in namelist.columns:\n raise KeyError(\"Namelist must contain a column labeled 'CouncillorName'\")\n return None\n #List of all members with their party\n all_members_cn = pd.read_csv(party_membership_list_path, sep=';',lineterminator='\\n') \n all_members_cn = all_members_cn[['FirstName','LastName','PartyAbbreviation']]\n #Concatenate first and last name\n \n all_members_cn['FullName'] = all_members_cn['LastName'].str.cat(all_members_cn['FirstName'],sep=' ') \n all_members_cn = all_members_cn.drop(columns=['LastName','FirstName'])\n #Remove duplicate \n all_members_cn = all_members_cn[['FullName','PartyAbbreviation']].drop_duplicates(subset=['FullName'])\n namelist_with_parties = namelist.join(all_members_cn.set_index('FullName'), on='CouncillorName')\n \n # Reassign parties if the party has merged with another one\n replace_these_parties = {'PRD':'PLR', 'GB':'PES', 'PLS':'PLR'}\n namelist_with_parties['PartyAbbreviation'] = namelist_with_parties['PartyAbbreviation'].replace(replace_these_parties)\n \n n_no_party = len(namelist_with_parties) - namelist_with_parties['PartyAbbreviation'].count()\n \n if n_no_party != 0:\n print(\"{0} councillors couldn't be associated to a party\".format(n_no_party))\n return namelist_with_parties",
"def assign_party_to_names(party_membership_list_path, namelist):\n\n if not isinstance(namelist, pd.DataFrame):\n raise TypeError(\"Namelist must be a pd.DataFrame\")\n\n if not 'CouncillorName' in namelist.columns:\n raise KeyError(\"Namelist must contain a column labeled 'CouncillorName'\")\n\n #List of all members with their party\n all_members_cn = pd.read_csv(party_membership_list_path, sep=';', lineterminator='\\n')\n all_members_cn = all_members_cn[['FirstName', 'LastName', 'PartyAbbreviation']]\n #Concatenate first and last name\n\n all_members_cn['FullName'] = all_members_cn['LastName'].str.cat(all_members_cn['FirstName'], sep=' ')\n all_members_cn = all_members_cn.drop(columns=['LastName', 'FirstName'])\n #Remove duplicate\n all_members_cn = all_members_cn[['FullName', 'PartyAbbreviation']].drop_duplicates(subset=['FullName'])\n namelist_with_parties = namelist.join(all_members_cn.set_index('FullName'), on='CouncillorName')\n\n # Reassign parties if the party has merged with another one\n replace_these_parties = {'PRD':'PLR', 'GB':'PES', 'PLS':'PLR'}\n namelist_with_parties['PartyAbbreviation'] = namelist_with_parties['PartyAbbreviation'].replace(replace_these_parties)\n\n n_no_party = len(namelist_with_parties) - namelist_with_parties['PartyAbbreviation'].count()\n\n if n_no_party != 0:\n print(\"{0} councillors couldn't be associated to a party\".format(n_no_party))\n return namelist_with_parties",
"def formatting_cid_ocn_clusters(cid_ocn_list):\n # key: cid, value: list of ocns [ocn1, ocn2]\n cid_ocns_dict = {}\n\n if cid_ocn_list:\n for cid_ocn in cid_ocn_list:\n cid = cid_ocn.get(\"cid\")\n ocn = cid_ocn.get(\"ocn\")\n if cid in cid_ocns_dict:\n cid_ocns_dict[cid].append(ocn)\n else:\n cid_ocns_dict[cid] = [ocn]\n\n return cid_ocns_dict",
"def map_snapshot_to_sdc(self, snapshot, sdc):\n\n current_sdcs = snapshot['mappedSdcInfo']\n current_sdc_ids = []\n sdc_id_list = []\n sdc_map_list = []\n sdc_modify_list1 = []\n sdc_modify_list2 = []\n\n if current_sdcs:\n for temp in current_sdcs:\n current_sdc_ids.append(temp['sdcId'])\n\n for temp in sdc:\n if 'sdc_name' in temp and temp['sdc_name']:\n sdc_id = self.get_sdc_id(sdc_name=temp['sdc_name'])\n elif 'sdc_ip' in temp and temp['sdc_ip']:\n sdc_id = self.get_sdc_id(sdc_ip=temp['sdc_ip'])\n else:\n sdc_id = self.get_sdc_id(sdc_id=temp['sdc_id'])\n if sdc_id not in current_sdc_ids:\n sdc_id_list.append(sdc_id)\n temp['sdc_id'] = sdc_id\n if 'access_mode' in temp:\n temp['access_mode'] = get_access_mode(temp['access_mode'])\n if 'bandwidth_limit' not in temp:\n temp['bandwidth_limit'] = None\n if 'iops_limit' not in temp:\n temp['iops_limit'] = None\n sdc_map_list.append(temp)\n else:\n access_mode_dict, limits_dict = check_for_sdc_modification(\n snapshot, sdc_id, temp)\n if access_mode_dict:\n sdc_modify_list1.append(access_mode_dict)\n if limits_dict:\n sdc_modify_list2.append(limits_dict)\n\n LOG.info(\"SDC to add: %s\", sdc_map_list)\n\n if not sdc_map_list:\n return False, sdc_modify_list1, sdc_modify_list2\n\n try:\n changed = False\n for sdc in sdc_map_list:\n payload = {\n \"volume_id\": snapshot['id'],\n \"sdc_id\": sdc['sdc_id'],\n \"access_mode\": sdc['access_mode'],\n \"allow_multiple_mappings\": self.module.params['allow_multiple_mappings']\n }\n self.powerflex_conn.volume.add_mapped_sdc(**payload)\n\n if sdc['bandwidth_limit'] or sdc['iops_limit']:\n payload = {\n \"volume_id\": snapshot['id'],\n \"sdc_id\": sdc['sdc_id'],\n \"bandwidth_limit\": sdc['bandwidth_limit'],\n \"iops_limit\": sdc['iops_limit']\n }\n\n self.powerflex_conn.volume.set_mapped_sdc_limits(**payload)\n changed = True\n return changed, sdc_modify_list1, sdc_modify_list2\n\n except Exception as e:\n errormsg = \"Mapping snapshot %s to SDC %s \" \\\n \"failed with error %s\" % (snapshot['name'],\n sdc['sdc_id'], str(e))\n LOG.error(errormsg)\n self.module.fail_json(msg=errormsg)",
"def get_map_email(email_list: list) -> list:\n email_unique = list(set(email_list))\n map_dict = {email_unique[i]: i for i in range(len(email_unique))}\n return map_dict",
"def get_encounter_aids(ibs, eid_list):\n gids_list = ibs.get_encounter_gids(eid_list)\n aids_list_ = ibsfuncs.unflat_map(ibs.get_image_aids, gids_list)\n aids_list = list(map(utool.flatten, aids_list_))\n #print('get_encounter_aids')\n #print('eid_list = %r' % (eid_list,))\n #print('gids_list = %r' % (gids_list,))\n #print('aids_list_ = %r' % (aids_list_,))\n #print('aids_list = %r' % (aids_list,))\n return aids_list",
"def add_gb_seqrecords_to_cluster_list(cluster_list, gb_filepath):\n # match up seqrecords\n gb_records = SeqIO.index(gb_filepath, 'genbank')\n for clu in cluster_list:\n clu.seq_record = gb_records[clu.sequence_id]\n gb_records.close()\n return cluster_list",
"def atlasOverlap(atlasMap,cbpLabel,A,L):\n \n atlName = atlasMap['name']\n atlFile = atlasMap['file']\n \n atl = nb.load(atlFile)\n atl = atl.darrays[0].data\n atlLabels = list(set(atl).difference({0}))\n print atlLabels\n \n cbp = nb.load(cbpLabel)\n cbp = cbp.darrays[0].data\n cbpLabels = list(set(cbp).difference({0}))\n \n overlaps = np.zeros((L+1,A+1))\n \n cbpIndices = {}.fromkeys(np.arange(1,L))\n atlIndices = {}.fromkeys(np.arange(1,A+1))\n \n for c in cbpLabels:\n cbpIndices[c] = np.where(cbp == c)[0]\n \n for a in atlLabels:\n atlIndices[a] = np.where(atl == a)[0]\n \n print 'Entering loop'\n for c in cbpLabels:\n cbpInds = cbpIndices[c]\n \n for a in atlLabels:\n atlInds = atlIndices[a]\n \n if len(atlInds) and len(cbpInds):\n \n ov = len(set(cbpIndices[c]).intersection(set(atlIndices[a])))\n overlaps[c,a] = (1.*ov)/len(cbpIndices[c])\n else:\n overlaps[c,a] = 0\n \n return [atlName,overlaps]",
"def get_annot_cpaths(ibs, aid_list):\n #utool.assert_all_not_None(aid_list, 'aid_list')\n #assert all([aid is not None for aid in aid_list])\n cfpath_list = preproc_chip.get_annot_cfpath_list(ibs, aid_list)\n return cfpath_list",
"def cmty_mapping(self, otherCmtys, mode=\"overlap\"):\n cmtys0 = self.cmtynodes()\n cmtys0list = list(cmtys0.iterkeys())\n cmtys1 = otherCmtys.cmtynodes()\n cmtys1list = list(cmtys1.iterkeys())\n cmty0_to_cmty1 = dict()\n assigned_cmtys = set()\n #plantedToDetectedMap = dict()\n\n overlaps = numpy.zeros(shape=(len(cmtys0), len(cmtys1)), dtype=int)\n #overlaps[detected, planted]\n \n # the following loop only seems to fill the overlaps array\n for i0, c0 in enumerate(cmtys0list):\n bestScore = 0\n bestcmty = None\n c0nodes = cmtys0[c0]\n for i1, c1 in enumerate(cmtys1list):\n c1nodes = cmtys1[c1]\n overlap = len(c0nodes & c1nodes)\n overlaps[i0,i1] = overlap\n score = 0\n if mode == \"overlap_forward\":\n # Using overlap / planted community size (note:\n # constant divisor, so the division doesn't do\n # anything really.)\n if c1 in assigned_cmtys:\n continue\n if len(c0nodes) > 0:\n score = float(overlap)/len(c0nodes)\n elif mode == \"F1\":\n # Using f-score\n precision = float(overlap)/len(c1nodes)\n recall = float(overlap)/len(c0nodes)\n F1 = 2. * precision * recall / (precision + recall)\n score = F1\n elif mode == \"newman\":\n pass\n #overlaps[c0,c1] = overlap\n else:\n #raise ValueError(\"Unknown mode: %s\"%mode)\n pass #XXXXX\n\n if bestScore is None or score > bestScore:\n bestScore = score\n bestcmty = c1\n #cmty0_to_cmty1[c0] = bestcmty\n assigned_cmtys.add(c1)\n \n if mode == \"overlap\":\n cmty_map = { }\n sorted_overlaps = [ (overlaps[i,j], (i, j))\n for i in xrange(overlaps.shape[0])\n for j in xrange(overlaps.shape[1])\n if overlaps[i,j] > 0]\n #if overlaps.shape[1] == 1: raise\n sorted_overlaps.sort(reverse=True, key=lambda x: x[0])\n planted_used = set()\n detected_used = set()\n for ov, (i,j) in sorted_overlaps:\n if i in planted_used or j in detected_used:\n continue\n cmty_map[i] = j # j is sometimes None XXXX\n if j is None: raise\n planted_used.add(i)\n detected_used.add(j)\n if None in cmtys0list or None in cmtys1list: raise\n for k, v in cmty_map.iteritems():\n cmty0_to_cmty1[cmtys0list[k]] = cmtys1list[v]\n if None in cmty0_to_cmty1.values(): raise\n elif mode == \"newman\":\n cmty0_to_cmty1 = { }\n # Newman algorithm in newman2004fast, footnote #19.\n plant_to_detect = collections.defaultdict(set)\n best_overlap_of_detected = overlaps.argmax(1)\n for i_planted, i_detected in enumerate(best_overlap_of_detected):\n c_planted = cmtys0list[i_planted]\n c_detected= cmtys1list[i_detected]\n plant_to_detect[c_detected].add(c_planted)\n for c_detect, c_planteds in plant_to_detect.iteritems():\n if len(c_planteds) == 1:\n #print \"mapping: c %s to c %s\"%(c_planteds, c_detect)\n cmty0_to_cmty1[c_planteds.pop()] = c_detect\n else:\n #print \"mapping: NOT done: %s all map to %s\"%(\n # sorted(c_planteds), c_detect)\n pass\n #from fitz import interactnow\n return cmty0_to_cmty1",
"def split_in_continious_ranges(coordinatelist):\n return [ (locus[0],locus[-1]+1) for locus in cluster_coordinates(coordinatelist,1) ]",
"def save_officer_and_grant_digital_resources(officer_emaillist_and_position_mappings, unprocessed_officer,\n officer_info):\n logger = Loggers.get_logger()\n position_name = unprocessed_officer.position_name\n phone_number = officer_info[UNPROCESSED_OFFICER_PHONE_NUMBER_KEY]\n full_name = officer_info[UNPROCESSED_OFFICER_NAME__KEY]\n sfu_computing_id = unprocessed_officer.sfu_computing_id\n success, error_message, sfu_info = get_sfu_info(sfu_computing_id)\n if not success:\n return success, error_message\n sfu_email_alias = sfu_info['aliases'][0]\n announcement_emails = []\n if len(officer_info[UNPROCESSED_OFFICER_ANNOUNCEMENT_EMAILS__KEY].strip()) > 1:\n announcement_emails = [\n announcement_email.strip() for announcement_email in\n officer_info[UNPROCESSED_OFFICER_ANNOUNCEMENT_EMAILS__KEY].split(\",\")\n ]\n github_username = officer_info.get(UNPROCESSED_OFFICER_GITHUB_USERNAME__KEY, None)\n gmail = officer_info.get(UNPROCESSED_OFFICER_GMAIL__KEY, None)\n start_date = unprocessed_officer.start_date\n term_obj = unprocessed_officer.term\n course1 = officer_info[UNPROCESSED_OFFICER_COURSE_1__KEY]\n course2 = officer_info[UNPROCESSED_OFFICER_COURSE_2__KEY]\n language1 = officer_info[UNPROCESSED_OFFICER_LANGUAGE_1__KEY]\n language2 = officer_info[UNPROCESSED_OFFICER_LANGUAGE_2__KEY]\n bio = officer_info[UNPROCESSED_OFFICER_BIO__KEY]\n position_mapping_for_new_officer = officer_emaillist_and_position_mappings.filter(position_name=position_name)\n if position_mapping_for_new_officer is None:\n return False, f\"Could not locate the position mapping for {position_name}\"\n position_mapping_for_new_officer = position_mapping_for_new_officer.first()\n position_index = position_mapping_for_new_officer.position_index\n sfu_officer_mailing_list_email = position_mapping_for_new_officer.email\n github_teams_to_add = position_mapping_for_new_officer.officerpositiongithubteammapping_set.all()\n logger.info(\n f\"[about/save_officer_and_grant_digital_resources.py save_officer_and_grant_digital_resources()]\"\n f\" detected {len(github_teams_to_add)} github teams mapped to position {position_name}\"\n )\n\n current_positions = officer_emaillist_and_position_mappings.filter(marked_for_deletion=False)\n officer_has_google_drive_access = position_name in get_position_names(current_positions.filter(google_drive=True))\n logger.info(\n f\"[about/save_officer_and_grant_digital_resources.py save_officer_and_grant_digital_resources()] \"\n f\"{position_name} {'has' if officer_has_google_drive_access else 'does not have' } access to \"\n f\"google drive\"\n )\n officer_is_executive_officer = position_name in get_position_names(\n current_positions.filter(executive_officer=True)\n )\n logger.info(\n f\"[about/save_officer_and_grant_digital_resources.py save_officer_and_grant_digital_resources()] \"\n f\"{position_name} is {'' if officer_is_executive_officer else 'not ' }an executive officer\"\n )\n officer_is_election_officer = position_name in get_position_names(current_positions.filter(election_officer=True))\n logger.info(\n f\"[about/save_officer_and_grant_digital_resources.py save_officer_and_grant_digital_resources()] \"\n f\"{position_name} is {'' if officer_is_election_officer else 'not ' }an election officer\"\n )\n officer_is_council_representative = position_name in get_position_names(\n current_positions.filter(sfss_council_rep=True))\n logger.info(\n f\"[about/save_officer_and_grant_digital_resources.py save_officer_and_grant_digital_resources()] \"\n f\"{position_name} is {'' if 
officer_is_council_representative else 'not ' }the council rep\"\n )\n officer_is_frosh_week_chair = position_name in get_position_names(current_positions.filter(frosh_week_chair=True))\n logger.info(\n f\"[about/save_officer_and_grant_digital_resources.py save_officer_and_grant_digital_resources()] \"\n f\"{position_name} is {'' if officer_is_frosh_week_chair else 'not ' }the frosh week chair\"\n )\n officer_is_discord_manager = position_name in get_position_names(current_positions.filter(discord_manager=True))\n logger.info(\n f\"[about/save_officer_and_grant_digital_resources.py save_officer_and_grant_digital_resources()] \"\n f\"{position_name} is {'' if officer_is_discord_manager else 'not ' }the discord manager\"\n )\n\n pic_path = get_officer_image_path(term_obj, full_name)\n logger.info(\n f\"[about/save_officer_and_grant_digital_resources.py save_officer_and_grant_digital_resources()]\"\n f\" pic_path set to {pic_path}\"\n )\n\n if type(start_date) != datetime.datetime:\n # if taking in the start_date from the form that the new officers have to fill in\n start_date = datetime.datetime.strptime(start_date, OFFICER_START_DATE_FORMAT)\n success, error_message, discord_username, discord_nickname = get_discord_username_and_nickname(\n unprocessed_officer.discord_id\n )\n discord_nickname = discord_nickname if discord_nickname is not None else \"NA\"\n if not success:\n return success, error_message\n logger.info(\n \"[about/save_officer_and_grant_digital_resources.py saving new officer with the following info\"\n f\"\\n\\tposition_name={position_name}\\n\\tposition_index={position_index}\\n\\t\"\n f\"full_name={full_name}\\n\\tsfu_computing_id={sfu_computing_id}\\n\\tsfu_email_alias={sfu_email_alias}\\n\\t\"\n f\"phone_number={phone_number}\\n\\tgithub_username={github_username}\\n\\t\"\n f\"gmail={gmail}\\n\\tcourse1={course1}\\n\\tcourse2={course2}\\n\\tlanguage1={language1}\\n\\t\"\n f\"language2={language2}\\n\\tpic_path={pic_path}\\n\\tterm_obj={term_obj}\\n\\t\"\n f\"sfu_officer_mailing_list_email={sfu_officer_mailing_list_email}\\n\\tstart_date={start_date}\\n\\t\"\n f\"unprocessed_officer.discord_id={unprocessed_officer.discord_id}\\n\\t\"\n f\"discord_username={discord_username}\\n\\tdiscord_nickname={discord_nickname}\"\n )\n officer_obj = Officer(\n position_name=position_name, position_index=position_index, full_name=full_name,\n sfu_computing_id=sfu_computing_id, sfu_email_alias=sfu_email_alias, phone_number=phone_number,\n github_username=github_username, gmail=gmail, course1=course1, course2=course2, language1=language1,\n language2=language2, bio=bio, image=pic_path, elected_term=term_obj,\n sfu_officer_mailing_list_email=sfu_officer_mailing_list_email, start_date=start_date,\n discord_id=unprocessed_officer.discord_id, discord_username=discord_username,\n discord_nickname=discord_nickname\n )\n\n success, error_message = grant_google_drive_access(officer_has_google_drive_access, gmail)\n if not success:\n return success, error_message\n if officer_has_google_drive_access:\n logger.info(\n f\"[about/save_officer_and_grant_digital_resources.py save_officer_and_grant_digital_resources()]\"\n f\" granted google drive access to {gmail} for position {position_name}\"\n )\n\n success, error_message = grant_github_access(officer_obj, github_teams_to_add)\n if not success:\n return success, error_message\n if len(github_teams_to_add) > 0:\n github_teams = \"], [\".join([github_team.get_team_name() for github_team in github_teams_to_add])\n logger.info(\n 
f\"[about/save_officer_and_grant_digital_resources.py save_officer_and_grant_digital_resources()]\"\n f\" granted {officer_obj.github_username} access to github teams [{github_teams}]\"\n f\" for position {position_name}\"\n )\n success, error_message = assign_discord_roles(\n position_mapping_for_new_officer.discord_role_name, unprocessed_officer.discord_id, term_obj,\n role_is_executive_officer=position_mapping_for_new_officer.executive_officer\n )\n if not success:\n return success, error_message\n success, error_message = send_notifications_with_documentation_links(\n officer_obj, officer_is_executive_officer, officer_is_election_officer, officer_is_council_representative,\n officer_is_frosh_week_chair, officer_is_discord_manager\n )\n if not success:\n return success, error_message\n alert_sys_admin_to_update_email_list(\n sfu_officer_mailing_list_email, position_mapping_for_new_officer.bitwarden_access\n )\n if not success:\n return success, error_message\n officer_obj.save()\n for email in announcement_emails:\n AnnouncementEmailAddress(email=email, officer=officer_obj).save()\n logger.info(\"[about/save_officer_and_grant_digital_resources.py save_officer_and_grant_digital_resources()]\"\n \" successfully saved the officer and set their digital resources\")\n return True, None",
"def matchreads(refseq,refbase1num,firstbases,seqs,quals):\n ## by python numbering the first base in refseq is at position 0\n ## need to renumber of firstbases[] values, so the base positions line up\n numbases = len(refseq)\n r = [[i,refseq[i],[],[]] for i in range(numbases)] # make a structure to hold everything we need for each base\n numreads = len(firstbases)\n for j in range(numreads):\n k = firstbases[j]\n for ci,c in enumerate(seqs[j]):\n renum1 = (k+ci) - refbase1num\n if 0 <= renum1 < numbases:\n r[renum1][2].append(c)\n r[renum1][3].append(quals[j][ci])\n return r",
"def align_contigs(scaffold, contigs_data, contigs_seq):\n\n #print \"scaffold:\", scaffold\n #print \"contigs_data:\", contigs_data\n #print \"contigs_seq:\", contigs_seq\n\n scaffold_list = list(scaffold)\n for cd in contigs_data:\n remapped_Ns = 0\n #print cd\n\n sequence = contigs_seq[cd[\"contig_id\"]]\n pos_initial = cd[\"contig_pos_initial\"]\n pos_final = cd[\"contig_pos_final\"]\n orientation = cd[\"orientation\"]\n\n if orientation == '+':\n #print \"orientacion +\"\n contig_position = len(sequence)-1\n scaffold_position = pos_initial + pos_final - 1\n while scaffold_position > pos_initial:\n if sequence[contig_position] == \"N\":\n scaffold_list[scaffold_position] = \"N\"\n remapped_Ns += 1\n contig_position -= 1\n scaffold_position -= 1\n\n elif orientation == '-':\n #print \"orientacion -\"\n contig_position = 0\n scaffold_position = pos_initial + pos_final - 1\n while scaffold_position > pos_initial: \n if sequence[contig_position] == \"N\":\n scaffold_list[scaffold_position] = \"N\"\n remapped_Ns += 1\n scaffold_position -= 1\n contig_position += 1\n\n return \"\".join(scaffold_list)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Pushes the given connection on the stack.
|
def push_connection(redis):
funclog()
_connection_stack.push(patch_connection(redis))
|
[
"def push_connection(connection):\n _connection_stack.push(connection)",
"def use_connection(connection):\n assert len(_connection_stack) <= 1, \\\n 'You should not mix Connection contexts with use_connection().'\n release_local(_connection_stack)\n push_connection(connection)",
"def push(self, context):\n self.stack.append(context)",
"def add(self, connection):\n from_location = connection.from_location\n to_location = connection.to_location\n gv.event_stream.push(NewConnectionEvent(connection))\n if from_location in self._out_graph:\n if connection in self._out_graph[from_location]:\n return\n self._out_graph[from_location].append(connection)\n else:\n self._out_graph[from_location] = [connection]\n if to_location is not None:\n if to_location in self._in_graph:\n self._in_graph[to_location].append(connection)\n else:\n self._in_graph[to_location] = [connection]",
"def push(self,token):\n self.stack.append(token)",
"def stack_push(self, value):\n return self._stack.push(value)",
"def push(stack, value):\n i = Item()\n i.below = stack.top\n i.value = value\n stack.top = i",
"def push(self, x):\n \n self.stack.append(x)\n self.index += 1",
"def push(self):\n _execution_ctx_stack.push(self)",
"def push(self, stream_info, msg):\n if stream_info.category != self._category:\n LOG.error(\"Invalid category for frame\")\n return\n msg = FrameMessage(stream_info.name, stream_info.category, msg)\n self.push_on_queuer(stream_info, msg.to_binary())",
"def Push(self, *args):\n return _snap.TChA_Push(self, *args)",
"def register(self, connection, handshake):\n assert connection.mode in prpc.ConnectionMode\n assert connection.mode != prpc.ConnectionMode.NEW\n # In fact, we should wait for key for some time\n # before raising.\n #\n # However, proper implementation (condition etc)\n # is unfeasibly complicated for now and polling\n # is too ugly.\n peer_uid = identity.Identity.get_uid(handshake)\n if connection.id in self._connections:\n raise ValueError(\n 'connection \\'%s\\' (mode: %s) is already registered' %\n (connection.id, connection.mode)\n )\n if connection.mode == prpc.ConnectionMode.SERVER:\n if peer_uid in self._incoming_by_uid:\n raise ValueError(\n 'incoming connection from peer \\'%s\\' '\n 'is already registered' % (peer_uid,)\n )\n self._incoming_by_uid[peer_uid] = connection\n connection.on_close.append(self._unregister)\n self._connections[connection.id] = connection\n self._log.info(\n 'New connection: id \\'%s\\', mode: %s, peer: \\'%s\\', token: \\'%s\\'',\n connection.id,\n connection.mode.name,\n peer_uid,\n identity.Identity.get_token(handshake)\n )",
"def stack_push(self, thing):\n # increment sp\n sp = self.regs.sp + self.arch.stack_change\n self.regs.sp = sp\n return self.memory.store(sp, thing, endness=self.arch.memory_endness, size=self.arch.bytes)",
"def push(self, data):\n self.__stacknode.items.append(data)",
"def register(self, connection, handshake):\n assert connection.mode in prpc.ConnectionMode\n assert connection.mode != prpc.ConnectionMode.NEW\n # In fact, we should wait for key for some time\n # before raising.\n #\n # However, proper implementation (condition etc)\n # is unfeasibly complicated for now and polling\n # is too ugly.\n if connection.id in self._connections:\n raise ValueError(\n 'connection \\'%s\\' (mode: %s) is already registered' %\n (connection.id, connection.mode)\n )\n connection.on_close.append(self._unregister)\n self._connections[connection.id] = connection\n self._log.info(\n 'New connection: id %s, mode: %s, peer: %s, token: %s',\n connection.id,\n connection.mode.name,\n identity.Identity.get_uid(handshake),\n identity.Identity.get_token(handshake)\n )",
"def push(self, item):\n if item == None:\n raise TypeError(\"Stack will not store an object of NoneType.\")\n self._stack_items.append(item)",
"def pushable(self, pushable):\n\n self._pushable = pushable",
"def addConnection(self, value, state):\n\t\tself.connections.add((value, state))",
"def push(self, value):\n self.__cpu.memory.stack.push(value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Pops the topmost connection from the stack.
|
def pop_connection():
funclog()
return _connection_stack.pop()
|
[
"def pop(self):\r\n if self.is_empty():\r\n raise IndexError(\"Tried to remove the top of an empty stack\")\r\n self.top = self.top.next_node\r\n self.size -=1",
"def pop(self):\n if self.stack != []:\n self.stack.pop()\n return self",
"def remove_from_stack(stack):\n stack.pop()\n return stack",
"def pop(self):\n binary = self.pop_on_queuer()\n if binary is None:\n return None\n return FrameMessage.from_binary(binary)",
"def pop(self):\n #speical case: in case the stack was empty it will raise an exception\n if self.top == None:\n raise StackIsEmptyException('Hey I cannot do the pop, stack is empty !')\n\n #Frist Step: Assign a variable to point to whatever the top pointer is pointing to\n prev = self.top\n\n #Second Step: Point the top pointer to point to whatever the variable next pointer(next pointer of the top pointer) is pointing to\n self.top = prev.next\n\n #Third Step:make the next pointer of the avriable to point to none and return the value of the removed node (prev)\n prev.next = None\n\n return prev.value",
"def pop() -> Any:\n\tglobal stack\n\tif not stack:\n\t\treturn None\n\telse:\n\t\tn = stack[-1]\n\t\tdel stack[-1]\n\t\treturn n",
"async def cmd_clear_tops(self, ctx):\n\n channel = ctx.channel\n\n if str(channel.id) in self.tops:\n self.tops.pop(str(channel.id))\n tops_file = open(self.tops_file, mode='w')\n json.dump(self.tops, tops_file)",
"def pop(self) -> None:\n self.stack.pop()\n self.minStack.pop()\n if self.isEmpty():\n self.globalMinimum = float('inf')\n else:\n self.globalMinimum = self.minStack[-1]",
"def pop_call(self):\n with self.lock:\n return self._pop_call(head=True)",
"def pop_state(self):\n\n def set_head(head):\n \"Set the head of the board to `head`.\"\n self.head = head\n\n dispatch = {\n 'restore': self._restore_from,\n 'move-head': set_head,\n 'take': self.take_stone,\n 'put': self.put_stone,\n 'move': self.move,\n }\n changelog_to_undo = self.changelog \n self.changelog = [] # overwrite\n for change in common.utils.seq_reversed(changelog_to_undo):\n assert change[0] in dispatch\n dispatch[change[0]](change[1])\n self.changelog = self.changelog_stack.pop()",
"def remove_top_card(self):\n if len(self._cards) == 0:\n print('Deck is empty')\n return\n return self._cards.pop(0)",
"def top(self):\n if self.is_empty():\n raise Empty(\"stack is empty\")\n return self._data[-1]",
"def PopItem(self):\n logging.debug(u'Pop on {0:s} queue, port {1:d}'.format(\n self.name, self.port))\n if not self._zmq_socket:\n self._CreateZMQSocket()\n try:\n return self._zmq_socket.recv_pyobj()\n except zmq.error.Again:\n raise errors.QueueEmpty\n except KeyboardInterrupt:\n self.Close(abort=True)\n raise",
"def popped(self) -> Signal:\n return self.popped_signal",
"def pop(self):\n popdfa, popstate, popnode = self.stack.pop()\n newnode = self.convert(self.grammar, popnode)\n if newnode is not None:\n if self.stack:\n dfa, state, node = self.stack[-1]\n node[-1].append(newnode)\n else:\n self.rootnode = newnode\n self.rootnode.used_names = self.used_names",
"def pop(self):\n assert len(self.matrices) > 1, \"Cannot pop last matrix from stack\"\n self.matrices.pop()",
"async def cmd_remove_top(self, ctx, top):\n channel = ctx.channel\n\n if not re.match(r'^-?\\d+$', top):\n await ctx.send(\"Fehler! Der übergebene Parameter muss eine Zahl sein.\")\n else:\n if str(channel.id) in self.tops:\n channel_tops = self.tops.get(str(channel.id))\n\n if 0 < int(top) <= len(channel_tops):\n del channel_tops[int(top) - 1]\n\n if len(channel_tops) == 0:\n self.tops.pop(str(channel.id))\n\n tops_file = open(self.tops_file, mode='w')\n json.dump(self.tops, tops_file)",
"def pop(self, state: 'SoState', prevTopElement: 'SoElement') -> \"void\":\n return _coin.SoWindowElement_pop(self, state, prevTopElement)",
"def pop(self):\n len_c = len(self._consTable) - 1\n len_v = len(self._varTable) - 1\n self._insTable.gen('opr', -1, self._varNum[self._curLayerNum])\n while self._consTable and self._consTable[len_c][1] == self._curLayerNum:\n self._consTable.pop()\n len_c -= 1\n while self._varTable and self._varTable[len_v][1] == self._curLayerNum:\n self._varTable.pop()\n len_v -= 1\n self._varNum.pop()\n self._curLayerNum -= 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the current Redis connection (i.e. the topmost on the connection stack).
|
def get_current_connection():
funclog()
return _connection_stack.top
|
[
"def connection(self):\n return self.get_app().extensions['redis'][self.config_prefix]",
"def get_redis_connection():\n return redis.StrictRedis(host=C.redis_host, port=C.redis_port, db=C.redis_task_db)",
"def getRecurrentConnection(self):\n return self.recurrentConnection",
"def _get_redis_connection():\r\n url = current_app.config.get('REDIS_URL', 'redis://localhost:6379')\r\n return redis.from_url(url)",
"def get_connection(self):\n return self.get_pool().get_connection()",
"def connection(self):\n ctx = stack.top\n\n if ctx is not None:\n if not hasattr(ctx, 'cuttlepool_connection'):\n ctx.cuttlepool_connection = self.get_connection()\n\n con = ctx.cuttlepool_connection\n\n pool = self.get_pool()\n # Ensure connection is open.\n if con._connection is None or not pool.ping(con):\n ctx.cuttlepool_connection.close()\n ctx.cuttlepool_connection = self.get_connection()\n\n return ctx.cuttlepool_connection",
"def db(cls):\r\n return redisco.get_client()",
"def get_redis():\n if 'redis' not in g:\n # connect to redis\n raddr = app.config['REDIS_HOST']\n rhost = raddr.split(':')[0]\n rport = int(raddr.split(':')[-1])\n try:\n g.redis = Redis(host=rhost, port=rport)\n except ConnectionError as e:\n err = f\"Could not connect to Redis: {e}\"\n logger.error(err)\n abort(503, err)\n return g.redis",
"async def get_redis(self) -> Redis:\n async with self._create_pool_lock:\n if self.redis is None:\n self.redis = await self.create_redis_pool()\n return self.redis",
"def get(self):\n try:\n connection = self.queue.get(block=False)\n self.connections -= 1\n # Reset the connection if exceeds request limit\n if (self.REQUEST_LIMIT and\n connection.request_count >= self.REQUEST_LIMIT):\n connection.close()\n connection = HTTPConnectionQueue.connection_object(\n self.address, self.encrypted)\n except Queue.Empty:\n connection = HTTPConnectionQueue.connection_object(self.address,\n self.encrypted)\n return connection",
"def raw_connection(self):\n return self._database.raw_connection()",
"def first_connection(self):\n return self._first_connection",
"def get_query_handler(self) -> \"Redis\":\n import redis\n\n try:\n r = redis.Redis(connection_pool=self.connection_pool)\n r.ping()\n return r\n except redis.exceptions.ConnectionError as r_con_error:\n self.logger.error(\"Redis connection error: \", r_con_error)\n raise",
"def currently_active_ipmi_connection(self):\n return self._cache.current_index",
"def get_connection(self):\n if self._certificate is None:\n return None\n return self.session.get_connection(self._address, self._certificate)",
"def setup_redis_connection(host=\"localhost\"):\n # log.info(\"Module: {} Function: {}\".format(__name__, sys._getframe().f_code.co_name))\n return redis.Redis(host=host)",
"def connection(self):\n\n # TODO: add a \"preferred\" flag to connection, which then\n # overrides the last_seen connection as the default, here\n try:\n return self.connections.latest(\"last_seen\")\n\n # if no connections exist for this reporter (how\n # did that happen?!), then just return None...\n except PersistantConnection.DoesNotExist:\n return None",
"def get_connection(self, read_only):\n with self.lock:\n pool = None\n if read_only:\n pool = self.choose_read_pool()\n if pool == None:\n pool = self.primary_pool\n return pool.get_connection()",
"def getConn(self):\r\n\r\n if not self.conn:\r\n self.conn = pymongo.MongoClient(self.addr)\r\n \r\n return self.conn"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the vdW radii of each atom in a molecule
|
def compute_vdw_radii(
molecule: "Molecule", radii_type: VdWRadiiType = VdWRadiiType.Bondi
) -> unit.Quantity:
if radii_type == VdWRadiiType.Bondi:
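        # Bondi (1964) van der Waals radii in angstrom, keyed by element symbol.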
_BONDI_RADII = {
"H": 1.20,
"C": 1.70,
"N": 1.55,
"O": 1.52,
"F": 1.47,
"P": 1.80,
"S": 1.80,
"Cl": 1.75,
"Br": 1.85,
"I": 1.98,
"He": 1.40,
"Ar": 1.88,
"Na": 2.27,
"K": 1.75,
}
return [
_BONDI_RADII[SYMBOLS[atom.atomic_number]] for atom in molecule.atoms
] * unit.angstrom
else:
raise NotImplementedError()
|
[
"def svr(D):\n m = D.shape[0]\n r = ones(m-1)\n for i in xrange(m-1):\n v = svd(D[i:i+2,:],compute_uv=False)\n r[i] = v[0]/v[1]\n \n return r",
"def get_radii(self, particles):\n num_atoms = particles.get_num_atoms()\n radii = np.zeros((num_atoms+1,), dtype=np.float64)\n\n symbols = particles.get_atomic_symbol()\n atomic_variant = particles.get_atomic_variant()\n residue = particles.get_residue()\n\n table = self.get_ff_cm_dict()\n\n for i in range(num_atoms+1):\n ret_type = self.get_form_factor_atom_type(symbols[i], atomic_variant[i], residue[i])\n\n idx = table[ret_type]\n radii[i] = self.ff_radii[idx]\n\n return radii",
"def compute_hydration_energies(molecules, parameters):\n\n energies = dict() # energies[index] is the computed solvation energy of molecules[index]\n\n platform = openmm.Platform.getPlatformByName(\"Reference\")\n\n index = 0 # DEBUG\n for molecule in molecules:\n print \"molecule %d / %d\" % (index, len(molecules)) # DEBUG\n index += 1 # DEBUG\n \n # Create OpenMM System.\n system = openmm.System()\n for atom in molecule.GetAtoms():\n mass = OEGetDefaultMass(atom.GetAtomicNum())\n system.addParticle(mass * units.amu)\n\n # Add nonbonded term.\n # nonbonded_force = openmm.NonbondedSoftcoreForce()\n # nonbonded_force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)\n # for atom in molecule.GetAtoms():\n # charge = 0.0 * units.elementary_charge\n # sigma = 1.0 * units.angstrom\n # epsilon = 0.0 * units.kilocalories_per_mole\n # nonbonded_force.addParticle(charge, sigma, epsilon)\n # system.addForce(nonbonded_force)\n\n # Add GBVI term\n # gbvi_force = openmm.GBVISoftcoreForce()\n gbvi_force = openmm.GBVIForce() \n gbvi_force.setNonbondedMethod(openmm.GBVIForce.NoCutoff) # set no cutoff\n gbvi_force.setSoluteDielectric(1)\n gbvi_force.setSolventDielectric(78)\n\n # Use scaling method.\n # gbvi_force.setBornRadiusScalingMethod(openmm.GBVISoftcoreForce.QuinticSpline)\n # gbvi_force.setQuinticLowerLimitFactor(0.75)\n # gbvi_force.setQuinticUpperBornRadiusLimit(50.0*units.nanometers)\n\n # Build indexable list of atoms.\n atoms = [atom for atom in molecule.GetAtoms()] \n \n # Assign GB/VI parameters.\n for atom in molecule.GetAtoms(): \n atomtype = atom.GetStringData(\"gbvi_type\") # GBVI atomtype\n charge = atom.GetPartialCharge() * units.elementary_charge\n radius = parameters['%s_%s' % (atomtype, 'radius')] * units.angstroms\n gamma = parameters['%s_%s' % (atomtype, 'gamma')] * units.kilocalories_per_mole \n # gamma *= -1.0 # DEBUG\n lambda_ = 1.0 # fully interacting\n # gbvi_force.addParticle(charge, radius, gamma, lambda_) # for GBVISoftcoreForce\n gbvi_force.addParticle(charge, radius, gamma) # for GBVIForce\n\n # Add bonds.\n for bond in molecule.GetBonds():\n # Get atom indices.\n iatom = bond.GetBgnIdx()\n jatom = bond.GetEndIdx()\n # Get bond length.\n (xi, yi, zi) = molecule.GetCoords(atoms[iatom])\n (xj, yj, zj) = molecule.GetCoords(atoms[jatom])\n distance = math.sqrt((xi-xj)**2 + (yi-yj)**2 + (zi-zj)**2) * units.angstroms\n # Identify bonded atoms to GBVI.\n gbvi_force.addBond(iatom, jatom, distance)\n\n # Add the force to the system.\n system.addForce(gbvi_force)\n \n # Build coordinate array.\n natoms = len(atoms)\n coordinates = units.Quantity(numpy.zeros([natoms, 3]), units.angstroms)\n for (index,atom) in enumerate(atoms):\n (x,y,z) = molecule.GetCoords(atom)\n coordinates[index,:] = units.Quantity(numpy.array([x,y,z]),units.angstroms) \n \n # Create OpenMM Context.\n timestep = 1.0 * units.femtosecond # arbitrary\n integrator = openmm.VerletIntegrator(timestep)\n context = openmm.Context(system, integrator, platform)\n\n # Set the coordinates.\n context.setPositions(coordinates)\n \n # Get the energy\n state = context.getState(getEnergy=True)\n energies[molecule] = state.getPotentialEnergy()\n\n return energies",
"def calculate_energy(self, atoms):\n\n pair_energy = 0.0\n embedding_energy = 0.0\n mu_energy = 0.0\n lam_energy = 0.0\n trace_energy = 0.0\n\n self.total_density = np.zeros(len(atoms))\n if (self.form == 'adp'):\n self.mu = np.zeros([len(atoms), 3])\n self.lam = np.zeros([len(atoms), 3, 3])\n\n for i in range(len(atoms)): # this is the atom to be embedded\n neighbors, offsets = self.neighbors.get_neighbors(i)\n offset = np.dot(offsets, atoms.get_cell())\n\n rvec = (atoms.positions[neighbors] + offset -\n atoms.positions[i])\n\n # calculate the distance to the nearest neighbors\n r = np.sqrt(np.sum(np.square(rvec), axis=1)) # fast\n# r = np.apply_along_axis(np.linalg.norm, 1, rvec) # sloow\n\n nearest = np.arange(len(r))[r <= self.cutoff]\n for j_index in range(self.Nelements):\n use = self.index[neighbors[nearest]] == j_index\n if not use.any():\n continue\n pair_energy += np.sum(self.phi[self.index[i], j_index](\n r[nearest][use])) / 2.\n\n density = np.sum(\n self.electron_density[j_index](r[nearest][use]))\n self.total_density[i] += density\n\n if self.form == 'adp':\n self.mu[i] += self.adp_dipole(\n r[nearest][use],\n rvec[nearest][use],\n self.d[self.index[i], j_index])\n\n self.lam[i] += self.adp_quadrupole(\n r[nearest][use],\n rvec[nearest][use],\n self.q[self.index[i], j_index])\n\n # add in the electron embedding energy\n embedding_energy += self.embedded_energy[self.index[i]](\n self.total_density[i])\n\n components = dict(pair=pair_energy, embedding=embedding_energy)\n\n if self.form == 'adp':\n mu_energy += np.sum(self.mu ** 2) / 2.\n lam_energy += np.sum(self.lam ** 2) / 2.\n\n for i in range(len(atoms)): # this is the atom to be embedded\n trace_energy -= np.sum(self.lam[i].trace() ** 2) / 6.\n\n adp_result = dict(adp_mu=mu_energy,\n adp_lam=lam_energy,\n adp_trace=trace_energy)\n components.update(adp_result)\n\n self.positions = atoms.positions.copy()\n self.cell = atoms.get_cell().copy()\n\n energy = 0.0\n for i in components.keys():\n energy += components[i]\n\n self.energy_free = energy\n self.energy_zero = energy\n\n self.results['energy_components'] = components\n self.results['energy'] = energy",
"def radii(self):\n return array([self.graph[u][v]['conductivity']\n for u, v in self.edgeset])",
"def calculate_element_volumes_w_dem(self, depth_at_quads):\n if self.logger is not None:\n self.logger.info(\"Calculating element volumes with DEM elevations...\")\n quad_tri = GaussianQuadratureTri3(self.order_quadrature_tri_dem)\n quad_quad = GaussianQuadratureQuad4(self.order_quadrature_quad_dem)\n volumes = np.empty((self.mesh.n_elems()))\n row_i = 0\n for elem_i, node_idx in enumerate(self.mesh.elems):\n n_nodes = len(node_idx)\n nodes = self.mesh.nodes[node_idx]\n if n_nodes == 3:\n n_quad_pts = len(quad_tri.quad_wts)\n volumes[elem_i] = (quad_tri.quad_wts * quad_tri.jacobian_det(nodes)).dot(depth_at_quads[row_i:row_i + n_quad_pts])\n elif n_nodes == 4:\n n_quad_pts = len(quad_quad.quad_wts)\n volumes[elem_i] = (quad_quad.quad_wts * quad_quad.jacobian_det(nodes)).dot(depth_at_quads[row_i:row_i + n_quad_pts])\n else:\n raise ValueError(\"Not supported element type\")\n row_i += n_quad_pts\n return volumes",
"def wc_slanted_radial_velocity_4_fft(ds: xr.Dataset):\n\n if len(np.unique(ds.elevation)) > 1:\n raise TypeError(\"This dataset contains multiple elevations\")\n\n if 90 in np.unique(ds.elevation):\n raise ValueError(\n \"90 degrees elevation: not valid for retrieving horizontal wind\"\n )\n\n if (\n len(ds[\"azimuth\"].where(ds[\"azimuth\"] == ds[\"azimuth\"][0], drop=True))\n < 2\n ):\n raise ValueError(\n \"Not enough data to estimate the one scan cylce duration\"\n )\n\n # initializing storage ds\n radial_velocities = xr.Dataset()\n\n # identify the mean duration of a complete scan cycle\n half_cycle = (\n ds.where(ds.azimuth == ds.azimuth[0], drop=True)\n .time.diff(dim=\"time\")\n .mean()\n .values\n )\n half_cycle = pd.to_timedelta(half_cycle).seconds / 2\n\n # unique azimuths\n azimuth = np.unique(ds.azimuth)\n\n for azm in azimuth:\n\n # selecting the reference azimuthal slice\n azimuth_left_over = azimuth[azimuth != azm]\n tmp_reference_slice = ds[\"radial_wind_speed\"].where(\n ds[\"azimuth\"] == azm, drop=True\n )\n tmp_reference_slice = (\n tmp_reference_slice.drop([\"azimuth\"])\n .assign_coords({\"azimuth\": azm})\n .expand_dims([\"azimuth\"])\n )\n radial_velocities = xr.merge([radial_velocities, tmp_reference_slice])\n\n for azm_left in azimuth_left_over:\n\n tmp_azimuth_slice = ds[\"radial_wind_speed\"].where(\n ds[\"azimuth\"] == azm_left, drop=True\n )\n interp_azimuth_slice = tmp_azimuth_slice.reindex(\n time=tmp_reference_slice.time,\n method=\"nearest\",\n tolerance=f\"{half_cycle}s\",\n )\n tmp_slice = (\n interp_azimuth_slice.drop([\"azimuth\"])\n .assign_coords({\"azimuth\": azm_left})\n .expand_dims([\"azimuth\"])\n )\n radial_velocities = xr.merge([radial_velocities, tmp_slice])\n\n radial_velocities[\"azimuth\"].attrs = ds[\"azimuth\"].attrs\n\n return radial_velocities",
"def compute_virial_quantities(dsname, wdir = './', *args, **kwargs):\n data_ds = yt.load(wdir + dsname + '/' + dsname)\n halos_ds = yt.load(wdir + ROCKSTAR_OUTPUT_PREFIX + dsname + '/halos_0.0.bin')\n\n hc = HaloCatalog(data_ds = data_ds, halos_ds = halos_ds,\n output_dir = wdir + HALOCATALOG_PREFIX + str(data_ds))\n hc.add_filter('quantity_value', 'particle_mass', '>', 1E4, 'Msun')\n\n if ('enzo','Density') in data_ds.field_list:\n mass_field = 'matter_mass'\n radius_field = \"radius\"\n else:\n # DM only simulation\n mass_field = ('all',\"particle_mass\")\n radius_field = ('all','particle_radius')\n \n hc.add_recipe(\"my_calculate_virial_quantities\", [radius_field, mass_field ], radius_field=radius_field)\n hc.create()\n\n return",
"def calc_radii(self):\r\n\r\n # First, calculate the bending radius\r\n n_sides = len(self.edges)\r\n r_bend = 0\r\n counter = 0\r\n for j in self.edges:\r\n if j is not NotImplemented:\r\n sum = 0\r\n counter += 1\r\n for i in j.circles:\r\n sum = sum + i.radius\r\n\r\n r_bend = r_bend + sum / len(j.circles)\r\n\r\n r_bend = r_bend / counter\r\n\r\n # Then calculate the radius of the circumscribed circle through the theoretical edges.\r\n r_circum = 0\r\n counter = 0\r\n for i in self.edges:\r\n if i is not NotImplemented:\r\n counter += 1\r\n r_crnt_down = (i.theoretical_edge.xy_for_z(0)[0] ** 2 + i.theoretical_edge.xy_for_z(0)[1] ** 2)**0.5\r\n r_crnt_up = (i.theoretical_edge.xy_for_z(700)[0] ** 2 + i.theoretical_edge.xy_for_z(700)[1] ** 2)**0.5\r\n r_circum = r_circum + r_crnt_down + r_crnt_up\r\n\r\n r_circum = r_circum / (2 * counter)\r\n theta = np.pi/n_sides\r\n r_c_measured = (n_sides * (r_circum * np.sin(theta) - r_bend*np.tan(theta))/np.pi) + r_bend\r\n\r\n self.r_circle = r_c_measured\r\n self.r_bend = r_bend",
"def set_bond_radii(atoms, bond_type='bond'):\n if atoms.info is None:\n atoms.info = {}\n if 'bond_radii' in atoms.info:\n r_a = atoms.info['bond_radii']\n else:\n r_a = np.ones(len(atoms))\n \n for atom in atoms:\n if bond_type == 'covalent':\n r_a[atom.index] = (pyTEMlib.crystal_tools.electronFF[atom.symbol]['bond_length'][0])\n else:\n r_a[atom.index] = (pyTEMlib.crystal_tools.electronFF[atom.symbol]['bond_length'][1])\n atoms.info['bond_radii'] = r_a\n return r_a",
"def UnitVectorCalculator(atom1, atom2, molecule):\n vector1 = molecule[1][atom1]\n vector2 = molecule[1][atom2]\n lenght = distanceMatrix[atom1, atom2]\n return (vector2 - vector1)/lenght",
"def radiusOfCells(self, cells, variables):\n return (cells[variables['total_volume']] * (3 / ( 4 * math.pi))) ** (1 / 3)",
"def radial_force(self):\n return np.sum([t.radial_force_of_filament() for t in self.thick], 0)",
"def get_mol_weights_from_mol_list(mol_list):\n return np.array([Chem.rdMolDescriptors.CalcExactMolWt(m) for m in mol_list])",
"def calc_vswr(ant,f,r):\n\n# fwdcalfac = get_calfac(f)\n# revcalfac = get_calfac(r)\n\n f=abs(DAQC.getADC(0,f))\n r=abs(DAQC.getADC(0,r))\n# TODO For normalizing elements when true test rig is ready\n# f=f-fwdcalfac\n# r=r-revcalfac\n# Need to divide voltage by 50 ohm to get current, multiply current times voltage to get watts\n x=abs(1 + math.sqrt(rm_utils.safe_div(r,f)))\n y=abs(1 - math.sqrt(rm_utils.safe_div(r,f)))\n swr=round(rm_utils.safe_div(x,y), 3)\n if swr > 3.0:\n logger.warning(\"calc_vswr: Ant Height: {} SWR: \\033[91m {} \\033[0m\".format(ant,swr))\n if DEBUG:\n print(\"Ant Height: {} SWR: \\033[91m {} \\033[0m\".format(ant,swr))\n else:\n if DEBUG:\n print(\"Ant Height: {} SWR: \\033[92m {} \\033[0m\".format(ant,swr))\n return swr",
"def fixed_radii_for_Nweights():\n\n # 1. D < 1.0 micron\n # CLASSIC dry radii [microns] - Bellouin et al 2011\n rn_pmlt1p0_microns = {'(NH4)2SO4': 9.5e-02, # accumulation mode\n 'NH4NO3': 9.5e-02, # accumulation mode\n 'NaCl': 1.0e-01, # generic sea salt (fine mode)\n 'CORG': 1.2e-01, # aged fosil fuel organic carbon\n 'CBLK': 3.0e-02} # soot\n\n rn_pmlt1p0_m={}\n for key, r in rn_pmlt1p0_microns.iteritems():\n rn_pmlt1p0_m[key] = r * 1e-06\n\n # 2. D < 10 micron\n\n # pm1 to pm10 median volume mean radius calculated from clearflo winter data (calculated volume mean diameter / 2.0)\n rn_pm10_microns = 0.07478 / 2.0\n # turn units to meters and place an entry for each aerosol\n rn_pm10_m = {}\n for key in rn_pmlt1p0_m.iterkeys():\n rn_pm10_m[key] = rn_pm10_microns * 1.0e-6\n\n # # old 2. D < 10 micron\n # # pm1 to pm10 median volume mean radius calculated from clearflo winter data (calculated volume mean diameter / 2.0)\n # pm1t10_rv_microns = 1.9848902137534531 / 2.0\n # # turn units to meters and place an entry for each aerosol\n # pm1t10_rv_m = {}\n # for key in rn_pmlt1p0_m.iterkeys():\n # pm1t10_rv_m[key] = pm1t10_rv_microns * 1.0e-6\n\n\n # 3. D < 2.5 microns\n # calculated from Chilbolton data (SMPS + GRIMM 2016)\n rn_pmlt2p5_microns = 0.06752 / 2.0\n\n rn_pmlt2p5_m = {}\n for key in rn_pmlt1p0_m.iterkeys():\n rn_pmlt2p5_m[key] = rn_pmlt2p5_microns * 1.0e-6\n\n # 4. 2.5 < D < 10 microns\n # calculated from Chilbolton data (SMPS + GRIMM 2016)\n rn_2p5_10_microns = 2.820 / 2.0\n\n rn_2p5_10_m = {}\n for key in rn_pmlt1p0_m.iterkeys():\n rn_2p5_10_m[key] = rn_2p5_10_microns * 1.0e-6\n\n\n return \\\n rn_pmlt1p0_microns, rn_pmlt1p0_m, \\\n rn_pm10_microns, rn_pm10_m, \\\n rn_pmlt2p5_microns, rn_pmlt2p5_m, \\\n rn_2p5_10_microns, rn_2p5_10_m",
"def harmonic_bond(conf, params, box, bond_idxs, param_idxs):\n ci = conf[bond_idxs[:, 0]]\n cj = conf[bond_idxs[:, 1]]\n dij = distance(ci, cj, box)\n kbs = params[param_idxs[:, 0]]\n r0s = params[param_idxs[:, 1]]\n energy = np.sum(kbs/2 * np.power(dij - r0s, 2.0))\n return energy",
"def dRij_dRm_norm(Rij, ijm_list):\n dRij_m = np.zeros([len(Rij), 3])\n\n R1ij = np.linalg.norm(Rij, axis=1).reshape([len(Rij), 1])\n l1 = (ijm_list[:, 2] == ijm_list[:, 0])\n dRij_m[l1, :] = -Rij[l1] / R1ij[l1]\n l2 = (ijm_list[:, 2] == ijm_list[:, 1])\n dRij_m[l2, :] = Rij[l2] / R1ij[l2]\n l3 = (ijm_list[:, 0] == ijm_list[:, 1])\n dRij_m[l3, :] = 0\n\n return dRij_m",
"def charge_density(potential):\n result = np.zeros_like(potential)\n\n lengthx, lengthy = potential.shape\n\n for i in range(lengthx):\n for j in range(lengthy):\n v = 0\n if i > 0:\n v += potential[i - 1, j]\n v -= potential[i, j]\n if i < lengthx - 1:\n v += potential[i + 1, j]\n v -= potential[i, j]\n if j > 0:\n v += potential[i, j - 1]\n v -= potential[i, j]\n if j < lengthy - 1:\n v += potential[i, j + 1]\n v -= potential[i, j]\n\n result[i, j] = v\n \n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns whether each atom and bond in a molecule is aromatic or not according to the MDL aromaticity model.
|
def apply_mdl_aromaticity_model(
molecule: "Molecule",
) -> Tuple[Dict[int, bool], Dict[Tuple[int, int], bool]]:
try:
return _oe_apply_mdl_aromaticity_model(molecule)
except (
ModuleNotFoundError,
MissingOptionalDependencyError,
ToolkitUnavailableException,
):
return _rd_apply_mdl_aromaticity_model(molecule)
|
[
"def test_aromaticity_perception_azulene(self):\n mol = Molecule(smiles='c1cccc2cccc2c1')\n aromatic_atoms, aromatic_bonds = mol.get_aromatic_rings()\n self.assertEqual(len(aromatic_atoms), 0)\n self.assertEqual(len(aromatic_bonds), 0)",
"def test_aromatic_naphthalene(self):\n m = Molecule().from_smiles('C12C(C=CC=C1)=CC=CC=2')\n isomers = m.generate_resonance_structures()\n self.assertTrue(any(isomer.is_aromatic() for isomer in isomers))",
"def test_aromaticity_perception_tetralin(self):\n mol = Molecule(smiles='c1ccc2c(c1)CCCC2')\n aromatic_atoms, aromatic_bonds = mol.get_aromatic_rings()\n self.assertEqual(len(aromatic_atoms), 1)\n self.assertEqual(len(aromatic_bonds), 1)\n for bond in aromatic_bonds[0]:\n self.assertTrue(bond.atom1 in aromatic_atoms[0] and bond.atom2 in aromatic_atoms[0])",
"def test_aromaticity_perception_biphenyl(self):\n mol = Molecule(smiles='c1ccc(cc1)c2ccccc2')\n aromatic_atoms, aromatic_bonds = mol.get_aromatic_rings()\n self.assertEqual(len(aromatic_atoms), 2)\n self.assertEqual(len(aromatic_bonds), 2)\n for index in range(len(aromatic_atoms)):\n for bond in aromatic_bonds[index]:\n self.assertTrue(bond.atom1 in aromatic_atoms[index] and bond.atom2 in aromatic_atoms[index])",
"def test_aromatic_cyclohexane(self):\n m = Molecule().from_smiles('C1CCCCC1')\n isomers = m.generate_resonance_structures()\n self.assertFalse(any(isomer.is_aromatic() for isomer in isomers))",
"def test_aromaticity_perception_furan(self):\n mol = Molecule(smiles='c1ccoc1')\n aromatic_atoms, aromatic_bonds = mol.get_aromatic_rings()\n self.assertEqual(len(aromatic_atoms), 0)\n self.assertEqual(len(aromatic_bonds), 0)",
"def test_aromaticity_perception_benzene(self):\n mol = Molecule(smiles='c1ccccc1')\n aromatic_atoms, aromatic_bonds = mol.get_aromatic_rings()\n self.assertEqual(len(aromatic_atoms), 1)\n self.assertEqual(len(aromatic_bonds), 1)\n for bond in aromatic_bonds[0]:\n self.assertTrue(bond.atom1 in aromatic_atoms[0] and bond.atom2 in aromatic_atoms[0])",
"def test_aryl_radical_true(self):\n mol = Molecule(smiles='[c]1ccccc1')\n self.assertTrue(mol.is_aryl_radical())",
"def test_aromatic_benzene(self):\n m = Molecule().from_smiles('C1=CC=CC=C1')\n isomers = m.generate_resonance_structures()\n self.assertTrue(any(isomer.is_aromatic() for isomer in isomers))",
"def _can_use_ani2x(molecule: OFFMolecule) -> bool:\n mol_elements = set([atom.symbol for atom in molecule.atoms])\n ani2x_elements = {\"H\", \"C\", \"N\", \"O\", \"S\", \"F\", \"Cl\"}\n if mol_elements - ani2x_elements:\n # if there is any difference in the sets or a net charge ani2x can not be used.\n return False\n return True",
"def oopAnglesCalc(atoms, molecule):\n if distanceMatrix[atoms[0]][atoms[3]] >= 4 or distanceMatrix[atoms[1]][atoms[3]] >= 4 or distanceMatrix[atoms[2]][atoms[3]] >= 4:\n return \"too far\"\n # define part by part, for clarity\n cross_product = np.cross(UnitVectorCalculator(atoms[3], atoms[1], molecule), UnitVectorCalculator(atoms[3], atoms[2], molecule))\n \n dot_product = np.dot(cross_product, UnitVectorCalculator(atoms[3], atoms[0], molecule))\n\n bond_angle = angleCalculator((atoms[1], atoms[3], atoms[2]), molecule)\n\n return np.arcsin(dot_product/np.sin(bond_angle))",
"def is_metal(atom):\n\n metal_atomic_numbers = chain(\n range(21, 31),\n range(39, 49),\n range(72, 81),\n )\n return atom.get_atomic_number() in metal_atomic_numbers",
"def GetAromaticRings(mol):\n edge = []\n aromaticrings = []\n for bond in mol.GetBonds():\n edge.append(((bond.GetBeginAtomIdx(),bond.GetEndAtomIdx())))\n data = py_rdl.Calculator.get_calculated_result(edge)\n for urf in data.urfs:\n rcs = data.get_relevant_cycles_for_urf(urf)\n for rc in rcs:\n aromatic = [mol.GetAtomWithIdx(node).GetIsAromatic() for node in rc.nodes]\n if all(aromatic):\n aromaticrings.append(rc.nodes)\n return aromaticrings",
"def test_aryl_radical_birad(self):\n mol = Molecule(smiles='[CH2]c1c[c]ccc1')\n self.assertFalse(mol.is_aryl_radical())",
"def consistance_arcs(self):\n\n refaire = False\n for c in self.contraintes:\n if c.dimension() == 2 and c.reviser():\n refaire = True\n\n if refaire:\n self.consistance_arcs()",
"def is_non_standard_AA(resid):\n if resid in AA_MONOMERS.keys():\n return not resid in AA_CODES.values()\n else:\n print(\"The residue %s is unknown.\" %resid)",
"def _valid_cdr3(cdr3):\n amino_acids = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']\n valid = np.all([aa in amino_acids for aa in cdr3])\n return valid",
"def check_mol_only_has_atoms(mol, accept_atom_list):\n atom_symbol_list = [atom.GetSymbol() for atom in mol.GetAtoms()]\n return all(atom in accept_atom_list for atom in atom_symbol_list)",
"def truth(atom):\n if len(atom[0]) == 0 and len(atom[1]) == 0:\n return True \n else:\n sets_condition = set(atom[0])\n sets_conclusion = set(atom[1])\n if len(set.intersection(sets_condition,sets_conclusion)) != 0:\n return True\n else:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Lookup key in collection; if not found return if_none (or None)
|
def lookup(collection, key, if_none=None):
if key in collection:
return collection[key]
else:
return if_none
|
[
"def find_in_collection_by_name(self, collection_or_key, name):\n if type(collection_or_key) is str:\n collection_or_key = self.graph.get_collection(collection_or_key)\n for v in collection_or_key:\n if v.name == name:\n return v\n name += ':0'\n for v in collection_or_key:\n if v.name == name:\n return v\n return None",
"def get_item(self, key):\n\t\tif not key in self.items: return None\n\t\treturn self.items[ key ]",
"def value_or_none(dictionary: dict, key: str):\n return dictionary[key] if key in dictionary else None",
"def get_elem(self, key):\n\n the_hash = self._hash(key)\n\n for index in self._index_gen(the_hash):\n\n # Is this location occupied?\n contents = self.data[index]\n if contents is None:\n\n # This key has not been entered into the hash table\n return None\n\n # There are contents, but do they match the hash and key?\n elif contents[0] == the_hash and contents[1] == key:\n # We found the desired value!\n return contents[2]\n\n print(\"WARNING: We couldn't find the key or an empty spot\")\n return None",
"async def get(self, collection, key):\n logging.debug(\"Getting %s from arangodb\", key)\n coll = await self._get_collection(collection)\n\n try:\n return coll[key]\n except DocumentNotFoundError:\n return None",
"def cache_get(self, key=None, collection: str = None, target_value_name: str = None):\n try:\n return dict(getattr(self, f'{collection}').find_one({\"_id\": key}))[target_value_name]\n except ConnectionFailure:\n n = 0\n result = None\n while not result or n == 5:\n result = dict(getattr(self, f'{collection}').find_one({\"_id\": key}))[target_value_name]\n n += 1\n return result\n except TypeError:\n return None",
"def _find_resource(key: str, collection: Collection) -> Optional[CollectionRowBlock]:\n resource = None\n\n key_lowered = key.lower()\n for block in collection.get_rows():\n if hasattr(block, \"title\") and block.title.lower().find(key_lowered) > -1:\n resource = block\n break\n\n return resource",
"def get(self, key: str):\r\n\r\n index = self.hash(key)\r\n\r\n if self.array[index] is None:\r\n return None\r\n else:\r\n # Loop through all the key/value pairs at this index, and find if\r\n # our key exists. If it does, return the value.\r\n\r\n for kvp in self.array[index]:\r\n if kvp[0] == key:\r\n return kvp[1]\r\n\r\n return None",
"def key_safe_data_access(data, key):\n try:\n return data[key]\n except (KeyError, IndexError):\n return None",
"def try_find_tag(self, tags_list, tag_key):\n if tags_list is None or tags_list.keys() is None:\n return None\n return next((tags_list[key] for key in tags_list.keys() if key == tag_key), None)",
"def __contains__(self, key):\n query = select([exists().where(self.store.c.key == key)])\n result = self.conn.execute(query)\n return result.fetchone()[0]",
"def __getitem__(self, key):\n hash_val = self._hash(key)\n if self.table[hash_val] != self.defVal and (isinstance(self.table[hash_val], tuple) and \n self.table[hash_val][0] == key and\n self.table[hash_val][2] == True):\n return self.table[hash_val][1]\n else:\n key_found = False\n iter_count = 0\n while not key_found:\n if hash_val >= self.capacity:\n hash_val = 0\n if self.table[hash_val] == self.defVal:\n \tbreak\n if self.table[hash_val][0] == key:\n if self.table[hash_val][2] == True:\n return self.table[hash_val][1]\n hash_val += 1\n iter_count += 1\n return self.defVal",
"def find(self, key):\n node=self.head\n while node:\n if node.data==key:\n return node\n if node.next:\n node=node.next\n return None",
"def lookup(index, keyword):\n\tif keyword in index:\n\t\treturn index[keyword]\n\treturn None",
"def find(self, key):\n _, current, _ = self._linear_search(key)\n\n if current is None:\n value = None\n else:\n value = copy.deepcopy(current._value)\n return value",
"def get(self, key):\n return next(\n requirement for requirement in self if requirement.key == key\n )",
"def __getitem__(self, key):\n\n # check for slycat path\n self.check_fs_path()\n\n # is item in cache?\n if key in self:\n\n # get hash and value\n digest = self.digest_hash(key)\n value = self._loaded[digest].value\n expired = self._loaded[digest].expired()\n\n # if expired, erase and return None\n if expired:\n self.expire(digest)\n return None\n\n else:\n return None\n\n # cherrypy.log.error(\"[CACHE] Retrieving %s from cache.\" % str(digest))\n\n return value",
"def _lookup(key: str, *dicts: Dict[str, Any]) -> Any:\n for dict1 in dicts:\n if key in dict1:\n return dict1.get(key)\n return None",
"def __getitem__( self, key ):\n return self.read( key=key, default=None, raiseOnError=True )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
assertAlmostEqual checks float values
|
def test_assert_almost_equal(self):
self.assertAlmostEqual(1.0, 1.00000001)
#self.assertAlmostEqual(1.0, 1.00000009)
self.assertAlmostEqual(1.0, 1.0000001, places=6)
self.assertAlmostEqual(1.0, 1.001, delta=.01)
#self.assertAlmostEqual(1.0, 1.1, msg="Not close enough.")
|
[
"def float_equal(a, b):\n try:\n return math.fabs(a - b) < CMP_THR\n except TypeError:\n return False",
"def assertFloatsEqual(testCase, lhs, rhs, **kwargs):\n return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs)",
"def _float_equal(fn1, fn2, epsilon=1e-8):\n fn1 = float(fn1)\n fn2 = float(fn2)\n asum = abs(fn1) + abs(fn2)\n diff = abs(fn1 - fn2)\n if asum < epsilon:\n return True\n else:\n return (diff / asum) < epsilon",
"def floats_equal(a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)",
"def test_assertFloatEqualRel_equal(self):\n for first, second in self.within_1e6_rel_pairs:\n try:\n self.assertFloatEqualRel(first, second)\n except:\n raise AssertionError, \\\n \"unit_test.assertFloatEqualRel failed on input %s and %s\" \\\n % (`first`, `second`)",
"def test_assert_not_almost_equal(self):\n self.assertNotAlmostEqual(3.1, 3.3)",
"def _float_almost_equal(float1, float2, places=7):\n\n if round(abs(float2 - float1), places) == 0:\n return True\n\n return False",
"def testExpValueFloat(self):\n\n self.assertAlmostEqual(expValue(3.16227,2.0), 9.99995, places=3)",
"def testSqrtValueFloat(self):\n\n self.assertAlmostEqual(sqrtValue(10), 3.16227, places=3)",
"def testDivValueFloat(self):\n\n self.assertAlmostEqual(divValue(6.66666,2.0), 3.33333, places=3)",
"def test_significant_places_ok(self):\n self.assert_almost_equal_significant(.000541, .000542, places=1)",
"def test_function_float(function, expected, tolerance=0.001, **kwargs):\n # Evaluate function\n observed = function(**kwargs)\n\n result = numpy.allclose(observed, expected, atol=tolerance*expected)\n if result:\n message = function.__name__ + \": OK\"\n else:\n message = function.__name__ + \": FAIL\"\n message += (\"\\n -->\" + str(observed) + \" != \" + str(expected) +\n \" to within \" + str(tolerance*100) + \"%\")\n return result, message",
"def test_convert_to_float_success(self):\r\n for value in self.price_values:\r\n self.assertIsInstance(hw.Car._convert_to_float(value), float)\r\n self.assertEqual(hw.Car._convert_to_float(value), float(value))",
"def test_guess_correct():\n\n assert update_guess(1, 0.3, 0.1, 0.7) >= 0.3\n assert update_guess(1, 0.1, 0.3, 0.7) >= 0.1\n assert update_guess(1, 0.01, 0.01, 0.01) >= 0.01\n assert update_guess(1, 0.49, 0.49, 0.99) >= 0.49",
"def test_significant_places_fail(self):\n with self.assertRaises(AssertionError):\n self.assert_almost_equal_significant(.000541, .000542, places=2)",
"def test_ge(self):\r\n f1 = Fraction(1, -4)\r\n f2 = Fraction(3, 4)\r\n f3 = Fraction(-2, 8)\r\n self.assertTrue(f1 >= f1)\r\n self.assertTrue(f2 >= f1)\r\n self.assertTrue(f1 >= f3)\r\n self.assertFalse(f1 >= f2)\r\n self.assertFalse(f1 >= Fraction(1, 4))",
"def assert_equal(a: float, b: float) -> None:\n msg = \"{} != {}\".format(a, b) \n assert a == b, msg",
"def approx_equals(a, b):\n return (a - b) < 1.5e-16",
"def assert_almost_equal(a: Any, b: Any, tolerance: float) -> None:\n c = a - b\n msg = \"diff = {}\"\n try:\n assert abs(float(c)) < tolerance, msg.format(c)\n except TypeError:\n c_inf = numpy.linalg.norm(c, numpy.inf)\n assert c_inf < tolerance, msg.format(c_inf)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
assertNotAlmostEqual is (not assertAlmostEqual)
|
def test_assert_not_almost_equal(self):
self.assertNotAlmostEqual(3.1, 3.3)
|
[
"def test_assert_almost_equal(self):\n self.assertAlmostEqual(1.0, 1.00000001)\n #self.assertAlmostEqual(1.0, 1.00000009)\n self.assertAlmostEqual(1.0, 1.0000001, places=6)\n self.assertAlmostEqual(1.0, 1.001, delta=.01)\n #self.assertAlmostEqual(1.0, 1.1, msg=\"Not close enough.\")",
"def test_assertNotEqual_numbers(self):\n try:\n self.assertNotEqual(0, 0.0)\n except:\n message = str(exc_info()[1])\n self.assertEqual(message,\n 'Observed 0 and expected 0.0: shouldn\\'t test equal')\n else:\n raise AssertionError, \\\n \"unit_test.assertNotEqual failed on input %s and %s\" \\\n % (`first`, `second`)",
"def assert_almost_equal(a: Any, b: Any, tolerance: float) -> None:\n c = a - b\n msg = \"diff = {}\"\n try:\n assert abs(float(c)) < tolerance, msg.format(c)\n except TypeError:\n c_inf = numpy.linalg.norm(c, numpy.inf)\n assert c_inf < tolerance, msg.format(c_inf)",
"def test_notequal(self):\r\n f1 = Fraction(1, 3)\r\n f2 = Fraction(1, 7)\r\n f3 = Fraction(-3, -9)\r\n self.assertFalse(f1 != f1)\r\n self.assertTrue(f1 != f2)\r\n self.assertFalse(f1 != f3)\r\n self.assertTrue(f2 != f3)\r\n self.assertTrue(f1 != Fraction(-1, 3))\r\n self.assertFalse(f1 != Fraction(-1, -3))",
"def assert_equal(a: float, b: float) -> None:\n msg = \"{} != {}\".format(a, b) \n assert a == b, msg",
"def assertFloatsEqual(testCase, lhs, rhs, **kwargs):\n return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs)",
"def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):\n if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):\n return assert_array_almost_equal(actual, desired, decimal=decimal,\n err_msg=err_msg, verbose=verbose)\n msg = build_err_msg([actual, desired],\n err_msg=err_msg, verbose=verbose)\n if not round(abs(desired - actual), decimal) == 0:\n raise AssertionError(msg)",
"def test_assertFloatEqualList_unequal(self):\n originals = [0, 1, -1, 10, -10, 100, -100]\n modified = [i + 1e-5 for i in originals]\n try:\n self.assertFloatEqual(originals, modified)\n except:\n pass \n else:\n raise AssertionError, \\\n \"unit_test.assertFloatEqual failed on lists of dissimilar values\"",
"def testNotEquals(self):\n a = _TrackTime('01:20:03')\n b = _TrackTime('01:02:03')\n self.assertNotEqual( a, b )",
"def test_significant_places_fail(self):\n with self.assertRaises(AssertionError):\n self.assert_almost_equal_significant(.000541, .000542, places=2)",
"def assert_not_equal(first, second, msg=\"\"):\n if first == second:\n raise AssertionError(\"%s and %s are equal, message: %s\" % (first, second, msg))",
"def test_assertNotEqual_equal(self):\n for first, second in self.equal_pairs:\n try:\n self.assertNotEqual(first, second)\n except:\n message = str(exc_info()[1])\n self.assertEqual(message,\n 'Observed %s and expected %s: shouldn\\'t test equal' \\\n % (`first`, `second`))\n else:\n raise AssertionError, \\\n \"unit_test.assertNotEqual failed on input %s and %s\" \\\n % (`first`, `second`)",
"def test_assertNotEqual_unequal(self):\n for first, second in self.unequal_pairs:\n try:\n self.assertNotEqual(first, second)\n except:\n raise AssertionError, \\\n \"unit_test.assertNotEqual failed on input %s and %s\" \\\n % (`first`, `second`)",
"def assert_not_equal(expected: Any, actual: Any, msg: str = '') -> None:\r\n from apysc.expression import expression_file_util\r\n from apysc.string import string_util\r\n if _actual_value_type_is_array(actual=actual):\r\n assert_arrays_not_equal(expected=expected, actual=actual, msg=msg)\r\n return\r\n if _actual_value_type_is_dict(actual=actual):\r\n assert_dicts_not_equal(expected=expected, actual=actual, msg=msg)\r\n return\r\n\r\n _trace_info(\r\n interface_label='assert_not_equal', expected=expected, actual=actual)\r\n expected_str, actual_str = _get_expected_and_actual_strs(\r\n expected=expected, actual=actual)\r\n\r\n msg = string_util.escape_str(string=msg)\r\n expression: str = (\r\n f'console.assert({expected_str} !== {actual_str}, \"{msg}\");'\r\n )\r\n expression_file_util.append_js_expression(expression=expression)",
"def test_assertIsNotBetween_equals(self):\n self.assertIsNotBetween(1, 1, 2)\n self.assertIsNotBetween(1.0, 1, 2)\n self.assertIsNotBetween(1, 1.0, 2)\n self.assertIsNotBetween(1.0, 1.0, 2)\n self.assertIsNotBetween(2, 1, 2)\n self.assertIsNotBetween(2.0, 1, 2)\n self.assertIsNotBetween(2, 1, 2.0)\n self.assertIsNotBetween(2.0, 1, 2.0)",
"def assert_sensor_not_equal(self, sensorname, expected, sensortype=str,\n msg=None, places=7):\n if msg is None:\n places_msg = \" (within %d decimal places)\" % \\\n places if sensortype == float else \"\"\n msg = \"Value of sensor '%s' is %%r. Expected a different\" \\\n \" value%s.\" % (sensorname, places_msg)\n\n got = self.get_sensor_value(sensorname, sensortype)\n if '%r' in msg:\n msg = msg % got\n\n if sensortype == float:\n self.test.assertNotAlmostEqual(got, expected, places, msg)\n else:\n self.test.assertNotEqual(got, expected, msg)",
"def approx_equals(a, b):\n return (a - b) < 1.5e-16",
"def test_significant_places_ok(self):\n self.assert_almost_equal_significant(.000541, .000542, places=1)",
"def assert_approximately_equal(attribute, original, retrieved, tolerance):\n try:\n assert np.abs(original - retrieved) <= tolerance\n logger.info(' ... {0} approximately OK'.format(attribute))\n return True \n except AssertionError:\n logger.error(' XXX \"{0}\" failed'.format(attribute))\n # print original, retrieved, np.abs(original-retrieved)\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sometimes strategy parameters are learned alongside brain parameters. In these cases the strategy parameters need to be stripped from the population before sending the brain genomes to the evaluation.
|
def strip_strategy_from_population(population, mutation_learned, strategy_parameter_per_gene=False):
if len(population) == 0:
return population
if mutation_learned:
if strategy_parameter_per_gene:
half = len(np.array(population)[0]) // 2
return list(np.array(population)[:, :-half])
else:
return list(np.array(population)[:, :-2])
return population
|
[
"def reset_parameters(self) -> None:\n if hasattr(self.hopfield, r'reset_parameters'):\n self.hopfield.reset_parameters()\n\n # Explicitly initialise pooling weights.\n nn.init.normal_(self.pooling_weights, mean=0.0, std=0.02)",
"def disableStrategies():\n stratDictionary = readSettings()\n position = 0\n for strategy in stratDictionary:\n listOfSettings = literal_eval(stratDictionary[\"strategy\"+str(position)])\n if listOfSettings[3] == 1:\n listOfSettings[3] = 0\n stratDictionary[\"strategy\"+str(position)] = str(listOfSettings)\n algoHandler.disableAlgo(position) #Temporary fix\n position+=1\n algoHandler.disableAllAlgos()\n writeSettings(cleanDictionary(stratDictionary))",
"def get_params_to_prune(self) -> Tuple[Tuple[nn.Module, str], ...]:\n raise NotImplementedError",
"def reset_params(self):\n pass",
"def reset_fixed_varied_parameters(self):\n self.varied_params = [param for param in self.parameters\n if param.is_varied()]\n self.fixed_params = [param for param in self.parameters\n if param.is_fixed()]",
"def reset_parameters(self) -> None:\n if hasattr(self.hopfield, r'reset_parameters'):\n self.hopfield.reset_parameters()\n\n # Explicitly initialise lookup and target weights.\n nn.init.normal_(self.lookup_weights, mean=0.0, std=0.02)\n if self.target_weights is not None:\n nn.init.normal_(self.target_weights, mean=0.0, std=0.02)",
"def reset_tuning(self):\n return",
"def reset_parameters(self):\n\n # Reset fixed embeddings to original value\n if self.args.tune_partial > 0:\n if self.parallel:\n embedding = self.F.module.embedding.weight.data\n fixed_embedding = self.F.module.fixed_embedding\n else:\n embedding = self.F.embedding.weight.data\n fixed_embedding = self.F.fixed_embedding\n\n # Embeddings to fix are the last indices\n offset = embedding.size(0) - fixed_embedding.size(0)\n if offset >= 0:\n embedding[offset:] = fixed_embedding",
"def get_params_to_prune(self) -> Tuple[Tuple[nn.Module, str], ...]:\n return model_utils.get_params(\n self.model,\n ((nn.Conv2d, \"weight\"), (nn.BatchNorm2d, \"weight\"), (nn.Linear, \"weight\")),\n )",
"def freeze_base_model(self):\n for param in self.unispeech.parameters():\n param.requires_grad = False",
"def freeze_feature_encoder(self):\n self.unispeech.feature_extractor._freeze_parameters()",
"def fine_tune(self):\n # Do not train BERT parameters\n for param in self.bert.parameters():\n param.requires_grad = False\n # Train the classifier\n for param in self.fc.parameters():\n param.requires_grad = True",
"def clearStrategies(self) -> None:\n ...",
"def _update_trainable_params(self):\n self._trainable_params = set(self._par_info)",
"def hard_update_critic(self, model):\n for target_param, param in zip(self.critic_target.parameters(), model.parameters()):\n target_param.data.copy_(param.data)",
"def _remove_abnormal_values(self):\n\n adjust_negative_values(\n self.h_temp,\n self.wet_pwet_nodes,\n self.node_east,\n self.node_west,\n self.node_north,\n self.node_south,\n out_f=self.h_temp,\n )\n for i in range(self.number_gclass):\n adjust_negative_values(\n self.Ch_i_temp[i, :],\n self.wet_pwet_nodes,\n self.node_east,\n self.node_west,\n self.node_north,\n self.node_south,\n out_f=self.Ch_i_temp[i, :],\n )\n\n # if self.model == \"4eq\":\n # adjust_negative_values(\n # self.Kh_temp,\n # self.wet_pwet_nodes,\n # self.node_east,\n # self.node_west,\n # self.node_north,\n # self.node_south,\n # out_f=self.Kh_temp,\n # )",
"def subdCleanTopology():\n pass",
"def restore_params(self):\n self.model.load_state_dict(self.params)",
"def _reset_parameters(self):\n torch.nn.init.xavier_normal_(self.initial_embeddings)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract limit clause from SQL statement.
|
def _extract_limit_from_query(statement: TokenList) -> Optional[int]:
idx, _ = statement.token_next_by(m=(Keyword, "LIMIT"))
if idx is not None:
_, token = statement.token_next(idx=idx)
if token:
if isinstance(token, IdentifierList):
# In case of "LIMIT <offset>, <limit>", find comma and extract
# first succeeding non-whitespace token
idx, _ = token.token_next_by(m=(sqlparse.tokens.Punctuation, ","))
_, token = token.token_next(idx=idx)
if token and token.ttype == sqlparse.tokens.Literal.Number.Integer:
return int(token.value)
return None
|
[
"def add_sql_limit(sql, limit):\n # strip off trialing whitespaces and add limit\n sql = sql.rstrip()\n if sql.endswith(';'):\n sql = sql[:-1]\n sql_with_limit = sql + ' LIMIT %s, %s;' % limit\n return sql_with_limit",
"def _make_limit_clause(limit: int) -> psql.Composed:\n if limit != -1:\n if not isinstance(limit, int):\n raise TypeError(f\"'limit' must be a positive integer. Got {limit}\")\n return psql.SQL(\" LIMIT {limit}\").format(limit=limit)\n return psql.Composed([])",
"def visitLimitClause(self, ctx: MySqlParser.LimitClauseContext) -> SQLToken:\n if ctx.OFFSET():\n offset, limit = CONST(int(ctx.DECIMAL_LITERAL(1).getText())), CONST(int(ctx.DECIMAL_LITERAL(0).getText()))\n elif ctx.COMMA():\n offset, limit = CONST(int(ctx.DECIMAL_LITERAL(0).getText())), CONST(int(ctx.DECIMAL_LITERAL(1).getText()))\n else:\n offset, limit = CONST(0), CONST(int(ctx.DECIMAL_LITERAL(0).getText()))\n return SQLToken(LIMIT, (offset, limit))",
"def prepare_query(column_family, where_clause, limit):\n query = \"SELECT * FROM %s\" % column_family\n if where_clause is not None:\n query += \" WHERE \" + where_clause\n\n query += \" LIMIT %d\" % limit\n\n return query",
"def _render_limit(limit):\n if not limit:\n return ''\n\n return \"LIMIT %s\" % limit",
"def set_or_update_query_limit(self, new_limit: int, force: bool = False) -> str:\n if not self._limit:\n return f\"{self.stripped()}\\nLIMIT {new_limit}\"\n limit_pos = None\n statement = self._parsed[0]\n # Add all items to before_str until there is a limit\n for pos, item in enumerate(statement.tokens):\n if item.ttype in Keyword and item.value.lower() == \"limit\":\n limit_pos = pos\n break\n _, limit = statement.token_next(idx=limit_pos)\n # Override the limit only when it exceeds the configured value.\n if limit.ttype == sqlparse.tokens.Literal.Number.Integer and (\n force or new_limit < int(limit.value)\n ):\n limit.value = new_limit\n elif limit.is_group:\n limit.value = f\"{next(limit.get_identifiers())}, {new_limit}\"\n\n str_res = \"\"\n for i in statement.tokens:\n str_res += str(i.value)\n return str_res",
"def getQueryLimitName(self):\n return DEFAULT_LIMIT_VARIABLE_NAME",
"def getLimit(self):\n return self.limit",
"def _make_slice(\n limit_clause: _LimitOffsetType,\n offset_clause: _LimitOffsetType,\n start: int,\n stop: int,\n) -> Tuple[Optional[ColumnElement[int]], Optional[ColumnElement[int]]]:\n\n # for calculated limit/offset, try to do the addition of\n # values to offset in Python, however if a SQL clause is present\n # then the addition has to be on the SQL side.\n\n # TODO: typing is finding a few gaps in here, see if they can be\n # closed up\n\n if start is not None and stop is not None:\n offset_clause = _offset_or_limit_clause_asint_if_possible(\n offset_clause\n )\n if offset_clause is None:\n offset_clause = 0\n\n if start != 0:\n offset_clause = offset_clause + start # type: ignore\n\n if offset_clause == 0:\n offset_clause = None\n else:\n assert offset_clause is not None\n offset_clause = _offset_or_limit_clause(offset_clause)\n\n limit_clause = _offset_or_limit_clause(stop - start)\n\n elif start is None and stop is not None:\n limit_clause = _offset_or_limit_clause(stop)\n elif start is not None and stop is None:\n offset_clause = _offset_or_limit_clause_asint_if_possible(\n offset_clause\n )\n if offset_clause is None:\n offset_clause = 0\n\n if start != 0:\n offset_clause = offset_clause + start # type: ignore\n\n if offset_clause == 0:\n offset_clause = None\n else:\n offset_clause = _offset_or_limit_clause(\n offset_clause # type: ignore\n )\n\n return limit_clause, offset_clause # type: ignore",
"def get_posts_limit():\n limit = None\n try:\n limit = config.get(consts.FB_QUERY_SECTION, consts.LIMIT)\n except:\n pass\n if limit is None or limit is '':\n limit = consts.DEFAULT_LIMIT\n else:\n try:\n limit = int(limit)\n if limit > 100:\n print (\"ERROR: limit in {} must be an integer between 1 and \"\n \"100\".format(consts.CONFIG_FILE))\n exit(1)\n except:\n print (\"ERROR: limit in {} must be an integer between 1 and \"\n \"100\".format(consts.CONFIG_FILE))\n exit(1)\n if VERBOSE:\n print \"Using limit {} from config.\".format(limit)\n\n return str(limit)",
"def get_sql_statement(self, *_) -> str:\n return self.sql_stmt.format(\n result_limit=self.config.sourceConfig.config.resultLimit,\n filters=self.filters, # pylint: disable=no-member\n )",
"def limit_parse(count='0'):\n index = '0'\n if ',' in count:\n index, count = count.split(',', 1)\n index = int(index)\n count = int(count)\n\n def limiter(entities, indexable=False, environ=None):\n return limit(entities, index=index, count=count)\n\n return limiter",
"def df_collectLimit(df, limit, *cols, sortCol=None):\n if sortCol:\n df = df.sort(sortCol)\n\n if df.count() > limit:\n df = df.limit(limit)\n\n if cols:\n return df.select(*cols).collect()\n return df.collect()",
"def parse_query(self, sql):\n return process_sql.get_sql(self.spider_schema, sql)",
"def test_limit_on_tables():\n sql = 'select width from \"ft:1qpKIcYQMBsXLA9RLWCaV9D0Hus2cMQHhI-ViKHo\" LIMIT '\n err = 'Response was not equal to size of LIMIT'\n limit = 1\n q = SQL2GEE(sql + str(limit))\n assert len(q.response['features']) == limit, err\n limit = 2\n q = SQL2GEE(sql + str(limit))\n assert len(q.response['features']) == limit, err\n limit = 5\n q = SQL2GEE(sql + str(limit))\n assert len(q.response['features']) == limit, err\n return",
"def _offset_or_limit_clause(\n element: _LimitOffsetType,\n name: Optional[str] = None,\n type_: Optional[_TypeEngineArgument[int]] = None,\n) -> ColumnElement[int]:\n return coercions.expect(\n roles.LimitOffsetRole, element, name=name, type_=type_\n )",
"def iterPacketsAfter(self, id, limit=None):\n if limit:\n limit = \"LIMIT %d\" % limit\n else:\n limit = ''\n return self.db.iterDictQuery(\n \"%s WHERE P.source = %d AND P.id > %d ORDER BY P.id %s\" % (\n self.packetQuery, self.id, id, limit))",
"def _reduce_limit(self):\n if \"reduce_limit\" not in self.query_config:\n return None\n if not self.query_config[\"reduce_limit\"]:\n return None\n\n i = self._input_line_length\n return max(200, i / 2)",
"def filter_return_count(f, values):\n # if this raises an error, it will be handled outside this function\n count = f.get('# of results to return\\0')\n if count:\n try:\n count = int(count)\n except ValueError:\n raise ValueError(\n f'enter a valid count (you entered {count})')\n if count < 1:\n raise ValueError(\n 'enter a count > 0 (you entered {count})')\n # this will be passed to the parameterized query\n values.append(str(count))\n return f'LIMIT %s' # SQL for the paratemerized query\n return 'LIMIT 100'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract top clause value from SQL statement.
|
def extract_top_from_query(
statement: TokenList, top_keywords: Set[str]
) -> Optional[int]:
str_statement = str(statement)
str_statement = str_statement.replace("\n", " ").replace("\r", "")
token = str_statement.rstrip().split(" ")
token = [part for part in token if part]
top = None
for i, _ in enumerate(token):
if token[i].upper() in top_keywords and len(token) - 1 > i:
try:
top = int(token[i + 1])
except ValueError:
top = None
break
return top
|
[
"def _extract_limit_from_query(statement: TokenList) -> Optional[int]:\n idx, _ = statement.token_next_by(m=(Keyword, \"LIMIT\"))\n if idx is not None:\n _, token = statement.token_next(idx=idx)\n if token:\n if isinstance(token, IdentifierList):\n # In case of \"LIMIT <offset>, <limit>\", find comma and extract\n # first succeeding non-whitespace token\n idx, _ = token.token_next_by(m=(sqlparse.tokens.Punctuation, \",\"))\n _, token = token.token_next(idx=idx)\n if token and token.ttype == sqlparse.tokens.Literal.Number.Integer:\n return int(token.value)\n return None",
"def get_model_top_1(db_name, img_num, model_name):\n #print(\"Connecting to database...\")\n connection = connect_db(db_name)\n cursor = connection.cursor()\n \n cmd = 'SELECT top_1 FROM exec_data WHERE model_name=\\''+model_name+'\\' and img_num=\\''+str(img_num)+'\\''\n cursor.execute(cmd)\n result = cursor.fetchall()\n return result[0][0]",
"def get_model_top_n(db_name, img_num, model_name, n):\n #print(\"Connecting to database...\")\n connection = connect_db(db_name)\n cursor = connection.cursor()\n \n cmd = 'SELECT top_' + str(n)\n cmd += ' FROM exec_data WHERE model_name=\\''+model_name+'\\' and img_num=\\''+str(img_num)+'\\''\n cursor.execute(cmd)\n result = cursor.fetchall()\n return result[0][0]",
"def get_model_top_5(db_name, img_num, model_name):\n #print(\"Connecting to database...\")\n connection = connect_db(db_name)\n cursor = connection.cursor()\n \n cmd = 'SELECT top_5 FROM exec_data WHERE model_name=\\''+model_name+'\\' and img_num=\\''+str(img_num)+'\\''\n cursor.execute(cmd)\n result = cursor.fetchall()\n return result[0][0]",
"def top_customer():\n\n try: \n top_customer = pd.read_sql(\"\"\"\n SELECT SUM(price), customer_id\n FROM ticket_sales\n GROUP BY customer_id\n ORDER BY SUM(price) DESC\n LIMIT 1;\n \"\"\",\n con=engine)\n print(f'The highest spending customer, Customer {int(top_customer.iloc[0][1])}, spent ${top_customer.iloc[0][0]} in total.')\n\n except SQLAlchemyError as e:\n error = str(e.__dict__['orig'])\n print(type(e))",
"def get_row_with_max_value(self, match_header: str):\n\t\treturn self._database_api.execute_custom_query_one_result('SELECT * FROM ' + self.table_name + ' ORDER BY ' + self.table_name + '.' + match_header + ' DESC LIMIT 1')",
"def top(self) -> int:\n return self.top_ele",
"def top(self, n):\n ttbl = self.order_cols()\n return ttbl.select(range(n+1))",
"def top_row(self):\n return self._row",
"def _getTopSection(self, name):\n section = self.query(name)\n assert len(section) == 1\n return section[0]",
"def top(self) -> int:\n return self.top_element",
"def query_and_return_the_first_row_where(statement):\n\n db = current.db\n s3db = current.s3db\n\n cmd = \"db(%s).select(\\\n limitby=(0,1) ).first()\" % statement\n logger.info(\"Executing query %s\" % cmd)\n\n output = eval(cmd)\n return output",
"def get_top_token(self, col: int) -> int:\n empty_row = self.get_empty(col)\n\n if empty_row == (self.nrows - 1): # ie. the whole column is empty\n return None\n if empty_row is None: # ie. the whole row is full\n return 0\n if empty_row is not None: # ie. token is one row below the empty spot\n return empty_row + 1",
"def top(self, category=0):\n return Top(self.base_url, category)",
"def get_top(self, num: int=10) -> List[Tuple[str, int]]:\n self.db.execute(\"SELECT discord_id, score FROM players ORDER BY score DESC LIMIT ?;\", (num,))\n return self.db.fetchall()",
"def get_top_n(self, num):\n try:\n return self.stack[-1 - num]\n except IndexError:\n print \"there is only {} elements!\".format(len(self.stack))",
"def get_top_point(self):\r\n \r\n top_point = self.curve_vals.loc[str(self.serotype)]['cal1_IgG']\r\n return top_point",
"def _get_bm_top(self, query: List[str]) -> List[List[str]]:\n # sort titles according to score and return indices\n scores = [(score, title) for score, title in zip(self.bm25.get_scores(query), self.corpus)]\n scores = sorted(scores, key=itemgetter(0), reverse=True)\n\n # Return top 2048 for evaluation purpose, cut to half for recommendations to prevent memory errors\n if self.eval:\n try:\n return [title for score, title in scores][:256]\n except IndexError:\n return [title for score, title in scores]\n else:\n try:\n return [title for score, title in scores if score > 0][:1028]\n except IndexError:\n return [title for score, title in scores if score > 0]",
"def _get_top(self) -> \"int\" :\n return _core.TextCommandPalette__get_top(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse the SQL and return the CTE and rest of the block to the caller
|
def get_cte_remainder_query(sql: str) -> Tuple[Optional[str], str]:
cte: Optional[str] = None
remainder = sql
stmt = sqlparse.parse(sql)[0]
# The first meaningful token for CTE will be with WITH
idx, token = stmt.token_next(-1, skip_ws=True, skip_cm=True)
if not (token and token.ttype == CTE):
return cte, remainder
idx, token = stmt.token_next(idx)
idx = stmt.token_index(token) + 1
# extract rest of the SQLs after CTE
remainder = "".join(str(token) for token in stmt.tokens[idx:]).strip()
cte = f"WITH {token.value}"
return cte, remainder
|
[
"def parse_query(self, sql):\n return process_sql.get_sql(self.spider_schema, sql)",
"def parse_select(stream):\n # first apply ()\n stream = group(stream, [Parenthesis])\n \n # then split in select from where for first one\n stream = group_select(stream)\n \n return stream",
"def parse(self):\n if self.query[-1] != ';':\n raise NotImplementedError(\"Semicolon missing\")\n self.query = self.query[:-1]\n keywords = self.separator()\n self.fill_dict(keywords)\n if len(self.info[\"tables\"]) == 0:\n raise NotImplementedError(\"Syntax error in SQL query, no tables mentioned in query\")\n if len(self.info[\"columns\"]) == 0:\n raise NotImplementedError(\"Syntax error in SQL query, no columns or aggregation mentioned to be selcted\")\n if self.info[\"hasgroupby\"] and len(self.info[\"groupby\"]) != 1:\n raise NotImplementedError(\"Syntax error in SQL query, we exactly support one column for GROUP BY\")\n if self.info[\"hasorderby\"] and len(self.info[\"orderby\"]) != 1:\n if len(self.info[\"orderby\"]) > 2 or (\n len(self.info[\"orderby\"]) == 2 and self.info[\"orderby\"][1] != \"ASC\" and self.info[\"orderby\"][\n 1] != \"DESC\"):\n raise NotImplementedError(\"Syntax error in SQL query, we exactly support one column for ORDER BY\")\n else:\n self.info[\"orderbytype\"] = self.info[\"orderby\"][1]\n temp = [self.info[\"orderby\"][0]]\n self.info[\"orderby\"] = temp\n if self.info[\"distinct\"] and (\n len(self.info[\"orderby\"]) > 0 and self.info[\"orderby\"][0] not in self.info[\"columns\"]):\n raise NotImplementedError(\n \"Syntax error in SQL query, DISTINCT used and ORDER BY uses columns not mentioned in SELECT\")\n\n return self.info",
"def transform(cls, sqlbuffer):\n try:\n\n blocks = sqlparse.split(sqlbuffer)\n\n #remove comments and empty statements\n for block in blocks:\n sql = sqlparse.format(block,strip_comments=True)\n \n # If sql is not empty\n if sql: \n \n # if select statemnt add output statement\n stmt = (sqlparse.parse(sql)[0]).get_type().upper()\n\n if (stmt == \"UNKNOWN\"): continue\n\n if (stmt.upper() == r\"SELECT\"):\n sql_stmt = (r'SELECT', sql)\n else:\n sql_stmt = (r'NAN', sql)\n \n yield sql_stmt\n ##c = sqlparse.format(sql, output_format=\"python\")\n #sqls.append(command)\n\n #if (stmt.upper() == r\"SELECT\"):\n # outputfile = resultfile + \"_{0}.xml\".format(len(sqls))\n # output = str(\"OUTPUT TO \") + str(outputfile) + str(\" FORMAT XML\")\n # sqls.append(output)\n \n except Exception as e:\n logging.debug(\"SQLParseError %s, => %s\" %(sqlfile,e.args))\n raise\n except:\n logging.debug(\"Fatal error occured in %s\" %(sqlfile))\n raise",
"def parse(self) -> None:\n if self.current[0] == Token.CTE: # constant ?\n print(self.current[1])\n self.current = self.next_token() # reads next token\n return # recursion end\n elif self.current[0] == Token.PARL: # ( ?\n print('(')\n self.current = self.next_token() # reads next token\n self.parse() # recursion for ( expr )\n if self.current[0] == Token.PARR: # ) ?\n print(')')\n self.current = self.next_token() # reads next token\n return # recursion end\n if self.current[0] == Token.ADD:\n print('+') # operator?\n elif self.current[0] == Token.SUB:\n print('-')\n elif self.current[0] == Token.MUL:\n print('*')\n elif self.current[0] == Token.DIV:\n print('/')\n else:\n raise ParsingException(\"Wrong operator or left parenthesis expected\")\n self.current = self.next_token() # reads next token\n self.parse() # recursion for ( ... oper expr )\n if self.current[0] == Token.PARR: # ) ?\n print(')')\n self.current = self.next_token() # reads next token\n return # recursion end\n else:\n raise ParsingException(\"Right parenthesis expected\")\n else:\n raise ParsingException(\"Left parenthesis or constant expected\")",
"def _parse_block(self,idx):\n block_tmp = self._block_list[idx]\n blocktype = self._paragraph_or_table[idx]\n paragraph_count = sum(self._paragraph_or_table[:idx+1])\n table_count = idx + 1 - paragraph_count\n df = DataFrame()\n # paragraph\n if blocktype==1:\n l_runText = [r.text for r in block_tmp.runs]\n l_runID = arange(len(l_runText))\n df = DataFrame({'string':l_runText,\n 'run_ID':l_runID},index=l_runID)\n df['paragraph_ID'] = paragraph_count - 1 # 0-starting index \n # table\n if blocktype==0:\n row_count = 0\n for row in block_tmp.rows:\n cell_count = 0\n for cell in row.cells:\n cell_para_count = 0\n for p in cell.paragraphs:\n l_runText = [r.text for r in p.runs]\n l_runID = arange(len(l_runText)) \n df = DataFrame({'string':l_runText,\n 'run_ID':l_runID},index=l_runID)\n df['table_ID'] = table_count - 1 # 0-starting index\n df['row_ID'] = row_count\n df['cell_ID'] = cell_count\n df['paragraph_ID'] = cell_para_count \n cell_para_count += 1\n cell_count += 1\n row_count += 1\n df['block_ID'] = idx\n self._block_dataframe_list[idx] = df",
"def _parse_line(self):\r\n #if self.debug: print '\\t ' + str(self._current_node)\r\n\r\n # PyParser setParseAction's actually execute during parsing,\r\n # So we need closures in order to change the current scope\r\n\r\n \r\n def depth_from_indentation(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n \r\n def depth_from_match(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #print self._current_node\r\n self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap \r\n\r\n def depth_from_nemo_tag(function):\r\n \"\"\" Start of the match is where the nemo tag is. Pass the other values to the wrapped function \"\"\"\r\n def wrap(start, values):\r\n # print 'Depth %d | %d %s' %(self._depth, start, values)\r\n self._depth = start\r\n tokens = values[1]\r\n self._current_node = function(tokens)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n\r\n\r\n\r\n # Match HTML\r\n from pyparsing import NotAny, MatchFirst\r\n html = restOfLine\r\n html.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Match Mako control tags\r\n nemo_tag = Literal('%')\r\n\r\n begin = Keyword('for') | Keyword('if') | Keyword('while')\r\n middle = Keyword('else') | Keyword('elif')\r\n end = Keyword('endfor') | Keyword('endif') | Keyword('endwhile')\r\n control = nemo_tag + (begin | middle | end)\r\n\r\n begin.setParseAction(depth_from_indentation(self._add_nesting_mako_control_node) )\r\n middle.setParseAction(depth_from_indentation(self._add_mako_middle_node))\r\n end.setParseAction(depth_from_indentation(self._add_mako_control_leaf))\r\n\r\n # Match Nemo tags\r\n argument_name = Word(alphas,alphanums+\"_-:\")\r\n argument_value = quotedString\r\n regular_argument = argument_name + Literal('=') + argument_value\r\n\r\n class_name = Literal('.').setParseAction(lambda x: 'class=')\r\n id_name = Literal('#').setParseAction(lambda x: 'id=')\r\n special_argument = (class_name | id_name) + argument_value\r\n argument = Combine(special_argument) | Combine(regular_argument)\r\n\r\n # Match single Nemo statement (Part of a multi-line)\r\n inline_nemo_html = Word(alphas) + Group(ZeroOrMore(argument))\r\n inline_nemo_html.setParseAction(depth_from_match(self._add_nemo_node))\r\n\r\n # Match first nemo tag on the line (the one that may begin a multi-statement expression) \r\n nemo_html = nemo_tag + Group(Word(alphanums+\"_-:\") + Group(ZeroOrMore(argument)))\r\n nemo_html.setParseAction(depth_from_nemo_tag(self._add_nemo_node))\r\n\r\n # Match a multi-statement expression. Nemo statements are seperated by |. 
Anything after || is treated as html\r\n separator = Literal('|').suppress()\r\n html_separator = Literal('||') # | Literal('|>')\r\n nemo_list = nemo_html + ZeroOrMore( separator + inline_nemo_html )\r\n inline_html = html.copy()\r\n inline_html.setParseAction(depth_from_match(self._add_inline_html_node))\r\n nemo_multi = nemo_list + Optional(html_separator + inline_html)\r\n\r\n # Match empty Nemo statement\r\n empty = nemo_tag + Empty()\r\n empty.setParseAction(depth_from_indentation(self._add_blank_nemo_node))\r\n\r\n # Match unused Mako tags\r\n mako_tags = Literal('<%') | Literal('%>') | Literal('%CLOSETEXT') | Literal('</%')\r\n mako = mako_tags\r\n mako_tags.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Matches General\r\n nemo = (control | nemo_multi | empty)\r\n line = mako_tags | nemo | html\r\n\r\n # Depth Calculation (deprecated?)\r\n self._depth = len(self._c) - len(self._c.strip())\r\n\r\n #try:\r\n line.parseString(self._c)\r\n\r\n #except ParseException:\r\n # Finally if we couldn't match, then handle it as HTML\r\n #add_html_node(self._c)\r",
"def parse_block(self, block, lineno, indent):\r\n tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))\r\n tree.future_features = frozenset()\r\n return tree",
"def parse(self, line: str) -> Statement:\n\n # handle the special case/hardcoded terminator of a blank line\n # we have to do this before we tokenize because tokenizing\n # destroys all unquoted whitespace in the input\n terminator = None\n if line[-1:] == constants.LINE_FEED:\n terminator = constants.LINE_FEED\n\n command = None\n args = None\n argv = None\n\n # lex the input into a list of tokens\n tokens = self.tokenize(line)\n\n # of the valid terminators, find the first one to occur in the input\n terminator_pos = len(tokens) + 1\n for pos, cur_token in enumerate(tokens):\n for test_terminator in self.terminators:\n if cur_token.startswith(test_terminator):\n terminator_pos = pos\n terminator = test_terminator\n # break the inner loop, and we want to break the\n # outer loop too\n break\n else:\n # this else clause is only run if the inner loop\n # didn't execute a break. If it didn't, then\n # continue to the next iteration of the outer loop\n continue\n # inner loop was broken, break the outer\n break\n\n if terminator:\n if terminator == constants.LINE_FEED:\n terminator_pos = len(tokens)+1\n\n # everything before the first terminator is the command and the args\n argv = tokens[:terminator_pos]\n (command, args) = self._command_and_args(argv)\n # we will set the suffix later\n # remove all the tokens before and including the terminator\n tokens = tokens[terminator_pos+1:]\n else:\n (testcommand, testargs) = self._command_and_args(tokens)\n if testcommand in self.multiline_commands:\n # no terminator on this line but we have a multiline command\n # everything else on the line is part of the args\n # because redirectors can only be after a terminator\n command = testcommand\n args = testargs\n argv = tokens\n tokens = []\n\n # check for a pipe to a shell process\n # if there is a pipe, everything after the pipe needs to be passed\n # to the shell, even redirected output\n # this allows '(Cmd) say hello | wc > countit.txt'\n try:\n # find the first pipe if it exists\n pipe_pos = tokens.index(constants.REDIRECTION_PIPE)\n # save everything after the first pipe as tokens\n pipe_to = tokens[pipe_pos+1:]\n\n for pos, cur_token in enumerate(pipe_to):\n unquoted_token = utils.strip_quotes(cur_token)\n pipe_to[pos] = os.path.expanduser(unquoted_token)\n\n # remove all the tokens after the pipe\n tokens = tokens[:pipe_pos]\n except ValueError:\n # no pipe in the tokens\n pipe_to = None\n\n # check for output redirect\n output = None\n output_to = None\n try:\n output_pos = tokens.index(constants.REDIRECTION_OUTPUT)\n output = constants.REDIRECTION_OUTPUT\n\n # Check if we are redirecting to a file\n if len(tokens) > output_pos + 1:\n unquoted_path = utils.strip_quotes(tokens[output_pos + 1])\n output_to = os.path.expanduser(unquoted_path)\n\n # remove all the tokens after the output redirect\n tokens = tokens[:output_pos]\n except ValueError:\n pass\n\n try:\n output_pos = tokens.index(constants.REDIRECTION_APPEND)\n output = constants.REDIRECTION_APPEND\n\n # Check if we are redirecting to a file\n if len(tokens) > output_pos + 1:\n unquoted_path = utils.strip_quotes(tokens[output_pos + 1])\n output_to = os.path.expanduser(unquoted_path)\n\n # remove all tokens after the output redirect\n tokens = tokens[:output_pos]\n except ValueError:\n pass\n\n if terminator:\n # whatever is left is the suffix\n suffix = ' '.join(tokens)\n else:\n # no terminator, so whatever is left is the command and the args\n suffix = None\n if not command:\n # command could already have been set, if so, don't set 
it again\n argv = tokens\n (command, args) = self._command_and_args(argv)\n\n # set multiline\n if command in self.multiline_commands:\n multiline_command = command\n else:\n multiline_command = None\n\n # build the statement\n # string representation of args must be an empty string instead of\n # None for compatibility with standard library cmd\n statement = Statement('' if args is None else args,\n raw=line,\n command=command,\n args=args,\n argv=list(map(lambda x: utils.strip_quotes(x), argv)),\n multiline_command=multiline_command,\n terminator=terminator,\n suffix=suffix,\n pipe_to=pipe_to,\n output=output,\n output_to=output_to,\n )\n return statement",
"def tr_sql_parser(file_input):\n\n declares = []\n sets = []\n wheres = []\n comments = []\n output = []\n with open(file_input, \"r\") as f:\n data = f.read()\n\n for line in data.split('\\n'):\n if line.startswith('DECLARE'):\n declares.append(line)\n elif line.startswith('SET'):\n sets.append(line)\n elif line.startswith('WHERE'):\n wheres.append(line)\n elif line.startswith('--'):\n comments.append(line)\n else:\n output.append(line)\n fields = [field.split('@')[1] for field in wheres]\n return declares, sets, fields, comments, output",
"def _parse(self):\n logger.debug('Parsing file: %s', self.filename)\n self._context = []\n self._last_popped = None\n self.statement_pre_read = None\n self.sw = None\n while self.can_read():\n token = self.next_token()\n if token is None:\n continue\n if token.model is None:\n continue\n if self.find_context_top(cond=lambda x: x != token and x.isinstance(CodeBlock)) is None:\n # this token model has no parents, we must save it separately\n self._save_model(token.model)\n self.parsed = True",
"def select(self, query):\n # start\n yield \"SELECT\"\n # prepare to render the field projection\n self.indent(increment=2)\n # if the query is a table specification\n if isinstance(query, self.schemer):\n # no projection\n yield self.place(\"*\")\n # push out\n self.outdent()\n # render the table name\n yield self.place(\"FROM {};\".format(query.pyre_name))\n # push out\n self.outdent()\n # all done\n return\n\n # native queries\n if isinstance(query, self.selector) or isinstance(query, self.query):\n # figure out how many field references there are\n fields = len(query.pyre_fields)\n # build the projection\n for index, entry in enumerate(query.pyre_fields):\n # do we need a comma?\n comma = ',' if index+1 < fields else ''\n # render this field\n yield self.place(\"{} AS {}{}\".format(self.expression(entry), entry.name, comma))\n # push out\n self.outdent()\n\n # render the {FROM} section\n yield self.place(\"FROM\")\n # do we have other clauses following the {FROM} section\n otherClauses = query.where or query.order or query.group\n # push in\n self.indent()\n # figure out how many table references there are\n tables = len(query.pyre_tables)\n # render the tables\n for index, tableName in enumerate(sorted(query.pyre_tables.keys())):\n # get the table\n table = query.pyre_tables[tableName]\n # do we need a terminator?\n # if we have more tables\n if index + 1 < tables:\n # make it a comma\n terminator = ','\n # if there are no other clauses in the query\n elif not otherClauses:\n # wrap up\n terminator = ';'\n # otherwise\n else:\n # leave blank\n terminator = ''\n # do we need to rename the table?\n if tableName == table.pyre_name:\n # no\n yield self.place(\"{}{}\".format(table.pyre_name, terminator))\n # otherwise\n else:\n # build a local alias for the table name\n yield self.place(\"{} AS {}{}\".format(\n table.pyre_name, tableName, terminator))\n\n # render the {WHERE} clause\n if query.where is not None:\n # do we have other clauses following the {FROM} section\n otherClauses = query.order or query.group\n # build a terminator\n terminator = '' if otherClauses else ';'\n # push out\n self.outdent()\n # build the filtering expression\n predicate = self.expression(root=query.where, context=query)\n # render the {WHERE} marker\n yield self.place(\"WHERE\")\n # push in\n self.indent()\n # and render the expression\n yield self.place(\"({}){}\".format(predicate, terminator))\n\n # render the {ORDER BY} clause\n order = query.order\n # if it exists\n if order is not None:\n # if it is not an iterable\n if not isinstance(order, collections.abc.Iterable):\n # make it one\n order = order,\n # push out\n self.outdent()\n # render the {ORDER BY} marker\n yield self.place(\"ORDER BY\")\n # push in\n self.indent()\n # build the collation expression\n collation = (self.expression(root=spec, context=query) for spec in order)\n # and render it\n yield self.place(\"{};\".format(\", \".join(collation)))\n\n # push out\n self.outdent(decrement=2)\n # all done\n return\n\n # all done\n return",
"def compileStatement(self):\n if self.token() == 'do':\n self.compileDo()\n elif self.token() == 'let':\n self.compileLet()\n elif self.token() == 'if':\n self.compileIf()\n elif self.token() == 'while':\n self.compileWhile()\n elif self.token() == 'return':\n self.compileReturn()",
"def parse_compound_statement(self):\n location = self.consume(\"{\").loc\n self.semantics.enter_compound_statement(location)\n while self.peek != \"}\":\n self.parse_statement_or_declaration()\n self.consume(\"}\")\n return self.semantics.on_compound_statement(location)",
"def _Simplify(self, parser_return):\n if parser_return.tree:\n return self._SimplifyNode(query_parser.SimplifyNode(parser_return.tree))\n return parser_return",
"def _form_query_from_data(self, row, parsed):\n d = { k:row[k] for k in row.keys() }\n q = Query(row[\"text\"], row[\"time\"])\n q.__dict__.update(d)\n if parsed:\n q.parsetree = ParseTreeNode.loads(row[\"parsetree\"])\n return q",
"def parseMT(self):\n print(\"starting\");\n ans = RowBox()\n if self.getStart():\n print(\"Found start\")\n nends = 0\n while self.checkNext(): \n print(\"Starting interpreter\")\n mb = self.nextRecord(True)\n if mb != None: #if this is true, parseMT terminates: we come here only once\n if self.recType == 1: # type LINE add all children of mb to ans\n for nmb in mb.c :\n if not nmb.isEmpty():\n ans.addChild(nmb)\n ## if(endct == 0) return ans;\n self.endct -= 1\n #elif self.subType == 1: # take last element of ans, put it in a rowbox, replace first of mb with the rowbox, finally insert mb in ans\n## used for adding exponent and index to elem\n #zb = ans.c.removeLast() #? is ans ever non-empty, here???\n #zbnew = RowBox()\n #zbnew.addChild(zb)\n #lb = mb\n #lb.c.remove(0)\n #lb.c.add(0, zbnew)\n #ans.addChild(mb)\n else: # add mb (as a block) as a single child of ans \n ans.addChild(mb)\n return ans #\n if self.recType == 0: #mb == None, if we find more than 6, stop\n nends += 1\n if nends > 6: \n return ans\n return ans #we've hit end of file",
"def _parse(self, remaining_text, tree, frontier):\n\n # If the tree covers the text, and there's nothing left to\n # expand, then we've found a complete parse; return it.\n if len(remaining_text) == 0 and len(frontier) == 0:\n if self._trace:\n self._trace_succeed(tree, frontier)\n yield tree\n\n # If there's still text, but nothing left to expand, we failed.\n elif len(frontier) == 0:\n if self._trace:\n self._trace_backtrack(tree, frontier)\n\n # If the next element on the frontier is a tree, expand it.\n elif isinstance(tree[frontier[0]], Tree):\n yield from self._expand(remaining_text, tree, frontier)\n\n # If the next element on the frontier is a token, match it.\n else:\n yield from self._match(remaining_text, tree, frontier)",
"def generate_query(self):\n self.query = self._add_select_statement() +\\\n self._add_case_statement() +\\\n self._add_from_statement() +\\\n self._add_group_by_statement()\n\n return self.query"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Strips comments from a SQL statement; does a simple test first to avoid always instantiating the expensive ParsedQuery constructor. This is useful for engines that don't support comments.
|
def strip_comments_from_sql(statement: str) -> str:
    return ParsedQuery(statement).strip_comments() if "--" in statement else statement
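
A brief usage sketch of the short-circuit described above; the sample strings are illustrative and ParsedQuery.strip_comments() is assumed to behave as in the surrounding module.

commented = "SELECT id FROM users -- only need the key"
plain = "SELECT id FROM users"

# Only the first call pays for building a ParsedQuery; the second string
# contains no "--" and is returned unchanged.
strip_comments_from_sql(commented)  # comment stripped via ParsedQuery.strip_comments()
strip_comments_from_sql(plain)      # returned as-is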
|
[
"def CleanSQL(self, sql):\n sql = self.cleanComments(sql)\n\n return sql",
"def clean_kql_query(query_string: str) -> str:\n remove_comments = re.sub(r\"(//[^\\\"\\'\\n]+)\", \" \", query_string, re.MULTILINE).strip()\n # get rid of newlines and returns\n return re.sub(r\"(\\s*\\n\\s*)\", \" \", remove_comments)",
"def test_skipComments(self):\r\n self.spitter.visitNode(Comment('foo'))\r\n self.assertNotIn('foo', ''.join(self.output))",
"def __parse_comment(gbp):\n # Only eat the comment when it is available\n gbp.handle_multiline_keyword('COMMENT', do_split=False,\n remove_keyword=False, raise_error=False)\n gbp.handle_multiline_keyword('PRIMARY', do_split=False,\n remove_keyword=False, raise_error=False)\n return []",
"def clean_comment(comment):\n return comment.strip(\"# \")",
"def _split_sql_script(self, sql):\n lines = list()\n queries = sql.split(';')\n queries = [self._remove_comments(q) for q in queries if len(q.strip()) > 0]\n return queries",
"def cleanup_comment(raw_comment):\n def pop_prepending_empty_lines(lines):\n first_non_empty_line_idx = 0\n for line in lines:\n if line == '':\n first_non_empty_line_idx += 1\n else:\n break\n return lines[first_non_empty_line_idx:]\n\n import string\n lines = raw_comment.split('\\n')\n chars_to_strip = '/' + '*' + '!' + string.whitespace\n lines = [line.lstrip(chars_to_strip) for line in lines]\n lines = pop_prepending_empty_lines(lines)\n clean_lines = []\n is_brief_comment = True\n for line in lines:\n if line == '' and is_brief_comment:\n # Skip lines that belong to brief comment.\n is_brief_comment = False\n continue\n if is_brief_comment:\n continue\n clean_lines.append(line)\n return '\\n'.join(clean_lines)",
"def _drop_sink_comment(self, comment):\n lines = comment.splitlines()\n if len(lines) > 2 and self._RE_VIA_COMMENT.match(lines[1]):\n result = lines[:2]\n for line in lines[2:]:\n if self._RE_CONSTRAINT_COMMENT.match(line):\n continue\n result.append(line)\n return \"\\n\".join(self._collapse_single_via(result))\n return comment",
"def _extract_ignore_from_comment(\n cls,\n comment: RawSegment,\n reference_map: Dict[str, Set[str]],\n ):\n # Also trim any whitespace afterward\n comment_content = comment.raw_trimmed().strip()\n comment_line, comment_pos = comment.pos_marker.source_position()\n result = cls._parse_noqa(\n comment_content, comment_line, comment_pos, reference_map\n )\n if isinstance(result, SQLParseError):\n result.segment = comment\n return result",
"def remove_comments(source):\n return re.sub(r\";.*\\n\", \"\\n\", source)",
"def remove_commentlines(self):\n\n tmp = self.main.splitlines()\n tmp = list(itertools.filterfalse(re.compile(r\"^\\s*%.*$\").match, tmp))\n self.main = \"\\n\".join(tmp)",
"def remove_comments(text):\n return re.sub(r' //.*\\n', r'', text)",
"def build_query(query_section_text):\n query_section_text = remove_comments(query_section_text)\n return query_section_text.rstrip(\"\\n;\")",
"def comment(s):\n return '\\n'.join('// ' + line if line else '' for line in s.split('\\n'))",
"def strip_comments(text):\n \n # (m?) enables multiline mode\n return re.sub(r'(?m)^ *#.*\\n?', '', text).strip()",
"def _strip_comments(code):\n return re.sub(r'(?m)^ *#.*\\n?', '', code)",
"def is_empty_statement(s):\n if not s:\n return True\n p = sqlparse.parse(s)[0]\n t = p.tokens[0]\n is_a_comment = t.ttype is not None and (t.ttype.parent == sqlparse.tokens.Comment)\n if t.ttype and is_a_comment:\n return True",
"def extract_comments(self, sid, text):\n pass",
"def get_comment_text():\n first = comment_start + len(lang.comment_start)\n return line[first:]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a list of SQL statements as strings, stripped
|
def get_statements(self) -> List[str]:
    statements = []
    for statement in self._parsed:
        if statement:
            sql = str(statement).strip(" \n;\t")
            if sql:
                statements.append(sql)
    return statements
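
For context, a hedged sketch of how a _parsed sequence like the one above is typically produced with sqlparse; the standalone form shown here is an illustration, not part of the record.

import sqlparse

raw_script = "SELECT 1; SELECT 2 ;  ;"
parsed = sqlparse.parse(raw_script)  # tuple of sqlparse Statement objects

statements = []
for statement in parsed:
    if statement:
        sql = str(statement).strip(" \n;\t")
        if sql:  # the empty trailing ";" is dropped here
            statements.append(sql)

# statements == ["SELECT 1", "SELECT 2"]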
|
[
"def format_sql_statements(sql_statements):\n sql_statements = sql_statements.strip()\n # Create a list of SQL statements with delimiter as \";\"\n sql_statements = sqlparse.split(SQL_STMNTS)\n # print \"sqls\",sql_statements\n\n for i, sql_statement in enumerate(sql_statements):\n sql_statements[i] = sql_statement.strip()\n\n for i, sql_statement in enumerate(sql_statements):\n # Format the SQL Statement by making all the Keywords uppercase\n # Keywords --> SELECT, WHERE, FROM\n if sql_statement[-1] == ';':\n sql_statements[i] = sql_statement[:-1]\n sql_statement = sql_statements[i]\n sql_statements[i] = sqlparse.format(sql_statement, keyword_case='upper')\n sql_statements[i] = sqlparse.parse(sql_statements[i])[0]\n # print sql_statements[i].tokens\n return sql_statements",
"def sql(self, dialect=None, inserts=False, creates=True,\n drops=True, metadata_source=None):\n result = [self.ddl(dialect, creates=creates, drops=drops)]\n if inserts:\n for row in self.inserts(dialect):\n result.append(row)\n return '\\n'.join(result)",
"def _split_sql_script(self, sql):\n lines = list()\n queries = sql.split(';')\n queries = [self._remove_comments(q) for q in queries if len(q.strip()) > 0]\n return queries",
"def Print_pretty_sql(self, sqlList,):\n return \" \\n\".join(sqlList)",
"def get_sql_commands(self):\r\n\t\tquery = 'SELECT * FROM sqlite_master'\r\n\t\tsql_commands = []\r\n\t\tfor rec in self.query_generic(query):\r\n\t\t\tsql_commands.append(rec['sql'])\r\n\t\treturn sql_commands",
"def getSqls(file):\n if isinstance(file, io.IOBase):\n sqls = file.read().split(\"\\n\")\n file.close()\n return sqls",
"def strip_comments_from_sql(statement: str) -> str:\n return ParsedQuery(statement).strip_comments() if \"--\" in statement else statement",
"def sql(content):\n return [item for item in content if item.extension.lower() == 'sql']",
"def compiled_sql(self):\n return self._compiled_sql",
"def get_sql_statement(self, *_) -> str:\n return self.sql_stmt.format(\n result_limit=self.config.sourceConfig.config.resultLimit,\n filters=self.filters, # pylint: disable=no-member\n )",
"def generate_sqls(job_ids: list) -> list:\n sqls = []\n try:\n for job_id in job_ids:\n sql = \"SELECT * FROM JobsInfo WHERE JobId='\" + job_id + \"'\"\n sqls.append(sql)\n except Exception as err:\n logging.error(f\"query_jobdata : generate_sqls: cannot generate sql strings: {err}\")\n\n return sqls",
"def sql(self):\n return self._sql",
"def CleanSQL(self, sql):\n sql = self.cleanComments(sql)\n\n return sql",
"def get_sqls(table_name):\n return {\n \"prepare_check\": \"SELECT relname FROM pg_class WHERE relkind='r' and relname='{0}';\".format(table_name),\n \"prepare_create\": \"create table {0} (kv_namespace VARCHAR(50), kv_key VARCHAR(100), kv_value VARCHAR(4000), kv_timestamp timestamp with time zone, primary key(kv_namespace, kv_key))\".format(table_name),\n \"get\": \"select kv_value from {0} where kv_namespace=%s and kv_key=%s\".format(table_name),\n \"get_all\": \"select kv_key, kv_value from {0} where kv_namespace=%s\".format(table_name),\n \"keys\": \"select kv_key from {0} where kv_namespace=%s\".format(table_name),\n \"set\": \"\"\"insert into {0} (kv_namespace, kv_key, kv_value, kv_timestamp) values (%s,%s,%s,%s) \n on conflict on constraint {0}_pkey\n do update set kv_namespace=%s, kv_key=%s, kv_value=%s, kv_timestamp=%s\"\"\".format(table_name),\n \"remove\": \"delete from {0} where kv_namespace=%s and kv_key=%s\".format(table_name),\n \"remove_all\": \"delete from {0} where kv_namespace=%s\".format(table_name),\n }",
"def sql(self, quoted=True):\n s = fixed_for_sql(self[:255])\n return (\"'%s'\" % s) if quoted else s",
"def queryset_to_sql(queryset):\n # Do imports here to avoid dependencies\n import sqlparse\n from django.db import connection\n\n # Compile the query to python db api\n sql, sql_params = queryset.query.get_compiler(using=queryset.db).as_sql()\n\n # Translate the python query spec into a postgres query\n with connection.cursor() as cur:\n query = cur.mogrify(sql, sql_params)\n\n # Make the query pretty and return it\n query = sqlparse.format(query, reindent=True, keyword_case='upper')\n return query",
"def raw_sql(s):\n if isinstance(s, (str, Promise)):\n return RawSql(s)\n return RawSql(str(s))",
"def get_query_sql(query=None, /, *, literal_binds: bool = True,\n pretty: bool = False):\n if query is None:\n from .. import queries\n\n query = queries.get_example_query()\n\n compiled = _backend.expression_compile(query, literal_binds=literal_binds)\n result = compiled.string\n\n if pretty and _backend.sqlparse is not None:\n result = _backend.sqlparse.format(result, reindent=True)\n return result",
"def get_col_write_statements(self):\n s=[]\n for i,f in enumerate(self.fields):\n s.append(f.get_write_statement(i))\n\n return '\\n '.join(s)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the query with the specified limit. Does not change the underlying query if the user did not apply a limit; otherwise replaces the limit with the lower value between the existing limit in the query and new_limit.
|
def set_or_update_query_limit(self, new_limit: int, force: bool = False) -> str:
    if not self._limit:
        return f"{self.stripped()}\nLIMIT {new_limit}"
    limit_pos = None
    statement = self._parsed[0]
    # Add all items to before_str until there is a limit
    for pos, item in enumerate(statement.tokens):
        if item.ttype in Keyword and item.value.lower() == "limit":
            limit_pos = pos
            break
    _, limit = statement.token_next(idx=limit_pos)
    # Override the limit only when it exceeds the configured value.
    if limit.ttype == sqlparse.tokens.Literal.Number.Integer and (
        force or new_limit < int(limit.value)
    ):
        limit.value = new_limit
    elif limit.is_group:
        limit.value = f"{next(limit.get_identifiers())}, {new_limit}"
    str_res = ""
    for i in statement.tokens:
        str_res += str(i.value)
    return str_res
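
A hypothetical usage sketch; ParsedQuery here stands for the class that owns set_or_update_query_limit and whose _limit, _parsed and stripped() members the method relies on.

q = ParsedQuery("SELECT * FROM logs LIMIT 1000")

q.set_or_update_query_limit(100)               # existing limit is higher -> "... LIMIT 100"
q.set_or_update_query_limit(5000)              # lower existing limit wins -> unchanged
q.set_or_update_query_limit(5000, force=True)  # force overrides -> "... LIMIT 5000"

ParsedQuery("SELECT * FROM logs").set_or_update_query_limit(100)
# no LIMIT present -> "SELECT * FROM logs" with "LIMIT 100" appended on a new line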
|
[
"def add_sql_limit(sql, limit):\n # strip off trialing whitespaces and add limit\n sql = sql.rstrip()\n if sql.endswith(';'):\n sql = sql[:-1]\n sql_with_limit = sql + ' LIMIT %s, %s;' % limit\n return sql_with_limit",
"def setLimit(self, limit):\n self.limit = limit\n return self",
"def _make_limit_clause(limit: int) -> psql.Composed:\n if limit != -1:\n if not isinstance(limit, int):\n raise TypeError(f\"'limit' must be a positive integer. Got {limit}\")\n return psql.SQL(\" LIMIT {limit}\").format(limit=limit)\n return psql.Composed([])",
"def limit_queryset(self):\n self.queryset = self.queryset.limit(self.limit)",
"def set_sync_limit(self, limit: int) -> Optional[int]:\n try:\n prev_limit = json.loads(self.sync_filter)['room']['timeline']['limit']\n except (json.JSONDecodeError, KeyError):\n prev_limit = None\n self.sync_filter = json.dumps({'room': {'timeline': {'limit': limit}}})\n return prev_limit",
"def getQueryLimitName(self):\n return DEFAULT_LIMIT_VARIABLE_NAME",
"def set_limit(self, limit):\n self.limits[self.api_key] = limit",
"def change_limiter(self, limiter, new_value=None):\n self.num_limit=limit.as_limiter(limiter)\n if new_value is None:\n new_value=self._value\n new_value=self._coerce_value(new_value,coerce_on_limit=True)\n if new_value!=self._value:\n self.set_value(new_value)",
"def _render_limit(limit):\n if not limit:\n return ''\n\n return \"LIMIT %s\" % limit",
"def copy(self) -> \"Limit\":\n return Limit(\n self.scan_limit,\n self.item_limit,\n self.min_scan_limit,\n self.strict,\n self.filter,\n )",
"def truncate(self, limit):\n if self.length() > limit:\n return self.normalize() * limit\n return self",
"def _calculate_limit(self, default_limit, max_limit):\n if self._limit is None:\n return default_limit\n\n return min(self._limit, max_limit)",
"def withLimitedSearch(self, searchTerms, limit):\n\t\treturn 'search='+searchTerms+'&limit='+limit+'&'",
"def fix_limit(limit):\n if limit:\n try:\n if int(limit) > 10000:\n return 10000\n return int(limit)\n except Exception:\n pass\n return 10",
"def set_limit(context, site, limit, value):\n _set_limits(context, site, ((limit, value),))",
"def test_limit(self):\n with database() as db:\n db.query('INSERT INTO test_data (variable) VALUES (1), (2), (3), (4), (5)')\n result = db.query('SELECT * FROM test_data', limit=1)\n self.assertEqual(result, [(1,)])\n result = db.query('SELECT * FROM test_data', limit=3)\n self.assertEqual(result, [(1,), (2,), (3,)])\n result = db.query('SELECT * FROM test_data')\n self.assertEqual(result, [(1,), (2,), (3,), (4,), (5,)])",
"def getLimit(self):\n return self.limit",
"def setChangeLimit(limit: 'int const') -> \"int\":\n return _coin.SoGLBigImage_setChangeLimit(limit)",
"def limit_maximum_flux(model, new_limit):\n\n if new_limit < 0:\n new_limit = new_limit * -1\n\n old_limit = model.maximum_flux\n\n if old_limit > new_limit:\n for rr in model.reactions.values():\n\n if abs(rr.upper_bound) > new_limit:\n sign = 1 if rr.upper_bound >= 0 else -1\n rr.upper_bound = new_limit*sign\n\n if abs(rr.lower_bound) > new_limit:\n sign = 1 if rr.lower_bound >= 0 else -1\n rr.lower_bound = new_limit*sign\n else:\n for rr in model.reactions.values():\n\n if abs(rr.upper_bound) == old_limit:\n sign = 1 if rr.upper_bound >= 0 else -1\n rr.upper_bound = new_limit*sign\n\n if abs(rr.lower_bound) > old_limit:\n sign = 1 if rr.lower_bound >= 0 else -1\n rr.lower_bound = new_limit*sign\n\n model._calc_max_flux()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return all the dependencies (referenced tables) from a SQL sql_text.
|
def extract_table_references(
    sql_text: str, sqla_dialect: str, show_warning: bool = True
) -> Set["Table"]:
    dialect = "generic"
    tree = None
    if sqloxide_parse:
        for dialect, sqla_dialects in SQLOXITE_DIALECTS.items():
            if sqla_dialect in sqla_dialects:
                break
        sql_text = RE_JINJA_BLOCK.sub(" ", sql_text)
        sql_text = RE_JINJA_VAR.sub("abc", sql_text)
        try:
            tree = sqloxide_parse(sql_text, dialect=dialect)
        except Exception as ex: # pylint: disable=broad-except
            if show_warning:
                logger.warning(
                    "\nUnable to parse query with sqloxide:\n%s\n%s", sql_text, ex
                )
    # fallback to sqlparse
    if not tree:
        parsed = ParsedQuery(sql_text)
        return parsed.tables
    def find_nodes_by_key(element: Any, target: str) -> Iterator[Any]:
        """
        Find all nodes in a SQL tree matching a given key.
        """
        if isinstance(element, list):
            for child in element:
                yield from find_nodes_by_key(child, target)
        elif isinstance(element, dict):
            for key, value in element.items():
                if key == target:
                    yield value
                else:
                    yield from find_nodes_by_key(value, target)
    return {
        Table(*[part["value"] for part in table["name"][::-1]])
        for table in find_nodes_by_key(tree, "Table")
    }
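
A hedged usage sketch; it assumes sqloxide is importable (otherwise the ParsedQuery fallback runs) and that Table is the schema-qualified tuple used by the surrounding module.

sql = """
SELECT e.id, u.name
FROM raw.events e
JOIN analytics.users u ON u.id = e.user_id
"""
tables = extract_table_references(sql, sqla_dialect="postgresql")
# With sqloxide available this is expected to yield something like
# {Table("events", "raw"), Table("users", "analytics")}; if sqloxide
# cannot parse the text, the sqlparse-based ParsedQuery path is used instead.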
|
[
"def _split_sql_script(self, sql):\n lines = list()\n queries = sql.split(';')\n queries = [self._remove_comments(q) for q in queries if len(q.strip()) > 0]\n return queries",
"def _get_dependencies(pkgbuild):\n\n print(f\"Getting all dependencies within PKGBUILD file\")\n dependencies = []\n\n within_depends = False\n for line in pkgbuild.split('\\n'):\n\n # Remove any unnecessary whitespace\n line = line.strip()\n\n # Search until we find depends\n if not within_depends and line.startswith('depends'):\n within_depends = True\n continue\n\n # Extract the packages\n if within_depends and line != ')':\n # Remove comments\n pkgs = [pkg for pkg in re.sub('#.*', '', line).strip().split(' ')\n if len(pkg) > 0]\n dependencies.extend(pkgs)\n\n # Continue until the closing bracket\n if within_depends and line == ')':\n within_depends = False\n\n print(f\"Pulled {len(dependencies)} dependencies\")\n return dependencies",
"def getDependencies( fileContent ):\n Any.requireIsTextNonEmpty( fileContent )\n\n depList = []\n regexp = re.compile( r\"^\\s*bst_find_package\\s*\\((.*)\\)\\s*$\" )\n\n for line in fileContent.splitlines():\n tmp = regexp.search( line )\n\n if tmp:\n # remove trailing slashes if present, e.g.:\n # bst_find_package(Libraries/Data/C-common/RoadPoints/1.0/)\n # because it violates the regexp for canonical paths\n data = tmp.group(1)\n data = data[:-1] if data[-1] == '/' else data\n\n depList.append( 'sit://' + data )\n\n return depList",
"def get_dependendents(word_id, dependencies, dependency):\n dependent_words = []\n for _dependency, word, dependent in dependencies:\n if word == word_id and _dependency in dependency:\n dependent_words.append(dependent)\n return dependent_words",
"def GerritDependencies(self):\n results = []\n for d in self.patch_dict.get('dependsOn', []):\n gerrit_number = d.get('number')\n if gerrit_number is not None:\n gerrit_number = ParseGerritNumber(gerrit_number, error_ok=False)\n\n change_id = d.get('id')\n if change_id is not None:\n change_id = ParseChangeID(change_id, error_ok=False)\n\n sha1 = d.get('revision')\n if sha1 is not None:\n sha1 = ParseSHA1(sha1, error_ok=False)\n\n if not gerrit_number and not change_id and not sha1:\n raise AssertionError(\n 'While processing the dependencies of change %s, no \"number\", \"id\",'\n ' or \"revision\" key found in: %r' % (self.gerrit_number, d))\n\n results.append(\n PatchQuery(self.remote, project=self.project,\n tracking_branch=self.tracking_branch,\n gerrit_number=gerrit_number,\n change_id=change_id, sha1=sha1))\n return results",
"def populate_sql_with_dependency_specifiers(deps, db_fname=None):\n log = depresolve.logging.getLogger('populate_sql_with_dependency_specifiers')\n log.info(\"Initializing db\")\n\n # Initialize the sqlite3 database that will be populated with dependency\n # information as interpreted from the json files above.\n initialize(db_fname)\n\n for distkey in deps:\n log.info(\"Working through \" + distkey + \"'s dependencies.\")\n\n assume_dep_data_exists_for(distkey, deps)\n\n if not deps[distkey]:\n log.info(distkey + ' has no dependencies. Adding to that table.')\n\n for dep in deps[distkey]: # for every one of its dependencies,\n satisfying_packagename = dep[0]\n spectuples = dep[1]\n specstring = spectuples_to_specstring(spectuples)\n\n log.info(\" satisfying_packagename:\" + satisfying_packagename)\n log.info(\" specstring: \" + specstring)\n\n add_to_table(\n SQL_DEP_SPECIFIER_TABLE,\n distkey,\n satisfying_packagename,\n specstring)\n\n flush()",
"def sql(content):\n return [item for item in content if item.extension.lower() == 'sql']",
"def clean_and_split_sql_v2(sql: str) -> List[str]:\n sql_tokens: List[str] = []\n # fixes that were seen in training data\n if re.findall(r\"SELECT DISTINCT\\s*\\(\\s*[A-Z_]+alias[0-9]\\.[A-Z_]+\\s*\\)\", sql):\n sql = re.sub(r\"SELECT DISTINCT\\s*\\(\\s*([A-Z_]+alias[0-9]\\.[A-Z_]+)\\s*\\)\", r\"SELECT DISTINCT \\g<1>\", sql)\n sql = fix_specific_examples(sql)\n # tokenize\n for token in sql.strip().split():\n token = token.replace('\"', \"'\").replace(\"%\", \"\").replace('(', ' ( ').replace(\",\", \" , \")\n sql_tokens.extend(token.strip().split())\n\n return sql_tokens",
"def dependency_lines(self):\n deps = sorted(self._dependencies_by_address.values(), key=lambda d: d.spec)\n\n def dep_lines():\n yield '{}dependencies = ['.format(' ' * self._indent)\n for dep in deps:\n for line in dep.lines():\n yield line\n yield '{}],'.format(' ' * self._indent)\n return list(dep_lines()) if deps else []",
"def search_dependencies(self):\n result = [self.module.name]\n #First we look at the explicit use references from this module and all\n #its dependencies until the chain terminates.\n stack = self.needs\n while len(stack) > 0:\n module = stack.pop()\n if module in result:\n continue\n \n self.parent.load_dependency(module, True, True, False)\n if module in self.parent.modules:\n for dep in self.parent.modules[module].needs:\n modname = dep.split(\".\")[0]\n if modname not in result:\n result.append(modname)\n if modname not in stack:\n stack.append(modname)\n\n #Add any incidentals from the automatic construction of code. These can be from\n #executables that use special precision types without importing them or from\n #derived types. Same applies to the local members of this module.\n for ekey, anexec in list(self.executables.items()):\n for dep in anexec.search_dependencies():\n if dep is not None and dep not in result:\n result.append(dep)\n\n for member in list(self.members.values()):\n dep = member.dependency()\n if dep is not None and dep not in result:\n result.append(dep)\n \n return result",
"def mkl_deps():\n return select({\n str(Label(\"//third_party/mkl_dnn:build_with_mkl_dnn_only\")): [\"@mkl_dnn\"],\n str(Label(\"//third_party/mkl_dnn:build_with_mkl_dnn_v1_only\")): [\"@mkl_dnn_v1//:mkl_dnn\"],\n str(Label(\"//third_party/mkl:build_with_mkl_ml_only\")): [\"//third_party/mkl:intel_binary_blob\"],\n str(Label(\"//third_party/mkl:build_with_mkl\")): [\n \"//third_party/mkl:intel_binary_blob\",\n \"@mkl_dnn\",\n ],\n \"//conditions:default\": [],\n })",
"def GetPaladinDeps(commit_message):\n PALADIN_DEPENDENCY_RE = re.compile(r'^([ \\t]*CQ.?DEPEND.)(.*)$',\n re.MULTILINE | re.IGNORECASE)\n PATCH_RE = re.compile('[^, ]+')\n EXPECTED_PREFIX = 'CQ-DEPEND='\n matches = PALADIN_DEPENDENCY_RE.findall(commit_message)\n dependencies = []\n for prefix, match in matches:\n if prefix != EXPECTED_PREFIX:\n msg = 'Expected %r, but got %r' % (EXPECTED_PREFIX, prefix)\n raise ValueError(msg)\n for chunk in PATCH_RE.findall(match):\n chunk = ParsePatchDep(chunk, no_sha1=True)\n if chunk not in dependencies:\n dependencies.append(chunk)\n return dependencies",
"def build_deps(dep_workbench, dependent_object):\n core = dep_workbench.get_plugin('enaml.workbench.core')\n dep = core.invoke_command(ANALYSE, {'obj': dependent_object})\n return dep.dependencies",
"def dep_calculater(sentence, nlp_module):\n nlp = nlp_module\n doc = nlp(sentence)\n sent_dep_list=[]\n for token in doc:\n sent_dep_list.append(token.dep_)\n return(sent_dep_list)",
"def _find_dependency_libraries(self):\n\n libs = []\n # Find the .dynamic section\n dsectionh = self._find_section('.dynamic')\n if dsectionh == None:\n return libs\n\n # compile list of needed libraries\n self.seek(dsectionh['offset'],0)\n for i in xrange(0,dsectionh['size'],dsectionh['entsize']):\n # tag value of 1 means the resource is 'needed'\n self.seek(dsectionh['offset'] + i,0)\n tag = self.le_sxword()\n if tag == 1:\n # Next is string table index of needed library name\n # union(word,addr) in 32-bit, union(xword,addr) in 64-bit\n strndx = self.le_addr()\n self.seek(self.dynstrh['offset']+strndx,0)\n libs.append(self.read_to_null())\n\n return libs",
"def get_tokens(sql):\n ddl_keyword = \"\"\n dml_keyword = \"\"\n none_identifiers = []\n wildcard_token = \"\"\n keywords = []\n aggregate_keyword = \"\"\n\n for token in sql.tokens:\n # print token.ttype, token.value\n if token.ttype == Keyword.DDL:\n ddl_keyword = token.value.upper()\n elif token.ttype == Keyword.DML:\n dml_keyword = token.value.upper()\n elif token.ttype == Wildcard:\n wildcard_token = token.value\n elif token.ttype == Keyword:\n keywords.append(token.value)\n elif token.ttype is None:\n none_identifiers.append(token.value)\n\n if wildcard_token != \"*\":\n if len(none_identifiers[0].split(\"(\")) > 1:\n aggregate_keyword = none_identifiers[0].split(\"(\")[0].upper()\n return ddl_keyword, dml_keyword, none_identifiers, wildcard_token, keywords, aggregate_keyword",
"def getOrder(text):\n \"\"\"Loop through while there still exist dependencies, do the first element with no dependencies left\"\"\"\n dependencies = [' ']*26\n finalOrder = ''\n alphaDict = {k: v for v, k in enumerate(string.ascii_uppercase)}\n \n #Value v at position p in dependencies: steps v to be completed before p\n for line in text:\n dependencies[alphaDict[line[7]]] += line[1]\n\n #26 iterations sufficient to find order, remove 'no dependencies', update rest accordingly\n for x in range(26):\n for value, dependency in enumerate(dependencies):\n if dependency == ' ':\n finalOrder += string.ascii_uppercase[value]\n dependencies[value] = '0'\n #Remove all instances of the corresponding letter from all dependencies\n for y in range(26):\n dependencies[y] = dependencies[y].replace(string.ascii_uppercase[value], '')\n break\n \n return finalOrder",
"def get_dependencies(self):\n definition = self.column_json\n dependencies = []\n\n if self.is_op_calc():\n # This table has to be populated\n dependencies.append(self.table)\n\n # Input column objects for which we need to find definitions\n inputs = self.get_inputs()\n dependencies.extend(self.table.get_definitions_for_columns(inputs))\n\n # Remove self-dependency\n dependencies = [x for x in dependencies if x != self]\n\n # TODO: input columns can be column paths\n\n elif self.is_op_link():\n # This (fact) table has to be populated\n dependencies.append(self.table)\n\n # Input (fact table) columns or column paths have to be evaluated\n main_keys = definition.get('keys', [])\n dependencies.extend(self.table.get_definitions_for_columns(main_keys))\n\n # Target (linked) table has to be populated\n linked_table_name = definition.get('linked_table', '')\n linked_table = self.table.workflow.get_table(linked_table_name)\n dependencies.append(linked_table)\n\n # Target columns have to be evaluated in order to contain values. However, they are supposed to be attributes and hence they will be set during population.\n # If we can link to derived columns, then they have to be populated. It might be reasonable, if some attributes are transformed into another form, say date/time conversion to an interval id.\n linked_keys = definition.get('linked_keys', [])\n dependencies.extend(linked_table.get_definitions_for_columns(linked_keys))\n\n elif self.is_op_compose():\n # This (main) table has to be populated\n dependencies.append(self.table)\n\n inputs = definition['inputs']\n # TODO: Validty check. Two elements must be provided in a compose column. (If not two, then they had to be converted to only two by merging them.)\n\n # Link column (first segment) has to be evaluated\n link_column_name = next(iter(inputs), None)\n link_column_definitions = self.table.get_definitions_for_columns(link_column_name)\n link_column_definition = next(iter(link_column_definitions), None)\n dependencies.append(link_column_definition)\n\n # Linked column path (tail) in the linked table has to exist (recursion)\n linked_table_name = link_column_definition.column_json['linked_table']\n linked_table = self.table.workflow.get_table(linked_table_name)\n linked_column_name = inputs[1] if len(inputs) > 1 else None\n\n linked_column_definitions = linked_table.get_definitions_for_columns(linked_column_name)\n linked_column_definition = next(iter(linked_column_definitions), None)\n if linked_column_definition: # A linked column might not have a definition, e.g., an attribute\n dependencies.append(linked_column_definition)\n # Here we assume that the tail dependencies will be retrieved separately.\n # Alternatively, we could retrieve them here using recursion\n\n # Lined table has to be populated. (Yet, it will added to dependency by the link column.)\n dependencies.append(linked_table)\n\n elif self.is_op_aggregate():\n # This table has to be populated\n dependencies.append(self.table)\n\n # The fact table has to be already populated\n fact_table_name = definition.get('fact_table')\n fact_table = self.table.workflow.get_table(fact_table_name)\n dependencies.append(fact_table)\n\n # Group column\n group_column_name = definition.get('group_column')\n group_column = fact_table.get_column(group_column_name)\n dependencies.append(group_column)\n\n # Measure columns\n inputs = self.get_inputs()\n dependencies.extend(fact_table.get_definitions_for_columns(inputs))\n\n else:\n return []\n\n return dependencies",
"def _get_dependencies_for_model(self, app_label, model_name):\n dependencies = []\n model_state = self.to_state.models[app_label, model_name]\n for field in model_state.fields.values():\n if field.is_relation:\n dependencies.extend(\n self._get_dependencies_for_foreign_key(\n app_label,\n model_name,\n field,\n self.to_state,\n )\n )\n return dependencies"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests for ResourceSet. Currently does not test ResourceInfo, as it is not implemented in the wrapper yet. Otherwise this is a direct manual port of the cxx test.
|
def RSTest():
    status = 0
    result = False
    n = 0
    resourceSet = smtk.common.ResourceSet()
    system1 = smtk.attribute.System.New()
    result = resourceSet.addResource(system1, "system1", "", smtk.common.ResourceSet.TEMPLATE)
    n = resourceSet.numberOfResources()
    if result == False:
        print("addResource() call failed")
        status = status + 1
    elif n != 1:
        print("Wrong number of resources: %i, should be 1" % n)
        status = status + 1
    system2 = smtk.attribute.System.New()
    result = resourceSet.addResource(system2, "system2", "path2", smtk.common.ResourceSet.INSTANCE)
    n = resourceSet.numberOfResources()
    if result == False:
        print("addResource() call failed")
        status = status + 1
    elif n != 2:
        print("Wrong number of resources: %i, should be 2" % n)
        status = status + 1
    result = resourceSet.addResource(system1, "system1-different-id", "", smtk.common.ResourceSet.SCENARIO)
    n = resourceSet.numberOfResources()
    if result == False:
        print("addResource() call failed")
        status = status + 1
    elif n != 3:
        print("Wrong number of resources: %i, should be 3" % n)
        status = status + 1
    result = resourceSet.addResource(system2, "system2")
    n = resourceSet.numberOfResources()
    if result == True:
        print("addResource() call didn't fail as expected")
        status = status + 1
    elif n != 3:
        print("Wrong number of resources: %i, should be 3" % n)
        status = status + 1
    ids = resourceSet.resourceIds()
    if len(ids) != 3:
        print("Wrong number of ids: %i, should be 3" % len(ids))
        status = status + 1
    else:
        expectedNames = ["system1", "system2", "system1-different-id"]
        for i in range(len(ids)):
            if ids[i] != expectedNames[i]:
                print("Wrong resource name %s, should be %s" % (ids[i], expectedNames[i]))
                status = status + 1
    # Missing: ResourceInfo tests (function not implemented)
    # Note: ResourcePtr is not implemented (and cannot be, due to Resource being abstract -- shiboken issues)
    # Note: ResourceSet.get is modified by shiboken to return a ResourcePtr/shared_ptr<Resource>
    resource = resourceSet.get("system2")
    if resource == None:
        print("get() failed")
        status = status + 1
    rtype = resource.resourceType()
    if rtype != smtk.common.Resource.ATTRIBUTE:
        print("Incorrect resource type %s, should be smtk.common.Resource.ATTRIBUTE" % rtype)
        status = status + 1
    print("Number of errors: %i" % status)
    return status
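
A hedged sketch of how a status-returning test like this is commonly driven; the sys import and the __main__ guard are illustrative additions, not part of the record above.

import sys

if __name__ == "__main__":
    # Exit code 0 means RSTest() counted no errors.
    sys.exit(RSTest())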
|
[
"def test_vmware_service_resources_management_get(self):\n pass",
"def test_get_run_resources(self):\n pass",
"def test_load_response_descriptor_tag_sets_tag_set_tag_set_resource_spaces(self):\n pass",
"def test_get_resources_success(self):\n resources = self.template.get_resources()\n assert len(resources) == 8",
"def test_read_available_resource_actions(self):\n pass",
"def test_resource_tag_resource_get_tag_get(self):\n pass",
"def test_vsphere_computeresources(self):\n config = {}\n self.load_check(config)\n self.check._is_excluded = MagicMock(return_value=False)\n\n # get the client\n client = vsphere_client()\n\n # list_attached_tags method returns list of tags ids of type string\n client.tagging.TagAssociation.list_attached_tags = MagicMock(return_value=[])\n\n # assign the vsphere client object to the check vsphere client object\n self.check.client = client\n\n content_mock = self.mock_content(\"compute\")\n obj_list = self.check._vsphere_computeresources(content_mock, \"ESXi\")\n\n # expect an empty identifier list\n self.assertEqual(len(obj_list[0]['topo_tags']['identifiers']), 0)\n\n # Check if computeresources has tags name and topo_type\n self.assertEqual(len(obj_list), 1)\n self.assertEqual(obj_list[0]['topo_tags']['name'], 'localhost')\n self.assertEqual(obj_list[0]['topo_tags']['topo_type'], 'vsphere-ComputeResource')\n # Check if computeresources list contains host and datastore\n self.assertEqual(obj_list[0]['topo_tags']['hosts'][0], 'localhost.localdomain')\n self.assertEqual(obj_list[0]['topo_tags']['datastores'][0], 'WDC1TB')",
"def test_load_response_descriptor_tag_sets_tag_set_tag_set_resource(self):\n pass",
"def test_create_response_descriptor_tag_sets_tag_set_tag_set_resource_spaces(self):\n pass",
"def test_index_response_descriptor_tag_sets_tag_set_tag_set_resource_spaces(self):\n pass",
"def test_get_resources_by_name(self):\n self.assertEqual(\n [PudlResourceKey(\"epacems\", \"123\", \"second-blue\")],\n list(self.descriptor.get_resources(name=\"second-blue\")),\n )",
"def test_vmware_service_resources_image_get(self):\n pass",
"def test_modify_response_descriptor_tag_sets_tag_set_tag_set_resource_spaces(self):\n pass",
"def test_resource_type(self):\r\n resource_type = get_resource_type(\"merged-0006_application_counterApp-AZFiles.json\") # pylint: disable=line-too-long\r\n self.assertEqual(resource_type, ResourceType.application)\r\n resource_type = get_resource_type(\"merged-0001_secret_azurefilesecret.json\")\r\n self.assertEqual(resource_type, ResourceType.secret)\r\n resource_type = get_resource_type(\"merged-0002_secretValue_azurefilesecret_v1.json\") # pylint: disable=line-too-long\r\n self.assertEqual(resource_type, ResourceType.secretValue)\r\n resource_type = get_resource_type(\"merged-0003_volume_counterVolumeWindows.json\") # pylint: disable=line-too-long\r\n self.assertEqual(resource_type, ResourceType.volume)\r\n resource_type = get_resource_type(\"merged-0004_network_counterAppNetwork.json\")\r\n self.assertEqual(resource_type, ResourceType.network)\r\n resource_type = get_resource_type(\"merged-0005_gateway_counterAppGateway.json\")\r\n self.assertEqual(resource_type, ResourceType.gateway)\r\n with self.assertRaises(Exception):\r\n resource_type = get_resource_type(\"merged-0005_something_counterAppGateway.json\") # pylint: disable=line-too-long\r\n with self.assertRaises(Exception):\r\n resource_type = get_resource_type(\"invalid-file-name.json\")",
"def testResourceMgr(self):\n\n class Res(object):\n \"\"\" simple resource class that dump action to logger for the manager\"\"\"\n\n def __init__(self, name):\n self.name = name\n logger.debug(\"resource %s created\", name)\n\n def close(self):\n \"\"\" close the res \"\"\"\n logger.debug(\"%s disposed\", self.name)\n\n def run(self):\n \"\"\" simulate producer method that return value for the caller \"\"\"\n logger.debug(\"Your are making use of resource provided by(%s)\",\n self.name)\n return self.name\n\n def _newres():\n return Res(str(random.randint(1, 9999)))\n\n def _dispose(res):\n res.close()\n\n mgr = ResourceMgr(_newres, _dispose)\n with ResourceCtx(mgr) as r:\n with ResourceCtx(mgr) as r1:\n self.assertTrue(r == r1,\n \"double fetch, new resource won't be return\")\n self.assertTrue(r.run() == r.name,\n \"Yes, it's the object expected\")\n\n # multi resources into one resource context\n mgr1 = ResourceMgr(_newres, _dispose)\n with ResourceCtx([mgr, mgr1]) as r:\n r[0].run()\n r[1].run()\n\n # Resource is None\n with ResourceCtx(None) as cur:\n self.assertTrue(cur is None)\n\n with ResourceCtx((None, None)) as curs:\n self.assertEqual(2, len(curs))\n self.assertTrue(curs[0] is None)\n self.assertTrue(curs[1] is None)",
"def test_vmware_service_resources_regions_get(self):\n pass",
"def test_resource_tag_resource_find_tags_get(self):\n pass",
"def test_get_resource(self):\r\n\r\n hot_tpl = hot_tpl_generic_resource\r\n self.stack = parser.Stack(self.ctx, 'test_get_resource',\r\n template.Template(hot_tpl))\r\n self.stack.store()\r\n self.stack.create()\r\n self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),\r\n self.stack.state)\r\n\r\n snippet = {'value': {'get_resource': 'resource1'}}\r\n self.assertEqual({'value': 'resource1'}, self.resolve(snippet))",
"def test_created_app_resources(self):\n resources = [('app_domain', self.template['resources'][\n 'app_domain']['type']),\n ('app', self.template['resources'][\n 'app']['type']),\n ('web_tier', self.template[\n 'resources']['web_tier']['type']),\n ('db_tier', self.template['resources'][\n 'db_tier']['type']),\n ('mysql_svc', self.template['resources'][\n 'mysql_svc']['type']),\n ('flow1', self.template['resources'][\n 'flow1']['type']),\n ('web_port', self.template['resources'][\n 'web_port']['type']),\n ('db_port', self.template['resources'][\n 'db_port']['type'])]\n for resource_name, resource_type in resources:\n resource = self.test_resources.get(resource_name, None)\n self.assertIsInstance(resource, dict)\n self.assertEqual(resource_name, resource['logical_resource_id'])\n self.assertEqual(resource_type, resource['resource_type'])\n self.assertEqual('CREATE_COMPLETE', resource['resource_status'])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create and compile a TeX file to represent the CTW tree.
|
def drawCTWTree(root, filename='ctw_tree', show_probs=False):
    node = root
    file = open('{}.tex'.format(filename), 'w')
    # Write header
    file.writelines(
        ["\\documentclass[tikz,border=10pt]{standalone}\n",
         "\\usepackage[linguistics]{forest}\n",
         "\\begin{document}\n",
         "\\begin{forest}\n",
         "for tree={grow=west}\n"]
    )
    file.write("[")
    drawCTWNodes(file, root, show_probs)
    file.write("]\n")
    file.writelines(
        ["\\end{forest}\n",
         "\\end{document}"]
    )
    file.close()
    os.system("pdflatex -interaction=batchmode {}.tex".format(filename))
|
[
"def drawCTMTree(root, filename='ctm_tree', show_probs=False):\n\tnode = root\n\tfile = open('{}.tex'.format(filename), 'w')\n\t# Write header\n\tfile.writelines(\n\t\t[\"\\\\documentclass[tikz,border=10pt]{standalone}\\n\",\n\t\t\"\\\\usepackage[linguistics]{forest}\\n\",\n\t\t\"\\\\begin{document}\\n\",\n\t\t\"\\\\begin{forest}\\n\",\n\t\t\"for tree={grow=west}\\n\"]\n\t\t)\n\t\n\tfile.write(\"[\")\n\tdrawCTMNodes(file,root, show_probs)\n\tfile.write(\"]\\n\")\n\n\tfile.writelines(\n\t\t[\"\\\\end{forest}\\n\",\n\t\t\"\\\\end{document}\"]\n\t\t)\n\tfile.close()\n\n\tos.system(\"pdflatex -interaction=batchmode {}.tex\".format(filename))",
"def generate_tex_file(expression):\n result = os.path.join(TEX_DIR_PATH, str(hash(expression))) + '.tex'\n\n if not os.path.exists(TEX_DIR_PATH):\n os.mkdir(TEX_DIR_PATH)\n print(\"Subdirectory '%s' did not exist, has been created.\" % TEX_DIR_NAME)\n else:\n pass\n\n\n tex_hack = '\\n'.join(['\\setlength{\\unitlength}{1ex}%',\n '\\\\begin{picture}(0,1)',\n '\\\\put(0,0){\\\\line(0,1){1}}',\n '\\\\end{picture}%',\n '\\\\hspace{-0.75pt}%'])\n with open(TEMPLATE_LATEX_FILE_PATH, 'r') as infile:\n body = infile.read()\n body = body.replace(TEX_TEXT_TO_REPLACE, '\\n'.join([tex_hack, expression]))\n \n with open (result, 'w') as outfile:\n outfile.write(body)\n\n return result",
"def compile(self):\n\n\t\tself.save_images(Settings.tmp_dir)\n\n\t\ttex_file = path.join(Settings.tmp_dir, 'pgf_{0}_{1}.tex'.format(Figure._session, self._idx))\n\t\tpdf_file = path.join(Settings.tmp_dir, 'pgf_{0}_{1}.pdf'.format(Figure._session, self._idx))\n\n\t\tcommand = Settings.pdf_compile.format('-output-directory {0} {1}')\n\t\tcommand = command.format(Settings.tmp_dir, tex_file)\n\n\t\t# write LaTeX file\n\t\twith open(tex_file, 'w') as handle:\n\t\t\thandle.write(self.render())\n\n\t\t# compile\n\t\tif system('cd \"{0}\" && {1}'.format(Settings.tmp_dir, command)):\n\t\t\traise RuntimeError('Compiling TeX source file to PDF failed.')\n\n\t\treturn pdf_file",
"def buildtree(filename):\n \n #FIXME\n pass",
"def make_tc_files():\n\n make_dirs()\n\n for i in range(0, TEST_FILES + 1):\n in_file = os.path.join(IN_SOURCE, f'input{i:02d}.txt')\n out_file = os.path.join(OUT_SOURCE, f'output{i:02d}.txt')\n sys.stdout = open(in_file, 'w+')\n\n required_input = RINT(5, POWER(10, (i // 2) + 1))\n print(required_input) # Prints x into input file\n for _ in range(required_input):\n print(RINT(1, POWER(10, min(4, max(i // 2, 2)))))\n\n sys.stdout = sys.__stdout__\n\n generate(TEST_LANG, i)\n\n make_lf_ending(in_file)\n make_lf_ending(out_file)\n\n yield\n\n shutil.rmtree(IN_SOURCE)\n shutil.rmtree(OUT_SOURCE)",
"def compiletofile(self, texfile, styfile=\"pytem.sty\"):\n self.writetofile(styfile)\n compileLaTeX(\n os.path.abspath(os.path.curdir), os.path.abspath(os.path.curdir), texfile\n )",
"def prepare_tex_file(fname, to_the_end=False):\n if to_the_end:\n with open(fname, 'a') as f:\n f.write(\"\\n\\n\\end{enumerate}\\n\\end{document}\")\n else:\n with open(fname, 'w') as f:\n f.write(r\"\"\"\\documentclass[11pt]{article}\n\n\\marginparwidth 0.5in\n\\oddsidemargin 0.25in\n\\evensidemargin 0.25in\n\\marginparsep 0.25in\n\\topmargin 0.25in\n\\textwidth 6in \\textheight 8 in\n\n\\newcommand{\\key}[1]{\\textcolor{lightgray}{#1}}\n\n\\newcounter{CQuery}\n\\newcounter{CStatement}\n\\newcounter{CClick}\n\n\\usepackage{amsmath}\n\\usepackage{hyperref}\n\\usepackage{xcolor}\n\n\\begin{document}\n\\author{}\n\\title{Synthetic Data}\n\\maketitle\n\n\\setcounter{CQuery}{1}\n\\setcounter{CStatement}{1}\n\\setcounter{CClick}{1}\n\n\\begin{enumerate}\"\"\")",
"def drawCTWNodes(file, node, show_probs=False):\n\tif node.label == '':\n\t\tfile.write(\"{{$\\\\lambda$, {}\\\\\\\\ $P_e$={}\\\\\\\\ $P_w$={}}}\\n\".format(node.count,2**node.log2Pe,2**node.log2Pw))\n\telse:\n\t\tfile.write(\"{{`{}\\', {}\\\\\\\\ $P_e$={}\\\\\\\\ $P_w$={}}}\\n\".format(node.label,node.count,2**node.log2Pe,2**node.log2Pw))\n\tfor child in node.children:\n\t\tif child != None:\n\t\t\tfile.write(\"[\")\n\t\t\tif child.isLeaf():\n\t\t\t\tNone\n\t\t\t\tfile.write(\"{{`{}\\', {}\\\\\\\\ $P_e$={}\\\\\\\\ $P_w$={}}}\\n\".format(child.label,child.count,2**child.log2Pe,2**child.log2Pw))\n\t\t\telse:\n\t\t\t\tdrawCTWNodes(file,child)\n\t\t\tfile.write(\"]\\n\")",
"def compile(self, filename):\n\t\t\n\t\t# Check that file exist\n\t\tif os.path.exists(filename) is False:\n\t\t\tprint('File {0} does not exist'.format(filename))\n\t\t\texit()\n\t\t\t\n\t\t# Check that file is not a directory\n\t\tif os.path.isfile(filename) is False:\n\t\t\tprint('File {0} is not a file'.format(filename))\n\t\t\texit()\n\t\t\t\n\t\t# Check if file is readable\n\t\tif os.access(filename, os.R_OK) is False:\n\t\t\tprint('File {0} is not readable'.format(filename))\n\t\t\texit()\n\t\t\n\t\t# Open file and read lines\n\t\tfile = open(filename)\n\t\tlines = file.readlines()\n\t\tfile.close()\n\t\t\n\t\t# Remove comments\n\t\tlines = self.removeMultilineComments(lines)\n\t\tlines = self.removeSinglelineComments(lines)\n\t\t\n\t\t# Remove empty lines\n\t\tlines = self.removeEmptyLines(lines)\n\t\t\n\t\t# Search for includes\n\t\tincludes = self.addIncludes(lines)\n\t\t\n\t\t# Remove includes lines\n\t\tlines = self.removeIncludes(lines)\n\t\t\n\t\t# Create root tags\n\t\ttags = self.findTags(0, lines)\n\t\tcontent = ''\n\t\tfor tagInfo in tags:\n\n\t\t\ttag = self.createTag(tagInfo['tagLine'], tagInfo['contentLines'])\n\t\t\n\t\t\n\t\t\tcontent += str(tag)\n\t\t\t\n\t\t# Compile html\n\t\thtml = self.template.render({\n\t\t\t'title': 'Min webbsida',\n\t\t\t'content': content,\n\t\t\t'styles': includes['styles'],\n\t\t\t'javascripts': includes['javascripts']\n\t\t})\n\t\t\n\t\treturn html",
"def build_toc_file(self):\n\n filename = path.join(self.outdir, 'helptoc.xml')\n\n with open(filename, 'w', encoding='utf-8') as f:\n\n f.write(\"\"\"<?xml version='1.0' encoding=\"utf-8\"?>\\n\"\"\")\n f.write(\"\"\"<toc version=\"2.0\">\\n\"\"\")\n f.write('<tocitem target=\"{}\">{}\\n'.format(\n self.config.master_doc + '.html', self.config.project))\n\n toctree = self.env.get_and_resolve_doctree(self.config.master_doc, self,\n prune_toctrees=False)\n visitor = ToCTreeVisitor(toctree)\n matcher = NodeMatcher(addnodes.compact_paragraph, toctree=True)\n for node in toctree.traverse(matcher): # type: addnodes.compact_paragraph\n node.walkabout(visitor)\n\n f.write(visitor.astext() + '\\n')\n\n f.write('</tocitem>\\n')\n f.write('</toc>\\n')",
"def makeTxt():\n print('start')\n model = KeyedVectors.load_word2vec_format('\\\\\\\\smbhome.uscs.susx.ac.uk\\\\ls612\\\\Documents\\\\Dissertation\\\\LSTM-PICO-Detection-master\\\\other_spyder\\\\Extended embeddings\\\\2019-07-19-09-34-51-bigrams_FINAL.bin', binary=True)#, limit = 20 for tests\n model.save_word2vec_format('\\\\\\\\smbhome.uscs.susx.ac.uk\\\\ls612\\\\Documents\\\\Dissertation\\\\Data\\\\extended.txt', binary=False)\n print('done creating text files')",
"def compile(self, tree: list, output_file: str):\n print('compiling to %s [%s]' % (output_file, self.name))\n # merge duplicates in tree\n tree = self.merge_duplicates(tree)\n\n out = open(output_file, 'w')\n out.write(self.header)\n\n for node in tree:\n # compile node\n out.write(self.compile_node(node))\n\n print('%scompiled %i keys' % (Fore.GREEN, len(tree)))\n out.close()",
"def write_tex():\n datadir = livvkit.index_dir\n outdir = os.path.join(datadir, \"tex\")\n print(outdir)\n # functions.mkdir_p(outdir)\n\n data_files = glob.glob(datadir + \"/**/*.json\", recursive=True)\n\n for each in data_files:\n data = functions.read_json(each)\n tex = translate_page(data)\n outfile = os.path.join(outdir, os.path.basename(each).replace('json', 'tex'))\n with open(outfile, 'w') as f:\n f.write(tex)",
"def createSyntaxFile():\n try:\n from . import Paths\n from .JSONFile import JSONFile\n except:\n from libs import Paths\n from libs.JSONFile import JSONFile\n\n keywords = getKeywords()\n\n LITERAL1s = []\n KEYWORD1s = []\n KEYWORD2s = []\n KEYWORD3s = []\n\n # set keywords\n for k in keywords:\n for w in k.get_keywords():\n if 'LITERAL1' in w.get_type():\n LITERAL1s.append(w.get_id())\n if 'KEYWORD1' in w.get_type():\n KEYWORD1s.append(w.get_id())\n if 'KEYWORD2' in w.get_type():\n KEYWORD2s.append(w.get_id())\n if 'KEYWORD3' in w.get_type():\n KEYWORD3s.append(w.get_id())\n\n # formating\n LITERAL1s = set(LITERAL1s)\n LITERAL1s = '|'.join(LITERAL1s)\n KEYWORD1s = set(KEYWORD1s)\n KEYWORD1s = '|'.join(KEYWORD1s)\n KEYWORD2s = set(KEYWORD2s)\n KEYWORD2s = '|'.join(KEYWORD2s)\n KEYWORD3s = set(KEYWORD3s)\n KEYWORD3s = '|'.join(KEYWORD3s)\n\n # get sintax preset\n sintax_path = Paths.getSyntaxPath()\n sintax_file = JSONFile(sintax_path)\n sintax = sintax_file.readFile()\n\n # replace words in sintax file\n sintax = sintax.replace('${LITERAL1}', LITERAL1s)\n sintax = sintax.replace('${KEYWORD1}', KEYWORD1s)\n sintax = sintax.replace('${KEYWORD2}', KEYWORD2s)\n sintax = sintax.replace('${KEYWORD3}', KEYWORD3s)\n\n # Save File\n file_path = Paths.getTmLanguage()\n language_file = JSONFile(file_path)\n language_file.writeFile(sintax)",
"def render_tree():\n graph = TREE.graphviz(node_attr={'shape': 'record', 'height': '.1'})\n graph.body\n graph.render(GRAPHDIR, format='png')\n #graph.view()",
"def generate_tex_file(self, filename):\n write_all(filename, self.unpack_content(self.preamble + self.body))",
"def compute_CTXT_view(language='en'):\n print('creating Context views')\n things = get_things()\n for thing in things:\n # print(thing)\n entityId = thing[1].split('/')[-1][1:]\n text = get_articleText(entityId)\n if text is None:\n print('text is null')\n continue\n context = get_CTXT(text)\n if context is not None:\n # CTXT.append(context)\n # TODO: get article title from thing\n insert_view(int(get_item_id(sqlConnection, 'name')), 'CTXT', language, context)",
"def input_tree(self):\n\n if self.starttreename:\n if self.starttreename[-3:] == 'xml':\n self.starttree = Phylo.read(self.starttreename, \"phyloxml\")\n elif self.starttreename[-6:] == 'newick':\n self.starttree = Phylo.read(self.starttreename, \"newick\")\n\n print \"Generating phylogenetic tree...\"\n\n if self.treetype[-3:] == 'xml':\n self.tree = Phylo.read(self.treetype, \"phyloxml\")\n elif self.treetype[-3:] == 'nwk':\n self.tree = Phylo.read(self.treetype, \"newick\")\n elif self.treetype == 'pars':\n self.parsimony_tree()\n elif self.treetype == 'PhyML':\n self.phyml_tree()\n else:\n self.raxml_tree()\n\n self.tree.collapse_all(lambda c: c.branch_length <= 0.0)\n self.treeparents = self.all_parents(self.tree)\n for btree in self.btrees:\n btree.collapse_all(lambda c: c.branch_length <= 0.0)\n self.btreeparents.append(self.all_parents(btree))",
"def att_totex(self,arquivo=None):\n\n if arquivo is None:\n arquivo = str(self.matricula).zfill(6) + '.tex'\n\n with open(arquivo, 'w') as f:\n f.write('\\\\section*{' + str(self.nome_completo) + '\\\\hfill ' + str(self.matricula).zfill(6) + '}\\n')\n\n f.write('\\\\begin{multicols}{2}\\n \\\\scriptsize')\n for s in range(1,3):\n f.write('\\\\begin{center} \\\\begin{tabular}{|c|c|c|c|c|c|c|}\\\\toprule\\n')\n f.write('\\\\multicolumn{7}{|c|}{' + str(s) + '$^\\\\circ$ semestre} \\\\\\\\ \\\\midrule\\n')\n f.write('& S & T & Q & Q & S & S \\\\\\\\ \\\\midrule\\n')\n for i in range(1,17):\n f.write(str(i) );\n for j in range(2,8):\n\n f.write('& ')\n\n for t in self.turmas_a_lecionar:\n if t.semestralidade == s and (j,i) in t.horarios:\n f.write(str(t.codigo) + ' ' + str(t.turma))\n\n f.write('\\\\\\\\ \\\\midrule \\n')\n\n f.write('\\\\end{tabular} \\\\end{center}\\n\\n')\n\n f.write('\\\\end{multicols}\\n')\n f.write('\\\\begin{multicols}{2}\\n')\n f.write('\\\\begin{center} \\\\begin{tabular}{|lm{6cm}|}\\n')\n f.write('\\\\multicolumn{2}{c}{Disciplinas a lecionar} \\\\\\\\ \\\\midrule \\\\midrule\\n')\n f.write('\\\\multicolumn{2}{|c|}{1$^\\\\circ$ Semestre} \\\\\\\\ \\\\midrule \\\\midrule\\n')\n for t in [i for i in self.turmas_a_lecionar if i.semestralidade == 1]:\n f.write(str(t.codigo) + ' & ' + t.nome + '\\\\\\\\ \\\\midrule\\n')\n f.write('\\\\midrule\\n')\n f.write('\\\\multicolumn{2}{|c|}{2$^\\\\circ$ Semestre} \\\\\\\\ \\\\midrule \\\\midrule\\n')\n for t in [i for i in self.turmas_a_lecionar if i.semestralidade == 2]:\n f.write(str(t.codigo) + ' & ' + t.nome + '\\\\\\\\ \\\\midrule\\n')\n f.write('\\\\end{tabular} \\\\end{center} \\\\vfill\\\\columnbreak\\n')\n f.write('\\\\end{multicols}\\n')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add each CTW node to the TeX file.
|
def drawCTWNodes(file, node, show_probs=False):
	# Write this node's label, count and probabilities Pe, Pw (converted from log2 form) in forest syntax.
	if node.label == '':
		file.write("{{$\\lambda$, {}\\\\ $P_e$={}\\\\ $P_w$={}}}\n".format(node.count, 2**node.log2Pe, 2**node.log2Pw))
	else:
		file.write("{{`{}\', {}\\\\ $P_e$={}\\\\ $P_w$={}}}\n".format(node.label, node.count, 2**node.log2Pe, 2**node.log2Pw))
	# Recurse into the children, wrapping each subtree in forest brackets.
	for child in node.children:
		if child is not None:
			file.write("[")
			if child.isLeaf():
				file.write("{{`{}\', {}\\\\ $P_e$={}\\\\ $P_w$={}}}\n".format(child.label, child.count, 2**child.log2Pe, 2**child.log2Pw))
			else:
				drawCTWNodes(file, child, show_probs)
			file.write("]\n")
|
[
"def ctw(control):\n if isinstance(control, list) and len(control) == 1:\n control = control[0]\n return control.toxml()",
"def generate_tc_output_variables(self, staged_tc_data):\n for staged_data in staged_tc_data:\n self.add_tc_output_variable(staged_data.get('key'), staged_data.get('data'))",
"def generate_additonal_tocs(app, pagename, templatename, context, doctree):\n pages_list = []\n content_tocs = []\n glossary_tocs = []\n content_toc = ''\n glossary_toc = ''\n figures_toc = bullet_list()\n tables_toc = bullet_list()\n index = app.env.config.master_doc\n doctree_index = app.env.get_doctree(index)\n\n for toctreenode in doctree_index.traverse(toctree):\n page_index = 0\n while page_index < len(toctreenode['includefiles']):\n page_in_toc = toctreenode['includefiles'][page_index]\n if page_in_toc not in pages_list:\n pages_list.append(page_in_toc)\n page_index += 1\n else:\n toctreenode['includefiles'].remove(page_in_toc)\n for entry in toctreenode['entries']:\n if page_in_toc in entry:\n toctreenode['entries'].remove(entry)\n\n toctree_element = TocTree(app.env).resolve(pagename, app.builder, toctreenode, includehidden=True)\n try:\n toc_caption = next(child for child in toctree_element.children if isinstance(child, caption))\n toctree_element.children.remove(toc_caption)\n except StopIteration:\n pass\n except AttributeError:\n continue\n if 'glossary_toc' in toctreenode.parent.attributes['names']:\n glossary_tocs.append(toctree_element)\n else:\n content_tocs.append(toctree_element)\n\n if content_tocs:\n content_toc = content_tocs[0]\n for content_element in content_tocs[1:]:\n try:\n content_toc.extend(content_element.children)\n except AttributeError:\n continue\n\n if glossary_tocs:\n glossary_toc = glossary_tocs[0]\n for glossary_element in glossary_tocs[1:]:\n glossary_toc.extend(glossary_element.children)\n glossary_toc = glossary_toc.children[0].children[0].children[1]\n\n pages_with_fignumbers = (x for x in pages_list if x in app.env.toc_fignumbers)\n for page in pages_with_fignumbers:\n doctree_page = app.env.get_doctree(page)\n\n for figurenode in doctree_page.traverse(figure):\n if not figurenode.attributes['ids']:\n continue\n figure_id = figurenode.attributes['ids'][0]\n toc_fig_tables = app.env.toc_fignumbers[page].get('figure', {})\n figure_number = toc_fig_tables.get(figure_id)\n if figure_number is None:\n continue\n figure_title = figurenode.children[-1].children[0] or context['t']['no_description']\n try:\n figure_text_string = u'Fig. 
{}.{} - {}'.format(\n figure_number[0], figure_number[1], figure_title)\n except IndexError:\n continue\n figure_text = Text(figure_text_string)\n figure_text.rawsource = figure_text_string\n figure_reference = reference()\n figure_reference.attributes['internal'] = True\n figure_reference.attributes['refuri'] = app.builder.get_relative_uri(pagename, page) + '#' + figure_id\n figure_compact_paragraph = compact_paragraph()\n figure_list_item = list_item()\n figure_text.parent = figure_reference\n figure_reference.children.append(figure_text)\n figure_reference.parent = figure_compact_paragraph\n figure_compact_paragraph.children.append(figure_reference)\n figure_compact_paragraph.parent = figure_list_item\n figure_list_item.children.append(figure_compact_paragraph)\n figure_list_item.parent = figures_toc\n figures_toc.children.append(figure_list_item)\n\n for tablenode in doctree_page.traverse(table):\n if not tablenode.attributes['ids']:\n continue\n table_id = tablenode.attributes['ids'][0]\n toc_fig_tables = app.env.toc_fignumbers[page].get('table', {})\n table_number = toc_fig_tables.get(table_id)\n if table_number is None:\n continue\n table_title = tablenode.children[0].rawsource if tablenode.children[0].rawsource else context['t']['no_description']\n table_title = (table_title[:60] + '...') if len(table_title) > 60 else table_title\n table_text_string = 'Tab. ' + '.'.join([str(n) for n in table_number]) + ' - ' + table_title\n table_text = Text(table_text_string)\n table_text.rawsource = table_text_string\n table_reference = reference()\n table_reference.attributes['internal'] = True\n table_reference.attributes['refuri'] = app.builder.get_relative_uri(pagename, page) + '#' + table_id\n table_compact_paragraph = compact_paragraph()\n table_list_item = list_item()\n table_text.parent = table_reference\n table_reference.children.append(table_text)\n table_reference.parent = table_compact_paragraph\n table_compact_paragraph.children.append(table_reference)\n table_compact_paragraph.parent = table_list_item\n table_list_item.children.append(table_compact_paragraph)\n table_list_item.parent = tables_toc\n tables_toc.children.append(table_list_item)\n\n context['content_toc'] = app.builder.render_partial(content_toc)['fragment'] if hasattr(content_toc, 'children') and content_toc.children else None\n context['glossary_toc'] = app.builder.render_partial(glossary_toc)['fragment'] if hasattr(glossary_toc, 'children') and glossary_toc.children else None\n context['figures_toc'] = app.builder.render_partial(figures_toc)['fragment'] if hasattr(figures_toc, 'children') and figures_toc.children else None\n context['tables_toc'] = app.builder.render_partial(tables_toc)['fragment'] if hasattr(tables_toc, 'children') and tables_toc.children else None",
"def compute_CTXT_view(language='en'):\n print('creating Context views')\n things = get_things()\n for thing in things:\n # print(thing)\n entityId = thing[1].split('/')[-1][1:]\n text = get_articleText(entityId)\n if text is None:\n print('text is null')\n continue\n context = get_CTXT(text)\n if context is not None:\n # CTXT.append(context)\n # TODO: get article title from thing\n insert_view(int(get_item_id(sqlConnection, 'name')), 'CTXT', language, context)",
"def cmtyAddFromList(g, cmtyID, nodes):\n #print cmtyID, nodes\n #for n in g.nbunch_iter(nodes):\n for n in nodes:\n g.node[n]['cmtys'].add(cmtyID)",
"def set_CPTs(self, cptdict):\n for name in list(cptdict.keys()):\n self.node_dict[name].CPT = cptdict[name]",
"def drawCTWTree(root, filename='ctw_tree', show_probs=False):\n\tnode = root\n\tfile = open('{}.tex'.format(filename), 'w')\n\t# Write header\n\tfile.writelines(\n\t\t[\"\\\\documentclass[tikz,border=10pt]{standalone}\\n\",\n\t\t\"\\\\usepackage[linguistics]{forest}\\n\",\n\t\t\"\\\\begin{document}\\n\",\n\t\t\"\\\\begin{forest}\\n\",\n\t\t\"for tree={grow=west}\\n\"]\n\t\t)\n\t\n\tfile.write(\"[\")\n\tdrawCTWNodes(file,root, show_probs)\n\tfile.write(\"]\\n\")\n\n\tfile.writelines(\n\t\t[\"\\\\end{forest}\\n\",\n\t\t\"\\\\end{document}\"]\n\t\t)\n\tfile.close()\n\n\tos.system(\"pdflatex -interaction=batchmode {}.tex\".format(filename))",
"def add_nodes(self):\n\t\twith open(self.fname, 'a') as f:\n\t\t\tf.write(\"\\n%%%%%%%%%% ADDING NODES %%%%%%%%%%%%%\\n\\n\")\n\t\t\ti = 0\n\t\t\tfor v in self.G.nodes:\n\t\t\t\tf.write('\\t\\\\Vertex[x={}, y={}]{{{}}}\\n'.format(round(self.factor*v.x, 3), round(self.factor*v.y, 3), i))\n\t\t\t\t\n\t\t\t\tself.vtoid[v] = i\t\t\t\t\n\t\t\t\t\n\t\t\t\ti += 1",
"def writeTecplot(self, fileName):\n\n f = open(fileName, \"w\")\n f.write('TITLE = \"DVConstraints Data\"\\n')\n f.write('VARIABLES = \"CoordinateX\" \"CoordinateY\" \"CoordinateZ\"\\n')\n\n # loop over the constraints and add their data to the tecplot file\n for conTypeKey in self.constraints:\n constraint = self.constraints[conTypeKey]\n for key in constraint:\n constraint[key].writeTecplot(f)\n\n for key in self.linearCon:\n self.linearCon[key].writeTecplot(f)\n f.close()",
"def make_tc_files():\n\n make_dirs()\n\n for i in range(0, TEST_FILES + 1):\n in_file = os.path.join(IN_SOURCE, f'input{i:02d}.txt')\n out_file = os.path.join(OUT_SOURCE, f'output{i:02d}.txt')\n sys.stdout = open(in_file, 'w+')\n\n required_input = RINT(5, POWER(10, (i // 2) + 1))\n print(required_input) # Prints x into input file\n for _ in range(required_input):\n print(RINT(1, POWER(10, min(4, max(i // 2, 2)))))\n\n sys.stdout = sys.__stdout__\n\n generate(TEST_LANG, i)\n\n make_lf_ending(in_file)\n make_lf_ending(out_file)\n\n yield\n\n shutil.rmtree(IN_SOURCE)\n shutil.rmtree(OUT_SOURCE)",
"def add_text(doc, t):\n current_pos = 0\n for m in re.finditer(r'latex::(.+?)::', t):\n doc.append(t[current_pos: m.start()])\n doc.append(NoEscape(' ' + m.group(1) + ' '))\n current_pos = m.end()\n doc.append(t[current_pos:])\n return doc",
"def talk_generator(CG, sorted_nodes, tfile):\n # http://www.regular-expressions.info/floatingpoint.html\n reg_flt = re.compile('[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?.')\n\n ttime = CG.graph['total_time']\n\n with open(tfile) as tkn:\n # this is nasty, but used to check if we're at the last line\n prevcourse = []\n tknlines = tkn.readlines()\n for line in tknlines:\n if reg_flt.match(line) :\n course = map(float, line.strip().split())\n time = course[0]\n\n for e, occu in enumerate(course[1:]) :\n # is it above visibility threshold?\n ss = sorted_nodes[e][0]\n\n yield CG.node[ss]['identity'], ttime+time, occu, \\\n ss[:CG.graph['transcript_length']], CG.node[ss]['energy']\n prevcourse = course\n return",
"def prepare_tex_file(fname, to_the_end=False):\n if to_the_end:\n with open(fname, 'a') as f:\n f.write(\"\\n\\n\\end{enumerate}\\n\\end{document}\")\n else:\n with open(fname, 'w') as f:\n f.write(r\"\"\"\\documentclass[11pt]{article}\n\n\\marginparwidth 0.5in\n\\oddsidemargin 0.25in\n\\evensidemargin 0.25in\n\\marginparsep 0.25in\n\\topmargin 0.25in\n\\textwidth 6in \\textheight 8 in\n\n\\newcommand{\\key}[1]{\\textcolor{lightgray}{#1}}\n\n\\newcounter{CQuery}\n\\newcounter{CStatement}\n\\newcounter{CClick}\n\n\\usepackage{amsmath}\n\\usepackage{hyperref}\n\\usepackage{xcolor}\n\n\\begin{document}\n\\author{}\n\\title{Synthetic Data}\n\\maketitle\n\n\\setcounter{CQuery}{1}\n\\setcounter{CStatement}{1}\n\\setcounter{CClick}{1}\n\n\\begin{enumerate}\"\"\")",
"def merge_text_nodes(self):\n ...",
"def save_tm3(self, path):\n with open(path, 'w') as f:\n f.write('')\n with open(path, 'a') as f:\n for entry in self.entries:\n line = '\\t'.join([str(e) for e in entry]) + '\\n'\n f.write(line)",
"def generate_text_nodes(node: PendingGlueReference, output: dict[str, Any]):\n data = output[\"data\"]\n if \"text/plain\" not in data:\n ref_warning(f\"No text/plain found in {node.key!r}\", node)\n return []\n try:\n text = format_plain_text(data[\"text/plain\"], node[\"fmt_spec\"])\n except Exception as exc:\n ref_warning(f\"Failed to format text/plain: {exc}\", node)\n return []\n return [nodes.inline(text, text, classes=[\"pasted-text\"])]",
"def WriteFiles(file, tfidf, class_type):\r\n\r\n try:\r\n out_file = open(DT_PATH/('DecisionTree_' + file + '_' + class_type+'.txt'),'a')\r\n except:\r\n out_file = open(DT_PATH/('DecisionTree_' + file + '_' + class_type+'.txt'),'w')\r\n \r\n out_file.write('\\n------------------------------- TF-IDF '+ str(tfidf).upper() +' -------------------------------\\n')\r\n\r\n for ngram in range(1,4):\r\n out_file.write('n-grams(1-'+str(ngram)+')\\n' + 'df\\t\\t Precision \\t\\t\\t Recall \\t\\t\\t F-score\\n')\r\n \r\n for df in range(1+ngram, 21+ngram):\r\n X_matrix, y_labels = build_matrices(file, ngram, df, tfidf, class_type)\r\n P,R,F = CrossVal_DecisionTree(X_matrix, y_labels)\r\n out_file.write(str(df) + '\\t' + str(P) + '\\t\\t' + str(R) +'\\t\\t' + str(F) + '\\n')\r\n \r\n out_file.write('\\n')\r\n \r\n out_file.write('\\n\\n')\r\n out_file.close()",
"def write_tex():\n datadir = livvkit.index_dir\n outdir = os.path.join(datadir, \"tex\")\n print(outdir)\n # functions.mkdir_p(outdir)\n\n data_files = glob.glob(datadir + \"/**/*.json\", recursive=True)\n\n for each in data_files:\n data = functions.read_json(each)\n tex = translate_page(data)\n outfile = os.path.join(outdir, os.path.basename(each).replace('json', 'tex'))\n with open(outfile, 'w') as f:\n f.write(tex)",
"def ttfs(self):\n if not self._ttfs:\n for font in self.fonts:\n for instance in font.instances:\n instance.generate(Format='ttf', FontPath=self._temp_dir)\n self._ttfs = [TTFont(os.path.join(self._temp_dir, f)) for f\n in os.listdir(self._temp_dir) if f.endswith('.ttf')]\n return self._ttfs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create and compile a TeX file representing the CTM tree.
|
def drawCTMTree(root, filename='ctm_tree', show_probs=False):
	# Open the output TeX file and write the standalone/forest preamble.
	file = open('{}.tex'.format(filename), 'w')
	file.writelines(
		["\\documentclass[tikz,border=10pt]{standalone}\n",
		"\\usepackage[linguistics]{forest}\n",
		"\\begin{document}\n",
		"\\begin{forest}\n",
		"for tree={grow=west}\n"]
		)
	# Emit the whole tree, rooted at `root`, in forest bracket notation.
	file.write("[")
	drawCTMNodes(file, root, show_probs)
	file.write("]\n")
	file.writelines(
		["\\end{forest}\n",
		"\\end{document}"]
		)
	file.close()
	# Compile the generated TeX file to PDF (requires `os` to be imported and a LaTeX install with the forest package).
	os.system("pdflatex -interaction=batchmode {}.tex".format(filename))
|
[
"def drawCTWTree(root, filename='ctw_tree', show_probs=False):\n\tnode = root\n\tfile = open('{}.tex'.format(filename), 'w')\n\t# Write header\n\tfile.writelines(\n\t\t[\"\\\\documentclass[tikz,border=10pt]{standalone}\\n\",\n\t\t\"\\\\usepackage[linguistics]{forest}\\n\",\n\t\t\"\\\\begin{document}\\n\",\n\t\t\"\\\\begin{forest}\\n\",\n\t\t\"for tree={grow=west}\\n\"]\n\t\t)\n\t\n\tfile.write(\"[\")\n\tdrawCTWNodes(file,root, show_probs)\n\tfile.write(\"]\\n\")\n\n\tfile.writelines(\n\t\t[\"\\\\end{forest}\\n\",\n\t\t\"\\\\end{document}\"]\n\t\t)\n\tfile.close()\n\n\tos.system(\"pdflatex -interaction=batchmode {}.tex\".format(filename))",
"def buildtree(filename):\n \n #FIXME\n pass",
"def compiletofile(self, texfile, styfile=\"pytem.sty\"):\n self.writetofile(styfile)\n compileLaTeX(\n os.path.abspath(os.path.curdir), os.path.abspath(os.path.curdir), texfile\n )",
"def compile(self):\n\n\t\tself.save_images(Settings.tmp_dir)\n\n\t\ttex_file = path.join(Settings.tmp_dir, 'pgf_{0}_{1}.tex'.format(Figure._session, self._idx))\n\t\tpdf_file = path.join(Settings.tmp_dir, 'pgf_{0}_{1}.pdf'.format(Figure._session, self._idx))\n\n\t\tcommand = Settings.pdf_compile.format('-output-directory {0} {1}')\n\t\tcommand = command.format(Settings.tmp_dir, tex_file)\n\n\t\t# write LaTeX file\n\t\twith open(tex_file, 'w') as handle:\n\t\t\thandle.write(self.render())\n\n\t\t# compile\n\t\tif system('cd \"{0}\" && {1}'.format(Settings.tmp_dir, command)):\n\t\t\traise RuntimeError('Compiling TeX source file to PDF failed.')\n\n\t\treturn pdf_file",
"def generate_tex_file(expression):\n result = os.path.join(TEX_DIR_PATH, str(hash(expression))) + '.tex'\n\n if not os.path.exists(TEX_DIR_PATH):\n os.mkdir(TEX_DIR_PATH)\n print(\"Subdirectory '%s' did not exist, has been created.\" % TEX_DIR_NAME)\n else:\n pass\n\n\n tex_hack = '\\n'.join(['\\setlength{\\unitlength}{1ex}%',\n '\\\\begin{picture}(0,1)',\n '\\\\put(0,0){\\\\line(0,1){1}}',\n '\\\\end{picture}%',\n '\\\\hspace{-0.75pt}%'])\n with open(TEMPLATE_LATEX_FILE_PATH, 'r') as infile:\n body = infile.read()\n body = body.replace(TEX_TEXT_TO_REPLACE, '\\n'.join([tex_hack, expression]))\n \n with open (result, 'w') as outfile:\n outfile.write(body)\n\n return result",
"def compile(self, tree: list, output_file: str):\n print('compiling to %s [%s]' % (output_file, self.name))\n # merge duplicates in tree\n tree = self.merge_duplicates(tree)\n\n out = open(output_file, 'w')\n out.write(self.header)\n\n for node in tree:\n # compile node\n out.write(self.compile_node(node))\n\n print('%scompiled %i keys' % (Fore.GREEN, len(tree)))\n out.close()",
"def compile(self, filename):\n\t\t\n\t\t# Check that file exist\n\t\tif os.path.exists(filename) is False:\n\t\t\tprint('File {0} does not exist'.format(filename))\n\t\t\texit()\n\t\t\t\n\t\t# Check that file is not a directory\n\t\tif os.path.isfile(filename) is False:\n\t\t\tprint('File {0} is not a file'.format(filename))\n\t\t\texit()\n\t\t\t\n\t\t# Check if file is readable\n\t\tif os.access(filename, os.R_OK) is False:\n\t\t\tprint('File {0} is not readable'.format(filename))\n\t\t\texit()\n\t\t\n\t\t# Open file and read lines\n\t\tfile = open(filename)\n\t\tlines = file.readlines()\n\t\tfile.close()\n\t\t\n\t\t# Remove comments\n\t\tlines = self.removeMultilineComments(lines)\n\t\tlines = self.removeSinglelineComments(lines)\n\t\t\n\t\t# Remove empty lines\n\t\tlines = self.removeEmptyLines(lines)\n\t\t\n\t\t# Search for includes\n\t\tincludes = self.addIncludes(lines)\n\t\t\n\t\t# Remove includes lines\n\t\tlines = self.removeIncludes(lines)\n\t\t\n\t\t# Create root tags\n\t\ttags = self.findTags(0, lines)\n\t\tcontent = ''\n\t\tfor tagInfo in tags:\n\n\t\t\ttag = self.createTag(tagInfo['tagLine'], tagInfo['contentLines'])\n\t\t\n\t\t\n\t\t\tcontent += str(tag)\n\t\t\t\n\t\t# Compile html\n\t\thtml = self.template.render({\n\t\t\t'title': 'Min webbsida',\n\t\t\t'content': content,\n\t\t\t'styles': includes['styles'],\n\t\t\t'javascripts': includes['javascripts']\n\t\t})\n\t\t\n\t\treturn html",
"def input_tree(self):\n\n if self.starttreename:\n if self.starttreename[-3:] == 'xml':\n self.starttree = Phylo.read(self.starttreename, \"phyloxml\")\n elif self.starttreename[-6:] == 'newick':\n self.starttree = Phylo.read(self.starttreename, \"newick\")\n\n print \"Generating phylogenetic tree...\"\n\n if self.treetype[-3:] == 'xml':\n self.tree = Phylo.read(self.treetype, \"phyloxml\")\n elif self.treetype[-3:] == 'nwk':\n self.tree = Phylo.read(self.treetype, \"newick\")\n elif self.treetype == 'pars':\n self.parsimony_tree()\n elif self.treetype == 'PhyML':\n self.phyml_tree()\n else:\n self.raxml_tree()\n\n self.tree.collapse_all(lambda c: c.branch_length <= 0.0)\n self.treeparents = self.all_parents(self.tree)\n for btree in self.btrees:\n btree.collapse_all(lambda c: c.branch_length <= 0.0)\n self.btreeparents.append(self.all_parents(btree))",
"def make_tc_files():\n\n make_dirs()\n\n for i in range(0, TEST_FILES + 1):\n in_file = os.path.join(IN_SOURCE, f'input{i:02d}.txt')\n out_file = os.path.join(OUT_SOURCE, f'output{i:02d}.txt')\n sys.stdout = open(in_file, 'w+')\n\n required_input = RINT(5, POWER(10, (i // 2) + 1))\n print(required_input) # Prints x into input file\n for _ in range(required_input):\n print(RINT(1, POWER(10, min(4, max(i // 2, 2)))))\n\n sys.stdout = sys.__stdout__\n\n generate(TEST_LANG, i)\n\n make_lf_ending(in_file)\n make_lf_ending(out_file)\n\n yield\n\n shutil.rmtree(IN_SOURCE)\n shutil.rmtree(OUT_SOURCE)",
"def render_tree():\n graph = TREE.graphviz(node_attr={'shape': 'record', 'height': '.1'})\n graph.body\n graph.render(GRAPHDIR, format='png')\n #graph.view()",
"def build_toc_file(self):\n\n filename = path.join(self.outdir, 'helptoc.xml')\n\n with open(filename, 'w', encoding='utf-8') as f:\n\n f.write(\"\"\"<?xml version='1.0' encoding=\"utf-8\"?>\\n\"\"\")\n f.write(\"\"\"<toc version=\"2.0\">\\n\"\"\")\n f.write('<tocitem target=\"{}\">{}\\n'.format(\n self.config.master_doc + '.html', self.config.project))\n\n toctree = self.env.get_and_resolve_doctree(self.config.master_doc, self,\n prune_toctrees=False)\n visitor = ToCTreeVisitor(toctree)\n matcher = NodeMatcher(addnodes.compact_paragraph, toctree=True)\n for node in toctree.traverse(matcher): # type: addnodes.compact_paragraph\n node.walkabout(visitor)\n\n f.write(visitor.astext() + '\\n')\n\n f.write('</tocitem>\\n')\n f.write('</toc>\\n')",
"def createSyntaxFile():\n try:\n from . import Paths\n from .JSONFile import JSONFile\n except:\n from libs import Paths\n from libs.JSONFile import JSONFile\n\n keywords = getKeywords()\n\n LITERAL1s = []\n KEYWORD1s = []\n KEYWORD2s = []\n KEYWORD3s = []\n\n # set keywords\n for k in keywords:\n for w in k.get_keywords():\n if 'LITERAL1' in w.get_type():\n LITERAL1s.append(w.get_id())\n if 'KEYWORD1' in w.get_type():\n KEYWORD1s.append(w.get_id())\n if 'KEYWORD2' in w.get_type():\n KEYWORD2s.append(w.get_id())\n if 'KEYWORD3' in w.get_type():\n KEYWORD3s.append(w.get_id())\n\n # formating\n LITERAL1s = set(LITERAL1s)\n LITERAL1s = '|'.join(LITERAL1s)\n KEYWORD1s = set(KEYWORD1s)\n KEYWORD1s = '|'.join(KEYWORD1s)\n KEYWORD2s = set(KEYWORD2s)\n KEYWORD2s = '|'.join(KEYWORD2s)\n KEYWORD3s = set(KEYWORD3s)\n KEYWORD3s = '|'.join(KEYWORD3s)\n\n # get sintax preset\n sintax_path = Paths.getSyntaxPath()\n sintax_file = JSONFile(sintax_path)\n sintax = sintax_file.readFile()\n\n # replace words in sintax file\n sintax = sintax.replace('${LITERAL1}', LITERAL1s)\n sintax = sintax.replace('${KEYWORD1}', KEYWORD1s)\n sintax = sintax.replace('${KEYWORD2}', KEYWORD2s)\n sintax = sintax.replace('${KEYWORD3}', KEYWORD3s)\n\n # Save File\n file_path = Paths.getTmLanguage()\n language_file = JSONFile(file_path)\n language_file.writeFile(sintax)",
"def __creatProjectTree(self):\n # get last project\n prj = self.lastProject()\n # creat substractions\n subs = {\n 'PROJECTNAME': prj.name,\n 'PROJECTDESCRIPTION': prj.description\n }\n # targeted directory\n target = path('./projects') / path(\n self.nextDate.format('YYYY_MM', locale='fr_FR'))\n # do the copy !\n cptree('model', target, subs)",
"def att_totex(self,arquivo=None):\n\n if arquivo is None:\n arquivo = str(self.matricula).zfill(6) + '.tex'\n\n with open(arquivo, 'w') as f:\n f.write('\\\\section*{' + str(self.nome_completo) + '\\\\hfill ' + str(self.matricula).zfill(6) + '}\\n')\n\n f.write('\\\\begin{multicols}{2}\\n \\\\scriptsize')\n for s in range(1,3):\n f.write('\\\\begin{center} \\\\begin{tabular}{|c|c|c|c|c|c|c|}\\\\toprule\\n')\n f.write('\\\\multicolumn{7}{|c|}{' + str(s) + '$^\\\\circ$ semestre} \\\\\\\\ \\\\midrule\\n')\n f.write('& S & T & Q & Q & S & S \\\\\\\\ \\\\midrule\\n')\n for i in range(1,17):\n f.write(str(i) );\n for j in range(2,8):\n\n f.write('& ')\n\n for t in self.turmas_a_lecionar:\n if t.semestralidade == s and (j,i) in t.horarios:\n f.write(str(t.codigo) + ' ' + str(t.turma))\n\n f.write('\\\\\\\\ \\\\midrule \\n')\n\n f.write('\\\\end{tabular} \\\\end{center}\\n\\n')\n\n f.write('\\\\end{multicols}\\n')\n f.write('\\\\begin{multicols}{2}\\n')\n f.write('\\\\begin{center} \\\\begin{tabular}{|lm{6cm}|}\\n')\n f.write('\\\\multicolumn{2}{c}{Disciplinas a lecionar} \\\\\\\\ \\\\midrule \\\\midrule\\n')\n f.write('\\\\multicolumn{2}{|c|}{1$^\\\\circ$ Semestre} \\\\\\\\ \\\\midrule \\\\midrule\\n')\n for t in [i for i in self.turmas_a_lecionar if i.semestralidade == 1]:\n f.write(str(t.codigo) + ' & ' + t.nome + '\\\\\\\\ \\\\midrule\\n')\n f.write('\\\\midrule\\n')\n f.write('\\\\multicolumn{2}{|c|}{2$^\\\\circ$ Semestre} \\\\\\\\ \\\\midrule \\\\midrule\\n')\n for t in [i for i in self.turmas_a_lecionar if i.semestralidade == 2]:\n f.write(str(t.codigo) + ' & ' + t.nome + '\\\\\\\\ \\\\midrule\\n')\n f.write('\\\\end{tabular} \\\\end{center} \\\\vfill\\\\columnbreak\\n')\n f.write('\\\\end{multicols}\\n')",
"def file_compile(ast, file):\n output = encoder.ModuleFileOutput(file)\n Compiler(output).compile(ast)",
"def prepare_tex_file(fname, to_the_end=False):\n if to_the_end:\n with open(fname, 'a') as f:\n f.write(\"\\n\\n\\end{enumerate}\\n\\end{document}\")\n else:\n with open(fname, 'w') as f:\n f.write(r\"\"\"\\documentclass[11pt]{article}\n\n\\marginparwidth 0.5in\n\\oddsidemargin 0.25in\n\\evensidemargin 0.25in\n\\marginparsep 0.25in\n\\topmargin 0.25in\n\\textwidth 6in \\textheight 8 in\n\n\\newcommand{\\key}[1]{\\textcolor{lightgray}{#1}}\n\n\\newcounter{CQuery}\n\\newcounter{CStatement}\n\\newcounter{CClick}\n\n\\usepackage{amsmath}\n\\usepackage{hyperref}\n\\usepackage{xcolor}\n\n\\begin{document}\n\\author{}\n\\title{Synthetic Data}\n\\maketitle\n\n\\setcounter{CQuery}{1}\n\\setcounter{CStatement}{1}\n\\setcounter{CClick}{1}\n\n\\begin{enumerate}\"\"\")",
"def compile(self, file_path):\n \n # Make sure the directory exists\n # If the path contains the output file name, it will be replaced by the default one\n if os.path.isfile(file_path):\n file_path = os.path.dirname(file_path)\n if not os.path.isdir(file_path):\n e = IOError(2, 'Output path does not exist')\n e.filename = file_path\n raise e\n \n subgraphs = [] # references to Himesis sub-graphs of this graph\n \n #with open(file, 'w') as file:\n \n file = open(os.path.join(file_path, self.name + '.py'), 'w')\n if True:\n # Save the nodes in increasing order of the occurrence of its meta-model:\n # First build a dictionary {meta-model element: number of nodes of that type}\n meta_models = {}\n if self.vcount() > 0:\n tmp = self.vs[Himesis.Constants.META_MODEL]\n for mm in tmp:\n if mm not in meta_models:\n meta_models[mm] = tmp.count(mm)\n del tmp\n # Then, sort the node indices such that the node that has the least frequent type is in the beginning\n # We save the list as an object attribute, because it might be used by a sub-class' compiler\n self.ordered_nodes = sorted(self.node_iter(),\n key=lambda v: meta_models[self.vs[v][Himesis.Constants.META_MODEL]])\n \n file.write('''\n\nfrom core.himesis import Himesis''')\n if self.import_name != 'Himesis':\n file.write(''', %s''' % self.import_name)\n \n init_params = ''\n init_params_values = ''\n if len(self.init_params) > 0:\n init_params = reduce(lambda p1, p2: p1 + p2,\n map(lambda p: ', %s' % p,\n self.init_params))\n init_params_values = reduce(lambda p1, p2: p1 + p2,\n map(lambda p: ', %s=%s' % (p, p),\n self.init_params))\n file.write('''\nimport cPickle as pickle\nfrom uuid import UUID\n\nclass %s(%s):\n def __init__(self%s):\n \"\"\"\n Creates the himesis graph representing the AToM3 model %s.\n \"\"\"\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(%s, self).__init__(name='%s', num_nodes=%d, edges=[]%s)\n''' % (self.name,\n self.import_name,\n init_params,\n self.name,\n self.name,\n self.name,\n self.vcount(),\n init_params_values))\n \n # Add the edges\n file.write(''' \n # Add the edges''')\n edge_list = []\n # Determines if it's possible to provide the edges as a single list\n # Because it's faster than adding each edge individually.\n # However, due to Python limitations, the edge list is specified one 1 line\n # So for bigger graphs, we need to add edges in groups.\n if self.ecount() < Himesis.EDGE_LIST_THRESHOLD:\n edge_list = self.get_edgelist()\n # Order the edge list\n for i in range(len(edge_list)):\n edge_list[i] = [self.ordered_nodes.index(edge_list[i][0]),\n self.ordered_nodes.index(edge_list[i][1])]\n file.write('''\n self.add_edges(%s)''' % str(edge_list))\n else:\n pitch = 0\n while pitch < self.ecount():\n edgeId = pitch\n edge_list = []\n while edgeId - pitch < Himesis.EDGE_LIST_THRESHOLD and edgeId < self.ecount():\n # Using the new order of nodes\n edge = (self.ordered_nodes.index(self.es[edgeId].source),\n self.ordered_nodes.index(self.es[edgeId].target))\n edge_list.append(edge)\n edgeId += 1\n file.write('''\n self.add_edges(%s)''' % str(edge_list))\n pitch += Himesis.EDGE_LIST_THRESHOLD\n file.write('''\n ''')\n \n # Set the graph attributes\n file.write('''\n # Set the graph attributes''')\n for attr in self.attributes():\n value = self[attr]\n access = 'self[\"%s\"]' % attr\n file.write(self.__compile_attribute(access, value))\n if isinstance(value, Himesis):\n subgraphs.append(value)\n \n # Set node attributes\n file.write('''\n \n # Set the node attributes''')\n # Compile 
the node attributes, always in the reight order\n for new_node_index, old_node_index in enumerate(self.ordered_nodes):\n for attr in self.vs[old_node_index].attribute_names():\n if self.vs[old_node_index][attr] is not None:\n value = self.vs[old_node_index][attr]\n access = 'self.vs[%d][\"%s\"]' % (new_node_index, attr)\n file.write(self.__compile_attribute(access, value))\n if isinstance(value, Himesis):\n subgraphs.append(value)\n \n file.write('\\n')\n self._compile_additional_info(file)\n file.write('\\n')\n \n file.close()\n \n for sg in subgraphs:\n sg.compile(file_path)\n \n return file.name",
"def generate_tex_file(self, filename):\n write_all(filename, self.unpack_content(self.preamble + self.body))",
"def makeCss(infiles, outfile):\n\n css = \"\"\n\n for f in infiles.split():\n base = os.path.splitext(os.path.basename(f))[0]\n base = os.path.basename(base)\n font = TTFont(f)\n css += genCSS(font, base)\n font.close()\n\n out = open(outfile, \"w\")\n out.write(css)\n out.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Assume emb1.dim == emb2.dim
|
def __init__(self, emb1, emb2, normalize=False):
    self.dim = emb1.dim
    # Split the two vocabularies into shared and embedding-specific words.
    # (dict.viewkeys() is Python 2; in Python 3, dict.keys() supports the same set operations.)
    vocab1 = emb1.wi.viewkeys()
    vocab2 = emb2.wi.viewkeys()
    joint_vocab = list(vocab1 & vocab2)
    only_vocab1 = list(vocab1 - vocab2)
    only_vocab2 = list(vocab2 - vocab1)
    self.iw = joint_vocab + only_vocab1 + only_vocab2
    self.wi = dict([(w, i) for i, w in enumerate(self.iw)])
    # Shared words get the element-wise sum of their two vectors; words found
    # in only one embedding keep their original vector.
    m_joint = emb1.m[[emb1.wi[w] for w in joint_vocab]] \
        + emb2.m[[emb2.wi[w] for w in joint_vocab]]
    m_only1 = emb1.m[[emb1.wi[w] for w in only_vocab1]]
    m_only2 = emb2.m[[emb2.wi[w] for w in only_vocab2]]
    self.m = np.vstack([m_joint, m_only1, m_only2])
    if normalize:
        self.normalize()
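To make the merge rule concrete, here is a small self-contained sketch that reproduces the constructor's logic on toy data. The Embedding stand-in class is an assumption for illustration, and Python 3 dict.keys() is used in place of the Python 2 viewkeys() call above, since both support the same set operations.

import numpy as np

class Embedding:
    # Assumed minimal stand-in: wi maps word -> row index, m holds the vectors.
    def __init__(self, words, m):
        self.iw = list(words)
        self.wi = {w: i for i, w in enumerate(self.iw)}
        self.m = np.asarray(m, dtype=float)
        self.dim = self.m.shape[1]

emb1 = Embedding(['a', 'b'], [[1.0, 0.0], [0.0, 1.0]])
emb2 = Embedding(['b', 'c'], [[2.0, 2.0], [3.0, 3.0]])

joint = sorted(emb1.wi.keys() & emb2.wi.keys())   # shared words: vectors are summed
only1 = sorted(emb1.wi.keys() - emb2.wi.keys())   # words only in emb1: kept as-is
only2 = sorted(emb2.wi.keys() - emb1.wi.keys())   # words only in emb2: kept as-is
iw = joint + only1 + only2
m = np.vstack([
    emb1.m[[emb1.wi[w] for w in joint]] + emb2.m[[emb2.wi[w] for w in joint]],
    emb1.m[[emb1.wi[w] for w in only1]],
    emb2.m[[emb2.wi[w] for w in only2]],
])
print(dict(zip(iw, m.tolist())))  # {'b': [2.0, 3.0], 'a': [1.0, 0.0], 'c': [3.0, 3.0]}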
|
[
"def __init__(self, emb1, emb2, normalize=False):\r\n self.dim = emb1.dim\r\n\r\n vocab1 = emb1.wi.viewkeys()\r\n vocab2 = emb2.wi.viewkeys()\r\n joint_vocab = list(vocab1 & vocab2)\r\n only_vocab1 = list(vocab1 - vocab2)\r\n only_vocab2 = list(vocab2 - vocab1)\r\n self.iw = joint_vocab + only_vocab1 + only_vocab2\r\n self.wi = dict([(w, i) for i, w in enumerate(self.iw)])\r\n\r\n m_joint = emb1.m[[emb1.wi[w] for w in joint_vocab]] + \\\r\n emb2.m[[emb2.wi[w] for w in joint_vocab]]\r\n m_only1 = emb1.m[[emb1.wi[w] for w in only_vocab1]]\r\n m_only2 = emb2.m[[emb2.wi[w] for w in only_vocab2]]\r\n self.m = np.vstack([m_joint, m_only1, m_only2])\r\n\r\n if normalize:\r\n self.normalize()",
"def _equal_embeddings(a: Representation, b: Representation) -> bool:\n return (a(indices=None) == b(indices=None)).all()",
"def forward(self, emb1, emb2, batch_processing=False):\n\n # make sure that batch processing works, even for single data points\n emb1 = emb1.unsqueeze(0) if len(emb1.shape) < 3 else emb1\n emb2 = emb2.unsqueeze(0) if len(emb2.shape) < 3 else emb2\n\n x_len = emb1.size(1) # (batch, x_len, hidden_size)\n y_len = emb2.size(1) # (batch, y_len, hidden_size)\n\n xy = []\n for i in range(x_len):\n xi = emb1.select(1, i).unsqueeze(1) # (batch, 1, hidden_size)\n yi = self.att_weight_cq(emb2 * xi).squeeze(-1) # (batch, y_len, 1) --> (batch, y_len)\n xy.append(yi) # (x_len, batch, y_len)\n xy = torch.stack(xy, dim=-1) # (batch, y_len, x_len)\n\n # (batch, y_len, x_len)\n s = self.att_weight_c(emb2).expand(-1, -1, x_len) + \\\n self.att_weight_q(emb1).permute(0, 2, 1).expand(-1, y_len, -1) + \\\n xy\n\n a = nnF.softmax(s, dim=2) # (batch, y_len, x_len)\n\n # (batch, y_len, x_len) * (batch, x_len, hidden_size) -> (batch, y_len, hidden_size)\n y2x_att = torch.bmm(a, emb1)\n\n b = nnF.softmax(torch.max(s, dim=2)[0], dim=1).unsqueeze(1) # (batch, 1, y_len)\n\n # (batch, 1, y_len) * (batch, y_len, hidden_size) -> (batch, hidden_size)\n x2y_att = torch.bmm(b, emb2).squeeze(1)\n\n # (batch, y_len, hidden_size) (tiled)\n x2y_att = x2y_att.unsqueeze(1).expand(-1, y_len, -1)\n\n # (batch, y_len, hidden_size * 4)\n z = torch.cat([emb2, y2x_att, emb2 * y2x_att, emb2 * x2y_att], dim=-1)\n z = self.reduction_layer(z) # (batch, y_len, output_size)\n\n return z.squeeze(0) if not batch_processing else z # (y_len, output_size) if no batch_processing",
"def compare_evecs(self):\n\n try:\n input2 = file(self._input2_file, \"r\")\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n\n samples_overlap_list = [] # val=sample\n sample_dim = {} # key=sample, val=listofdimensions\n\n # read matrix 2\n header_pattern = re.compile(\"^.*#eigvals.*$\")\n line = input2.readline().replace(\"\\n\",\"\")\n if not header_pattern.search(line):\n print >> sys.stderr, \"error: wrong header in file \\\"\" + self._input_file + \"\\\"\" \n sys.exit(1)\n\n line = input2.readline().replace(\"\\n\",\"\")\n while line:\n\n list = re.split(\"\\s+\",line)\n \n # delete first element if empty\n if list[0] == \"\":\n del list[0]\n \n sample_id = list[0]\n if self._samples_dict.has_key(sample_id):\n sample_dim[sample_id] = list[1:-1] \n samples_overlap_list.append(sample_id)\n\n line = input2.readline().replace(\"\\n\",\"\")\n\n input2.close() \n \n # ------------------------------ #\n # - dimensions of input file 1 - #\n # ------------------------------ #\n\n # list with self._numofdim lists in it\n dimensions_input1 = []\n dimensions_input1_samples = []\n for i in xrange(self._numofdim):\n dimensions_input1.append([])\n \n for sample_id in samples_overlap_list:\n\n # if outlier then ignore outlier\n if self._outlier.has_key(sample_id):\n pass\n else:\n # check for same number of dimensions in first input file\n if self._numofdim != len(self._sample_dim[sample_id]):\n print >> sys.stderr, \"error: different number of dimensions in file \\\"\" +\\\n self._input1_file + \"\\\"\" \n print >> sys.stderr, str(self._numofdim) +\" != \"+\\\n str(len(self._sample_dim[sample_id]))\n sys.exit(1)\n \n # fill list with self._numofdim lists\n for i in xrange(len(self._sample_dim[sample_id])):\n dimensions_input1[i].append(float(self._sample_dim[sample_id][i]))\n dimensions_input1_samples.append(sample_id)\n \n # ------------------------------ #\n # - dimensions of input file 2 - #\n # ------------------------------ #\n\n # list with self._numofdim lists in it\n dimensions_input2 = []\n dimensions_input2_samples = []\n for i in xrange(self._numofdim):\n dimensions_input2.append([])\n \n for sample_id in samples_overlap_list:\n\n # if outlier then ignore outlier\n if self._outlier.has_key(sample_id):\n pass\n else:\n # check for same number of dimensions in first input file\n if self._numofdim != len(sample_dim[sample_id]):\n print >> sys.stderr, \"error: different number of dimensions in file \\\"\" +\\\n self._input2_file + \"\\\"\" \n print >> sys.stderr, str(self._numofdim) +\" != \"+\\\n str(len(self._sample_dim[sample_id]))\n sys.exit(1)\n \n # fill list with self._numofdim lists\n for i in xrange(len(sample_dim[sample_id])):\n dimensions_input2[i].append(float(sample_dim[sample_id][i]))\n dimensions_input2_samples.append(sample_id)\n \n # ------------------------------------------------------------------ #\n # - calc correlation pearson for each dimension in file1 and file2 - #\n # ------------------------------------------------------------------ #\n \n assert(dimensions_input1_samples == dimensions_input2_samples)\n dimensions_correlation = []\n assert(len(dimensions_input1) == len(dimensions_input2))\n\n # write header\n for i in xrange(len(dimensions_input1)):\n if i == 0:\n out.writelines(\"dim\" + str(i+1))\n else:\n out.writelines(\"\\tdim\" + str(i+1))\n out.writelines(\"\\n\")\n\n # write body\n for i in xrange(len(dimensions_input1)):\n assert(len(dimensions_input1[i]) == len(dimensions_input2[i]))\n #print dimensions_input1[i]\n 
#print dimensions_input2[i]\n dimensions_correlation.append(\\\n statistics.correlation(\\\n dimensions_input1[i],\\\n dimensions_input2[i],\\\n method=\"Pearson\"))\n if i == 0:\n out.writelines(str(dimensions_correlation[i]))\n else:\n out.writelines(\"\\t\"+ str(dimensions_correlation[i]))\n out.writelines(\"\\n\")\n out.close()",
"def check_dims(matIn1, matIn2):\n\n\tm,n = matIn1.shape\n\tr,k = matIn2.shape\n\n\tif r == n:\n\t\treturn True\n\n\telse:\n\t\treturn False",
"def test0_try_scale2(self):\n\t\tfor im in self.imlist:\n\t\t\temim = embed_data(im, direction=1, scale=2)\n\t\t\tememim = embed_data(emim, direction=-1, scale=2)\n\t\t\t#print \"test0_try_scale2(): im = %s, em = %s\" % (str(im.shape), str(emim.shape))",
"def test_is_not_mub_dim_2():\n e_0, e_1 = basis(2, 0), basis(2, 1)\n mub_1 = [e_0, e_1]\n mub_2 = [1 / np.sqrt(2) * (e_0 + e_1), e_1]\n mub_3 = [1 / np.sqrt(2) * (e_0 + 1j * e_1), e_0]\n mubs = [mub_1, mub_2, mub_3]\n np.testing.assert_equal(is_mub(mubs), False)",
"def __mul__(self, other):\r\n if self.size == (1, 1):\r\n return other\r\n elif other.size == (1, 1):\r\n return self\r\n elif self.cols == other.rows:\r\n return Shape(self.rows, other.cols)\r\n else:\r\n raise ValueError(\"Incompatible dimensions %s %s\" % (self, other))",
"def is_compatible(first, second):\n\n first_set = set((SharedBufferArea.OFM, SharedBufferArea.Accumulators))\n second_set = set((SharedBufferArea.IFM, SharedBufferArea.Weights))\n\n first_mask = first.generate_used_mask(first_set)\n second_mask = second.generate_used_mask(second_set)\n\n if np.sum(first_mask & second_mask):\n # overlap\n return False\n\n return True",
"def equals(first_img, second_img): \n\n if first_img is None or second_img is None: \n return False \n diff = ImageChops.difference(first_img, second_img)\n if diff.getbbox() != None: \n return False\n else: \n diff = None\n return True",
"def test_is_mub_dim_2():\n e_0, e_1 = basis(2, 0), basis(2, 1)\n mub_1 = [e_0, e_1]\n mub_2 = [1 / np.sqrt(2) * (e_0 + e_1), 1 / np.sqrt(2) * (e_0 - e_1)]\n mub_3 = [1 / np.sqrt(2) * (e_0 + 1j * e_1), 1 / np.sqrt(2) * (e_0 - 1j * e_1)]\n mubs = [mub_1, mub_2, mub_3]\n np.testing.assert_equal(is_mub(mubs), True)",
"def check_same_dim(shape_x, shape_y):\n shape_x_len = len(shape_x)\n for k in range(shape_x_len):\n if shape_x[k] != shape_y[k]:\n return False\n\n return True",
"def common_dimensions(v1, v2):\n list1, list2 = [], []\n for i in range(0, len(v1)):\n if v1[i] != 0 and v2[i] != 0:\n list1.append(v1[i])\n list2.append(v2[i])\n # print 'INDEX SAME:',i\n return list1, list2",
"def checkspacematch(hdr1, hdr2):\n dimmatch = checkspaceresmatch(hdr1[\"pixdim\"], hdr2[\"pixdim\"])\n resmatch = checkspacedimmatch(hdr1[\"dim\"], hdr2[\"dim\"])\n return dimmatch and resmatch",
"def assert_equal(A: np.ndarray, B: np.ndarray):\n\n if A.ndim != B.ndim:\n raise ValueError(\"A has different dimension of B.\")\n\n if A.shape != B.shape:\n raise ValueError(\"A has different shape of B.\")\n\n if not np.allclose(A, B):\n raise ValueError(\"A is not equal to B.\")",
"def areDomainsIdentical(var1, var2):\n #check they have the same number of axis\n if len(var1.getAxisList()) != len(var2.getAxisList()):\n return False\n\n for i in range(len(var1.getAxisList())):\n ax1 = var1.getAxis(i)\n ax2 = var2.getAxis(i)\n #print ax1, ax2\n if axis_utils.areAxesIdentical(ax1, ax2) == False:\n return False\n\n return True",
"def _check_dims_parallel(d1: Dimensions, d2: Dimensions) -> bool:\n return vec.multiply(d1, vec.dot(d2, d2)) == vec.multiply(d2, vec.dot(d1, d2))",
"def testsame(self):\n im1 = edfimage()\n im1.read(self.fn_edf)\n im2 = adscimage()\n im2.read(self.fn_adsc)\n diff = (im1.data.astype(\"float32\") - im2.data.astype(\"float32\"))\n logger.debug(\"type: %s %s shape %s %s \" % (im1.data.dtype, im2.data.dtype, im1.data.shape, im2.data.shape))\n logger.debug(\"im1 min %s %s max %s %s \" % (im1.data.min(), im2.data.min(), im1.data.max(), im2.data.max()))\n logger.debug(\"delta min %s max %s mean %s\" % (diff.min(), diff.max(), diff.mean()))\n self.assertEqual(abs(diff).max(), 0.0, \"asdc data == edf data\")",
"def __eq__(self, other):\n if other is self:\n return True\n if not isinstance(other, Offset2D):\n return False\n if other.reference != self.reference:\n return False\n return super().__eq__(other)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
On first login, check whether the user has a profile and whether there are data entries from a person whose name matches the user in the profile.
|
def check_profile(sender, user: str, request, **kwargs):
user_obj = User.objects.get(username=user)
if Profile.objects.filter(user__username=user).exists(): # if user has a profile
user_profile = Profile.objects.get(user__username=user)
if user_profile.checkedAssociation: # Profile should be filled, so there is nothing to do.
pass
elif user_profile.metacatalogPerson_id: # no Association for data checked, so check if there is data now.
# TODO: implement this! __assign_data()
__assign_data(user_obj, user_profile)
else: # only user in profile, so assign_person first
__assign_data(user_obj, user_profile)
# print('\033[91mYour user needs first and last name to associate user with data.\033[0m')
else: # there is no profile at all for this user
user_profile = __assign_person(user)
__assign_data(user_obj, user_profile)
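For context, a handler with this shape is typically connected to a login signal. Django's built-in user_logged_in signal passes the User instance itself, while this handler expects a username string, so the sketch below assumes a custom signal; the signal name, its module placement, and the send() call site are hypothetical.

# Hypothetical wiring sketch; the signal name and the send() call site are assumptions.
import django.dispatch

first_login = django.dispatch.Signal()   # expected kwargs: user (username str), request

first_login.connect(check_profile)

# In the login view, after authenticating for the first time:
# first_login.send(sender=None, user=request.user.username, request=request)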
|
[
"def verify_profile_availability(self, profile):\n pass",
"def check_profile_exists(cls, user_id):\n profile = cls.c.execute(\n select([cls.table]).where(cls.table.c.user_id == user_id)\n ).fetchone()\n\n return profile is not None",
"def test_profile_is_created_automatically(self):\n \n user = User.objects.create_user('duffman', 'duffman@test.com', 'pass')\n user.save()\n self.assertTrue(user.get_profile())",
"def step04():\n try:\n if PersonalProfile.exists(\"zhixian\"):\n logging.info(\"Record 'zhixian' exists\")\n return\n\n PersonalProfile\n new_ent = PersonalProfile()\n new_ent.email = \"zhixian_profile@hotmail.com\"\n new_ent.login = [ndb.Key(Login, \"zhixian\")]\n new_ent_key = new_ent.put()\n except Exception as ex:\n logging.error(ex)",
"def check_profile(profile=None):\n from conf.profiles import getAllProfilesObjects\n from core.exceptions import ProfileDoesNotExist\n\n if not profile:\n return False\n\n profile_available = []\n [profile_available.append(p.name) for p in getAllProfilesObjects()]\n try:\n for p in profile:\n if p not in profile_available: # Check profile exist\n raise ProfileDoesNotExist(\"Profile %s doesnt exist !\" % profile)\n else:\n return True\n except ProfileDoesNotExist as pne:\n print pne\n exit(pne.code)",
"def validate_profile_exists(self):\n\n if self.args.profile_name not in self.profiles:\n self.handle_error('Could not find profile \"{}\"'.format(self.args.profile_name))",
"def has_profile_loaded(self):\n if self.browsermodel:\n return True\n else:\n return False",
"def _populate_and_save_user_profile(self):\n try:\n profile = models.UserProfile.objects.get(user=self._user)\n self._populate_profile_fields(profile)\n\n if len(ldap_settings.AUTH_LDAP_USER_ATTR_MAP) > 0:\n profile = self._populate_profile_fields(profile)\n profile.save()\n except (SiteProfileNotAvailable, ObjectDoesNotExist), e:\n profile = models.UserProfile(user=self._user,\n role=models.UserProfile.ROLE_USER,\n ldap_user=True)\n \n\n profile = self._populate_profile_fields(profile)\n\n profile.save()",
"def testUserWithNoProfileAccessGranted(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n\n access_checker = access.HasNoProfileAccessChecker()\n access_checker.checkAccess(self.data, None)",
"def at_first_login(self):\r\n pass",
"def search_profile(cls, param):\n\n for profile in cls.profile_list:\n while (profile.profile_name == param) or (profile.profile_username == param) or (profile.profile_email == param):\n return profile",
"def test_user_has_profile_attached(self):\n user = self.users[0]\n self.assertTrue(hasattr(user, \"profile\"))\n self.assertIsInstance(user.profile, UserProfile)",
"def filterorgbyprofileuser(org_info, profileID):\n org_members = list(filter(lambda x: x['name'] == profileID, [\n org_info['owner']] +\n org_info['admins'] +\n org_info['members']))\n if len(org_members) > 0:\n return True\n else:\n return False",
"def test_user_has_profile_is_hirable_by_default(self):\n this_user = self.users[0]\n this_user.save()\n self.assertTrue(self.users[0].profile.hireable is True)",
"async def no_profile(ctx):\n await ctx.send(\n f\"QA Tester profile does not exist within PrismarineCo. Ltd.'s database. To create a profile, use `{ctx.prefix}profile init`.'\"\n )",
"def check_user_data(self):\n if not os.path.exists(self.user_data):\n print(\"User data does not exist\")\n self.get_default_location()\n self.get_default_mode()\n self.get_default_time_bw_events()\n else:\n with open(self.user_data) as json_file:\n data = json.load(json_file)\n self.mode = data['mode']\n self.time_bw_event = int(data['time_bw_event'])\n self.default_location = self.get_lat_log(data['add'])\n print(\"Default mode of transport:\", self.mode)\n print(\"Default Location: \", data['add'].replace(\"+\", \" \"))\n print(\"Max time in mins between 2 events to go directly from one event to another:\",\n str(int(self.time_bw_event/60)))\n if self.default_location == \"\":\n print(\"error reading default location\")\n self.get_default_location()",
"def test_superuser_can_see_any_profile(self):\n SUPERUSER = 0\n self.client.login(\n username=self.users[SUPERUSER].get('username'),\n password=self.users[SUPERUSER].get('password')\n )\n for user in User.objects.all():\n response = self.client.get('/1.0/users/{0}/'.format(user.pk))\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def load_profile_page(l):\n username = login(l)\n if not username:\n l.interrupt()\n simulate_loading_profile_page(l)\n logout(l)\n release_user(username)\n l.interrupt()",
"def test_find_profile(self):\n self.profile.save_profile()\n profile2 = Profile(profile_photo ='test_profile_photo2',bio = 'test_bio2')\n profile2.save_profile()\n search_profile = Profile.find_profile('test_bio2')\n self.assertFalse(len(search_profile)==1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that lookups can be performed on data once stored in the database.
|
def testLookups(self):
for value in self.testing_data:
model_test = TestingModel(pickle_field=value)
model_test.save()
self.assertEqual(value, TestingModel.objects.get(pickle_field__exact=value).pickle_field)
model_test.delete()
|
[
"def test_lookup(self):\n\n # TEST 1: test with abbrevation and use_cache True\n self.assertEqual(states.lookup(val='KA', field='abbr'), states.KA)\n\n # TEST 2: test with full name and use_cache = True\n self.assertEqual(states.lookup(val='manipur', field='name'), states.MN)\n\n # TEST 3: test with abbrevation without using cache\n self.assertEqual(states.lookup(val='HR', field='abbr', use_cache=False), states.HR)\n\n # TEST 4: test with name without using cache\n self.assertEqual(states.lookup(val='delhi', field='name', use_cache=False), states.DL)\n\n # TEST 5: test with faulty name so that matching fail\n with self.assertRaises(ValueError):\n states.lookup(val='XY', field='abbr')",
"def test_lookup_cache(self):\n # At this point, a lookup for a ContentType should hit the DB\n with self.assertNumQueries(1):\n ContentType.objects.get_for_model(ContentType)\n\n # A second hit, though, won't hit the DB, nor will a lookup by ID\n # or natural key\n with self.assertNumQueries(0):\n ct = ContentType.objects.get_for_model(ContentType)\n with self.assertNumQueries(0):\n ContentType.objects.get_for_id(ct.id)\n with self.assertNumQueries(0):\n ContentType.objects.get_by_natural_key(\"contenttypes\", \"contenttype\")\n\n # Once we clear the cache, another lookup will again hit the DB\n ContentType.objects.clear_cache()\n with self.assertNumQueries(1):\n ContentType.objects.get_for_model(ContentType)\n\n # The same should happen with a lookup by natural key\n ContentType.objects.clear_cache()\n with self.assertNumQueries(1):\n ContentType.objects.get_by_natural_key(\"contenttypes\", \"contenttype\")\n # And a second hit shouldn't hit the DB\n with self.assertNumQueries(0):\n ContentType.objects.get_by_natural_key(\"contenttypes\", \"contenttype\")",
"def test_exists(self):\n with database() as db:\n db.query('INSERT INTO test_data (variable) VALUES (1), (2), (3), (4), (5)')\n self.assertTrue(db.exists('test_data'))\n self.assertTrue(db.exists('test_data', variable=3))\n self.assertFalse(db.exists('test_data', variable=6))",
"def test_record_loading(self):\n test_record = self.db.lookup(accession = \"X55053\")\n assert test_record.name == \"ATCOR66M\"\n assert test_record.id == \"X55053.1\", test_record.id\n assert test_record.description == \"A.thaliana cor6.6 mRNA.\"\n assert isinstance(test_record.seq.alphabet, Alphabet.DNAAlphabet)\n assert test_record.seq[:10].tostring() == 'AACAAAACAC'\n\n test_record = self.db.lookup(accession = \"X62281\")\n assert test_record.name == \"ATKIN2\"\n assert test_record.id == \"X62281.1\", test_record.id\n assert test_record.description == \"A.thaliana kin2 gene.\"\n assert isinstance(test_record.seq.alphabet, Alphabet.DNAAlphabet)\n assert test_record.seq[:10].tostring() == 'ATTTGGCCTA'",
"def test_retrieve_existing_weather(self):\n self.assertEqual(self.EXPECTED_WEATHER_COUNT, Weather.objects.count())\n self.assertEqual('San Jose', Weather.objects.retrieve_weather_object(city='San Jose', state='CA').city)\n self.assertEqual(self.EXPECTED_WEATHER_COUNT, Weather.objects.count())",
"def test_data_source_postgre_sqls_find_one_get(self):\n pass",
"def test_data_persistence(self):\n tag = Tag.objects.get(tag_name=\"Novel\")\n self.assertEqual(\n Tag.objects.all().count(),\n 1\n )\n self.assertEqual(\n tag.tag_name,\n \"Novel\"\n )",
"def test_can_build_lookup_table_and_use_it_for_known_values():\n\n # John prepares data to be looked up\n ts = array([0.1, 1.1, 2.1])\n x1 = array([10.2, -1.4, 4.1])\n x2 = array([0.1, 0.01, 0.4])\n\n # John calculates \"trajectory\" for his data\n table = LookupTable({\n 'name': 'lookup',\n 'tdata': ts,\n 'ics': dict(zip(['x1', 'x2'], [x1, x2])),\n })\n\n traj = table.compute('ltable')\n\n # Now John can retrieve his values from table\n for i, t in enumerate(ts):\n assert traj(t) == Point({'coordnames': ['x1', 'x2'], 'coordarray': [x1[i], x2[i]]})\n assert traj(t, 'x1') == Point({'x1': x1[i]})\n assert traj(t, 'x2') == Point({'x2': x2[i]})\n\n # John can get only those values, that he has previously inserted\n with pytest.raises(ValueError):\n traj(0.4)\n with pytest.raises(ValueError):\n traj(0.4, 'x1')\n with pytest.raises(ValueError):\n traj(0.4, 'x2')",
"def test_test_query(self):\n pass",
"def test_retrieve_non_existing_weather(self):\n self.assertEqual(self.EXPECTED_WEATHER_COUNT, Weather.objects.count())\n self.assertEqual('San Diego', Weather.objects.retrieve_weather_object(city='San Diego', state='CA').city)\n self.assertEqual(self.EXPECTED_WEATHER_COUNT+1, Weather.objects.count())",
"def test_bulk_retrieve_existing_weather_some_exist(self):\n self.assertEqual(3, Weather.objects.count())\n weathers = Weather.objects.retrieve_weather_objects([('Atlanta', 'GA'), ('Boston', 'MA'), ('Phoenix', 'AZ')])\n self.assertEqual('Atlanta', weathers[0].city)\n self.assertEqual('Boston', weathers[1].city)\n self.assertEqual('Phoenix', weathers[2].city)\n self.assertEqual('GA', weathers[0].state)\n self.assertEqual('MA', weathers[1].state)\n self.assertEqual('AZ', weathers[2].state)\n self.assertEqual(5, Weather.objects.count())",
"def initialize_lookups():\n\n if this.lookups_loaded:\n return\n\n # row factory - BEWARE - do not use con.row_factory as !\n # db_functions.conn.row_factory = lambda cursor, row: row[0] # important, this has side effects\n # Fill the lookups:\n\n if sqlite_db_functions.conn is None:\n conn = sqlite_db_functions.open_db()\n if conn is None:\n return\n\n # COUNT IDs and words\n c = sqlite_db_functions.conn.cursor()\n data = c.execute('SELECT Value FROM requiredFields').fetchall()\n this.required_fields = {val[0] for val in data}\n\n c = sqlite_db_functions.conn.cursor()\n data = c.execute('SELECT Value FROM recommendedFields').fetchall()\n this.recommended_fields = {val[0] for val in data}\n\n # SAMPLE SIZE IDs and words\n c = sqlite_db_functions.conn.cursor()\n data = c.execute('SELECT Value FROM basisOfRecordValues').fetchall()\n this.values_basis_of_record = {val[0] for val in data}\n\n this.vocab = {value.lower() for value in this.values_basis_of_record}\n this.fields_to_compare = {value.lower() for value in this.required_fields}\n\n this.lookups_loaded = True",
"def test_live_db():\n # Details for the database have been provided in the instructions\n # test that the output is as expected for the live system\n\n test_query = 'brc'\n expected_suggestions = ['BRCA1', 'BRCA2', 'BRCC3', 'BRCC3P1']\n\n results = get_suggestions(test_query)\n\n # Ensure all expected suggestions are present\n # results may not always be the same if database is changed\n for suggestion in expected_suggestions:\n assert suggestion in results\n\n for result in results:\n assert test_query.lower() in result.lower()",
"def test_query_detail(self):\n pass",
"def lookup(self):",
"def test_computed_keys(self):\n\n class TestDataProvider(BaseDataProvider):\n def gen_load_key(self, key):\n # Use profession to load data from the backend.\n return key['profession']\n\n def gen_cache_key(self, key):\n # Cache results by last name.\n return key['lastName']\n\n def fetch_from_backend(self, load_keys):\n # This roughly simulates a scenario where we have to hit a\n # different backend depending on the load key.\n # For example, the load key could be the name of the R function\n # to invoke, the name of the database or collection to execute\n # the query against, etc.\n for lk in load_keys:\n method = getattr(self, 'get_{job}s'.format(job=lk), None)\n if method:\n for key, data in method().items():\n yield key, data\n\n def gen_empty_result(self):\n return {'firstName': '*unknown*'}\n\n @staticmethod\n def get_historians():\n return {\n 'Brody': {'firstName': 'Marcus'},\n 'Jones': {'firstName': 'Henry'},\n }\n\n @staticmethod\n def get_nazi_stooges():\n return {\n 'Belloq': {'firstName': 'René'},\n 'Donovan': {'firstName': 'Walter'},\n }\n\n users = [\n {'lastName': 'Jones', 'profession': 'historian'},\n {'lastName': 'Brody', 'profession': 'historian'},\n {'lastName': 'Belloq', 'profession': 'nazi_stooge'},\n {'lastName': 'Donovan', 'profession': 'nazi_stooge'},\n\n # The backend won't return any data for this user.\n {'lastName': 'Ravenwood', 'profession': 'adventurer'},\n ]\n\n data_provider = TestDataProvider()\n data_provider.register(users)\n\n for user_data in users:\n user_data.update(data_provider[user_data])\n\n self.assertDictEqual(\n users[0],\n\n {\n 'firstName': 'Henry',\n 'lastName': 'Jones',\n 'profession': 'historian',\n },\n )\n\n self.assertDictEqual(\n users[1],\n\n {\n 'firstName': 'Marcus',\n 'lastName': 'Brody',\n 'profession': 'historian',\n },\n )\n\n self.assertDictEqual(\n users[2],\n\n {\n 'firstName': 'René',\n 'lastName': 'Belloq',\n 'profession': 'nazi_stooge',\n },\n )\n\n self.assertDictEqual(\n users[3],\n\n {\n 'firstName': 'Walter',\n 'lastName': 'Donovan',\n 'profession': 'nazi_stooge',\n },\n )\n\n self.assertDictEqual(\n users[4],\n\n {\n # See :py:meth:`TestDataProvider.gen_empty_result` above.\n 'firstName': '*unknown*',\n\n 'lastName': 'Ravenwood',\n 'profession': 'adventurer',\n },\n )",
"def test_db_map(self):\n class WildDBNames(Model):\n\n id = columns.UUID(primary_key=True, default=lambda:uuid4())\n content = columns.Text(db_field='words_and_whatnot')\n numbers = columns.Integer(db_field='integers_etc')\n\n db_map = WildDBNames._db_map\n self.assertEquals(db_map['words_and_whatnot'], 'content')\n self.assertEquals(db_map['integers_etc'], 'numbers')",
"def test_db_connections(self):\n generate_basic_db()\n self.assertEqual(Study.objects.get(pk=1).genotype, Genotype.objects.get(pk=1)) # Test study-genotype relationship",
"def test_all_database_methods(self):\n # This is based on state and not well-suited to unit tests\n model = FairModel('model')\n model.bulk_import_data(self._BULK_IMPORT_DATA)\n # Check uncalcualted models throw errors (metamodel always calc'd)\n self.assertRaises(FairException, self._db.store, model)\n model = FairModel('model')\n model.bulk_import_data(self._BULK_IMPORT_DATA)\n model.calculate_all()\n # All argument and model types\n metamodel = FairMetaModel('meta', models=[model, model])\n metamodel.calculate_all()\n # Things to fetch from db\n model_name = model.get_name()\n model_uuid = model.get_uuid()\n meta_model_name = metamodel.get_name()\n meta_model_uuid = metamodel.get_uuid()\n load_strings = [\n model_name, \n model_uuid, \n meta_model_name, \n meta_model_uuid\n ]\n # Store\n for m in [model, metamodel]:\n self._db.store(m)\n # For load via all stirngs\n for string in load_strings:\n _ = self._db.load(string)\n # Confirm query is working\n result = self._db.query(\n self._QUERY_STRING,\n (model_uuid,)\n )\n self.assertTrue(len(result) == 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that values can be serialized to a fixture.
|
def testFixture(self):
for value in self.testing_data:
model_test = TestingModel(pickle_field=value)
model_test.save()
dumpdata = Dumpdata()
json = dumpdata.handle('mbdb')
pass
|
[
"def test_serializer_field_values(self):\n pass",
"def test_serialize_a_pet(self):\n pet = PetFactory()\n data = pet.serialize()\n logging.debug(\"Pet data: %s\", data)\n self.assertNotEqual(data, None)\n self.assertNotIn(\"_id\", data)\n self.assertEqual(data[\"name\"], pet.name)\n self.assertEqual(data[\"category\"], pet.category)\n self.assertEqual(data[\"available\"], pet.available)\n self.assertEqual(data[\"gender\"], pet.gender.name)\n self.assertEqual(data[\"birthday\"], pet.birthday.isoformat())",
"def test_serialize_a_category(self):\n category = Category(name=\"Dog\")\n data = category.serialize()\n self.assertNotEqual(data, None)\n self.assertIn('id', data)\n self.assertEqual(data['id'], None)\n self.assertIn('name', data)\n self.assertEqual(data['name'], \"Dog\")",
"def test_Serialize(self):\n #Confirm its number than suit\n test_card = Card(3, 'Hearts')\n self.assertEqual(test_card.serialize(), (3, 'Hearts'))",
"def test_is_serialised(self, serialised):\n\t\tself.assertTrue(integer_module.is_serialised(serialised), \"This must be identified as a serialised integer.\")",
"def test_standardise_data(self):\n\t\tassert False, \"Write Test\"",
"def test_data_formats():",
"def test_deserialize_with_bad_available(self):\n data = PetFactory().serialize()\n data[\"available\"] = \"foo\"\n pet = Pet()\n self.assertRaises(DataValidationError, pet.deserialize, data)",
"def encode_test(self, obj, expected):\n self.assertEqual(json.dumps(obj, sort_keys=True, cls=policy.ConfigEncoder), expected)",
"def test_with_data_fixture(one_fixture):\n print \"\\nRunning test_with_data_fixture: {}\".format(one_fixture)\n assert one_fixture == 1",
"def test_serialization(self):\n dags = collect_dags()\n serialized_dags = {}\n for v in dags.values():\n dag = SerializedDAG.to_dict(v)\n SerializedDAG.validate_schema(dag)\n serialized_dags[v.dag_id] = dag\n\n # Compares with the ground truth of JSON string.\n actual, expected = self.prepare_ser_dags_for_comparison(\n actual=serialized_dags[\"simple_dag\"],\n expected=serialized_simple_dag_ground_truth,\n )\n assert actual == expected",
"def test_userserializer_field_content(self):\n data = self.userserializer.data\n self.assertEqual(data['name'], self.user.name)\n self.assertEqual(data['uuid'], str(self.user.uuid))",
"def test_get_serializable_fields(self):\n composer = Composer()\n fields = [\"name\"]\n self.assertEqual(fields, composer.get_serializable_fields())",
"def test_core_save_stored_value_v1(self):\n pass",
"def test_field_content(self):\n data = self.serializer.data\n\n self.assertEqual(data['name'], self.dog_attr['name'])",
"def test_model_saving(self):\n source_trait_encoded_value = factories.SourceTraitEncodedValueFactory.create()\n self.assertIsInstance(\n models.SourceTraitEncodedValue.objects.get(pk=source_trait_encoded_value.pk),\n models.SourceTraitEncodedValue)",
"def test_serialize_a_wishlist(self):\n wishlist = Wishlist(name=\"wishlist_name\", customer_id=1234)\n data = wishlist.serialize()\n self.assertNotEqual(data, None)\n self.assertIn('id', data)\n self.assertEqual(data['id'], None)\n self.assertIn('name', data)\n self.assertEqual(data['name'], \"wishlist_name\")\n self.assertIn('customer_id', data)\n self.assertEqual(data['customer_id'], 1234)",
"def test_serialize_instance(self):\n from app.datum.models import DatumObject\n from app.datum.serializers import DatumObjectSerializer\n\n test_object = DatumObject.objects.first()\n test_data = DatumObjectSerializer(\"serial_default\"\n ).serialize(test_object)\n\n actual = test_data[\"datum_type_id\"]\n expected = self.test.datum_type1.datum_type_id\n self.assertEqual(expected, actual)",
"def test_tokenserializer_field_content(self):\n data = self.tokenserializer.data\n self.assertEqual(data['key'], self.token.key)\n self.assertEqual(data['user'], self.userserializer.data)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute rotation matrix about the XYZ axes. R = rotxyz(r, p, h) returns a 3x3 rotation matrix R where (r, p, h) is a 3-vector of Euler angles (roll, pitch, heading) measured in radians.
|
def rotxyz(r, p, h):
cr = math.cos(r); sr = math.sin(r)
cp = math.cos(p); sp = math.sin(p)
ch = math.cos(h); sh = math.sin(h)
R = np.array([[ch*cp, (-sh*cr + ch*sp*sr), ( sh*sr + ch*sp*cr)], \
[sh*cp, ( ch*cr + sh*sp*sr), (-ch*sr + sh*sp*cr)], \
[-sp, cp*sr, cp*cr ]])
return R
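A minimal usage sketch, assuming the rotxyz defined above is in scope with math and numpy (as np) imported; the angle values are arbitrary. The matrix composes the elementary rotations as R = Rz(h) @ Ry(p) @ Rx(r), so any output should be orthonormal with determinant +1.

# Usage sketch (illustrative values only); rotxyz is the function defined above.
import numpy as np

r, p, h = 0.1, 0.2, 0.3          # roll, pitch, heading in radians
R = rotxyz(r, p, h)              # same as Rz(h) @ Ry(p) @ Rx(r)

# A proper rotation matrix is orthonormal with determinant +1.
assert np.allclose(R @ R.T, np.eye(3))
assert np.isclose(np.linalg.det(R), 1.0)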
|
[
"def rotationMatrix(self):\n\n # R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n # self.exteriorOrientationParameters[5])\n\n return self.__rotationMatrix",
"def rotor_to_rotation_matrix(R):\n q = rotor_to_quaternion(R)\n return quaternion_to_matrix(q)",
"def expmap2rotmat(r):\n theta = np.linalg.norm( r )\n r0 = np.divide( r, theta + np.finfo(np.float32).eps )\n r0x = np.array([0, -r0[2], r0[1], 0, 0, -r0[0], 0, 0, 0]).reshape(3,3)\n r0x = r0x - r0x.T\n R = np.eye(3,3) + np.sin(theta)*r0x + (1-np.cos(theta))*(r0x).dot(r0x);\n return R",
"def _rotation_matrix(psi,theta,phi, R):\n cospsi = cos(psi)\n sinpsi = sin(psi)\n costheta = cos(theta)\n sintheta = sin(theta)\n cosphi = cos(phi)\n sinphi = sin(phi)\n\n sinphi_sinpsi = sinphi * sinpsi\n sinphi_cospsi = sinphi * cospsi \n\n cosphi_sinpsi = cosphi * sinpsi\n cosphi_cospsi = cosphi * cospsi\n \n R[0,0] = costheta * cosphi_cospsi - sinphi_sinpsi\n R[0,1] = - costheta * cosphi_sinpsi - sinphi_cospsi\n R[0,2] = cosphi * sintheta\n R[1,0] = costheta * sinphi_cospsi + cosphi_sinpsi\n R[1,1] = cosphi_cospsi - costheta * sinphi_sinpsi\n R[1,2] = sintheta * sinphi\n R[2,0] = - cospsi * sintheta\n R[2,1] = sintheta*sinpsi\n R[2,2] = costheta",
"def _rotation_matrix_uniaxial(theta,phi, R):\n costheta = cos(theta)\n sintheta = sin(theta)\n cosphi = cos(phi)\n sinphi = sin(phi)\n \n R[0,0] = costheta * cosphi\n R[0,1] = - sinphi \n R[0,2] = cosphi * sintheta\n R[1,0] = costheta * sinphi \n R[1,1] = cosphi\n R[1,2] = sintheta * sinphi\n R[2,0] = -sintheta\n R[2,1] = 0.\n R[2,2] = costheta",
"def rot(phi, P):\n R = np.array([[np.cos(phi), np.sin(phi)], [-np.sin(phi), np.cos(phi)]])\n return np.dot(P, R.T)",
"def rotation_matrix(self):\n return np.array([self.axis_u, self.axis_v, self.axis_w])",
"def getRotZ(angle):\n\tc, s = math.cos(angle), math.sin(angle)\n\treturn Matrix3((c, s, 0), (-s, c, 0), (0, 0, 1))",
"def rotz(theta,x):\n \n R_z = np.array([[np.cos(theta), -np.sin(theta), 0.],\n [np.sin(theta), np.cos(theta), 0.],\n [0., 0., 1.]])\n \n return np.dot(R_z,x)",
"def rpy2r(roll, pitch, jaw):\n \n # Rotational matrix for roll angle (around X)\n Rx = np.array([[1, 0, 0],\n [0, np.cos(roll),-np.sin(roll)],\n [0, np.sin(roll),np.cos(roll)]])\n # Rotational matrix for pitch angle (around Y)\n Ry = np.array([[1, 0, 0],\n [0, np.cos(pitch),-np.sin(pitch)],\n [0, np.sin(pitch),np.cos(pitch)]])\n # Rotational matrix for jaw angle (around Z)\n Rz = np.array([[1, 0, 0],\n [0, np.cos(yaw),-np.sin(yaw)],\n [0, np.sin(yaw),np.cos(yaw)]])\n \n # Rotates around x-axis, then new y-axis and then z-axis\n R = Rx.dot(Ry).dot(Rz)\n \n return R",
"def euler_angles_to_rotation_matrix(theta) -> np.ndarray:\n\n print(\"theta\", type(theta))\n\n r_x = np.array([[1, 0, 0],\n [0, cos(theta[0]), -sin(theta[0])],\n [0, sin(theta[0]), cos(theta[0])]\n ])\n\n r_y = np.array([[cos(theta[1]), 0, sin(theta[1])],\n [0, 1, 0],\n [-sin(theta[1]), 0, cos(theta[1])]\n ])\n\n r_z = np.array([[cos(theta[2]), -sin(theta[2]), 0],\n [sin(theta[2]), cos(theta[2]), 0],\n [0, 0, 1]\n ])\n\n rmat = np.dot(r_z, np.dot(r_y, r_x))\n return rmat",
"def rotz(delta):\n deltaRad = m.pi*delta/180;\n return np.array([[m.cos(deltaRad),-m.sin(deltaRad),0.],[m.sin(deltaRad),m.cos(deltaRad),0.],[0.,0.,1.]]);",
"def rotationFromAxesXYZ(pX, pY, pZ):\n return _almathswig.rotationFromAxesXYZ(pX, pY, pZ)",
"def generar_matriz_R(self, tp):\n # modulo del campo en el plano xy\n B1 = np.array([self.Bx, self.By])\n B1 = np.linalg.norm(B1, axis=0)\n\n # tres componentes de la direccion de rotacion. Cada U es un array de\n # n elementos, uno por cada sitio. Uz son ceros porque el campo en z\n # NO excita los spines.\n Ux = self.Bx/B1\n Uy = self.By/B1\n Uz = np.zeros_like(Ux)\n \n angulo = B1*tp\n \n # array de ceros y unos de tamano nx1\n zeros = np.zeros_like(Ux)\n ones = np.ones_like(Ux)\n \n # para definir la matriz uso la formula de Rodrigues:\n # https://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle\n U_matrix = np.array([[ zeros, -Uz , Uy ],\n [ Uz , zeros, -Ux ],\n [-Uy , Ux , zeros]]\n )\n \n Uxy, Uxz, Uyz = [Ux*Uy, Ux*Uz, Uy*Uz]\n U2_matrix = np.array([[Ux*Ux, Uxy , Uxz ],\n [Uxy , Uy*Uy, Uyz ],\n [Uxz , Uyz , Uz*Uz]]\n )\n \n I = np.array([[ones, zeros, zeros], [zeros, ones, zeros], [zeros, zeros, ones]])\n \n R = np.cos(angulo) * I + np.sin(angulo) * U_matrix + (1-np.cos(angulo)) * U2_matrix\n # convierto en array nx3x3\n R = np.moveaxis(R,2,0)\n return R",
"def getRotationMatrix( self):",
"def RotToQuat(R):\n #takes in W_R_B rotation matrix\n\n tr = np.sum(np.trace(R))\n\n if (tr > 0):\n S = np.sqrt(tr + 1.0) * 2 # S=4*qw\n qw = 0.25 * S\n qx = (R[2, 1] - R[1, 2]) / S\n qy = (R[0, 2] - R[2, 0]) / S\n qz = (R[1, 0] - R[0, 1]) / S\n elif (R[0, 0] > R[1, 1]) and (R[0, 0] > R[2, 2]):\n S = np.sqrt(1.0 + R(1,1) - R(2,2) - R(3,3)) * 2 # S=4*qx\n qw = (R[2, 1] - R[1, 2]) / S\n qx = 0.25 * S\n qy = (R[0, 1] + R[1, 0]) / S\n qz = (R[0, 2] + R[2, 0]) / S\n elif R[1, 1] > R[2, 2] :\n S = np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2] ) * 2 # S=4*qy\n qw = (R[0, 2] - R[2, 0] ) / S\n qx = (R[0, 1] + R[1, 0] ) / S\n qy = 0.25 * S\n qz = (R[1, 2] + R[2, 1] ) / S\n else:\n S = np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1] ) * 2 # S=4*qz\n qw = (R[1, 0] - R[0, 1] ) / S\n qx = (R[0, 2] + R[2, 0] ) / S\n qy = (R[1, 2] + R[2, 1] ) / S\n qz = 0.25 * S\n\n q = np.array([[qw], [qx], [qy], [qz]])\n q = q * np.sign(qw)\n\n return q",
"def rotor_to_quaternion(R):\n Q = (e123*R).value[0:4]\n Q[0] = R[0]\n return Q",
"def rotationFrom3DRotation(pWX, pWY, pWZ):\n return _almathswig.rotationFrom3DRotation(pWX, pWY, pWZ)",
"def RotY90():\n from numpy import zeros\n\n rot = zeros((3, 3))\n rot[0][2] = -1.0\n rot[1][1] = 1.0\n rot[2][0] = 1.0\n return rot"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get rotation matrix (of dim N x N) about the z-axis with angle alpha in radians.
|
def rotZ(alpha, N = 3):
R = np.identity(N)
R[0,0] = math.cos(alpha)
R[0,1] = -math.sin(alpha)
R[1,0] = math.sin(alpha)
R[1,1] = math.cos(alpha)
return R
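A short usage sketch, assuming the rotZ defined above is in scope with math and numpy (as np) imported; the values are arbitrary. rotZ writes a planar rotation into the top-left 2x2 block and leaves any remaining dimensions untouched.

# Usage sketch (illustrative values only); rotZ is the function defined above.
import math
import numpy as np

R3 = rotZ(math.pi / 2)         # default N = 3
R4 = rotZ(math.pi / 2, N=4)    # the extra dimensions remain identity

# Rotating the x unit vector by 90 degrees about z yields the y unit vector.
assert np.allclose(R3 @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])
assert np.allclose(R4[2:, 2:], np.eye(2))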
|
[
"def create_rot_mat(alpha):\n rot_mat = np.array(\n [[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]]\n )\n return rot_mat",
"def getRotZ(angle):\n\tc, s = math.cos(angle), math.sin(angle)\n\treturn Matrix3((c, s, 0), (-s, c, 0), (0, 0, 1))",
"def rotation_matrix_z(angle, out = None):\n c,s = np.cos(angle), np.sin(angle)\n if out is None:\n out = np.zeros(shape = c.shape + (3,3), dtype = FDTYPE)\n out[...,0,0] = c\n out[...,0,1] = -s\n out[...,1,0] = s\n out[...,1,1] = c\n out[...,2,2] = 1.\n return out",
"def get_rotate_matrix(theta,alpha):\n sin_ = np.sin(theta)\n cos_ = np.cos(theta)\n if alpha == 0:\n return np.array([[1,0,0],\n [0,cos_,-sin_],\n [0,sin_,cos_]])\n elif alpha == 1:\n return np.array([[cos_, 0, -sin_],\n [0, 1, 0],\n [sin_, 0, cos_]])\n elif alpha == 2:\n return np.array([[cos_,-sin_,0],\n [sin_,cos_,0],\n [0,0,1]])\n else :\n print(\"invalid alpha\")",
"def rotate_z(angle):\n sin_t = math.sin(math.radians(angle))\n cos_t = math.cos(math.radians(angle))\n m = Matrix4x4(cos_t, -sin_t, 0.0, 0.0,\n sin_t, cos_t, 0.0, 0.0,\n 0.0, 0.0, 1.0, 0.0,\n 0.0, 0.0, 0.0, 1.0)\n return Transform(m, transpose(m))",
"def rotate(self, alpha):\r\n\r\n if self.z is None:\r\n self._logger.warn('Z array is \"None\" - I cannot rotate that')\r\n return\r\n\r\n # check for iterable list/set of angles - if so, it must have length\r\n # 1 or same as len(tipper):\r\n if np.iterable(alpha) == 0:\r\n try:\r\n degreeangle = float(alpha % 360)\r\n except ValueError:\r\n self._logger.error('\"Angle\" must be a valid number (in degrees)')\r\n return\r\n\r\n # make an n long list of identical angles\r\n lo_angles = [degreeangle for ii in self.z]\r\n else:\r\n if len(alpha) == 1:\r\n try:\r\n degreeangle = float(alpha % 360)\r\n except ValueError:\r\n self._logger.error('\"Angle\" must be a valid number (in degrees)')\r\n return\r\n # make an n long list of identical angles\r\n lo_angles = [degreeangle for ii in self.z]\r\n else:\r\n try:\r\n lo_angles = [float(ii % 360) for ii in alpha]\r\n except ValueError:\r\n self._logger.error('\"Angles\" must be valid numbers (in degrees)')\r\n return\r\n\r\n self.rotation_angle = np.array([(oldangle + lo_angles[ii]) % 360\r\n for ii, oldangle in enumerate(self.rotation_angle)])\r\n\r\n if len(lo_angles) != len(self.z):\r\n self._logger.warn('Wrong number of \"angles\" - I need {0}'.format(len(self.z)))\r\n # self.rotation_angle = 0.\r\n return\r\n\r\n z_rot = copy.copy(self.z)\r\n z_err_rot = copy.copy(self.z_err)\r\n\r\n for idx_freq in range(len(self.z)):\r\n\r\n angle = lo_angles[idx_freq]\r\n if np.isnan(angle):\r\n angle = 0.\r\n\r\n if self.z_err is not None:\r\n z_rot[idx_freq], z_err_rot[idx_freq] = \\\r\n rotatematrix_incl_errors(self.z[idx_freq, :, :],\r\n angle,\r\n self.z_err[idx_freq, :, :])\r\n else:\r\n z_rot[idx_freq], z_err_rot = \\\r\n rotatematrix_incl_errors(self.z[idx_freq, :, :],\r\n angle)\r\n\r\n self.z = z_rot\r\n if self.z_err is not None:\r\n self.z_err = z_err_rot\r\n\r\n # for consistency recalculate resistivity and phase\r\n self.compute_resistivity_phase()",
"def rotate_z(self, points, theta):\n xy, z = torch.split(points, [2, 1], dim=-1)\n c, s = torch.cos(theta), torch.sin(theta)\n R = torch.stack((c, -s, s, c), dim=-1).view(-1, 2, 2)\n xyz = torch.cat((torch.einsum('ijk,imk->imj', R, xy), z), dim=-1)\n return xyz",
"def generate_random_rotation_matrix() -> np.ndarray:\n u = generate_random_unit_vector()\n v = generate_random_unit_vector()\n while np.abs(np.dot(u, v)) >= 0.99:\n v = generate_random_unit_vector()\n\n vp = v - (np.dot(u, v) * u)\n vp /= np.linalg.norm(vp)\n w = np.cross(u, vp)\n R = np.column_stack((u, vp, w))\n return R",
"def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)",
"def random_rotation_matrix(randgen=None):\n # adapted from http://www.realtimerendering.com/resources/GraphicsGems/gemsiii/rand_rotation.c\n \n if randgen is None:\n randgen = np.random.RandomState()\n \n theta, phi, z = tuple(randgen.rand(3).tolist())\n \n theta = theta * 2.0*np.pi # Rotation about the pole (Z).\n phi = phi * 2.0*np.pi # For direction of pole deflection.\n z = z * 2.0 # For magnitude of pole deflection.\n \n # Compute a vector V used for distributing points over the sphere\n # via the reflection I - V Transpose(V). This formulation of V\n # will guarantee that if x[1] and x[2] are uniformly distributed,\n # the reflected points will be uniform on the sphere. Note that V\n # has length sqrt(2) to eliminate the 2 in the Householder matrix.\n \n r = np.sqrt(z)\n Vx, Vy, Vz = V = (\n np.sin(phi) * r,\n np.cos(phi) * r,\n np.sqrt(2.0 - z)\n )\n \n st = np.sin(theta)\n ct = np.cos(theta)\n \n R = np.array(((ct, st, 0), (-st, ct, 0), (0, 0, 1)))\n # Construct the rotation matrix ( V Transpose(V) - I ) R.\n\n M = (np.outer(V, V) - np.eye(3)).dot(R)\n return M",
"def rotateZ(self, angle):\n (cosa, sina) = cos_and_sin(angle)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)",
"def rotz(delta):\n deltaRad = m.pi*delta/180;\n return np.array([[m.cos(deltaRad),-m.sin(deltaRad),0.],[m.sin(deltaRad),m.cos(deltaRad),0.],[0.,0.,1.]]);",
"def get_rotation_matrix(zone, verbose=False):\n\n # spherical coordinates of zone\n zone = np.array(zone)\n r = np.sqrt((zone*zone).sum())\n theta = np.arccos(zone[2]/r)\n if zone[0] < 0:\n theta = -theta\n if zone[0] == 0:\n phi = np.pi/2\n else:\n phi = (np.arctan(zone[1]/zone[0]))\n\n if verbose:\n print('Rotation theta ', np.degrees(theta), ' phi ', np.degrees(phi))\n # unit = np.array([[1, 0, 0],[0,1, 0],[0, 0,1]])\n\n # first we rotate phi about z-axis\n c, s = np.cos(phi), np.sin(phi)\n rotz = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])\n\n # second we rotate theta about y axis\n c, s = np.cos(theta), np.sin(theta)\n roty = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])\n\n # the rotation now makes zone-axis coincide with plane normal\n return np.dot(rotz, roty), np.degrees(theta), np.degrees(phi)",
"def rotation_matrix(a):\n \n R = np.eye(4)\n R[:3, :3] = linalg.expm([[0,-a[2],a[1]],[a[2],0,-a[0]],[-a[1],a[0],0]])\n\n return R",
"def rotation_matrix(self) -> Tensor:\n return self.extrinsics[..., :3, :3]",
"def rotationFromAxesXYZ(pX, pY, pZ):\n return _almathswig.rotationFromAxesXYZ(pX, pY, pZ)",
"def rotz(theta,x):\n \n R_z = np.array([[np.cos(theta), -np.sin(theta), 0.],\n [np.sin(theta), np.cos(theta), 0.],\n [0., 0., 1.]])\n \n return np.dot(R_z,x)",
"def rotation_matrix(self):\n return np.array([self.axis_u, self.axis_v, self.axis_w])",
"def getRotationMatrix( self):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calling str() on the object returns str(self.Cij).
|
def __str__(self):
return str(self.Cij)
|
[
"def to_string(self, *_):\n return str(self.constant_coefficient)",
"def __str__(self):\n return \"vals: \" + str(self.val) + \" jacobian: \" + str(self.der)",
"def toString(self) -> \"SbString\":\n return _coin.SbVec3d_toString(self)",
"def __str__(self):\n\n if self.initialized == 0:\n myStr = \"CellCenterData2d object not yet initialized\"\n return myStr\n\n myStr = \"cc data: nx = \" + repr(self.grid.nx) + \\\n \", ny = \" + repr(self.grid.ny) + \\\n \", ng = \" + repr(self.grid.ng) + \"\\n\" + \\\n \" nvars = \" + repr(self.nvar) + \"\\n\" + \\\n \" variables: \\n\"\n\n ilo = self.grid.ilo\n ihi = self.grid.ihi\n jlo = self.grid.jlo\n jhi = self.grid.jhi\n\n for n in range(self.nvar):\n myStr += \"%16s: min: %15.10f max: %15.10f\\n\" % \\\n (self.vars[n],\n np.min(self.data[n,ilo:ihi+1,jlo:jhi+1]),\n np.max(self.data[n,ilo:ihi+1,jlo:jhi+1]) )\n myStr += \"%16s BCs: -x: %-12s +x: %-12s -y: %-12s +y: %-12s\\n\" %\\\n (\" \" , self.BCs[self.vars[n]].xlb,\n self.BCs[self.vars[n]].xrb,\n self.BCs[self.vars[n]].ylb,\n self.BCs[self.vars[n]].yrb)\n\n return myStr",
"def __str__(self):\r\n return str(self.vector)",
"def __repr__(self) -> \"char const *\":\n return _coin.SbString___repr__(self)",
"def toString(self) -> \"SbString\":\n return _coin.SbVec3s_toString(self)",
"def toString(self) -> \"SbString\":\n return _coin.SbVec3f_toString(self)",
"def __str__(self):\r\n self_str = \"$({})\".format(self.lane)\r\n if self.customer != None:\r\n cashier_w_cust = \"{} - {}\".format(self_str, self.customer)\r\n return cashier_w_cust\r\n elif self.customer != None and len(self.line) == 2:\r\n cashier_w_line = \"{} - {}<- {}\".format(self.str, self.customer, self.line[0])\r\n return cashier_w_line\r\n elif self.customer != None and len(self.line) == 3:\r\n cashier_w_2line = \"{} - {}<- {},{}\".format(self.str, self.customer, self.line[0], self.line[1])\r\n return cashier_w_2line\r\n\r\n return self_str",
"def __str__(self):\n\t\treturn self.__cadena",
"def __str__ (self):\n return str(self.pieces)",
"def __str__(self):\n\t\tstring = \"\"\n\t\tfor aa in aa_to_codon:\n\t\t\tstring += \" Amino Acid: \" + aa + \" Count: \" + str(self.aa_count[aa]) +\"\\n\"\n\t\t\tstring += \" Codon Usage: \"\n\t\t\tfor codon in aa_to_codon[aa]:\n\t\t\t\tstring += codon + \" : \" + str(self.codon_table[codon]) + \" \"\n\t\t\tstring += \"\\n RSCU Values: \"\n\t\t\tfor codon in aa_to_codon[aa]:\n\t\t\t\tstring += codon + \" : \" + str(self.rscu_table[codon]) + \" \"\n\t\t\tstring += \"\\n\\n\"\n\n\t\treturn string",
"def toString(self) -> \"SbString\":\n return _coin.SbVec2s_toString(self)",
"def __str__(self):\n # Probably some performance issues with this code because of Python's\n # immutable strings. This code is only ever called in development or\n # testing, so it should be fine.\n i = 1\n result_string = \"\"\n for line in self._instructions:\n result_string += \"{0}: {1}\\n\".format(i, line)\n i += 1\n\n result_string += 'Labels:'\n for label, line_number in self._labels.iteritems():\n result_string += \"\\n\\t{0}: {1}\".format(label, line_number+1)\n # Added 1 because the line numbers are stored 0-indexed,\n # but we are printing 1-indexed line numbers.\n\n return result_string",
"def toString(self) -> \"SbString\":\n return _coin.SbRotation_toString(self)",
"def GetCStr(self, *args):\n return _snap.TBigStrPool_GetCStr(self, *args)",
"def __str__(self):\n\t\treturn 'vector( '+', '.join(map(str, self.data))+' )'",
"def make_cosmat_string(self):\n\n mat = self.matX\n\n if not mat.ready:\n return 1, '** no X-matrix to compute correlation matrix from'\n\n if not mat.cormat_ready: # then set it\n mat.set_cormat()\n\n mstr = ''\n for r in range(mat.ncols):\n for c in range(mat.ncols):\n mstr += '%6.3f ' % mat.cosmat[r,c]\n mstr += '\\n'\n\n return 0, mstr",
"def __str__(self):\n return str(\"Vec3(%s,%s,%s)\" % (self.x, self.y, self.z))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Transforms the elastic constant matrix based on the supplied axes.
|
def transform(self, axes, tol=1e-8):
axes = np.asarray(axes, dtype='float64')
T = axes_check(axes)
Q = np.einsum('km,ln->mnkl', T, T)
C = np.einsum('ghij,ghmn,mnkl->ijkl', Q, self.Cijkl, Q)
C[abs(C / C.max()) < tol] = 0.0
return elastic_constants(Cijkl=C)
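For reference, the two einsum calls above implement the standard fourth-rank tensor rotation, with T the transformation matrix returned by axes_check for the supplied axes:

C'_{ijkl} = T_{ig} T_{jh} T_{km} T_{ln} C_{ghmn}

Components whose magnitude falls below tol relative to the largest component are then zeroed before a new elastic_constants object is returned.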
|
[
"def elastic_transform(x: np.ndarray, amplitude: float, axis: AxesLike = None, order: int = 1):\n axis = axis_from_dim(axis, x.ndim)\n grid_shape = extract(x.shape, axis)\n deltas = [gaussian_filter(np.random.uniform(-amplitude, amplitude, grid_shape), 1) for _ in grid_shape]\n grid = np.mgrid[tuple(map(slice, grid_shape))] + deltas\n\n return apply_along_axes(partial(map_coordinates, coordinates=grid, order=order), x, axis)",
"def cast_axes(tensor, axes):\n return AxesCastOp(tensor, axes)",
"def set_axis_scales(self, axes, scales):\n for axis, scale in zip(axes, scales):\n self.axis_scale[axis] = scale",
"def elastic(X, y):\n \n [Xel, yel] = elasticdeform.deform_random_grid([X, y], sigma=5, axis=[(0, 1, 2), (0, 1, 2)], order=[3, 0])\n \n return Xel, yel",
"def axes(self, axes):\n self.__axes = axes if isinstance(axes, (list, tuple)) else [axes]\n warning('GCSCommands.axes: coerced to %r', self.__axes)",
"def invertOtherAxes(matrix, axis=0):\n axis = getAxis(axis)\n others = getOtherAxes(axis)\n x, y, z = matrix[:3]\n for v in (x, y, z):\n for a in others:\n v[a.index] *= -1\n return pm.dt.Matrix(x, y, z)",
"def invertAxis(matrix, axis=0):\n axis = getAxis(axis)\n x, y, z = matrix[:3]\n for v in (x, y, z):\n v[axis.index] *= -1\n return pm.dt.Matrix(x, y, z)",
"def _set_xy_matrixes(self, risk_matrixes):\n \n underlyings = list(set([rm.item for rm in risk_matrixes]))\n \n underlyings.sort(self._cmp_zar, None, True) # want the ZAR to be at the beginning\n indexed_rm = []\n matrix_len_x = len(risk_matrixes[0].data[0])\n matrix_len_y = len(risk_matrixes[0].data)\n \n self.col_defs.sort(key=lambda x: x.position)\n \n y_special_space = 0\n for yj, u in enumerate(underlyings):\n display_column_title = False\n has_formulas = False\n if u == self._CHANGES or u == self._COMBINED:\n has_formulas = True\n if yj == 0:\n display_column_title = True\n if u == self._CHANGES:\n y_special_space += 2 # will add two additional rows for combobox \n y = yj * (matrix_len_y + self._SPACE_Y) + self._TOP_Y + self._HEADER_Y_SIZE + y_special_space\n x = self._TOP_X\n # have to move the x index after each matrix\n # have to move y index after each underlying\n set_xy_matrix = lambda column: self._set_xy_matrix(\n u, column, x, y,\n matrix_len_x,\n self._HEADER_X_SIZE,\n self._SPACE_X,\n risk_matrixes,\n indexed_rm,\n display_column_title)\n for xj, col_def in enumerate(self.col_defs):\n display_u_title = False\n if xj == 0:\n display_u_title = True\n (x, new_rm) = set_xy_matrix(col_def.title)\n self._set_matrix_attr(new_rm, display_u_title, has_formulas, value_type=XLSCell.t_number)\n # have to insert the BREAKEVEN VOL after the _THETA,\n # which is not in the list of columns\n if col_def.title == self._THETA:\n (x, new_rm) = set_xy_matrix(self._BREAKEVEN_VOL)\n self._set_matrix_attr(new_rm, display_u_title=False, has_formulas=True, value_type=XLSCell.t_percent)\n \n # iterate to next matrix with a different underlying \n x = 0\n \n return indexed_rm",
"def ImposeSumRule(self, force_constant, asr = \"simple\", axis = 1, zeu = None):\n \n QE_fc = np.zeros( (3, 3, self.QE_nat, self.QE_nat), order =\"F\", dtype = np.complex128)\n \n # Fill the effective charges if required\n if zeu is not None:\n # Convert in the correct indexing and use the fortran order\n f_zeu = np.einsum(\"ijk -> kji\", zeu, order = \"F\", dtype = np.float64)\n else: \n f_zeu = np.zeros( (3, 3, self.QE_nat), order = \"F\", dtype = np.float64)\n \n # Prepare the force constant\n if asr != \"custom\":\n for na in range(self.QE_nat):\n for nb in range(self.QE_nat):\n QE_fc[:, :, na, nb] = force_constant[3 * na : 3* na + 3, 3*nb: 3 * nb + 3]\n # \n# print \"ASR:\", asr\n# print \"AXIS:\", axis\n# print \"NAT:\", self.QE_nat\n# print \"TAU SHAPE:\", np.shape(self.QE_tau)\n# print \"QE_FC SHAPE:\", np.shape(self.QE_fc)\n \n \n symph.set_asr(asr, axis, self.QE_tau, QE_fc, f_zeu)\n \n # Copy the new value on output\n for na in range(self.QE_nat):\n if zeu is not None:\n zeu[na, :,:] = f_zeu[:,:, na]\n \n for nb in range(self.QE_nat):\n force_constant[3 * na : 3* na + 3, 3*nb: 3 * nb + 3] = QE_fc[:,:, na, nb]\n else:\n CustomASR(force_constant)",
"def translate_upper_spheres(self, spheres_upper, axis):\n spheres_upper[:,axis] -= self.L[axis]\n spheres_upper[:,axis] *= -1\n return spheres_upper",
"def to_euler(vector0, vector1, aim_axis=0, up_axis=1, axes=XYZ, extrapolate=False): \n \n vector0 = _setDimension(vector0,2)\n vector1 = _setDimension(vector1,2)\n aim_axis = _setDimension(aim_axis,1,dtype=np.int32) % 3\n up_axis = _setDimension(up_axis,1,dtype=np.int32) % 3\n axes = _setDimension(axes,1,dtype=np.int32)\n \n vector0, vector1, aim_axis, up_axis, axes = _matchDepth(vector0, vector1, aim_axis, up_axis, axes)\n \n return _matrixToEuler(_vectorToMatrix(vector0, vector1, aim_axis, up_axis), axes)",
"def makexaxis(y, dx, x0=0):\r\n\r\n N = len(y)\r\n return np.linsace(x0, x0+(N-1)*dx, N)",
"def axis_calc(self, axis):\n # TODO: Rewrite this method to allow non-90deg planes to work\n # Figure out which axes the plane exists in\n axes = [1, 1, 1]\n axes[0] = (axis.v0.x - axis.v1.x - axis.v2.x) / 3.0\n axes[1] = (axis.v0.y - axis.v1.y - axis.v2.y) / 3.0\n axes[2] = (axis.v0.z - axis.v1.z - axis.v2.z) / 3.0\n # if axis.v0.x == axis.v1.x == axis.v2.x:\n # axes[0] = 0\n # if axis.v0.y == axis.v1.y == axis.v2.y:\n # axes[1] = 0\n # if axis.v0.z == axis.v1.z == axis.v2.z:\n # axes[2] = 0\n\n # Figure out uaxis xyz\n u = [0, 0, 0]\n for i in range(3):\n if axes[i] != 0.0:\n u[i] = axes[i]\n axes[i] = 0\n break\n\n # Figure out vaxis xyz\n v = [0, 0, 0]\n for i in range(3):\n if axes[i] != 0.0:\n v[i] = -axes[i]\n break\n\n uaxis = Axis(u[0], u[1], u[2])\n vaxis = Axis(v[0], v[1], v[2])\n return (uaxis, vaxis)",
"def fix_axis(self, axis, value):\n # Pre-evaluate the fixed axis, adjusting b\n b = self.b[:] - self.A[:, axis] * value\n # Remove that axis from a\n A = numpy.delete(self.A, axis, 1)\n fixed_values = self.fixed_values[:]\n fixed_values[axis] = value\n return QEF(A, b, fixed_values)",
"def reduce_axes(arr: numpy.ndarray, axes: Optional[Tuple[bool, ...]] = None) -> numpy.ndarray:\n if axes is None:\n axes = tuple(True for _ in arr.shape)\n axes_slices = [[numpy.s_[+1:], numpy.s_[:-1]] if axis else [numpy.s_[:]] for axis in axes]\n return numpy.mean([arr[tuple(p)] for p in itertools.product(*axes_slices)], axis=0) # type: ignore",
"def scaling2D(xscale, yscale):\n\n scalemat = np.array([[1/xscale, 0, 0],\n [0, 1/yscale, 0],\n [0, 0, 1]])\n\n return scalemat",
"def apply_over_axes(func, a, axes):\n a = _to_tensor(a)\n if isinstance(axes, int):\n axes = (axes,)\n res = a\n for axis in axes:\n res = func(res, axis=axis)\n res = F.expand_dims(res, axis) if res.ndim != a.ndim else res\n if res.ndim != a.ndim:\n _raise_value_error(\"function is not returning a tensor of the correct shape\")\n return res",
"def adjust_axes(self):\n\n # reduce number of ticks\n self.ax.yaxis.set_major_locator(MaxNLocator(2))\n self.ax.xaxis.set_major_locator(MaxNLocator(2))\n\n # log format for tick labels\n self.ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))\n\n self.ax.set_xlabel('Time')\n self.ax.set_ylabel('Intensity')\n\n # despine for aesthetics\n self.ax.spines['right'].set_visible(False)\n self.ax.spines['top'].set_visible(False)",
"def _assign_axes(self, xarr):\n axes = ['']*len(xarr.dims)\n\n for axis in xarr.dims:\n axis_str = self._pydim_to_ijdim(axis)\n\n ax_type = Axes.get(axis_str)\n ax_num = self._get_axis_num(xarr, axis)\n\n scale = self._get_scale(xarr.coords[axis])\n if scale is None:\n logging.warning(f\"The {ax_type.label} axis is non-numeric and is translated to a linear index.\")\n doub_coords = [Double(numpy.double(x)) for x in numpy.arange(len(xarr.coords[axis]))]\n else:\n doub_coords = [Double(numpy.double(x)) for x in xarr.coords[axis]]\n\n # EnumeratedAxis is a new axis made for xarray, so is only present in ImageJ versions that are released\n # later than March 2020. This actually returns a LinearAxis if using an earlier version.\n java_axis = EnumeratedAxis(ax_type, ij.py.to_java(doub_coords))\n\n axes[ax_num] = java_axis\n\n return axes"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The Voigt bulk modulus estimate. Uses hydrostatic stress.
|
def bulk_Voigt(self):
c = self.Cij
return ((c[0, 0] + c[1, 1] + c[2, 2]) + 2 * (c[0, 1] + c[1, 2] + c[0, 2])) / 9
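In 1-based Voigt notation (the 0-based numpy indices above correspond to Voigt indices 1-6), this is the usual Voigt average over the hydrostatic components of Cij:

K_V = \frac{(C_{11} + C_{22} + C_{33}) + 2(C_{12} + C_{23} + C_{13})}{9}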
|
[
"def model_elastic_modulus(T):\n return 2.25e6",
"def Tvir(Mvir):\n return (mu * mp / (2 * kB)) * (G * Mvir * solmass) / Rvir(Mvir)",
"def analyte_injected_pmol(self):\n return (self.analyte_injected_ng()/self.molweight)*1000",
"def latent_heat_vaporization(T, units=\"mass\"):\n if np.any(T > 200):\n T = T - NT #T must be in degC\n Lv = 1.0e6*(2.501 - 2.361e-3*T)*MH2O # J mol-1 \n if units==\"mass\":\n Lv = Lv / MH2O # J kg-1\n return Lv",
"def DMSunAnnihilationRateHooper(DM_mass,DM_cs,param):\n DM_rho = 0.3*param.GeV/param.cm**3\n vel_rot = 220.0*param.km/param.sec\n \n return 3.35*1.0e20*param.sec**-1*(DM_rho/(0.3*param.GeV/param.cm**3))*((270.0*param.km/param.sec)/vel_rot)**3*(100.0*param.GeV/DM_mass)**2*(DM_cs/(1.0e-6*param.picobarn))",
"def calculate_stress_and_tangent_modulus(self):\n eps = self._strain\n ep0 = self._strain_0\n epp = self._strain_p\n epr = self._strain_r\n sgr = self._stress_r\n K = self._K\n Z = self._Z\n fc = self._fc\n\n # == inequality signs are reversed compared to theory becuase of the negative signs\n\n # positive strain\n if eps >= 0:\n self._stress = 0.0\n self._Et = 0.0\n return\n\n # loading path\n if eps <= epr:\n if eps >= ep0:\n stress = K * fc * (2 * eps / ep0 - (eps / ep0) ** 2)\n tangen = K * fc * (2 / ep0 - 2 * (eps / ep0 ** 2))\n else:\n stress = K * fc * (1 + Z * (eps - ep0))\n if stress < 0.2 * K * fc:\n stress = 0.2 * K * fc\n tangen = 0\n else:\n tangen = K * fc * Z\n\n # unloading path\n else:\n if eps >= epp:\n self._stress = 0.0\n self._Et = 0.0\n return\n stress = -(sgr * eps - epp * sgr) / (epr - epp)\n tangen = -sgr / (epr - epp)\n\n self._stress = -1 * stress\n self._Et = -1 * tangen",
"def inducedR(stress,young,poisson,hs,hf):\n young = young/(1-poisson)\n return -young*hs**2/6/hf/stress",
"def latent_heat_vaporization_pure_water(tC_water):\n return (2500.8 - 2.37 * tC_water) * 1000.0",
"def orbital_velocity(height): #in meters\n #height *= m\n v = (G*mars.mass/height)**(1/2)\n return v",
"def _compute_kinetic_energy_cell(self):\n return self.b_masses_cell * self.b_velocities_cell ** 2",
"def angular_velocity(self):\n return 0.0",
"def shear_Voigt(self):\r\n c = self.Cij\r\n return ((c[0, 0] + c[1, 1] + c[2, 2]) - (c[0, 1] + c[1, 2] + c[0, 2]) + 3 * (c[3, 3] + c[4, 4] + c[5, 5])) / 15",
"def generationEnergy(self):\n return self.energy + BULLETKEFACTOR * self.energy * self.relativevelocity ** 2",
"def mass(self):\n return self.volume * self.rho",
"def __itruediv__(self, k):\n self.x /= k\n self.y /= k\n self.z /= k\n self.t /= k\n self.calculate_param()\n return self",
"def angular_velocity(self):\r\n\r\n self.omega += self.angular_acceleration*self.dt\r\n return self.omega",
"def rk4_mass_spring_system(amp,omega,k_spr_m,n_balls,t_f,delta_t):\n\n t_steps = int(t_f/delta_t)\n\n t = np.arange(0,t_f,delta_t)\n x = np.empty([n_balls, t_steps])\n v = np.empty([n_balls, t_steps])\n\n #k factors of Runge Kutta 4\n kx = np.empty([4,n_balls])\n kv = np.empty([4,n_balls])\n\n #Initial Conditions\n x[:,0] = 0.0\n v[:,0] = 0.0\n\n #Motion of the 0 mass\n x[0,:] = amp*np.sin(omega*t)*(1-0.5*(np.sign(t-5)+1.0))\n # v[0,:] = omega*amp*np.sin(omega*t)\n\n #Only the proportion between k_spr and m appears, not k_spr or m_b alone\n # k_spr_m = k_spr/m_b\n\n for jt in range(t_steps-1):\n\n #k1 factors\n for n in range(1,n_balls):\n if n <= (n_balls-2):\n kx[0,n] = delta_t*v[n,jt]\n kv[0,n] = delta_t*(k_spr_m)*f_n_in(x[n,jt], x[n+1,jt], x[n-1,jt])\n elif n == (n_balls-1):\n kx[0,n] = delta_t*v[n,jt]\n kv[0,n] = delta_t*(k_spr_m)*f_n_out(x[n,jt], x[n-1,jt])\n\n #k2 factors\n for n in range(1,n_balls):\n if n <= (n_balls-2):\n kx[1,n] = delta_t*(v[n,jt]+kv[0,n])\n kv[1,n] = delta_t* (k_spr_m)*f_n_in(x[n,jt]+0.5*kx[0,n], x[n+1,jt]+0.5*kx[0,n+1], x[n-1,jt]+0.5*kx[0,n-1])\n elif n == (n_balls-1):\n kx[1,n] = delta_t*(v[n,jt]+kv[0,n])\n kv[1,n] = delta_t*(k_spr_m)*f_n_out(x[n,jt]+0.5*kx[0,n], x[n-1,jt]+0.5*kx[0,n-1])\n\n #k3 factors\n for n in range(1,n_balls):\n if n <= (n_balls-2):\n kx[2,n] = delta_t*(v[n,jt]+kv[1,n])\n kv[2,n] = delta_t* (k_spr_m)*f_n_in(x[n,jt]+0.5*kx[1,n], x[n+1,jt]+0.5*kx[1,n+1], x[n-1,jt]+0.5*kx[1,n-1])\n elif n == (n_balls-1):\n kx[2,n] = delta_t*(v[n,jt]+kv[1,n])\n kv[2,n] = delta_t* (k_spr_m)*f_n_out(x[n,jt]+0.5*kx[1,n],x[n-1,jt]+0.5*kx[1,n-1])\n\n #k4 factors\n for n in range(1,n_balls):\n if n <= (n_balls-2):\n kx[3,n] = delta_t*(v[n,jt]+kv[2,n])\n kv[3,n] = delta_t* (k_spr_m)*f_n_in(x[n,jt]+kx[2,n],x[n+1,jt]+0.5*kx[2,n+1],x[n-1,jt]+0.5*kx[2,n-1])\n elif n == (n_balls-1):\n kx[3,n] = delta_t* (v[n,jt]+kv[2,n])\n kv[3,n] = delta_t* (k_spr_m)*f_n_out(x[n,jt]+kx[2,n],x[n-1,jt]+kx[2,n-1])\n\n #next position/velocity\n\n for n in range(1,n_balls):\n x[n,jt+1] = x[n,jt] + (kx[0,n]+2*kx[1,n]+2*kx[2,n]+kx[3,n])/6.0\n v[n,jt+1] = v[n,jt] + (kv[0,n]+2*kv[1,n]+2*kv[2,n]+kv[3,n])/6.0\n\n del(kx,kv,v)\n return t_steps,t,x",
"def make_sz_spin_adapted_hamiltonian(oei, tei):\n sdim = oei.shape[0]\n bas_aa = {}\n bas_ab = {}\n cnt_aa = 0\n cnt_ab = 0\n for p, q in product(range(sdim), repeat=2):\n if p < q:\n bas_aa[(p, q)] = cnt_aa\n cnt_aa += 1\n bas_ab[(p, q)] = cnt_ab\n cnt_ab += 1\n v2aa = np.zeros((sdim * (sdim - 1) // 2, sdim * (sdim - 1) // 2))\n v2ab = np.zeros((sdim * sdim , sdim * sdim))\n rev_bas_aa = dict(zip(bas_aa.values(), bas_aa.keys()))\n rev_bas_ab = dict(zip(bas_ab.values(), bas_ab.keys()))\n\n for r, s in product(range(len(bas_aa)), repeat=2):\n i, j = rev_bas_aa[r]\n k, l = rev_bas_aa[s]\n v2aa[r, s] = 0.5 * (tei[i, j, l, k] - tei[j, i, l, k] -\n tei[i, j, k, l] + tei[j, i, k, l])\n\n for r, s in product(range(len(bas_ab)), repeat=2):\n i, j = rev_bas_ab[r]\n k, l = rev_bas_ab[s]\n # we don't multiply by 0.5 because we count alpha-beta and beta-alpha\n v2ab[r, s] = tei[i, j, l, k]\n\n opdm_a = Tensor(oei, name='ck_a')\n opdm_b = Tensor(oei, name='ck_b')\n bas_aa, bas_ab = geminal_spin_basis(sdim)\n v2ab = Tensor(v2ab, basis=bas_ab, name='cckk_ab')\n v2bb = Tensor(v2aa, basis=bas_aa, name='cckk_bb')\n v2aa = Tensor(v2aa, basis=bas_aa, name='cckk_aa')\n return opdm_a, opdm_b, v2aa, v2bb, v2ab",
"def velocity(slowness):\n return 0.3048 / ((slowness * (10**(-6))))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The Hill shear modulus estimate. Equal to the average of the Voigt and Reuss shear moduli.
|
def shear(self):
return (self.shear_Voigt + self.shear_Reuss) / 2
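Written out, this is the Voigt-Reuss-Hill average G_H = (G_V + G_R) / 2, where G_V and G_R are the class's shear_Voigt and shear_Reuss estimates.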
|
[
"def wichmann_hill(seed):\n a, x = divmod(seed, 30268)\n a, y = divmod(a, 30306)\n a, z = divmod(a, 30322)\n x = (171 * x) % 30269\n y = (172 * y) % 30307\n z = (170 * z) % 30323\n ret_val = (x / 30269.0 + y / 30307.0 + z / 30323.0) % 1.0\n return ret_val",
"def __dowson_hamrock_parameters(r_eff, param_g, param_u, param_w):\n param_ehd = r_eff * param_g ** 0.53 * param_u ** 0.67 * param_w ** -0.067\n return param_ehd",
"def shear_Voigt(self):\r\n c = self.Cij\r\n return ((c[0, 0] + c[1, 1] + c[2, 2]) - (c[0, 1] + c[1, 2] + c[0, 2]) + 3 * (c[3, 3] + c[4, 4] + c[5, 5])) / 15",
"def shear_Reuss(self):\r\n s = self.Sij\r\n return 15 / (4 * (s[0, 0] + s[1, 1] + s[2, 2]) - 4 * (s[0, 1] + s[1, 2] + s[0, 2]) + 3 * (s[3, 3] + s[4, 4] + s[5, 5]))",
"def compute_harmonics(self) :\n\n Ye = np.zeros((self.L_max+1,self.L_max+1,self.n_dir))\n Yo = np.zeros((self.L_max+1,self.L_max+1,self.n_dir))\n\n phi = np.zeros((self.n_dir,1))\n for i in xrange(0,self.n_dir) :\n phi[i] = np.arctan(self.omega[i,1]/self.omega[i,0])\n if self.omega[i,0] < 0. :\n phi[i] = phi[i] + np.pi\n\n for l in xrange(0,self.L_max+1) :\n for m in xrange(0,l+1) :\n P_ml = scipy.special.lpmv(m,l,self.omega[:,2])\n# Normalization of the associated Legendre polynomials\n if m == 0 :\n norm_P = P_ml\n else :\n norm_P = (-1.0)**m*np.sqrt(2*sci.factorial(l-m)/sci.factorial(l+m))\\\n *P_ml\n size = norm_P.shape\n for i in xrange(0,size[0]) :\n Ye[l,m,i] = norm_P[i]*np.cos(m*phi[i])\n Yo[l,m,i] = norm_P[i]*np.sin(m*phi[i])\n\n# Build the matrix M \n self.sphr = np.zeros((self.n_dir,self.n_mom))\n self.M = np.zeros((self.n_dir,self.n_mom))\n if self.galerkin == True :\n for i in xrange(0,self.n_dir) :\n pos = 0\n for l in xrange(0,self.L_max+1) :\n fact = 2*l+1\n for m in xrange(l,-1,-1) :\n# do not use the EVEN when m+l is odd for L<sn of L=sn and m=0\n if l<self.sn and np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Ye[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n for m in xrange(1,l+1) :\n# do not ise the ODD when m+l is odd for l<=sn\n if l<=self.sn and np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Yo[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n else :\n for i in xrange(0,self.n_dir) :\n pos = 0\n for l in xrange(0,self.L_max+1) :\n fact = 2*l+1\n for m in xrange(l,-1,-1) :\n# do not use the EVEN when m+l is odd \n if np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Ye[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n for m in xrange(1,l+1) :\n# do not ise the ODD when m+l is odd \n if np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Yo[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1",
"def calculate_shear(self,B31c = 0):\n logger.debug('Calculating magnetic shear...')\n \n # Shorthand introduced: we also have to ransform to 1/B**2 expansion parameters, taking into account the \n # difference in the definition of the radial coordinate. In the work of Rodriguez et al.,\n # Phys. Plasmas, (2021), epsilon=sqrt(psi) while in the work of Landreman et al.,\n # J. Plasma Physics (2019) it is defined r=\\sqrt(2*psi/B0). Need to transform between the\n # two.\n\n eps_scale = np.sqrt(2/self.B0) \n\n # sign_psi = self.spsi\n # sign_G = self.sG # Sign is taken to be positive for simplicity. To include this, need to track expressions\n d_d_varphi = self.d_d_varphi\n G2 = self.G2*eps_scale**2\n G0 = self.G0\n I2 = self.I2*eps_scale**2\n X1c = self.X1c*eps_scale\n Y1c = self.Y1c*eps_scale\n Y1s = self.Y1s*eps_scale\n X20 = self.X20*eps_scale**2\n X2s = self.X2s*eps_scale**2\n X2c = self.X2c*eps_scale**2\n Y20 = self.Y20*eps_scale**2\n Y2s = self.Y2s*eps_scale**2\n Y2c = self.Y2c*eps_scale**2\n Z20 = self.Z20*eps_scale**2\n Z2s = self.Z2s*eps_scale**2\n Z2c = self.Z2c*eps_scale**2\n torsion = -self.torsion # I use opposite sign for the torsion\n curvature = self.curvature\n iota = self.iotaN\n dldp = self.abs_G0_over_B0\n dXc1v = self.d_X1c_d_varphi*eps_scale\n dY1cdp = self.d_Y1c_d_varphi*eps_scale\n dY1sdp = self.d_Y1s_d_varphi*eps_scale\n dZ20dp = self.d_Z20_d_varphi*eps_scale**2\n dZ2cdp = self.d_Z2c_d_varphi*eps_scale**2\n dZ2sdp = self.d_Z2s_d_varphi*eps_scale**2\n dX20dp = self.d_X20_d_varphi*eps_scale**2\n dX2cdp = self.d_X2c_d_varphi*eps_scale**2\n dX2sdp = self.d_X2s_d_varphi*eps_scale**2\n dY20dp = self.d_Y20_d_varphi*eps_scale**2\n dY2cdp = self.d_Y2c_d_varphi*eps_scale**2\n dY2sdp = self.d_Y2s_d_varphi*eps_scale**2\n # Transformation to 1/B**2 parameters \n B0 = 1/self.B0**2\n Ba0 = G0\n Ba1 = G2 + self.iotaN*I2\n eta = self.etabar*np.sqrt(2)*B0**0.25\n B1c = -2*B0*eta\n B20 = (0.75*self.etabar**2/np.sqrt(B0) - self.B20)*4*B0**2\n B31s = 0 # To preserve stellarator symmetry\n I4 = 0 # Take current variations at this order to be 0\n \n # Compute Z31c and Z31s from Cp2: we assume standard equilibria, meaning that we may\n # pick Bpsi0=0 and Bpsi1=0\n Z31c = -1/3/Ba0/X1c/Y1s*(2*iota*(X1c*X2s - Y2c*Y1s + Y1c*Y2s) - 2*Ba0*X2s*Y1c*Z20 +\n 2*Ba0* X2c*Y1s*Z20 + 2*Ba0*X1c*Y2s*Z20 - 4*Ba0*X2s*Y1c*Z2c - 2*Ba0* X20*Y1s*Z2c +\n 4*Ba0*X1c*Y2s*Z2c - dldp*(torsion*(2*X20*Y1c + X2c*Y1c - 2*X1c*Y20 - X1c*Y2c +\n X2s*Y1s) + I2*(2*X20*Y1c + X2c*Y1c - 2*X1c*Y20 - X1c*Y2c + X2s*Y1s) - \n 2*curvature*X1c*Z20 - curvature*X1c*Z2c) + 2*Ba0*X20*Y1c*Z2s + 4*Ba0*X2c*Y1c*Z2s - \n 2*Ba0*X1c*Y20*Z2s - 4*Ba0*X1c*Y2c*Z2s + 2*X1c*dX20dp + X1c*dX2cdp+2*Y1c*dY20dp +\n Y1c*dY2cdp + Y1s*dY2sdp)\n \n dZ31cdp = np.matmul(d_d_varphi, Z31c)\n \n Z31s = 1/3/Ba0/X1c/Y1s*(2*iota*(X1c*X2c + Y1c*Y2c + Y1s*Y2s) - 2*Ba0*X2c*Y1c*Z20 + \n 2*Ba0*X1c*Y2c*Z20 - 2*Ba0*X2s*Y1s*Z20 + 2*Ba0*X20*Y1c*Z2c - 2*Ba0*X1c*Y20*Z2c +\n 4*Ba0*X2s*Y1s*Z2c + 2*Ba0*X20*Y1s*Z2s - 4*Ba0*X2c*Y1s*Z2s + dldp*(I2*X2s*Y1c + \n 2*I2*X20*Y1s - I2*X2c*Y1s - I2*X1c*Y2s + torsion*(X2s*Y1c + 2*X20*Y1s - X2c*Y1s -\n X1c*Y2s) - curvature*X1c*Z2s) - X1c*dX2sdp - 2*Y1s*dY20dp + Y1s*dY2cdp - Y1c*dY2sdp)\n \n dZ31sdp = np.matmul(d_d_varphi, Z31s)\n\n \n # Equation J3: expression for X31c/s\n X31c = 1/2/dldp**2/curvature*(-2*Ba0*Ba1*B1c - Ba0**2*B31c+2*dldp**2*torsion**2*X1c*X20 +\n 2*iota**2*X1c*X2c + dldp**2*torsion**2*X1c*X2c + dldp**2*curvature**2*X1c*(2*X20 + X2c) + \n 3*dldp*iota*torsion*X2s*Y1c + 2*dldp**2*torsion**2*Y1c*Y20 + 2*iota**2*Y1c*Y2c +\n 
dldp**2*torsion**2*Y1c*Y2c - 2*dldp*iota*torsion*X20*Y1s - 3*dldp*iota*torsion*X2c*Y1s -\n 3*dldp*iota*torsion*X1c*Y2s + 2*iota**2*Y1s*Y2s + dldp**2*torsion**2*Y1s*Y2s + \n 2*dldp*iota*Z31s + 2*iota*X2s*dXc1v + 2*dldp*torsion*Y20*dXc1v + dldp*torsion*Y2c*dXc1v + \n 2*dldp*torsion*Y1c*dX20dp + 2*dXc1v*dX20dp + dldp*torsion*Y1c*dX2cdp + dXc1v*dX2cdp - \n iota*X1c*dX2sdp + dldp*torsion*Y1s*dX2sdp - 2*dldp*torsion*X20*dY1cdp - dldp*torsion*X2c*dY1cdp +\n 2*iota*Y2s*dY1cdp - 2*dldp*torsion*X1c*dY20dp + 2*iota*Y1s*dY20dp + 2*dY1cdp*dY20dp - \n dldp*torsion*X1c*dY2cdp + iota*Y1s*dY2cdp + dY1cdp*dY2cdp - dldp*torsion*X2s*dY1sdp - \n 2*iota*Y2c*dY1sdp - iota*Y1c*dY2sdp + dY1sdp*dY2sdp + dldp*curvature*(-3*iota*X1c*Z2s + \n dldp*torsion*(Y1c*(2*Z20 + Z2c) + Y1s*Z2s) + 2*Z20*dXc1v + Z2c*dXc1v - 2*X1c*dZ20dp - \n X1c*dZ2cdp) + 2*dldp*dZ31cdp)\n \n X31s = 1/2/dldp**2/curvature*(-Ba0**2*B31s + dldp**2*curvature**2*X1c*X2s + dldp**2*torsion**2*X1c*X2s +\n 2*dldp**2*torsion**2*Y20*Y1s - dldp**2*torsion**2*Y2c*Y1s + dldp**2*torsion**2*Y1c*Y2s +\n 2*iota**2*(X1c*X2s - Y2c*Y1s + Y1c*Y2s) + 2*dldp**2*curvature*torsion*Y1s*Z20 - \n dldp**2*curvature*torsion*Y1s*Z2c + dldp**2*curvature*torsion*Y1c*Z2s + dldp*torsion*Y2s*dXc1v +\n dldp*curvature*Z2s*dXc1v + 2*dldp*torsion*Y1s*dX20dp - dldp*torsion*Y1s*dX2cdp + \n dldp*torsion*Y1c*dX2sdp + dXc1v*dX2sdp - dldp*torsion*X2s*dY1cdp - 2*dldp*torsion*X20*dY1sdp + \n dldp*torsion*X2c*dY1sdp + 2*dY20dp*dY1sdp - dY2cdp*dY1sdp - dldp*torsion*X1c*dY2sdp + dY1cdp*dY2sdp +\n iota*(dldp*torsion*(2*X20*Y1c - 3*X2c*Y1c - 2*X1c*Y20 + 3*X1c*Y2c - 3*X2s*Y1s) + dldp*curvature*X1c*\n (-2*Z20 + 3*Z2c) - 2*dldp*Z31c - 2*X2c*dXc1v - 2*X1c*dX20dp + X1c*dX2cdp - 2*Y2c*dY1cdp -\n 2*Y1c*dY20dp + Y1c*dY2cdp - 2*Y2s*dY1sdp + Y1s*dY2sdp) - dldp*curvature*X1c*dZ2sdp +2*dldp*dZ31sdp)\n\n dX31sdp = np.matmul(d_d_varphi, X31s)\n \n # Equation Cb2\n Y31s = 1/4/Ba0/X1c*(-2*Ba1*X1c*Y1s + 2*iota*I2*X1c*Y1s - dldp*(4*curvature*X20 + torsion*I2*\n (X1c**2 + Y1c**2 + Y1s**2)) + 4*Ba0*(X31s*Y1c + 2*X2s*Y2c - X31c*Y1s - 2*X2c*Y2s) -\n I2*Y1c*dXc1v + I2*X1c*dY1cdp + 4*dZ20dp) \n\n dY31sdp = np.matmul(d_d_varphi, Y31s)\n\n \n # From the equation for Bt to order n=4, and looking at m=0\n LamTilde = 2/Y1s**2*(Ba0*B0*I4 + (Ba1*B0 + Ba0*B20)*I2) + 1/Y1s**2*(-2*iota*(2*X2c**2 + X1c*X31c + \n 2*X2s**2 + 2*Y2c**2 + 2*Y2s**2 + Y1s*Y31s + 2*Z2c**2 + 2*Z2s**2) + 2*dldp*(torsion*(-X31s*Y1c -\n 2*X2s*Y2c + X31c*Y1s + 2*X2c*Y2s + X1c*Y31s) + curvature*(-2*X2s*Z2c + 2*X2c*Z2s + X1c*Z31s)) -\n X31s*dXc1v - 2*X2s*dX2cdp + 2*X2c*dX2sdp + X1c*dX31sdp - Y31s*dY1cdp - 2*Y2s*dY2cdp +\n 2*Y2c*dY2sdp + Y1c*dY31sdp - 2*Z2s*dZ2cdp + 2*Z2c*dZ2sdp)\n\n # Need to compute the integration factor necessary for computing the shear\n DMred = d_d_varphi[1:,1:] # The differentiation matrix has a linearly dependent row, focus on submatrix\n\n # Distinguish between the stellarator symmetric case and the non-symmetric one at order r^1.\n # Distinction leads to the expSig function being periodic (stell. sym.) 
or not.\n if self.sigma0 == 0 and np.max(np.abs(self.rs)) == 0 and np.max(np.abs(self.zc)) == 0:\n # Case in which sigma is stellarator-symmetric:\n integSig = np.linalg.solve(DMred,self.sigma[1:]) # Invert differentiation matrix: as if first entry a zero, need to add it later\n integSig = np.insert(integSig,0,0) # Add the first entry 0\n expSig = np.exp(2*iota*integSig)\n # d_phi_d_varphi = 1 + np.matmul(d_d_varphi,self.phi-self.varphi)\n self.iota2 = self.B0/2*sum(expSig*LamTilde*self.d_varphi_d_phi)/sum(expSig*(X1c**2 + Y1c**2 + Y1s**2)/Y1s**2*self.d_varphi_d_phi) \n else:\n # Case in which sigma is not stellarator-symmetric:\n # d_phi_d_varphi = 1 + np.matmul(d_d_varphi,self.phi-self.varphi)\n avSig = sum(self.sigma*self.d_varphi_d_phi)/len(self.sigma) # Separate the piece that gives secular part, so all things periodic\n integSigPer = np.linalg.solve(DMred,self.sigma[1:]-avSig) # Invert differentiation matrix: as if first entry a zero, need to add it later\n integSig = integSigPer + avSig*self.varphi[1:] # Include the secular piece\n integSig = np.insert(integSig,0,0) # Add the first entry 0\n expSig_ext = np.append(np.exp(2*iota*integSig),np.exp(2*iota*(avSig*2*np.pi/self.nfp))) # Add endpoint at 2*pi for better integration\n LamTilde_ext = np.append(LamTilde,LamTilde[0])\n fac_denom = (X1c**2 + Y1c**2 + Y1s**2) / Y1s**2\n fac_denom_ext = np.append(fac_denom, fac_denom[0])\n varphi_ext = np.append(self.varphi, 2 * np.pi / self.nfp)\n self.iota2 = self.B0 / 2 \\\n * integ.trapz(expSig_ext * LamTilde_ext, varphi_ext) \\\n / integ.trapz(expSig_ext * fac_denom_ext, varphi_ext)\n \n # Using cumtrapz without exploiting periodicity\n # expSig = np.exp(2*iota*integ.cumtrapz(self.sigma,self.varphi,initial=0))",
"def Iyy_beam(b, h):\n return 1 / 12. * b * h ** 3",
"def ham_ising():\n E = np.array([[1, 0], [0, 1]])\n X = np.array([[0, 1], [1, 0]])\n Z = np.array([[1, 0], [0, -1]])\n # hmat = np.kron(X, np.kron(Z, X))\n hmat -= 0.5 * (np.kron(np.kron(X, X), E) + np.kron(E, np.kron(X, X)))\n return np.reshape(hmat, [2] * 6)",
"def HankSph(n, kr):\n return scipy.special.spherical_jn(n, kr) - 1j*scipy.special.spherical_yn(n, kr)",
"def test_hsmparams_nodefault():\n import time\n # First make some profile\n bulge = galsim.DeVaucouleurs(half_light_radius = 0.3)\n disk = galsim.Exponential(half_light_radius = 0.5)\n disk = disk.shear(e1=0.2, e2=-0.3)\n psf = galsim.Kolmogorov(fwhm = 0.6)\n gal = bulge + disk # equal weighting, i.e., B/T=0.5\n tot_gal = galsim.Convolve(gal, psf)\n tot_gal_image = tot_gal.drawImage(scale=0.18)\n tot_psf_image = psf.drawImage(scale=0.18)\n\n # Check that recompute_flux changes give results that are as expected\n test_t = time.time()\n res = galsim.hsm.EstimateShear(tot_gal_image, tot_psf_image)\n dt = time.time() - test_t\n res2 = galsim.hsm.EstimateShear(tot_gal_image, tot_psf_image, recompute_flux = 'sum')\n assert(res.moments_amp < res2.moments_amp),'Incorrect behavior with recompute_flux=sum'\n res3 = galsim.hsm.EstimateShear(tot_gal_image, tot_psf_image, recompute_flux = 'none')\n assert(res3.moments_amp == 0),'Incorrect behavior with recompute_flux=none'\n\n # Check correction_status and error message when recompute_flux is invalid.\n with assert_raises(galsim.GalSimError):\n galsim.hsm.EstimateShear(tot_gal_image, tot_psf_image, recompute_flux='invalid')\n res4 = galsim.hsm.EstimateShear(tot_gal_image, tot_psf_image, recompute_flux='invalid',\n strict=False)\n assert res4.correction_status == -1\n assert \"Unknown value\" in res4.error_message\n\n # Check that results, timing change as expected with nsig_rg\n # For this, use Gaussian as galaxy and for ePSF, i.e., no extra pixel response\n p = galsim.Gaussian(fwhm=10.)\n g = galsim.Gaussian(fwhm=20.)\n g = g.shear(g1=0.5)\n obj = galsim.Convolve(g, p)\n # HSM allows a slop of 1.e-8 on nsig_rg, which means that default float32 images don't\n # actually end up with different result when using nsig_rg=0. rather than 3.\n im = obj.drawImage(scale=1., method='no_pixel', dtype=float)\n psf_im = p.drawImage(scale=1., method='no_pixel', dtype=float)\n test_t1 = time.time()\n g_res = galsim.hsm.EstimateShear(im, psf_im)\n test_t2 = time.time()\n g_res2 = galsim.hsm.EstimateShear(im, psf_im, hsmparams=galsim.hsm.HSMParams(nsig_rg=0.))\n dt2 = time.time()-test_t2\n dt1 = test_t2-test_t1\n if test_timing:\n assert(dt2 > dt1),'Should take longer to estimate shear without truncation of galaxy'\n assert(not equal_hsmshapedata(g_res, g_res2)),'Results should differ with diff nsig_rg'\n assert g_res != g_res2,'Results should differ with diff nsig_rg'\n\n # Check that results, timing change as expected with convergence_threshold\n test_t2 = time.time()\n res2 = galsim.hsm.EstimateShear(tot_gal_image, tot_psf_image,\n hsmparams=galsim.hsm.HSMParams(convergence_threshold = 1.e-3))\n dt2 = time.time() - test_t2\n if test_timing:\n assert(dt2 < dt),'Should be faster to estimate shear with higher convergence_threshold'\n assert(not equal_hsmshapedata(res, res2)),'Outputs same despite change in convergence_threshold'\n assert res != res2,'Outputs same despite change in convergence_threshold'\n\n # Check that max_amoment, max_ashift work as expected\n assert_raises(galsim.GalSimError,\n galsim.hsm.EstimateShear, tot_gal_image, tot_psf_image,\n hsmparams=galsim.hsm.HSMParams(max_amoment = 10.))\n assert_raises(galsim.GalSimError,\n galsim.hsm.EstimateShear, tot_gal_image, tot_psf_image,\n guess_centroid=galsim.PositionD(47., tot_gal_image.true_center.y),\n hsmparams=galsim.hsm.HSMParams(max_ashift=0.1))",
"def shear(hx, hy):\n m = identity(3)\n\n m[0, 1] = tan(hx)\n m[1, 0] = tan(hy)\n \n return m",
"def por_r_herm(data):\n tdata = dc(data)\n\n m_e = tdata.get('m_e', np.array(2.))\n\n return por_r_harm(tdata)**(1./m_e)",
"def h(state, landmark, scanner_displacement):\r\n\t\tdx = landmark[0] - (state[0] + scanner_displacement * cos(state[2]))\r\n\t\tdy = landmark[1] - (state[1] + scanner_displacement * sin(state[2]))\r\n\t\tr = sqrt(dx * dx + dy * dy)\r\n\t\talpha = (atan2(dy, dx) - state[2] + pi) % (2*pi) - pi\r\n\t\treturn np.array([r, alpha])",
"def haversin(angle):\n return ((1.0 - math.cos(angle*math.pi/180.0))/2.0)",
"def hessenberg_ev(H):\n m, n = H.shape\n assert(m == n)\n assert(np.linalg.norm(H[np.tril_indices(m, -2)]) < 1.0e-6)\n ee, V = np.linalg.eig(H)\n return ee, V",
"def getH(self, nE, s):\n\n para = self.getBeta(nE, s)\n disp = self.getDisp(nE, s)\n\n HX = (para.GAMX * disp.DX**2) + (2 * para.ALFX * disp.DX * disp.DPX) + (para.BETX * disp.DPX**2)\n HY = (para.GAMY * disp.DY**2) + (2 * para.ALFY * disp.DY * disp.DPY) + (para.BETY * disp.DPY**2)\n return dct([('HX', HX),\n ('HY', HY)])",
"def jensen_shannon(h1, h2): # 85 us @array, 110 us @list \\w 100 bins\n h1, h2 = __prepare_histogram(h1, h2)\n s = (h1 + h2) / 2.\n return __kullback_leibler(h1, s) / 2. + __kullback_leibler(h2, s) / 2.",
"def get_hill_estimator(ordered_data):\r\n logs = np.log(ordered_data)\r\n logs_cumsum = np.cumsum(logs[:-1])\r\n k_vector = np.arange(1, len(ordered_data))\r\n m1 = (1./k_vector)*logs_cumsum - logs[1:]\r\n return m1",
"def test_h298(self):\n self.assertAlmostEqual(self.thermodata.H298.value_si / constants.R / 298., self.H298, 4)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The Voigt shear modulus estimate. Uses nonhydrostatic stresses.
|
def shear_Voigt(self):
c = self.Cij
return ((c[0, 0] + c[1, 1] + c[2, 2]) - (c[0, 1] + c[1, 2] + c[0, 2]) + 3 * (c[3, 3] + c[4, 4] + c[5, 5])) / 15
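In conventional 1-based Voigt notation (the code above indexes from 0), this is the standard uniform-strain average of the stiffness matrix:

G_V = \frac{(C_{11} + C_{22} + C_{33}) - (C_{12} + C_{23} + C_{13}) + 3(C_{44} + C_{55} + C_{66})}{15}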
|
[
"def shear(self):\r\n return (self.shear_Voigt + self.shear_Reuss) / 2",
"def shear_Reuss(self):\r\n s = self.Sij\r\n return 15 / (4 * (s[0, 0] + s[1, 1] + s[2, 2]) - 4 * (s[0, 1] + s[1, 2] + s[0, 2]) + 3 * (s[3, 3] + s[4, 4] + s[5, 5]))",
"def wichmann_hill(seed):\n a, x = divmod(seed, 30268)\n a, y = divmod(a, 30306)\n a, z = divmod(a, 30322)\n x = (171 * x) % 30269\n y = (172 * y) % 30307\n z = (170 * z) % 30323\n ret_val = (x / 30269.0 + y / 30307.0 + z / 30323.0) % 1.0\n return ret_val",
"def model_elastic_modulus(T):\n return 2.25e6",
"def bulk_Voigt(self):\r\n c = self.Cij\r\n return ((c[0, 0] + c[1, 1] + c[2, 2]) + 2 * (c[0, 1] + c[1, 2] + c[0, 2])) / 9",
"def HankSph(n, kr):\n return scipy.special.spherical_jn(n, kr) - 1j*scipy.special.spherical_yn(n, kr)",
"def pvoigt(x, amplitude=1.0, center=0.0, sigma=1.0, fraction=0.5):\r\n sigma_g = sigma\r\n return ((1-fraction)*gaussian(x, amplitude, center, sigma_g) +\r\n fraction*lorentzian(x, amplitude, center, sigma))",
"def nVoigt(x, wave0, sigma, gamma):\n\n # Calculate the new full-width half-max (magic numbers come from paper in\n # documentation)\n f = fwhm(sigma, gamma)\n\n # Calculate the mixing parameter (magic numbers come from paper in\n # documentation)\n m = mix(gamma, f)\n\n # Calculate normalized Gaussian and Lorentzians with FWHM 'f' at the point\n # of interest\n g = (2./f)*np.sqrt(np.log(2.)/np.pi)*np.exp(-4.*np.power((x-wave0)/f, 2))\n l = (f/(2.*np.pi))/(np.power(x-wave0, 2)+np.power(f/2., 2))\n\n # Create the normalized Voigt profile at the point of interest\n v = m*l + (1.-m)*g\n\n return v",
"def calculate_shear(self,B31c = 0):\n logger.debug('Calculating magnetic shear...')\n \n # Shorthand introduced: we also have to ransform to 1/B**2 expansion parameters, taking into account the \n # difference in the definition of the radial coordinate. In the work of Rodriguez et al.,\n # Phys. Plasmas, (2021), epsilon=sqrt(psi) while in the work of Landreman et al.,\n # J. Plasma Physics (2019) it is defined r=\\sqrt(2*psi/B0). Need to transform between the\n # two.\n\n eps_scale = np.sqrt(2/self.B0) \n\n # sign_psi = self.spsi\n # sign_G = self.sG # Sign is taken to be positive for simplicity. To include this, need to track expressions\n d_d_varphi = self.d_d_varphi\n G2 = self.G2*eps_scale**2\n G0 = self.G0\n I2 = self.I2*eps_scale**2\n X1c = self.X1c*eps_scale\n Y1c = self.Y1c*eps_scale\n Y1s = self.Y1s*eps_scale\n X20 = self.X20*eps_scale**2\n X2s = self.X2s*eps_scale**2\n X2c = self.X2c*eps_scale**2\n Y20 = self.Y20*eps_scale**2\n Y2s = self.Y2s*eps_scale**2\n Y2c = self.Y2c*eps_scale**2\n Z20 = self.Z20*eps_scale**2\n Z2s = self.Z2s*eps_scale**2\n Z2c = self.Z2c*eps_scale**2\n torsion = -self.torsion # I use opposite sign for the torsion\n curvature = self.curvature\n iota = self.iotaN\n dldp = self.abs_G0_over_B0\n dXc1v = self.d_X1c_d_varphi*eps_scale\n dY1cdp = self.d_Y1c_d_varphi*eps_scale\n dY1sdp = self.d_Y1s_d_varphi*eps_scale\n dZ20dp = self.d_Z20_d_varphi*eps_scale**2\n dZ2cdp = self.d_Z2c_d_varphi*eps_scale**2\n dZ2sdp = self.d_Z2s_d_varphi*eps_scale**2\n dX20dp = self.d_X20_d_varphi*eps_scale**2\n dX2cdp = self.d_X2c_d_varphi*eps_scale**2\n dX2sdp = self.d_X2s_d_varphi*eps_scale**2\n dY20dp = self.d_Y20_d_varphi*eps_scale**2\n dY2cdp = self.d_Y2c_d_varphi*eps_scale**2\n dY2sdp = self.d_Y2s_d_varphi*eps_scale**2\n # Transformation to 1/B**2 parameters \n B0 = 1/self.B0**2\n Ba0 = G0\n Ba1 = G2 + self.iotaN*I2\n eta = self.etabar*np.sqrt(2)*B0**0.25\n B1c = -2*B0*eta\n B20 = (0.75*self.etabar**2/np.sqrt(B0) - self.B20)*4*B0**2\n B31s = 0 # To preserve stellarator symmetry\n I4 = 0 # Take current variations at this order to be 0\n \n # Compute Z31c and Z31s from Cp2: we assume standard equilibria, meaning that we may\n # pick Bpsi0=0 and Bpsi1=0\n Z31c = -1/3/Ba0/X1c/Y1s*(2*iota*(X1c*X2s - Y2c*Y1s + Y1c*Y2s) - 2*Ba0*X2s*Y1c*Z20 +\n 2*Ba0* X2c*Y1s*Z20 + 2*Ba0*X1c*Y2s*Z20 - 4*Ba0*X2s*Y1c*Z2c - 2*Ba0* X20*Y1s*Z2c +\n 4*Ba0*X1c*Y2s*Z2c - dldp*(torsion*(2*X20*Y1c + X2c*Y1c - 2*X1c*Y20 - X1c*Y2c +\n X2s*Y1s) + I2*(2*X20*Y1c + X2c*Y1c - 2*X1c*Y20 - X1c*Y2c + X2s*Y1s) - \n 2*curvature*X1c*Z20 - curvature*X1c*Z2c) + 2*Ba0*X20*Y1c*Z2s + 4*Ba0*X2c*Y1c*Z2s - \n 2*Ba0*X1c*Y20*Z2s - 4*Ba0*X1c*Y2c*Z2s + 2*X1c*dX20dp + X1c*dX2cdp+2*Y1c*dY20dp +\n Y1c*dY2cdp + Y1s*dY2sdp)\n \n dZ31cdp = np.matmul(d_d_varphi, Z31c)\n \n Z31s = 1/3/Ba0/X1c/Y1s*(2*iota*(X1c*X2c + Y1c*Y2c + Y1s*Y2s) - 2*Ba0*X2c*Y1c*Z20 + \n 2*Ba0*X1c*Y2c*Z20 - 2*Ba0*X2s*Y1s*Z20 + 2*Ba0*X20*Y1c*Z2c - 2*Ba0*X1c*Y20*Z2c +\n 4*Ba0*X2s*Y1s*Z2c + 2*Ba0*X20*Y1s*Z2s - 4*Ba0*X2c*Y1s*Z2s + dldp*(I2*X2s*Y1c + \n 2*I2*X20*Y1s - I2*X2c*Y1s - I2*X1c*Y2s + torsion*(X2s*Y1c + 2*X20*Y1s - X2c*Y1s -\n X1c*Y2s) - curvature*X1c*Z2s) - X1c*dX2sdp - 2*Y1s*dY20dp + Y1s*dY2cdp - Y1c*dY2sdp)\n \n dZ31sdp = np.matmul(d_d_varphi, Z31s)\n\n \n # Equation J3: expression for X31c/s\n X31c = 1/2/dldp**2/curvature*(-2*Ba0*Ba1*B1c - Ba0**2*B31c+2*dldp**2*torsion**2*X1c*X20 +\n 2*iota**2*X1c*X2c + dldp**2*torsion**2*X1c*X2c + dldp**2*curvature**2*X1c*(2*X20 + X2c) + \n 3*dldp*iota*torsion*X2s*Y1c + 2*dldp**2*torsion**2*Y1c*Y20 + 2*iota**2*Y1c*Y2c +\n 
dldp**2*torsion**2*Y1c*Y2c - 2*dldp*iota*torsion*X20*Y1s - 3*dldp*iota*torsion*X2c*Y1s -\n 3*dldp*iota*torsion*X1c*Y2s + 2*iota**2*Y1s*Y2s + dldp**2*torsion**2*Y1s*Y2s + \n 2*dldp*iota*Z31s + 2*iota*X2s*dXc1v + 2*dldp*torsion*Y20*dXc1v + dldp*torsion*Y2c*dXc1v + \n 2*dldp*torsion*Y1c*dX20dp + 2*dXc1v*dX20dp + dldp*torsion*Y1c*dX2cdp + dXc1v*dX2cdp - \n iota*X1c*dX2sdp + dldp*torsion*Y1s*dX2sdp - 2*dldp*torsion*X20*dY1cdp - dldp*torsion*X2c*dY1cdp +\n 2*iota*Y2s*dY1cdp - 2*dldp*torsion*X1c*dY20dp + 2*iota*Y1s*dY20dp + 2*dY1cdp*dY20dp - \n dldp*torsion*X1c*dY2cdp + iota*Y1s*dY2cdp + dY1cdp*dY2cdp - dldp*torsion*X2s*dY1sdp - \n 2*iota*Y2c*dY1sdp - iota*Y1c*dY2sdp + dY1sdp*dY2sdp + dldp*curvature*(-3*iota*X1c*Z2s + \n dldp*torsion*(Y1c*(2*Z20 + Z2c) + Y1s*Z2s) + 2*Z20*dXc1v + Z2c*dXc1v - 2*X1c*dZ20dp - \n X1c*dZ2cdp) + 2*dldp*dZ31cdp)\n \n X31s = 1/2/dldp**2/curvature*(-Ba0**2*B31s + dldp**2*curvature**2*X1c*X2s + dldp**2*torsion**2*X1c*X2s +\n 2*dldp**2*torsion**2*Y20*Y1s - dldp**2*torsion**2*Y2c*Y1s + dldp**2*torsion**2*Y1c*Y2s +\n 2*iota**2*(X1c*X2s - Y2c*Y1s + Y1c*Y2s) + 2*dldp**2*curvature*torsion*Y1s*Z20 - \n dldp**2*curvature*torsion*Y1s*Z2c + dldp**2*curvature*torsion*Y1c*Z2s + dldp*torsion*Y2s*dXc1v +\n dldp*curvature*Z2s*dXc1v + 2*dldp*torsion*Y1s*dX20dp - dldp*torsion*Y1s*dX2cdp + \n dldp*torsion*Y1c*dX2sdp + dXc1v*dX2sdp - dldp*torsion*X2s*dY1cdp - 2*dldp*torsion*X20*dY1sdp + \n dldp*torsion*X2c*dY1sdp + 2*dY20dp*dY1sdp - dY2cdp*dY1sdp - dldp*torsion*X1c*dY2sdp + dY1cdp*dY2sdp +\n iota*(dldp*torsion*(2*X20*Y1c - 3*X2c*Y1c - 2*X1c*Y20 + 3*X1c*Y2c - 3*X2s*Y1s) + dldp*curvature*X1c*\n (-2*Z20 + 3*Z2c) - 2*dldp*Z31c - 2*X2c*dXc1v - 2*X1c*dX20dp + X1c*dX2cdp - 2*Y2c*dY1cdp -\n 2*Y1c*dY20dp + Y1c*dY2cdp - 2*Y2s*dY1sdp + Y1s*dY2sdp) - dldp*curvature*X1c*dZ2sdp +2*dldp*dZ31sdp)\n\n dX31sdp = np.matmul(d_d_varphi, X31s)\n \n # Equation Cb2\n Y31s = 1/4/Ba0/X1c*(-2*Ba1*X1c*Y1s + 2*iota*I2*X1c*Y1s - dldp*(4*curvature*X20 + torsion*I2*\n (X1c**2 + Y1c**2 + Y1s**2)) + 4*Ba0*(X31s*Y1c + 2*X2s*Y2c - X31c*Y1s - 2*X2c*Y2s) -\n I2*Y1c*dXc1v + I2*X1c*dY1cdp + 4*dZ20dp) \n\n dY31sdp = np.matmul(d_d_varphi, Y31s)\n\n \n # From the equation for Bt to order n=4, and looking at m=0\n LamTilde = 2/Y1s**2*(Ba0*B0*I4 + (Ba1*B0 + Ba0*B20)*I2) + 1/Y1s**2*(-2*iota*(2*X2c**2 + X1c*X31c + \n 2*X2s**2 + 2*Y2c**2 + 2*Y2s**2 + Y1s*Y31s + 2*Z2c**2 + 2*Z2s**2) + 2*dldp*(torsion*(-X31s*Y1c -\n 2*X2s*Y2c + X31c*Y1s + 2*X2c*Y2s + X1c*Y31s) + curvature*(-2*X2s*Z2c + 2*X2c*Z2s + X1c*Z31s)) -\n X31s*dXc1v - 2*X2s*dX2cdp + 2*X2c*dX2sdp + X1c*dX31sdp - Y31s*dY1cdp - 2*Y2s*dY2cdp +\n 2*Y2c*dY2sdp + Y1c*dY31sdp - 2*Z2s*dZ2cdp + 2*Z2c*dZ2sdp)\n\n # Need to compute the integration factor necessary for computing the shear\n DMred = d_d_varphi[1:,1:] # The differentiation matrix has a linearly dependent row, focus on submatrix\n\n # Distinguish between the stellarator symmetric case and the non-symmetric one at order r^1.\n # Distinction leads to the expSig function being periodic (stell. sym.) 
or not.\n if self.sigma0 == 0 and np.max(np.abs(self.rs)) == 0 and np.max(np.abs(self.zc)) == 0:\n # Case in which sigma is stellarator-symmetric:\n integSig = np.linalg.solve(DMred,self.sigma[1:]) # Invert differentiation matrix: as if first entry a zero, need to add it later\n integSig = np.insert(integSig,0,0) # Add the first entry 0\n expSig = np.exp(2*iota*integSig)\n # d_phi_d_varphi = 1 + np.matmul(d_d_varphi,self.phi-self.varphi)\n self.iota2 = self.B0/2*sum(expSig*LamTilde*self.d_varphi_d_phi)/sum(expSig*(X1c**2 + Y1c**2 + Y1s**2)/Y1s**2*self.d_varphi_d_phi) \n else:\n # Case in which sigma is not stellarator-symmetric:\n # d_phi_d_varphi = 1 + np.matmul(d_d_varphi,self.phi-self.varphi)\n avSig = sum(self.sigma*self.d_varphi_d_phi)/len(self.sigma) # Separate the piece that gives secular part, so all things periodic\n integSigPer = np.linalg.solve(DMred,self.sigma[1:]-avSig) # Invert differentiation matrix: as if first entry a zero, need to add it later\n integSig = integSigPer + avSig*self.varphi[1:] # Include the secular piece\n integSig = np.insert(integSig,0,0) # Add the first entry 0\n expSig_ext = np.append(np.exp(2*iota*integSig),np.exp(2*iota*(avSig*2*np.pi/self.nfp))) # Add endpoint at 2*pi for better integration\n LamTilde_ext = np.append(LamTilde,LamTilde[0])\n fac_denom = (X1c**2 + Y1c**2 + Y1s**2) / Y1s**2\n fac_denom_ext = np.append(fac_denom, fac_denom[0])\n varphi_ext = np.append(self.varphi, 2 * np.pi / self.nfp)\n self.iota2 = self.B0 / 2 \\\n * integ.trapz(expSig_ext * LamTilde_ext, varphi_ext) \\\n / integ.trapz(expSig_ext * fac_denom_ext, varphi_ext)\n \n # Using cumtrapz without exploiting periodicity\n # expSig = np.exp(2*iota*integ.cumtrapz(self.sigma,self.varphi,initial=0))",
"def phase_space_volume(self) -> float:",
"def get_Hv():\n \n vn = np.zeros((nx,ny+1)) \n vs = np.zeros((nx,ny+1))\n ve = np.zeros((nx,ny+1))\n vw = np.zeros((nx,ny+1))\n ue = np.zeros((nx,ny+1))\n uw = np.zeros((nx,ny+1))\n τyyn = np.zeros((nx,ny+1))\n τyys = np.zeros((nx,ny+1))\n τyxe = np.zeros((nx,ny+1))\n τyxw = np.zeros((nx,ny+1))\n Hv = np.zeros((nx,ny+1))\n \n j = np.arange(1,ny) # v-cell centers in domain interior\n \n vn[:,j] = (v[:,j+1] + v[:,j])/2\n vs[:,j] = (v[:,j] + v[:,j-1])/2\n \n i = np.arange(0,nx-1)\n ve[IJ(i,j)] = (v[IJ(i+1,j)] + v[IJ(i,j)])/2\n ve[nx-1,j] = vbc_r\n i = np.arange(1,nx)\n vw[IJ(i,j)] = (v[IJ(i,j)] + v[IJ(i-1,j)])/2\n vw[0,j] = vbc_l\n \n i = np.arange(0,nx)\n ue[IJ(i,j)] = (u[IJ(i+1,j-1)] + u[IJ(i+1,j)])/2\n uw[IJ(i,j)] = (u[IJ(i,j-1)] + u[IJ(i,j)]) /2\n \n τyyn[:,j] = -2*ν*(v[:,j+1] - v[:,j]) /Δy\n τyys[:,j] = -2*ν*(v[:,j] - v[:,j-1])/Δy\n \n i = np.arange(0,nx-1)\n τyxe[IJ(i,j)] = -ν*(v[IJ(i+1,j)]-v[IJ(i,j)])/Δx - ν*(u[IJ(i+1,j)]-u[IJ(i+1,j-1)])/Δy\n τyxe[nx-1,j] = -ν*(vbc_r-v[nx-1,j])/(Δx/2) - ν*(u[nx,j]-u[nx,j-1])/Δy \n \n i = np.arange(1,nx)\n τyxw[IJ(i,j)] = -ν*(v[IJ(i,j)]-v[IJ(i-1,j)])/Δx - ν*(u[IJ(i,j)]-u[IJ(i,j-1)])/Δy\n τyxw[0,j] = -ν*(v[0,j]-vbc_l)/(Δx/2) - ν*(u[0,j]-u[0,j-1])/Δy\n \n Hv[:,j] = -((vn[:,j]*vn[:,j] - vs[:,j]*vs[:,j])/Δy + (ve[:,j]*ue[:,j] - vw[:,j]*uw[:,j])/Δx) \\\n -((τyyn[:,j] - τyys[:,j])/Δy + (τyxe[:,j] - τyxw[:,j])/Δx)\n \n return Hv",
"def por_v_hsbs(data):\n tdata = dc(data)\n\n try:\n vp_b = tdata['vp_b']\n k_s = tdata['k_s']\n k_f = tdata['k_f']\n den_f = tdata['den_f']\n den_s = tdata['den_s']\n except NameError:\n raise\n mu_s = dc(tdata.get('mu_s', np.array(0.)))\n\n tvp_b = np.array(vp_b, dtype=float, copy=True, ndmin=1)\n\n vp_s = vp_modu({'den_b':den_s, 'k_b':k_s, 'mu_b':mu_s})\n vp_f = vp_modu({'den_b':den_f, 'k_b':k_f, 'mu_b':0})\n tvp_b[tvp_b < vp_f] = vp_f\n tvp_b[tvp_b > vp_s] = vp_s\n\n por_u = np.array([brentq(lambda x: vp_hsub({'vp_b':tvp_b,\n 'k_s':k_s, 'k_f':k_f, 'mu_s':mu_s, 'den_f':den_f,\n 'den_s':den_s, 'por':x}) - tvp_b[x_i], 0., 1.)\n for x_i in range(len(tvp_b))])\n\n vpm = max(vp_hslb({'vp_b':tvp_b, 'k_s':k_s, 'k_f':k_f, 'mu_s':mu_s,\n 'den_f':den_f, 'den_s':den_s, 'por':np.linspace(0.,.1,10)}))\n vpm = vp_hslb({'vp_b':tvp_b, 'k_s':k_s, 'k_f':k_f, 'mu_s':mu_s,\n 'den_f':den_f, 'den_s':den_s, 'por':0.})\n tvp_b[tvp_b > vpm] = vpm\n\n por_l = np.array([brentq(lambda x: vp_hslb({'vp_b':tvp_b,\n 'k_s':k_s, 'k_f':k_f, 'mu_s':mu_s, 'den_f':den_f,\n 'den_s':den_s, 'por':x}) - tvp_b[x_i],\n 0., 1.) for x_i in range(len(tvp_b))])\n\n return por_l, por_u",
"def lowz_sim(outfile='lowz_sim.fits',\n alpha=-1.23, Mstar=-22, phistar=1e-3, Q=0, P=0,\n Mrange=(-24, -21), mrange=(16, 19.6), zrange=(0.16, 0.36), nz=50,\n H0=100, omega_l=0.7, z0=0.0, area_dg2=8000, nblock=10, pord=4):\n\n def gam_dv(z):\n \"\"\"Gamma function times volume element to integrate.\"\"\"\n kc = np.interp(z, ztab, krtab)\n M1 = mrange[1] - cosmo.dist_mod(z) - kc + Q*(z-z0)\n M1 = max(min(Mrange[1], M1), Mrange[0])\n M2 = mrange[0] - cosmo.dist_mod(z) - kc + Q*(z-z0)\n M2 = max(min(Mrange[1], M2), Mrange[0])\n L1 = 10**(0.4*(Mstar - M1))\n L2 = 10**(0.4*(Mstar - M2))\n dens = phistar * 10**(0.4*P*(z-z0)) * mpmath.gammainc(alpha+1, L1, L2)\n ans = area * cosmo.dV(z) * dens\n return ans\n\n def schec(M):\n \"\"\"Schechter function.\"\"\"\n L = 10**(0.4*(Mstar - M))\n ans = 0.4 * ln10 * phistar * L**(alpha+1) * np.exp(-L)\n return ans\n\n def schec_ev(M, z):\n \"\"\"Evolving Schechter function.\"\"\"\n L = 10**(0.4*(Mstar - Q*(z-z0) - M))\n ans = 0.4 * ln10 * phistar * L**(alpha+1) * np.exp(-L)\n return ans\n\n def vol_ev(z):\n \"\"\"Volume element multiplied by density evolution.\"\"\"\n pz = cosmo.dV(z) * 10**(0.4*P*(z-z0))\n return pz\n\n def zM_pdf(z, M):\n \"\"\"PDF for joint redshift-luminosity distribution.\n\n Don't use this. Generate z and M distributions separately.\"\"\"\n pz = cosmo.dV(z) * 10**(0.4*P*(z-z0))\n pM = schec_ev(M, z)\n return pz*pM\n\n \"\"\"Read Maraston+09 SEDs.\"\"\"\n sedfile = lf_data+'Maraston2009/M09_models/M09_composite_bestfitLRG.sed'\n data = np.loadtxt(sedfile)\n ages, idxs = np.unique(data[:, 0], return_index=True)\n m09_dir = {}\n for i in range(len(idxs)-1):\n ilo = idxs[i]\n ihi = idxs[i+1]\n spec = astSED.SED(data[ilo:ihi, 1], data[ilo:ihi, 2])\n m09_dir[ages[i]] = spec\n spec = m09_dir[12.]\n\n # Read Doi+2010 SDSS passbands\n pbfile = lf_data + 'Doi2010/ugriz_atmos.txt'\n doi_g = astSED.Passband(pbfile, normalise=0, transmissionColumn=2)\n doi_r = astSED.Passband(pbfile, normalise=0, transmissionColumn=3)\n doi_i = astSED.Passband(pbfile, normalise=0, transmissionColumn=4)\n\n area = area_dg2*(math.pi/180.0)*(math.pi/180.0)\n cosmo = util.CosmoLookup(H0, omega_l, zrange)\n\n # K-correction and colour lookup tables\n ztab = np.linspace(zrange[0], zrange[1], nz)\n kgtab = np.zeros(nz)\n krtab = np.zeros(nz)\n kitab = np.zeros(nz)\n grtab = np.zeros(nz)\n ritab = np.zeros(nz)\n for i in range(len(ztab)):\n specz = spec.copy()\n specz.redshift(ztab[i])\n g_0 = spec.calcMag(doi_g, addDistanceModulus=False, magType='AB')\n r_0 = spec.calcMag(doi_r, addDistanceModulus=False, magType='AB')\n i_0 = spec.calcMag(doi_i, addDistanceModulus=False, magType='AB')\n g_z = specz.calcMag(doi_g, addDistanceModulus=False, magType='AB')\n r_z = specz.calcMag(doi_r, addDistanceModulus=False, magType='AB')\n i_z = specz.calcMag(doi_i, addDistanceModulus=False, magType='AB')\n kgtab[i] = g_z - g_0\n krtab[i] = r_z - r_0\n kitab[i] = i_z - i_0\n grtab[i] = g_z - r_z\n ritab[i] = r_z - i_z\n\n pcoeffg = np.polynomial.polynomial.polyfit(ztab, kgtab, pord)\n pcoeffr = np.polynomial.polynomial.polyfit(ztab, krtab, pord)\n pcoeffi = np.polynomial.polynomial.polyfit(ztab, kitab, pord)\n\n plt.clf()\n plt.plot(ztab, kgtab, label='g')\n plt.plot(ztab, np.polynomial.polynomial.polyval(ztab, pcoeffg), label='gp')\n plt.plot(ztab, krtab, label='r')\n plt.plot(ztab, np.polynomial.polynomial.polyval(ztab, pcoeffr), label='rp')\n plt.plot(ztab, kitab, label='i')\n plt.plot(ztab, np.polynomial.polynomial.polyval(ztab, pcoeffi), label='ip')\n plt.xlabel(r'Redshift')\n 
plt.ylabel(r'$k(z)$')\n plt.legend()\n plt.show()\n\n plt.clf()\n plt.plot(ztab, grtab, label='g-r')\n plt.plot(ztab, ritab, label='r-i')\n plt.xlabel(r'Redshift')\n plt.ylabel(r'Colour')\n plt.legend()\n plt.show()\n\n # Integrate evolving LF for number of simulated galaxies\n nsim, err = scipy.integrate.quad(gam_dv, zrange[0], zrange[1],\n epsabs=1e-3, epsrel=1e-3)\n nsim = int(nsim)\n print('Generating', nsim, 'galaxies')\n\n# pdb.set_trace()\n nrem = nsim\n nout = 0\n g_out, r_out, i_out, z_out = array('d'), array('d'), array('d'), array('d')\n while nrem > 0:\n z = util.ran_fun(vol_ev, zrange[0], zrange[1], nsim*nblock)\n Mabs = util.ran_fun(schec, Mrange[0], Mrange[1], nsim*nblock) - Q*(z-z0)\n\n r_obs = Mabs + cosmo.dist_mod(z) + np.interp(z, ztab, krtab)\n g_obs = r_obs + np.interp(z, ztab, grtab)\n i_obs = r_obs - np.interp(z, ztab, ritab)\n\n # apparent magnitude limits\n sel = (r_obs >= mrange[0]) * (r_obs < mrange[1])\n z, r_obs, g_obs, i_obs = z[sel], r_obs[sel], g_obs[sel], i_obs[sel]\n nsel = len(z)\n if nsel > nrem:\n nsel = nrem\n z, r_obs, g_obs, i_obs = z[:nrem], r_obs[:nrem], g_obs[:nrem], i_obs[:nrem]\n\n # remaining selection limits\n c_par = 0.7*(g_obs - r_obs) + 1.2*(r_obs - i_obs - 0.18)\n c_perp = np.abs((r_obs - i_obs) - (g_obs - r_obs)/4.0 - 0.18)\n sel = (r_obs < 13.5 + c_par/0.3) * (c_perp < 0.2)\n z, r_obs, g_obs, i_obs = z[sel], r_obs[sel], g_obs[sel], i_obs[sel]\n nobs = len(z)\n g_out.extend(g_obs)\n r_out.extend(r_obs)\n i_out.extend(i_obs)\n z_out.extend(z)\n# t['MODELMAG_G'][nout:nout+nobs] = g_obs\n# t['MODELMAG_R'][nout:nout+nobs] = r_obs\n# t['CMODELMAGCOR_R'][nout:nout+nobs] = r_obs\n# t['MODELMAG_I'][nout:nout+nobs] = i_obs\n# t['Z'][nout:nout+nobs] = z\n nout += nobs\n nrem -= nsel\n print(nrem)\n\n print(nout, 'out of', nsim, 'galaxies output')\n # Write out as FITS file\n zz = np.zeros(nout)\n ra = 360*np.random.rand(nout)\n dec = (180/math.pi)*np.arccos(2*np.random.rand(nout) - 1) - 90\n# pdb.set_trace()\n t = Table([ra, dec, g_out, r_out, i_out, r_out, z_out, zz,\n np.tile(pcoeffg, (nout, 1)),\n np.tile(pcoeffr, (nout, 1)), np.tile(pcoeffi, (nout, 1))],\n names=('RA', 'DEC', 'MODELMAG_G', 'MODELMAG_R', 'MODELMAG_I',\n 'CMODELMAGCOR_R',\n 'Z', 'CHI2', 'PCOEFF_G', 'PCOEFF_R', 'PCOEFF_I'),\n meta={'omega_l': omega_l, 'z0': z0, 'area': area_dg2,\n 'alpha': alpha, 'Mstar': Mstar, 'phistar': phistar,\n 'Q': Q, 'P': P})\n t.write(outfile, format='fits', overwrite=True)",
"def phase_spherical_variance():\n pass",
"def vels_from_mod(K, G, Rho):\r\n Vp = np.sqrt((K+4/3*G)/Rho)\r\n Vs = np.sqrt(G/Rho)\r\n\r\n return Vp, Vs",
"def _water_molar_volume(nwat: int, density: float) -> float:\n return (nwat * 18) / density",
"def gauss_hermitian(self):\n if self.seed:\n np.random.seed(self.seed)\n\n mag = np.random.normal(0, 1, size=[self.n] * self.dim)\n pha = 2 * np.pi * np.random.uniform(size=[self.n] * self.dim)\n\n dk = _make_hermitian(mag, pha)\n\n if self._even:\n cutidx = (slice(None, -1),) * self.dim\n dk = dk[cutidx]\n\n return dk",
"def get_hemi_sparsity(img, hemi, thr=0.000005):\n # Transform img to vector for the specified hemisphere\n gm_mask = get_hemi_gm_mask(hemi=hemi)\n masked = apply_mask(img, gm_mask)\n sparsity_dict = {}\n sparsity_dict[\"l1\"] = np.linalg.norm(masked, axis=1, ord=1)\n sparsity_dict[\"vc-pos\"] = (masked > thr).sum(axis=1)\n sparsity_dict[\"vc-neg\"] = (masked < -thr).sum(axis=1)\n sparsity_dict[\"vc-abs\"] = (np.abs(masked) > thr).sum(axis=1)\n\n return sparsity_dict",
"def inducedR(stress,young,poisson,hs,hf):\n young = young/(1-poisson)\n return -young*hs**2/6/hf/stress"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The Reuss shear modulus estimate. Uses nonhydrostatic strains.
|
def shear_Reuss(self):
s = self.Sij
return 15 / (4 * (s[0, 0] + s[1, 1] + s[2, 2]) - 4 * (s[0, 1] + s[1, 2] + s[0, 2]) + 3 * (s[3, 3] + s[4, 4] + s[5, 5]))
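In the same 1-based notation, this is the standard uniform-stress average of the compliance matrix:

G_R = \frac{15}{4(S_{11} + S_{22} + S_{33}) - 4(S_{12} + S_{23} + S_{13}) + 3(S_{44} + S_{55} + S_{66})}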
|
[
"def shear(self):\r\n return (self.shear_Voigt + self.shear_Reuss) / 2",
"def wichmann_hill(seed):\n a, x = divmod(seed, 30268)\n a, y = divmod(a, 30306)\n a, z = divmod(a, 30322)\n x = (171 * x) % 30269\n y = (172 * y) % 30307\n z = (170 * z) % 30323\n ret_val = (x / 30269.0 + y / 30307.0 + z / 30323.0) % 1.0\n return ret_val",
"def neg_sharpe_ratio(weights,riskfree_rate,er,cov):\r\n r=portfolio_return(weights,er)\r\n vol=portfolio_vol(weights,cov)\r\n return -(r-riskfree_rate)/vol",
"def msr (riskfree_rate,er,cov):\r\n n=er.shape[0]\r\n init_guess=np.repeat(1/n,n)\r\n bounds=((0.0,1.0),)*n \r\n def neg_sharpe_ratio(weights,riskfree_rate,er,cov):\r\n \"\"\"\r\n Returns the negative of Sharpe Ratio, given weights\r\n \"\"\"\r\n r=portfolio_return(weights,er)\r\n vol=portfolio_vol(weights,cov)\r\n return -(r-riskfree_rate)/vol\r\n \r\n weights_sum_to_1={'type':'eq','fun':lambda weights:np.sum(weights)-1}\r\n results=minimize(neg_sharpe_ratio,init_guess,args=(riskfree_rate,er,cov,),method='SLSQP',options={'disp':False},constraints=(weights_sum_to_1),bounds=bounds)\r\n return results.x",
"def neg_sharpe_ratio(weights, riskfree_rate, er, cov):\n r = portfolio_returns(weights, er)\n vol = portfolio_vol(weights, cov)\n return -(r - riskfree_rate)/vol",
"def inducedR(stress,young,poisson,hs,hf):\n young = young/(1-poisson)\n return -young*hs**2/6/hf/stress",
"def HankSph(n, kr):\n return scipy.special.spherical_jn(n, kr) - 1j*scipy.special.spherical_yn(n, kr)",
"def psnr(mse):\n return -10.0 * mse.log10()",
"def calc_s(nums: List[int], modulus: int) -> int:\n return sum(map(f_sq, nums)) % modulus",
"def Sersic_r2_over_hlr(n):\n return 0.985444 + n * (0.391016 + n * (0.0739602 + n * (0.00698719 + n * (0.00212432 + \\\n n * (-0.000154052 + n * 0.0000219632)))))",
"def __dowson_hamrock_parameters(r_eff, param_g, param_u, param_w):\n param_ehd = r_eff * param_g ** 0.53 * param_u ** 0.67 * param_w ** -0.067\n return param_ehd",
"def msr(riskfree_rate, er, cov):\n n = er.shape[0]\n initial_weights = np.repeat(1/n, n) # Equally distr. weights\n bounds = ((0.0, 1.0),)*n # n bounds of (0,1) tuples\n constraint_weight_sum_is_one = {\n 'type': 'eq',\n 'fun': lambda weights: np.sum(weights) - 1\n }\n\n def neg_sharpe_ratio(weights, riskfree_rate, er, cov):\n \"\"\"\n Returns the inverse of the Sharpe ratio given:\n * weights: allocation of the assets\n \"\"\"\n r = portfolio_return(weights, er)\n v = portfolio_vol(weights, cov)\n return -(r - riskfree_rate)/v\n\n results = minimize(neg_sharpe_ratio, initial_weights, args=(riskfree_rate, er, cov,), method=\"SLSQP\", options={\n 'disp': False}, constraints=(constraint_weight_sum_is_one), bounds=bounds)\n return results.x",
"def compute_sharpe_ratio(returns, vol, riskfree_rate): \n\n sharperatio = (returns - riskfree_rate) / vol\n \n return sharperatio",
"def test_purity_werner_state():\n res = purity(werner(2, 1 / 4))\n np.testing.assert_equal(np.isclose(res, 0.2653, atol=4), True)",
"def sharpe_ratio(allocs, normed):\n alloced = normed*allocs\n port_val = alloced.sum(axis=1) #gets total normalized returns for the portfolio as a whole\n daily_returns = compute_daily_returns(port_val)\n sddr = daily_returns.std()\n sr = ((daily_returns).mean()/sddr)*(252.**(1./2)) #computes sr\n return sr*-1 #multiply by negative 1 because we actually want to maximize sr",
"def model_elastic_modulus(T):\n return 2.25e6",
"def calculate_shear(self,B31c = 0):\n logger.debug('Calculating magnetic shear...')\n \n # Shorthand introduced: we also have to ransform to 1/B**2 expansion parameters, taking into account the \n # difference in the definition of the radial coordinate. In the work of Rodriguez et al.,\n # Phys. Plasmas, (2021), epsilon=sqrt(psi) while in the work of Landreman et al.,\n # J. Plasma Physics (2019) it is defined r=\\sqrt(2*psi/B0). Need to transform between the\n # two.\n\n eps_scale = np.sqrt(2/self.B0) \n\n # sign_psi = self.spsi\n # sign_G = self.sG # Sign is taken to be positive for simplicity. To include this, need to track expressions\n d_d_varphi = self.d_d_varphi\n G2 = self.G2*eps_scale**2\n G0 = self.G0\n I2 = self.I2*eps_scale**2\n X1c = self.X1c*eps_scale\n Y1c = self.Y1c*eps_scale\n Y1s = self.Y1s*eps_scale\n X20 = self.X20*eps_scale**2\n X2s = self.X2s*eps_scale**2\n X2c = self.X2c*eps_scale**2\n Y20 = self.Y20*eps_scale**2\n Y2s = self.Y2s*eps_scale**2\n Y2c = self.Y2c*eps_scale**2\n Z20 = self.Z20*eps_scale**2\n Z2s = self.Z2s*eps_scale**2\n Z2c = self.Z2c*eps_scale**2\n torsion = -self.torsion # I use opposite sign for the torsion\n curvature = self.curvature\n iota = self.iotaN\n dldp = self.abs_G0_over_B0\n dXc1v = self.d_X1c_d_varphi*eps_scale\n dY1cdp = self.d_Y1c_d_varphi*eps_scale\n dY1sdp = self.d_Y1s_d_varphi*eps_scale\n dZ20dp = self.d_Z20_d_varphi*eps_scale**2\n dZ2cdp = self.d_Z2c_d_varphi*eps_scale**2\n dZ2sdp = self.d_Z2s_d_varphi*eps_scale**2\n dX20dp = self.d_X20_d_varphi*eps_scale**2\n dX2cdp = self.d_X2c_d_varphi*eps_scale**2\n dX2sdp = self.d_X2s_d_varphi*eps_scale**2\n dY20dp = self.d_Y20_d_varphi*eps_scale**2\n dY2cdp = self.d_Y2c_d_varphi*eps_scale**2\n dY2sdp = self.d_Y2s_d_varphi*eps_scale**2\n # Transformation to 1/B**2 parameters \n B0 = 1/self.B0**2\n Ba0 = G0\n Ba1 = G2 + self.iotaN*I2\n eta = self.etabar*np.sqrt(2)*B0**0.25\n B1c = -2*B0*eta\n B20 = (0.75*self.etabar**2/np.sqrt(B0) - self.B20)*4*B0**2\n B31s = 0 # To preserve stellarator symmetry\n I4 = 0 # Take current variations at this order to be 0\n \n # Compute Z31c and Z31s from Cp2: we assume standard equilibria, meaning that we may\n # pick Bpsi0=0 and Bpsi1=0\n Z31c = -1/3/Ba0/X1c/Y1s*(2*iota*(X1c*X2s - Y2c*Y1s + Y1c*Y2s) - 2*Ba0*X2s*Y1c*Z20 +\n 2*Ba0* X2c*Y1s*Z20 + 2*Ba0*X1c*Y2s*Z20 - 4*Ba0*X2s*Y1c*Z2c - 2*Ba0* X20*Y1s*Z2c +\n 4*Ba0*X1c*Y2s*Z2c - dldp*(torsion*(2*X20*Y1c + X2c*Y1c - 2*X1c*Y20 - X1c*Y2c +\n X2s*Y1s) + I2*(2*X20*Y1c + X2c*Y1c - 2*X1c*Y20 - X1c*Y2c + X2s*Y1s) - \n 2*curvature*X1c*Z20 - curvature*X1c*Z2c) + 2*Ba0*X20*Y1c*Z2s + 4*Ba0*X2c*Y1c*Z2s - \n 2*Ba0*X1c*Y20*Z2s - 4*Ba0*X1c*Y2c*Z2s + 2*X1c*dX20dp + X1c*dX2cdp+2*Y1c*dY20dp +\n Y1c*dY2cdp + Y1s*dY2sdp)\n \n dZ31cdp = np.matmul(d_d_varphi, Z31c)\n \n Z31s = 1/3/Ba0/X1c/Y1s*(2*iota*(X1c*X2c + Y1c*Y2c + Y1s*Y2s) - 2*Ba0*X2c*Y1c*Z20 + \n 2*Ba0*X1c*Y2c*Z20 - 2*Ba0*X2s*Y1s*Z20 + 2*Ba0*X20*Y1c*Z2c - 2*Ba0*X1c*Y20*Z2c +\n 4*Ba0*X2s*Y1s*Z2c + 2*Ba0*X20*Y1s*Z2s - 4*Ba0*X2c*Y1s*Z2s + dldp*(I2*X2s*Y1c + \n 2*I2*X20*Y1s - I2*X2c*Y1s - I2*X1c*Y2s + torsion*(X2s*Y1c + 2*X20*Y1s - X2c*Y1s -\n X1c*Y2s) - curvature*X1c*Z2s) - X1c*dX2sdp - 2*Y1s*dY20dp + Y1s*dY2cdp - Y1c*dY2sdp)\n \n dZ31sdp = np.matmul(d_d_varphi, Z31s)\n\n \n # Equation J3: expression for X31c/s\n X31c = 1/2/dldp**2/curvature*(-2*Ba0*Ba1*B1c - Ba0**2*B31c+2*dldp**2*torsion**2*X1c*X20 +\n 2*iota**2*X1c*X2c + dldp**2*torsion**2*X1c*X2c + dldp**2*curvature**2*X1c*(2*X20 + X2c) + \n 3*dldp*iota*torsion*X2s*Y1c + 2*dldp**2*torsion**2*Y1c*Y20 + 2*iota**2*Y1c*Y2c +\n 
dldp**2*torsion**2*Y1c*Y2c - 2*dldp*iota*torsion*X20*Y1s - 3*dldp*iota*torsion*X2c*Y1s -\n 3*dldp*iota*torsion*X1c*Y2s + 2*iota**2*Y1s*Y2s + dldp**2*torsion**2*Y1s*Y2s + \n 2*dldp*iota*Z31s + 2*iota*X2s*dXc1v + 2*dldp*torsion*Y20*dXc1v + dldp*torsion*Y2c*dXc1v + \n 2*dldp*torsion*Y1c*dX20dp + 2*dXc1v*dX20dp + dldp*torsion*Y1c*dX2cdp + dXc1v*dX2cdp - \n iota*X1c*dX2sdp + dldp*torsion*Y1s*dX2sdp - 2*dldp*torsion*X20*dY1cdp - dldp*torsion*X2c*dY1cdp +\n 2*iota*Y2s*dY1cdp - 2*dldp*torsion*X1c*dY20dp + 2*iota*Y1s*dY20dp + 2*dY1cdp*dY20dp - \n dldp*torsion*X1c*dY2cdp + iota*Y1s*dY2cdp + dY1cdp*dY2cdp - dldp*torsion*X2s*dY1sdp - \n 2*iota*Y2c*dY1sdp - iota*Y1c*dY2sdp + dY1sdp*dY2sdp + dldp*curvature*(-3*iota*X1c*Z2s + \n dldp*torsion*(Y1c*(2*Z20 + Z2c) + Y1s*Z2s) + 2*Z20*dXc1v + Z2c*dXc1v - 2*X1c*dZ20dp - \n X1c*dZ2cdp) + 2*dldp*dZ31cdp)\n \n X31s = 1/2/dldp**2/curvature*(-Ba0**2*B31s + dldp**2*curvature**2*X1c*X2s + dldp**2*torsion**2*X1c*X2s +\n 2*dldp**2*torsion**2*Y20*Y1s - dldp**2*torsion**2*Y2c*Y1s + dldp**2*torsion**2*Y1c*Y2s +\n 2*iota**2*(X1c*X2s - Y2c*Y1s + Y1c*Y2s) + 2*dldp**2*curvature*torsion*Y1s*Z20 - \n dldp**2*curvature*torsion*Y1s*Z2c + dldp**2*curvature*torsion*Y1c*Z2s + dldp*torsion*Y2s*dXc1v +\n dldp*curvature*Z2s*dXc1v + 2*dldp*torsion*Y1s*dX20dp - dldp*torsion*Y1s*dX2cdp + \n dldp*torsion*Y1c*dX2sdp + dXc1v*dX2sdp - dldp*torsion*X2s*dY1cdp - 2*dldp*torsion*X20*dY1sdp + \n dldp*torsion*X2c*dY1sdp + 2*dY20dp*dY1sdp - dY2cdp*dY1sdp - dldp*torsion*X1c*dY2sdp + dY1cdp*dY2sdp +\n iota*(dldp*torsion*(2*X20*Y1c - 3*X2c*Y1c - 2*X1c*Y20 + 3*X1c*Y2c - 3*X2s*Y1s) + dldp*curvature*X1c*\n (-2*Z20 + 3*Z2c) - 2*dldp*Z31c - 2*X2c*dXc1v - 2*X1c*dX20dp + X1c*dX2cdp - 2*Y2c*dY1cdp -\n 2*Y1c*dY20dp + Y1c*dY2cdp - 2*Y2s*dY1sdp + Y1s*dY2sdp) - dldp*curvature*X1c*dZ2sdp +2*dldp*dZ31sdp)\n\n dX31sdp = np.matmul(d_d_varphi, X31s)\n \n # Equation Cb2\n Y31s = 1/4/Ba0/X1c*(-2*Ba1*X1c*Y1s + 2*iota*I2*X1c*Y1s - dldp*(4*curvature*X20 + torsion*I2*\n (X1c**2 + Y1c**2 + Y1s**2)) + 4*Ba0*(X31s*Y1c + 2*X2s*Y2c - X31c*Y1s - 2*X2c*Y2s) -\n I2*Y1c*dXc1v + I2*X1c*dY1cdp + 4*dZ20dp) \n\n dY31sdp = np.matmul(d_d_varphi, Y31s)\n\n \n # From the equation for Bt to order n=4, and looking at m=0\n LamTilde = 2/Y1s**2*(Ba0*B0*I4 + (Ba1*B0 + Ba0*B20)*I2) + 1/Y1s**2*(-2*iota*(2*X2c**2 + X1c*X31c + \n 2*X2s**2 + 2*Y2c**2 + 2*Y2s**2 + Y1s*Y31s + 2*Z2c**2 + 2*Z2s**2) + 2*dldp*(torsion*(-X31s*Y1c -\n 2*X2s*Y2c + X31c*Y1s + 2*X2c*Y2s + X1c*Y31s) + curvature*(-2*X2s*Z2c + 2*X2c*Z2s + X1c*Z31s)) -\n X31s*dXc1v - 2*X2s*dX2cdp + 2*X2c*dX2sdp + X1c*dX31sdp - Y31s*dY1cdp - 2*Y2s*dY2cdp +\n 2*Y2c*dY2sdp + Y1c*dY31sdp - 2*Z2s*dZ2cdp + 2*Z2c*dZ2sdp)\n\n # Need to compute the integration factor necessary for computing the shear\n DMred = d_d_varphi[1:,1:] # The differentiation matrix has a linearly dependent row, focus on submatrix\n\n # Distinguish between the stellarator symmetric case and the non-symmetric one at order r^1.\n # Distinction leads to the expSig function being periodic (stell. sym.) 
or not.\n if self.sigma0 == 0 and np.max(np.abs(self.rs)) == 0 and np.max(np.abs(self.zc)) == 0:\n # Case in which sigma is stellarator-symmetric:\n integSig = np.linalg.solve(DMred,self.sigma[1:]) # Invert differentiation matrix: as if first entry a zero, need to add it later\n integSig = np.insert(integSig,0,0) # Add the first entry 0\n expSig = np.exp(2*iota*integSig)\n # d_phi_d_varphi = 1 + np.matmul(d_d_varphi,self.phi-self.varphi)\n self.iota2 = self.B0/2*sum(expSig*LamTilde*self.d_varphi_d_phi)/sum(expSig*(X1c**2 + Y1c**2 + Y1s**2)/Y1s**2*self.d_varphi_d_phi) \n else:\n # Case in which sigma is not stellarator-symmetric:\n # d_phi_d_varphi = 1 + np.matmul(d_d_varphi,self.phi-self.varphi)\n avSig = sum(self.sigma*self.d_varphi_d_phi)/len(self.sigma) # Separate the piece that gives secular part, so all things periodic\n integSigPer = np.linalg.solve(DMred,self.sigma[1:]-avSig) # Invert differentiation matrix: as if first entry a zero, need to add it later\n integSig = integSigPer + avSig*self.varphi[1:] # Include the secular piece\n integSig = np.insert(integSig,0,0) # Add the first entry 0\n expSig_ext = np.append(np.exp(2*iota*integSig),np.exp(2*iota*(avSig*2*np.pi/self.nfp))) # Add endpoint at 2*pi for better integration\n LamTilde_ext = np.append(LamTilde,LamTilde[0])\n fac_denom = (X1c**2 + Y1c**2 + Y1s**2) / Y1s**2\n fac_denom_ext = np.append(fac_denom, fac_denom[0])\n varphi_ext = np.append(self.varphi, 2 * np.pi / self.nfp)\n self.iota2 = self.B0 / 2 \\\n * integ.trapz(expSig_ext * LamTilde_ext, varphi_ext) \\\n / integ.trapz(expSig_ext * fac_denom_ext, varphi_ext)\n \n # Using cumtrapz without exploiting periodicity\n # expSig = np.exp(2*iota*integ.cumtrapz(self.sigma,self.varphi,initial=0))",
"def test_hsmparams_nodefault():\n import time\n # First make some profile\n bulge = galsim.DeVaucouleurs(half_light_radius = 0.3)\n disk = galsim.Exponential(half_light_radius = 0.5)\n disk = disk.shear(e1=0.2, e2=-0.3)\n psf = galsim.Kolmogorov(fwhm = 0.6)\n gal = bulge + disk # equal weighting, i.e., B/T=0.5\n tot_gal = galsim.Convolve(gal, psf)\n tot_gal_image = tot_gal.drawImage(scale=0.18)\n tot_psf_image = psf.drawImage(scale=0.18)\n\n # Check that recompute_flux changes give results that are as expected\n test_t = time.time()\n res = galsim.hsm.EstimateShear(tot_gal_image, tot_psf_image)\n dt = time.time() - test_t\n res2 = galsim.hsm.EstimateShear(tot_gal_image, tot_psf_image, recompute_flux = 'sum')\n assert(res.moments_amp < res2.moments_amp),'Incorrect behavior with recompute_flux=sum'\n res3 = galsim.hsm.EstimateShear(tot_gal_image, tot_psf_image, recompute_flux = 'none')\n assert(res3.moments_amp == 0),'Incorrect behavior with recompute_flux=none'\n\n # Check correction_status and error message when recompute_flux is invalid.\n with assert_raises(galsim.GalSimError):\n galsim.hsm.EstimateShear(tot_gal_image, tot_psf_image, recompute_flux='invalid')\n res4 = galsim.hsm.EstimateShear(tot_gal_image, tot_psf_image, recompute_flux='invalid',\n strict=False)\n assert res4.correction_status == -1\n assert \"Unknown value\" in res4.error_message\n\n # Check that results, timing change as expected with nsig_rg\n # For this, use Gaussian as galaxy and for ePSF, i.e., no extra pixel response\n p = galsim.Gaussian(fwhm=10.)\n g = galsim.Gaussian(fwhm=20.)\n g = g.shear(g1=0.5)\n obj = galsim.Convolve(g, p)\n # HSM allows a slop of 1.e-8 on nsig_rg, which means that default float32 images don't\n # actually end up with different result when using nsig_rg=0. rather than 3.\n im = obj.drawImage(scale=1., method='no_pixel', dtype=float)\n psf_im = p.drawImage(scale=1., method='no_pixel', dtype=float)\n test_t1 = time.time()\n g_res = galsim.hsm.EstimateShear(im, psf_im)\n test_t2 = time.time()\n g_res2 = galsim.hsm.EstimateShear(im, psf_im, hsmparams=galsim.hsm.HSMParams(nsig_rg=0.))\n dt2 = time.time()-test_t2\n dt1 = test_t2-test_t1\n if test_timing:\n assert(dt2 > dt1),'Should take longer to estimate shear without truncation of galaxy'\n assert(not equal_hsmshapedata(g_res, g_res2)),'Results should differ with diff nsig_rg'\n assert g_res != g_res2,'Results should differ with diff nsig_rg'\n\n # Check that results, timing change as expected with convergence_threshold\n test_t2 = time.time()\n res2 = galsim.hsm.EstimateShear(tot_gal_image, tot_psf_image,\n hsmparams=galsim.hsm.HSMParams(convergence_threshold = 1.e-3))\n dt2 = time.time() - test_t2\n if test_timing:\n assert(dt2 < dt),'Should be faster to estimate shear with higher convergence_threshold'\n assert(not equal_hsmshapedata(res, res2)),'Outputs same despite change in convergence_threshold'\n assert res != res2,'Outputs same despite change in convergence_threshold'\n\n # Check that max_amoment, max_ashift work as expected\n assert_raises(galsim.GalSimError,\n galsim.hsm.EstimateShear, tot_gal_image, tot_psf_image,\n hsmparams=galsim.hsm.HSMParams(max_amoment = 10.))\n assert_raises(galsim.GalSimError,\n galsim.hsm.EstimateShear, tot_gal_image, tot_psf_image,\n guess_centroid=galsim.PositionD(47., tot_gal_image.true_center.y),\n hsmparams=galsim.hsm.HSMParams(max_ashift=0.1))",
"def rs(self):\n return self.rads/self.rad_schw"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Testing all public methods in scidblib.scidb_math.
|
def test_scidb_math_module():
print '*** testing scidblib.scidb_math...'
a = scidb_math.comma_separated_number(1234.1234)
assert a == '1,234.1234'
print 'comma_separated_number(1234.1234) =', a
a = scidb_math.fraction_if_less_than_one(0.125)
assert a == '1/8'
print 'fraction_if_less_than_one(0.125) =', a
a = scidb_math.ceil_of_division(8, 3)
assert a == 3
print 'ceil_of_division(8, 3) =', a
a = scidb_math.round_up(3248, 2)
assert a == 3300
print 'round_up(3248, 2) =', a
a = scidb_math.round_down(3248, 2)
assert a == 3200
print 'round_down(3248, 2) =', a
a = scidb_math.snap_to_grid(3161, 0.01, use_binary=False)
assert a == 3160
print 'snap_to_grid(3161, 0.01, use_binary=False) =', a
a = scidb_math.snap_to_grid(3161, 0.1, use_binary=False)
assert a == 3000
print 'snap_to_grid(3161, 0.1, use_binary=False) =', a
a = scidb_math.snap_to_grid(1021, 0.01, use_binary=True)
assert a == 1024
print 'snap_to_grid(1021, 0.01, use_binary=True) =', a
a = scidb_math.geomean([3, 3, 4, 8])
assert round(a, 10) == 4.1195342878
print 'geomean([3, 3, 4, 8]) =', a
print
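For orientation, a minimal sketch of helpers that would satisfy the assertions above; the names mirror the test but the bodies are guesses, not the actual scidblib.scidb_math source (round_up here only covers positive integers):

import math

def comma_separated_number(x):
    return format(x, ',')                      # 1234.1234 -> '1,234.1234'

def ceil_of_division(a, b):
    return -(-a // b)                          # ceil(8 / 3) == 3

def round_up(n, digits):
    step = 10 ** (len(str(n)) - digits)        # 2 significant digits of 3248 -> step of 100
    return math.ceil(n / step) * step          # 3248 -> 3300

def geomean(values):
    return math.exp(sum(math.log(v) for v in values) / len(values))

print(round(geomean([3, 3, 4, 8]), 10))        # 4.1195342878, matching the assert above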
|
[
"def test_SMEB():\n testing_function('sme', bilinear=True)",
"def test_example_numeric():\n numeric.main(test=True)",
"def test_DistMult():\n testing_function('distmult')",
"def test_statistics_module():\n print '*** testing scidblib.statistics...'\n data = [3, 3, 4, 8]\n\n a = statistics.pstdev(data)\n assert round(a, 10) == 2.0615528128\n print 'pstdev =', a\n\n a = statistics.pvariance(data)\n assert a == 4.25\n print 'pvariance =', a\n\n a = statistics.stdev(data)\n assert round(a, 10) == 2.3804761428\n print 'stdev =', a\n\n a = statistics.variance(data)\n assert round(a, 10) == 5.6666666667\n print 'variance =', a\n\n a = statistics.median(data)\n assert a == 3.5\n print 'median =', a\n\n a = statistics.median_low(data)\n assert a == 3\n print 'median_low =', a\n\n a = statistics.median_high(data)\n assert a == 4\n print 'median_high =', a\n\n a = statistics.median_grouped(data)\n assert a == 3.5\n print 'median_grouped =', a\n\n a = statistics.mean(data)\n assert a == 4.5\n print 'mean =', a\n\n a = statistics.mode(data)\n assert a == 3\n print 'mode =', a\n print",
"def test_math(self):\n self.assertTrue((1 + 1) == 2)",
"def test_RESCAL():\n testing_function('rescal')",
"def test_legacy_box_score(self):\n pass",
"def test_regular():\n assert newton.square_root_with_newton_method(25, 1) == 7.25",
"def test_handcrafted_examples(self):\n self.assertTrue(abs(pi(1000000) - 3.14) < 0.01)",
"def test_interface():\n import pKaTool.pKa_calc\n X = pKaTool.pKa_calc.Monte_Carlo_Mult_CPP()\n\n X.intrinsic_pKa = {':0001:ASP': [0.0, 4.0, 5.0]}\n X.charged_state = {':0001:ASP': [0, 1, 1]}\n X.acid_base = {':0001:ASP': -1}\n X.intene_mult = {':0001:ASP': {':0001:ASP': [[0, 0, 0], [0, 0, 0], [0, 0, 0]]}}\n X._calc_pKas(0.0, 10.0, 0.5)\n return",
"def test_SLM():\n testing_function('slm')",
"def test_my_mul():\n assert my_mul(2, 7) == 14\n assert my_mul(9, 9) == 81",
"def test_divide():\n calc = Calculator(9)\n assert calc.divide(3) == 3",
"def test_function_10(self):\n\t\tself.assertEqual(attempt.fdash(10), 0.12365)",
"def test_legacy_box_scores(self):\n pass",
"def testD():\n #Test case for valid iscurrency\n result24 = a1.iscurrency('USD')\n cornell.assert_equals = (True, result24)\n \n #test case for invalid iscurrency\n result25 = a1.iscurrency('AAA')\n cornell.assert_equals = (False, result25)\n \n #Test case for invalid iscurrency\n result26 = a1.iscurrency('usd')\n cornell.assert_equals = (False, result26)\n\n #Test case for valid exchange\n result27 = a1.exchange('USD','HKD',1.0)\n cornell.assert_floats_equal(7.82541, result27)",
"def test_compare_SphericalSLD_OnionExpShell(self):\r\n note = \"\\n*****Note: This test was passes since Nov. 1st, 2010...\"\r\n print note\r\n # set params\r\n self.model.setParam(\"npts_inter\", 35)\r\n self.model.setParam(\"rad_core0\", 100)\r\n self.model.setParam(\"thick_inter0\", 200)\r\n self.model.setParam(\"nu_inter0\", 4)\r\n # Rexp func\r\n self.model.setParam(\"func_inter0\", 3)\r\n self.model.setParam(\"thick_inter1\", 200)\r\n self.model.setParam(\"nu_inter1\", 4)\r\n self.model.setParam(\"func_inter1\", 3)\r\n # set A_shell=1\r\n self.model2.setParam(\"sld_core0\", 2.07e-006)\r\n # change the function to flat function\r\n self.model2.setParam(\"rad_core0\", 100)\r\n self.model2.setParam(\"thick_shell1\", 200)\r\n self.model2.setParam(\"sld_out_shell1\", 4e-006)\r\n self.model2.setParam(\"sld_in_shell1\", 2.07e-006)\r\n self.model2.setParam(\"A_shell1\", -4)\r\n self.model2.setParam(\"thick_shell2\", 100)\r\n self.model2.setParam(\"sld_out_shell2\", 4e-006)\r\n self.model2.setParam(\"sld_in_shell2\", 4e-006)\r\n self.model2.setParam(\"A_shell2\", 0)\r\n self.model2.setParam(\"thick_shell3\", 200)\r\n self.model2.setParam(\"sld_out_shell3\", 1e-006)\r\n self.model2.setParam(\"sld_in_shell3\", 4e-006)\r\n self.model2.setParam(\"A_shell3\", -4)\r\n self.model2.setParam(\"sld_solv\", 1e-006)\r\n \r\n #sphericalsld model runs\r\n model_run_0_1 = self.model.run(0.1)\r\n model_run_0_01 = self.model.run(0.01)\r\n model_run_0_001 = self.model.run(0.001)\r\n #onionexp model runs\r\n model2_run_0_1 = self.model2.run(0.1)\r\n model2_run_0_01 = self.model2.run(0.01)\r\n model2_run_0_001 = self.model2.run(0.001)\r\n import time\r\n st = time.time()\r\n qs = []\r\n qs = [i/10000 for i in range(1,1000)]\r\n out = map(self.model.run,qs)\r\n print time.time()-st\r\n #Compare exp(A=0) to flat (where A_shell is null) function\r\n self.assertAlmostEqual(self.model.run(0.1),self.model2.run(0.1),4)\r\n self.assertAlmostEqual(self.model.run(0.01),self.model2.run(0.01),0)\r\n self.assertAlmostEqual(self.model.run(0.001),self.model2.run(0.001),-3)",
"def testCalculateInternalSMatrix(self):\n absoluteTolerance = 0.0001;\n relativeTolerance = 0.001;\n\n l0 = 2.7;\n k0 = 2.3271;\n kx = 1.00063;\n ky = 0.424741;\n\n er = [2.0, 1.0];\n ur = [1.0, 3.0];\n L = [0.25*l0, 0.5*l0];\n Wg = complexIdentity(2);\n Vg = complexArray([\n [0 - 0.4250j, 0 - 1.1804j],\n [0 + 2.0013j, 0 + 0.4250j]]);\n\n i = 0;\n SiCalculated = calculateInternalSMatrix(kx, ky, er[i], ur[i], k0, L[i], Wg, Vg);\n\n SiActual = complexZeros((2,2,2,2));\n SiActual[0,0] = complexArray([\n [0.0039 - 0.0006j, -0.0398 + 0.0060j],\n [-0.0398 + 0.0060j, 0.0808 - 0.0121j]]);\n SiActual[0,1] = complexArray([\n [0.1490 + 0.9880j, 0.0005 + 0.0017j],\n [0.0005 + 0.0017j, 0.1480 + 0.9848j]]);\n SiActual[1,0] = complexArray([\n [0.1490 + 0.9880j, 0.0005 + 0.0017j],\n [0.0005 + 0.0017j, 0.1480 + 0.9848j]]);\n SiActual[1,1] = complexArray([\n [0.0039 - 0.0006j, -0.0398 + 0.0060j],\n [-0.0398 + 0.0060j, 0.0808 - 0.0121j]]);\n\n assertAlmostEqual(SiActual, SiCalculated, absoluteTolerance, relativeTolerance);\n\n i = 1;\n SiCalculated = calculateInternalSMatrix(kx, ky, er[i], ur[i], k0, L[i], Wg, Vg);\n SiActual[0,0] = complexArray([\n [0.6997 - 0.2262j, 0.0517 - 0.0014j],\n [0.0517-0.0014j, 0.5998 - 0.2235j]]);\n SiActual[0,1] = complexArray([\n [-0.2093 - 0.6406j, 0.0311 + 0.0390j],\n [0.0311 + 0.0390j, -0.2693 - 0.7160j]]);\n SiActual[1,0] = complexArray([\n [-0.2093 - 0.6406j, 0.0311 + 0.0390j],\n [0.0311 + 0.0390j, -0.2693 - 0.7160j]]);\n SiActual[1,1] = complexArray([\n [0.6997 - 0.2262j, 0.0517 - 0.0014j],\n [0.0517-0.0014j, 0.5998 - 0.2235j]]);\n\n assertAlmostEqual(SiActual, SiCalculated, absoluteTolerance, relativeTolerance);",
"def test_divide(self):\n print(\"Test method divide(a, b)\")\n self.assertEqual(2.1, divide(6, 3))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Testing all public methods in scidblib.scidb_afl.
|
def test_scidb_afl_module():
print '*** testing scidblib.scidb_afl...'
class TmpArgs:
def __init__(self):
self.host = ''
self.port = ''
args = TmpArgs()
iquery_cmd = scidb_afl.get_iquery_cmd(args)
scidb_afl.execute_it_return_out_err('ls')
scidb_afl.afl(iquery_cmd, 'list()')
print 'time_afl(..., \'list()\') =', scidb_afl.time_afl(iquery_cmd, 'list()')
print 'single_cell_afl(..., \'build(<v:int64>[i=0:0,1,0], 5)\', 1) =', \
scidb_afl.single_cell_afl(iquery_cmd, 'build(<v:int64>[i=0:0,1,0], 5)', 1)
print 'single_cell_afl(..., \'apply(build(<v:int64>[i=0:0,1,0], 5), v2, 6)\', 2) =', \
scidb_afl.single_cell_afl(iquery_cmd, 'apply(build(<v:int64>[i=0:0,1,0], 5), v2, 6)', 2)
print 'get_num_instances(...) =', scidb_afl.get_num_instances(iquery_cmd)
print 'get_array_names(...) =', scidb_afl.get_array_names(iquery_cmd)
print
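A hedged sketch of the plumbing that test exercises; the real scidb_afl module will differ in detail, and only iquery's -c/-p (host/port) and -a/-q (AFL query) flags are taken as given:

import subprocess

def get_iquery_cmd(args):
    cmd = 'iquery '
    if args.host:
        cmd += '-c ' + args.host + ' '
    if args.port:
        cmd += '-p ' + args.port + ' '
    return cmd

def execute_it_return_out_err(cmd):
    p = subprocess.Popen(cmd, shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return p.communicate()                     # (stdout, stderr)

def afl(iquery_cmd, query):
    return execute_it_return_out_err(iquery_cmd + '-aq "' + query + '"')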
|
[
"def test_all(db):\r\n # Shelter Registry\r\n from applications.sahana.modules.test_cr import *\r\n test_cr(db)\r\n # Organisation Registry\r\n from applications.sahana.modules.test_or import *\r\n test_or(db)\r\n # Person Registry\r\n from applications.sahana.modules.test_pr import *\r\n test_pr(db)",
"def test_SLM():\n testing_function('slm')",
"def _test_classes(self):",
"def test_getall2(self):\n pass",
"def test_import():\n from crank import DihedralScanner, QMEngine, PriorityQueue",
"def test_multiple_sqs_list_from_database():\r\n raise NotImplementedError",
"def test_scidb_math_module():\n print '*** testing scidblib.scidb_math...'\n\n a = scidb_math.comma_separated_number(1234.1234)\n assert a == '1,234.1234'\n print 'comma-separate_number(1234.1234) =', a\n\n a = scidb_math.fraction_if_less_than_one(0.125)\n assert a == '1/8'\n print 'fraction_if_less_than_one(0.125) =', a\n\n a = scidb_math.ceil_of_division(8, 3)\n assert a == 3\n print 'ceil_of_division(8, 3) =', a\n\n a = scidb_math.round_up(3248, 2)\n assert a == 3300\n print 'round_up(3248, 2) =', a\n\n a = scidb_math.round_down(3248, 2)\n assert a == 3200\n print 'round_down(3248, 2) =', a\n\n a = scidb_math.snap_to_grid(3161, 0.01, use_binary=False)\n assert a == 3160\n print 'snap_to_grid(3161, 0.01, use_binary=False) =', a\n\n a = scidb_math.snap_to_grid(3161, 0.1, use_binary=False)\n assert a == 3000\n print 'snap_to_grid(3161, 0.1, use_binary=False) =', a\n\n a = scidb_math.snap_to_grid(1021, 0.01, use_binary=True)\n assert a == 1024\n print 'snap_to_grid(1021, 0.01, use_binary=True) =', a\n\n a = scidb_math.geomean([3, 3, 4, 8])\n assert round(a, 10) == 4.1195342878\n print 'geomean([3, 3, 4, 8]) =', a\n print",
"def test_get_sfcrs(self):\n pass",
"def tests(self):\n pass",
"def test_breed(self):\n\t\tpass",
"def test_functions(self):\n self.assertIsNotNone(State.__doc__)",
"def test_test_query(self):\n pass",
"def test_ProcessChain0300(self):\n self.assertTrue(True)",
"def test_classes(self):\n pass",
"def test_shiboken():\n shiboken = pytest.importorskip(\"qtpy.shiboken\")\n\n assert shiboken.isValid is not None\n assert shiboken.wrapInstance is not None\n assert shiboken.getCppPointer is not None\n assert shiboken.delete is not None\n assert shiboken.dump is not None",
"def test_aprs_query(self):\n\n # Create a test service instance and mock some urllib2 methods\n test_service = mxl_balloon_tracker.Direct_Downlink_APRS_Service('direct_downlink_aprs_service', 'tracker', self.standard_device_config)\n test_service._aprs_api_endpoint = \"http://aprstest.local\"\n self.set_mock_request_builder(mock_aprs_success)\n\n # Query the APRS API with a successful request and make sure it correctly parses the response\n test_location = test_service._query_aprs_api()\n self.assertEqual(test_location['timestamp'], 1384119682)\n self.assertEqual(test_location['longitude'], -83.944942)\n self.assertEqual(test_location['latitude'], 42.003933)\n self.assertEqual(test_location['altitude'], 12000)",
"def test_g_et_cob(self):\n pass",
"def prepare_tests(self):",
"def test_find_all(self):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Testing all public methods in scidblib.statistics.
|
def test_statistics_module():
print '*** testing scidblib.statistics...'
data = [3, 3, 4, 8]
a = statistics.pstdev(data)
assert round(a, 10) == 2.0615528128
print 'pstdev =', a
a = statistics.pvariance(data)
assert a == 4.25
print 'pvariance =', a
a = statistics.stdev(data)
assert round(a, 10) == 2.3804761428
print 'stdev =', a
a = statistics.variance(data)
assert round(a, 10) == 5.6666666667
print 'variance =', a
a = statistics.median(data)
assert a == 3.5
print 'median =', a
a = statistics.median_low(data)
assert a == 3
print 'median_low =', a
a = statistics.median_high(data)
assert a == 4
print 'median_high =', a
a = statistics.median_grouped(data)
assert a == 3.5
print 'median_grouped =', a
a = statistics.mean(data)
assert a == 4.5
print 'mean =', a
a = statistics.mode(data)
assert a == 3
print 'mode =', a
print
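The figures above can be double-checked with Python 3's standard-library statistics module, whose API these calls appear to mirror; for data = [3, 3, 4, 8] the sum of squared deviations from the mean 4.5 is 17, so:

import statistics

data = [3, 3, 4, 8]
assert statistics.pvariance(data) == 4.25                      # 17 / 4
assert round(statistics.pstdev(data), 10) == 2.0615528128      # sqrt(17 / 4)
assert round(statistics.variance(data), 10) == 5.6666666667    # 17 / 3
assert round(statistics.stdev(data), 10) == 2.3804761428       # sqrt(17 / 3)
assert statistics.median(data) == 3.5
assert (statistics.median_low(data), statistics.median_high(data)) == (3, 4)
assert statistics.mean(data) == 4.5
assert statistics.mode(data) == 3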
|
[
"def test_show_statistics(self):\n assert show_statistics()",
"def test_get_metrics(self):\n pass",
"def test_get_summary_usage(self):\n pass",
"def calc_statistics(self):\n pass",
"def test_get_archive_statistics(self):\n pass",
"def test_statistics_shortcut(self):\n o = statistics('sqlite:///:memory:', False)\n assert_equals(CkanPackagerStatistics, type(o))",
"def test_stats(self):\n self.assertEqual(self.planet.time_to_go_circumnavigate_by_airbus_a380, Decimal('0.03769911184307751739197556162'))\n self.assertEqual(self.planet.time_to_planet_by_spacecraft, Decimal('100631610615981.6203852879867'))\n self.assertEqual(self.planet.time_to_planet_by_car, Decimal('55490553574007693.66724963385'))\n self.assertEqual(self.planet.size_of_star_in_sky, Decimal(9.872222471748167e-08))\n self.assertEqual(self.planet.colour_of_star, '#9bb0ff')\n self.assertEqual(self.planet.weight_on_planet(5.0), Decimal('5.098581064889641512818435200'))\n self.assertEqual(self.planet.age_in_planet_years(5), Decimal('1.871972864166092458898227719'))",
"def __init__(self):\n self.stat = Statistics()",
"def test_get_stats():\n stats = krux.stats.get_stats(prefix='dummy_app')\n\n # object, and of the right class?\n assert_true(stats)\n assert_false(isinstance(stats, krux.stats.DummyStatsClient))",
"def test_initial_stats(self):\n for ser in self._test_serializers:\n\n stats = ser.stats(details=True)\n\n self.assertEqual(stats['serialized']['bytes'], 0)\n self.assertEqual(stats['serialized']['messages'], 0)\n self.assertEqual(stats['serialized']['rated_messages'], 0)\n\n self.assertEqual(stats['unserialized']['bytes'], 0)\n self.assertEqual(stats['unserialized']['messages'], 0)\n self.assertEqual(stats['unserialized']['rated_messages'], 0)",
"def test_count_all(self):",
"def test_LabelObjectStatisticsBasic(self):\n\n self.delayDisplay(\"Starting test_LabelObjectStatisticsBasic\")\n #\n # first, get some data\n #\n import SampleData\n sampleDataLogic = SampleData.SampleDataLogic()\n mrHead = sampleDataLogic.downloadMRHead()\n ctChest = sampleDataLogic.downloadCTChest()\n self.delayDisplay('Two data sets loaded')\n\n volumesLogic = slicer.modules.volumes.logic()\n\n mrHeadLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, mrHead, \"mrHead-label\" )\n\n warnings = volumesLogic.CheckForLabelVolumeValidity(ctChest, mrHeadLabel)\n\n self.delayDisplay(\"Warnings for mismatch:\\n%s\" % warnings)\n\n self.assertTrue( warnings != \"\" )\n\n warnings = volumesLogic.CheckForLabelVolumeValidity(mrHead, mrHeadLabel)\n\n self.delayDisplay(\"Warnings for match:\\n%s\" % warnings)\n\n self.assertTrue( warnings == \"\" )\n\n self.delayDisplay('test_LabelObjectStatisticsBasic passed!')",
"def summariseResult(self, test):",
"def test_Stats(self):\n resp = self.client.get('/Stats/')\n self.assertEqual(resp.status_code, 200)",
"def test_get_dummy_stats():\n stats = krux.stats.get_stats(prefix='dummy_app', client=False)\n\n # object, and of the right class?\n assert_true(stats)\n assert_true(isinstance(stats, krux.stats.DummyStatsClient))",
"def test_stats_collector(self):\n\n test_params = {'test_time': time.time(),\n 'test_name': self.id(),\n 'json': 0}\n\n sc = self.start_stats('loop',\n test_params=test_params,\n client_id=0)\n\n start_time = time.time()\n\n try:\n time.sleep(self.parami('sleep_time', 3600))\n except KeyboardInterrupt:\n self.log.warning(\"ctats collection was interrupted\")\n\n end_time = time.time()\n\n ops = {'start-time': start_time,\n 'end-time': end_time}\n\n self.end_stats(sc, ops, 'loop')",
"def print_statistics(self):\n pass",
"def getStatistic(self, name):",
"def test_get_platform_metrics(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Obtain coefficients for PMML elements.
|
def _get_coefficients(est, table):
def coefficient_for_category(predictors, category):
predictor = [p for p in predictors if p.get('value') == category]
if not predictor:
return 0
return float(predictor[0].get('coefficient'))
def coefficients_for_field(name, field):
predictors = table.findall(f"*[@name='{name}']")
if field.get('optype') != 'categorical':
if len(predictors) > 1:
raise Exception('PMML model is not linear.')
return [float(predictors[0].get('coefficient'))]
return [
coefficient_for_category(predictors, c)
for c in est.field_mapping[name][1].categories
]
return list(chain.from_iterable([
coefficients_for_field(name, field)
for name, field in est.fields.items()
if table.find(f"*[@name='{name}']") is not None
]))
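To make the traversal concrete, an illustrative toy RegressionTable and the numeric-field path of the extraction above; the field names, the plain dict standing in for est.fields, and the missing PMML namespace are all simplifications:

import xml.etree.ElementTree as ET

table = ET.fromstring(
    "<RegressionTable intercept='1.5'>"
    "<NumericPredictor name='x1' coefficient='0.25'/>"
    "<NumericPredictor name='x2' coefficient='-3.0'/>"
    "</RegressionTable>"
)
fields = {'x1': {'optype': 'continuous'}, 'x2': {'optype': 'continuous'}}

coeffs = [float(table.find(f"*[@name='{name}']").get('coefficient'))
          for name, field in fields.items()
          if field.get('optype') != 'categorical']
print(coeffs)                                  # [0.25, -3.0]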
|
[
"def get_coeffs(weights):\n coeff_num = weights.__len__() - 1\n pub_key = weights.public_key\n\n bn = []\n exp = []\n for i in range(coeff_num):\n bn.append(weights.ciphertextBN(i))\n exp.append(weights.exponent(i))\n ct = ipclCipherText(pub_key.pubkey, bn)\n return IpclPaillierEncryptedNumber(pub_key, ct, exp, coeff_num)",
"def _construct_coefficients(self):\n coeffs = [0]*self.degree\n\n N = float(self.evalpts)\n\n lvals = np.arange(self.evalpts).astype('float')\n xpts = self._c2x(np.cos(np.pi*(lvals + 0.5)/N))\n fpts = np.rollaxis(self.func(xpts, *self.args), -1)\n\n for a in range(self.degree):\n inner = [\n fpts[b] * np.cos(np.pi*a*(lvals[b]+0.5)/N)\n for b in range(self.evalpts)\n ]\n coeffs[a] = 2.0/N * np.sum(inner, axis=0)\n\n coeffs[0] *= 0.5\n self._coeffs = np.array(coeffs)",
"def coefficients(polynomial):\n if not \"args\" in dir(polynomial):\n return [polynomial]\n if polynomial.args == ():\n return [polynomial]\n\n coeff_list = sorted(polynomial.args, key = extract_power)\n degree = extract_power(coeff_list[-1])\n\n pos = 0\n ret = []\n for d in range(0, degree + 1):\n if extract_power(coeff_list[pos]) == d:\n if d == 0:\n ret.append(RealMPFR(str(coeff_list[0]), prec))\n else:\n ret.append(RealMPFR(str(coeff_list[pos].args[0]), prec))\n pos += 1\n else:\n ret.append(0)\n return ret",
"def coefficients(self):\n return self._coefficients.copy()",
"def coeffs(self):\n\t\treturn [self.a,self.b,self.c,self.d]",
"def feature_coefficients(model_mcmc, features):\n # Trace and feature info.\n trace_len = model_mcmc.trace('b_0')[:].shape[0]\n feat_len = len(features)+1\n # Container for coefficients.\n coefs = np.empty((trace_len,feat_len))\n # Extract trace for each coefficient and return.\n for f_i, f in enumerate(['0']+features):\n coefs[:,f_i] = model_mcmc.trace('b_'+f)[:]\n return coefs",
"def get_coeff(expression, model_vars):\n\n # assert(constr.body.polynomial_degree()==1)\n names_model_vars = [v.name for v in model_vars] # all variablenames from the model\n coeff = np.zeros(len(model_vars))\n if type(expression) == pyomo.core.base.constraint.SimpleConstraint:\n expression = expression.body\n repn = generate_canonical_repn(expression)\n for i, coefficient in enumerate(repn.linear or []):\n coeff[names_model_vars.index(repn.variables[i].name)] = coefficient\n return coeff",
"def get_coefficients(lin_op):\r\n # VARIABLE converts to a giant identity matrix.\r\n if lin_op.type is lo.VARIABLE:\r\n coeffs = var_coeffs(lin_op)\r\n # Constants convert directly to their value.\r\n elif lin_op.type is lo.PARAM:\r\n coeffs = [(lo.CONSTANT_ID, lin_op.size, lin_op.data.value)]\r\n elif lin_op.type in [lo.SCALAR_CONST, lo.DENSE_CONST, lo.SPARSE_CONST]:\r\n coeffs = [(lo.CONSTANT_ID, lin_op.size, lin_op.data)]\r\n # For non-leaves, recurse on args.\r\n elif lin_op.type in TYPE_TO_FUNC:\r\n coeffs = TYPE_TO_FUNC[lin_op.type](lin_op)\r\n else:\r\n raise Exception(\"Unknown linear operator.\")\r\n return coeffs",
"def Custom(trafo: ngsolve.fem.CoefficientFunction, jac: ngsolve.fem.CoefficientFunction) -> PML:",
"def regression_coefficients(self, mode='Matrices'):\n\n # Split the data into features and labels\n X = self.data[relevant]\n y = self.data['Label']\n\n # Calculate the coefficients using matrices: \n if mode == 'Matrices':\n\n # Add column of ones so that intercept is calculated\n ones = pd.DataFrame(np.ones(shape=(len(X),1)))\n X = pd.concat([ones, X], axis=1)\n\n # Calculate the expression (X^TX)^(-1)X^Ty\n coefficients = np.linalg.inv(X.transpose().dot(X))\\\n .dot(X.transpose()).dot(y)\n\n\n # Calculate the coefficients using systems of equations\n if mode == 'Systems':\n coefficients = []\n\n # Calculate the difference between each point and its mean\n x_diff = X - X.mean()\n y_diff = y - y.mean()\n\n # Calculate the slope\n first = X.multiply(y, axis=0).sum()\n second = X.mean() * y.sum()\n third = len(X) * (x_diff.pow(2).sum() / len(X))\n\n slope = (first - second) / third\n coefficients.append(slope.values)\n \n # Calculate the intercept\n one = len(X) * X.mean() * X.multiply(y, axis=0).sum()\n two = X.pow(2).sum() * y.sum()\n three = len(X)**2 * (x_diff.pow(2).sum() / len(X))\n\n intercept = -(one - two) / three\n\n # intercept = y.mean() - (slope * X.mean())\n coefficients.append(intercept.values)\n\n return coefficients",
"def _calculate_coeffs(self):\n for joint in self._joint_names:\n self._ovrl_disp[joint] = self._start_pos[joint] - self._end_pos[joint]\n self._coeffs[joint] = [(2*self._ovrl_disp[joint])/(self._motion_time ** 3), (3*-self._ovrl_disp[joint])/(self._motion_time ** 2)]",
"def _get_length_coeffs(self, mol: Molecule) -> defaultdict:\n if hasattr(mol, 'id') and mol.id is not None:\n if mol.id in self._beta_coeffs:\n return self._beta_coeffs[mol.id]\n\n coeffs = defaultdict(float)\n\n for bond in mol.get_all_edges():\n atom1 = bond.atom1\n atom2 = bond.atom2\n symbol1 = atom1.element.symbol\n symbol2 = atom2.element.symbol\n\n c = np.exp(-self.exp_coeff * np.linalg.norm(atom1.coords - atom2.coords))\n k = symbol1 if symbol1 == symbol2 else tuple(sorted([symbol1, symbol2]))\n coeffs[k] += c\n\n if hasattr(mol, 'id'):\n self._beta_coeffs[mol.id] = coeffs\n return coeffs",
"def spectral_coefficients(self) -> tuple:\n if self.modes is None:\n return\n f_coeffs = []\n g_coeffs = []\n for mode in self.modes:\n coeffs = qnms.KerrMode(mode).coefficients\n f_coeffs.append(coeffs[0])\n g_coeffs.append(coeffs[1])\n return array(f_coeffs), array(g_coeffs)",
"def calculateElementCoefficients(self):\n\n for cj in range(self.nc):\n self.u[cj].getValues(self.q[('v',cj)],\n self.q[('u',cj)])\n if ('grad(u)',cj) in self.q:\n self.u[cj].getGradientValues(self.q[('grad(v)',cj)],\n self.q[('grad(u)',cj)])\n\n #can skip this after first call\n stfuncs.RE_NCP1_evaluateElementCoefficients_Linear(self.coefficients.rho,\n self.coefficients.gravity,\n self.coefficients.sdInfo[(0,0)][0],\n self.coefficients.sdInfo[(0,0)][1],\n self.coefficients.Ksw_types,\n self.nSpace_global,\n self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.mesh.elementNeighborsArray,\n self.mesh.elementMaterialTypes,\n self.q[('f_lin',0)],\n self.q[('a_lin',0,0)])\n\n stfuncs.RE_NCP1_evaluateElementCoefficients_VGM(self.coefficients.rho,\n self.coefficients.beta,\n self.coefficients.gravity,\n self.coefficients.vgm_alpha_types,\n self.coefficients.vgm_n_types,\n self.coefficients.thetaR_types,\n self.coefficients.thetaSR_types,\n self.nSpace_global,\n self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.mesh.elementNeighborsArray,\n self.mesh.elementBarycentersArray,\n self.mesh.elementMaterialTypes,\n self.nDOF_trial_element[0],\n self.u[0].femSpace.dofMap.l2g,\n self.u[0].dof,\n self.q['x'],\n self.q[('u',0)],\n self.q[('m',0)],\n self.q[('dm',0,0)],\n self.q[('r',0)],\n self.q[('k_r',0,0)],\n self.q[('dk_r',0,0,0)],\n self.q[('k_r_up',0,0)])\n\n if self.movingDomain and self.coefficients.movingDomain:\n self.coefficients.updateToMovingDomain(self.timeIntegration.t,self.q)\n if self.timeTerm:\n self.timeIntegration.calculateElementCoefficients(self.q)\n #cek need to clean up calculation of dimless numbers, might as well do it all the time and pass to subgrid error\n #mwf figure out what to do with this\n #what happens if stabilization didn't compute cfl?\n for ci in range(self.nc):\n #for two phase flow would need this\n self.q[('dphi',ci,ci)].fill(1.0)\n if self.sd:\n cfemIntegrals.calculateDimensionlessNumbersADR_sd(self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.coefficients.sdInfo[(ci,ci)][0],self.coefficients.sdInfo[(ci,ci)][1],\n self.elementEffectiveDiametersArray,\n self.q[('df',ci,ci)],\n self.q[('a',ci,ci)],\n self.q[('dphi',ci,ci)],\n self.q[('dr',ci,ci)],\n self.q[('dmt',ci,ci)],\n self.q[('pe',ci)],\n self.q[('cfl',ci)])\n else:\n cfemIntegrals.calculateDimensionlessNumbersADR(self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.elementEffectiveDiametersArray,\n self.q[('df',ci,ci)],\n self.q[('a',ci,ci)],\n self.q[('dphi',ci,ci)],\n self.q[('dr',ci,ci)],\n self.q[('dmt',ci,ci)],\n self.q[('pe',ci)],\n self.q[('cfl',ci)])\n\n if self.shockCapturing is not None:\n self.shockCapturing.calculateNumericalDiffusion(self.q)",
"def _coefficients(self):\n\n self.first_eq = self.general_solution.subs(t, self.t0) - self.x0\n self.second_eq = self.general_solution.subs(t, self.t1) - self.x1\n\n self.__make_Cs()\n self.__make_equations()\n\n coefficients = solve(self.equations, self.Cs)\n self.coefficients = coefficients",
"def loadCoefficients(path):\n # FILE_STORAGE_READ\n cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_READ)\n\n # note we also have to specify the type to retrieve other wise we only get a\n # FileNode object back instead of a matrix\n camera_matrix = cv_file.getNode(\"camera_matrix_plex\").mat()\n dist_matrix = cv_file.getNode(\"dist_coeff_plex\").mat()\n\n # Debug: print the values\n # print(\"camera_matrix : \", camera_matrix.tolist())\n # print(\"dist_matrix : \", dist_matrix.tolist())\n\n cv_file.release()\n return [camera_matrix, dist_matrix]",
"def nested_coefficients(self):\n origin_coeff = self.coefficients\n res = self.coefficients[:]\n\n # If there are less than 3 non_zero coefficients, just return the standard form coefficients.\n num_non_zero = len([1 for coeff in origin_coeff if coeff is not 0])\n if num_non_zero < 3:\n return res\n\n # Find the first non-zero coefficient starting at degree one\n prev_nonzero_idx = 1\n while _is_almost_zero(res[prev_nonzero_idx]):\n prev_nonzero_idx += 1\n\n if prev_nonzero_idx < len(res) - 1:\n cur_nonzero_idx = prev_nonzero_idx + 1\n while cur_nonzero_idx < len(res):\n while _is_almost_zero(res[cur_nonzero_idx]):\n cur_nonzero_idx += 1\n # Found the next non-zero coefficient\n res[cur_nonzero_idx] = origin_coeff[cur_nonzero_idx] / origin_coeff[prev_nonzero_idx]\n prev_nonzero_idx = cur_nonzero_idx\n cur_nonzero_idx += 1\n if cur_nonzero_idx >= len(res):\n return res\n\n return res",
"def polyFeatures(X, p):\n X_poly = np.zeros((X.size, p))\n for i in range(p):\n X_poly[:, [i]] = X**(i+1)\n return X_poly",
"def compute_coefficients_ref(ks):\n coeffs = [1]\n for k in ks:\n coeffs = zipWith(lambda x,y:x+y,coeffs+[0],[0]+[-k*c for c in coeffs])\n return coeffs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Opens a .bsx file and loads the values into the propellant parameters. Unused
|
def open_bsx_file(self, filename):
if filename != '':
f = open(filename, 'r')
file = f.read()
attr = file.split('"')
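            # Splitting the file text on double quotes leaves each attribute name immediately followed by its value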
for i in range(0, len(attr)):
if attr[i].find("Density") != -1:
self.values["rhop"] = float(attr[i+1])*27.6799
if attr[i].find("BallisticA") != -1:
self.values["a"] = float(attr[i+1])
if attr[i].find("BallisticN") != -1:
self.values["n"] = float(attr[i+1])
if attr[i].find("SpecificHeatRatio") != -1:
self.values["k"] = float(attr[i+1])
if attr[i].find("MolarMass") != -1:
self.values["MM"] = float(attr[i+1])
for i2 in range(0, len(attr)):
if attr[i2].find("ISPStar") != -1:
print(self.values["k"])
print("\n")
print((2/(self.values["k"]+1))**((self.values["k"] + 1)/(self.values["k"]-1)))
print("\n")
print(self.values["k"] * (2/(self.values["k"]+1))**((self.values["k"] + 1)/(self.values["k"]-1)))
self.values["T"] = float(attr[i2+1])**2 * 9.81 * self.values["k"] * (2/(self.values["k"]+1))**((self.values["k"] + 1)/(self.values["k"]-1)) / self.R_specific
f.close()
self.engine.update(self.values["ri"], self.values["ro"], self.values["l"], self.values["rt"], self.values["re"])
self.tspan = linspace(0, 2, num=int(self.values["tstepnum"]))
self.R_specific = self.R / self.values["MM"]
|
[
"def loadParameters(self, filepath) -> retval:\n ...",
"def load_param_from_pcs_file(self, pcs_path):\n self.parameter_space = Parameters.load_param_from_pcs_file(pcs_path)",
"def browseSettingFile(self):\n #Open file:\n self.settingsFilename = askopenfilename(filetypes=[('settings file','*.pkl')])\n self.settings.settings = templateLoader(self.settingsFilename)\n \n #Update settings data:\n for key in self.entries:\n var = self.entries[key]\n #Supposed to update fields:\n var.set(self.settings.settings[key].value)\n for key in self.buttons:\n var = self.buttons[key]\n #Supposed to update button states:\n var.set(self.settings.settings[key].value)",
"def load_fixed_params(self, entry_browse, grid):\n filename = tkFileDialog.askopenfilename(initialdir=\"./\", title=\"Select file\",\n filetypes=((\"csv files\", \"*.csv\"), (\"all files\", \"*.*\")))\n if filename:\n entry_browse.insert(0, filename)\n helpers.destroy_slaves(grid)\n self.model.fixed_params = helpers.read_fixed_params_from_file(filename, [\"Name, Values, Units\"])\n helpers.create_entry_table(self.model.fixed_params, grid)",
"def load(self, filename=None):\n if filename == None:\n default = f\"{os.getcwd()}/stacksmash\"\n path = get_filepath(\"of your saved exploit\", default=default)\n else:\n path = get_filepath(\"\", already_exists=True, path=filename)\n with open(path, \"r\") as file:\n lines = file.readlines()\n for line in lines:\n if re.search(r'\".+\" : \".+\"', line):\n words = line.split(\"\\\"\")\n file_key, file_val = words[1], translate_type(words[3])\n for key in list(self.__dict__.keys()):\n if key == file_key:\n setattr(self, key, file_val)\n break\n print(\"\\nI loaded the following settings:\\n\\n\", self, sep=\"\")\n input(\"\\n\\nPress ENTER to continue\")",
"def OpenForReading(self):\n self.fp = open(self.filename,\"rb\")\n codingParams =self.ReadFileHeader() # this leaves the file pointer at the start of data and returns a CodingParams object w/ data from header\n return codingParams",
"def load_params(self):\n self.autoencoder.load_parameters('/Users/wenqin/Documents/GitHub/grade-12-assignments-wenqinYe/Culminating/parameters/encoder')",
"def open(filename):\n infile = open(filename,'r')\n state = ''\n for line in infile:\n if '#lower' in line:\n state = 'lower'\n elif '#upper' in line:\n state = 'upper'\n elif '#sizes' in line:\n state = 'sizes'\n elif '#' in line:\n state = 'other'\n elif state == 'lower':\n lower = int(line.strip('\\n'))\n elif state == 'upper':\n upper = int(line.strip('\\n'))\n elif state == 'sizes':\n fragmentsizes = np.array(map(float,line.rstrip(\"\\n\").split(\"\\t\")))\n try:\n new = FragmentSizes(lower, upper, vals = fragmentsizes)\n except NameError:\n raise Exception(\"FragmentDistribution decriptor file appeas to be missing some\\\nneeded components\")\n infile.close()\n return new",
"def _paramsFileHead():\n\n str = \\\n\"\"\"\n# ----------------------------------------------------------------------\n# Numenta Platform for Intelligent Computing (NuPIC)\n# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from\n# Numenta, Inc. a separate commercial license for this software code, the\n# following terms and conditions apply:\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see http://www.gnu.org/licenses.\n#\n# http://numenta.org/licenses/\n# ----------------------------------------------------------------------\n\n## This file defines parameters for a prediction experiment.\n\n###############################################################################\n# IMPORTANT!!!\n# This params file is dynamically generated by the RunExperimentPermutations\n# script. Any changes made manually will be over-written the next time\n# RunExperimentPermutations is run!!!\n###############################################################################\n\n\nfrom nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription\n\n# the sub-experiment configuration\nconfig ={\n\"\"\"\n\n return str",
"def HandleLoadFile(self):\n fn = str(pg.QtGui.QFileDialog.getOpenFileName(\n caption=\"Load an Igor File\",\n directory=\"\",\n filter=\"Packed Experiment Files (*.pxp)\"))\n if fn == '':\n return\n # XXX add in support for more than just pxp\n self.LoadPxpAndAddToModel(fn)",
"def load_paramters(self, parameter_file):\n return 0",
"def open(self):\n \n if self.asc == None:\n self.asc = False \n\n if not self.asc:\n self.file = open(self.filename, \"rb\")\n \n if self.validate():\n self.deSerializeFile(self.file)\n else:\n raise PSFInvalid(\"Invalid PSF file\")\n else:\n newpsfobj = psfasc.parse(\"psfasc\", open(self.filename).read())\n self.header = newpsfobj.header \n self.types = newpsfobj.types\n self.sweeps = newpsfobj.sweeps\n self.traces = newpsfobj.traces\n self.values = newpsfobj.values\n self.lastid = newpsfobj.lastid\n self.verbose = newpsfobj.verbose",
"def load_auction_p(fname):\n return pickle.load(open(fname, \"rb\"))",
"def load_file(self, file_path):\n ...",
"def loadData(fname):\r\n (grating, params, lines, meta) = pickle.load(open(fname, \"r\"))\r\n return grating, params, lines, meta",
"def load_params(file_name):\n\n try:\n open(file_name, \"r\")\n except FileNotFoundError:\n s = \"algorithm.paremeters.load_params\\n\" \\\n \"Error: Parameters file not found.\\n\" \\\n \" Ensure file extension is specified, e.g. 'regression.txt'.\"\n raise Exception(s)\n\n with open(file_name, 'r') as parameters:\n # Read the whole parameters file.\n content = parameters.readlines()\n\n for line in [l for l in content if not l.startswith(\"#\")]:\n\n # Parameters files are parsed by finding the first instance of a\n # colon.\n split = line.find(\":\")\n\n # Everything to the left of the colon is the parameter key,\n # everything to the right is the parameter value.\n key, value = line[:split], line[split+1:].strip()\n\n # Evaluate parameters.\n try:\n value = eval(value)\n\n except:\n # We can't evaluate, leave value as a string.\n pass\n\n # Set parameter\n params[key] = value",
"def load_fixed_params_sim(self):\n self.load_fixed_params(self.entry_browse_fixed_sim, self.table_fixed_params_sim)",
"def loadbsf(filename, plot=1, axs=None):\n\n MNC = 32 # Maximum number of channels defined by AMTI\n lbforce_per_N = 1.0/4.44822162 # AMTI conversion factor (version 105)\n # this constant is derived from:\n # g = 9.80665 # standard acceleration of free fall in m/s2 by ISO 80000-3:2006\n # onelb = 0.45359237 # 1 lb in kg by International yard and pound\n\n plot = int(plot) # in case of command line input\n\n if filename == 'shfile': # memory-mapped file by NetForce\n try:\n # bug in Python mmap: file can't be opened with unknown size\n # read at least up to the first instrument:\n nbytes = 4 + 968 + 948\n f = mmap.mmap(fileno=-1, length=nbytes, tagname='shfile') \n f.seek(0, 0)\n except IOError as err:\n print('{0} I/O error: {1}'.format(filename, err))\n f.close()\n return\n else: # file in the hard disk\n try:\n f = open(filename, 'rb') \n except IOError as err:\n print('{0} I/O error: {1}'.format(filename, err))\n f.close()\n return\n\n # read Main header\n mh = ReadMainHeader(f)\n \n if filename == 'shfile':\n try:\n # try to open for all bytes in file:\n nbytes = 4 + mh.size_header + 948*mh.instHeadCount + 8*int(mh.numDatasets*mh.TNC)\n f = mmap.mmap(fileno=-1, length=nbytes, tagname='shfile')\n except IOError as err:\n pass\n try:\n # instrument header may have size < 948, do not try to open for all bytes yet:\n nbytes = 4 + mh.size_header + 948*mh.instHeadCount + 4*int(mh.numDatasets*mh.TNC)\n f = mmap.mmap(fileno=-1, length=nbytes, tagname='shfile') \n except IOError as err:\n print('{0} I/O error: {1}'.format(filename, err))\n f.close()\n return\n\n # read Instrument header\n ih = []\n f.seek(4 + mh.size_header, 0) # advances file to the first instrument header\n for i in range(mh.instHeadCount):\n ih.append(ReadInstHeader(f, MNC, mh.TNC))\n # go to the next instrument header\n f.seek(4 + mh.size_header + ih[i].size_header - f.tell(), 1) \n\n # check the file size and adjust for the shfile:\n current = f.tell()\n f.seek(0, 2)\n filesize = f.tell()\n if filesize - current != 8*int(mh.numDatasets*mh.TNC):\n if filename == 'shfile': # open the file for all its bytes\n try:\n nbytes = current + 8*int(mh.numDatasets*mh.TNC)\n f = mmap.mmap(fileno=-1, length=nbytes, tagname='shfile')\n except:\n print('Error: unnexpected number of bytes for data in %s.' %filename)\n f.close()\n return\n else:\n print('Error: unnexpected number of bytes for data in %s.' %filename)\n f.close()\n return\n f.seek(current, 0)\n\n # read data\n try:\n data = unpack('<'+int(mh.numDatasets*mh.TNC)*'d', f.read(int(mh.numDatasets*mh.TNC)*8))\n except:\n print('Error reading data in %s.' %filename)\n f.close()\n return\n data = np.array(data).reshape((mh.numDatasets, mh.TNC))\n # In NetForce file, data is always in Imperial units, scale factor for force platform:\n scale = np.array([1, 1, 1, 0.0254, 0.0254, 0.0254]) / lbforce_per_N\n for i in range(mh.num_of_plats):\n # In the NetForce file version 105, raw data is already converted\n data[:, ih[i].chans] = data[:, ih[i].chans] * scale\n\n f.close()\n\n if plot:\n plotGRF(data, mh, ih, axs=None)\n\n return data, mh, ih",
"def _load_param_header(cls, filepath, break_symbol='-----'):\r\n \r\n index = 0\r\n params = {}\r\n \r\n with open(filepath) as f:\r\n for line in f.readlines():\r\n index += 1\r\n if break_symbol in line:\r\n break\r\n else:\r\n key, val = [ii.strip() for ii in line.split(\":\")]\r\n params[key] = val\r\n return index, params"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds a new grain
|
def add_grain(self, ri, l):
self.engine.add_grain(self.engine.ri, ri, l)
|
[
"def add_grain_file(self, filename):\r\n f = open(filename, 'r')\r\n fin = f.read()\r\n grains = fin.split(\"grain,\")\r\n for i in grains:\r\n grain = i.split(\",\")\r\n if grain[0] != '':\r\n self.add_grain(float(grain[0]), float(grain[1]))\r\n f.close()",
"def type_of_grain(self, type_of_grain):\n\n self._type_of_grain = type_of_grain",
"def generate_grain(self):\n if len(self.progeny) < default.MAX_GRAINS_PER_GEN:\n if self.curr_lag_jitter != 0:\n lag = self.curr_lag + random.randrange(0, self.curr_lag_jitter)\n else:\n lag = self.curr_lag\n content = self.input_connect[0].delay_line.get_segment(lag=lag,\n duration=self.curr_dur)\n envelope = self.envelope_generator(self.curr_dur)\n self.progeny.append(Grain(generator=self, content=content, \n envelope=envelope, id_number = len(self.progeny)))\n self.dur_since_last_birth = 0",
"def add(shard_mapping_id, shard_id, persister=None):\n shard = Shards.fetch(shard_id)\n persister.exec_stmt(\n HashShardingSpecification.INSERT_HASH_SPECIFICATION, {\n \"params\":(\n shard_mapping_id,\n shard.group_id,\n shard_id\n )\n }\n )",
"def _add_gust_object(self, gust: GUST) -> None:\n key = gust.sid\n assert key not in self.gusts\n assert key > 0\n self.gusts[key] = gust\n self._type_to_id_map[gust.type].append(key)",
"def add(shard_mapping_id, lower_bound, shard_id, persister=None):\n persister.exec_stmt(\n RangeShardingSpecification.INSERT_RANGE_SPECIFICATION, {\n \"params\":(\n shard_mapping_id,\n lower_bound,\n shard_id\n )\n }\n )\n return RangeShardingSpecification(\n shard_mapping_id,\n lower_bound,\n shard_id\n )",
"def add_region(self, region):\n self.metrics_dictionary[\"RegionalMasking\"][\n self.get_region_name_from_region(region)\n ] = region",
"def set_grain_map(self, grain_map, voxel_size):\n self.grain_map = grain_map\n self.voxel_size = voxel_size",
"def consumeGrain(self):\r\n # Consume grain for all workers\r\n self.model.totalGrain -= self.workers * 160\r\n self.grain -= self.workers * 160 \r\n \r\n # Decrement amount of workers if grain is less than or equal to zero (also impacts overall population numbers)\r\n if (self.grain <= 0):\r\n self.model.totalGrain -= self.grain # Add back negative grain to prevent negatve grain in model and incorrect grain representation\r\n self.grain = 0\r\n self.workers -= 1\r\n self.settlement.population -= 1\r\n self.model.totalPopulation -= 1\r\n\r\n # Check if there are still workers in the Household\r\n if self.workers <= 0:\r\n # Removes ownership of all fields\r\n for f in self.fields:\r\n f.owned = False\r\n # Decrements the amount of households and removes this household from the simulation\r\n self.settlement.noHouseholds -= 1\r\n self.model.schedule.remove(self)",
"def add_light(self, light):\n self.light_list[self.light_count] = light\n self.light_count += 1",
"def add_population(self, population):",
"def add_region(self, acc, start, end):\n if not self._finalised:\n self._regions[acc].append((start, end))\n self._signatures = {}\n else:\n raise RuntimeError()",
"def add_genome(self, genome):\n assert isinstance(genome, Genome)\n assert type(self.genomes) is list\n\n self.genomes.append(genome)",
"def add_ingredient(self, ing):\n self.ingredients.append(ing)",
"def add_flower(self, **kwargs):\n f = Flower(self.my_manager, **kwargs)\n self.flower_list.append(f)\n self.write_list_to_file()\n return f",
"def add_ingredient(self, ingredient):\n self.ingredients.append(ingredient)",
"def add(self, name, **kwargs):\n if name in self.stats_pool:\n if self.is_resumed: # skip if resumed\n return\n raise ValueError(f'Stats `{name}` has already existed!')\n self.stats_pool[name] = SingleStats(name, **kwargs)",
"def add_instance(self, instance):\n self.factories.append(instance)",
"def addLight(self, l):\n self.lights.append(l)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets engine grains based on a file. Unused
|
def add_grain_file(self, filename):
f = open(filename, 'r')
fin = f.read()
grains = fin.split("grain,")
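        # Each "grain," entry holds the two values passed on to add_grain (ri, l)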
for i in grains:
grain = i.split(",")
if grain[0] != '':
self.add_grain(float(grain[0]), float(grain[1]))
f.close()
|
[
"def config_armies(filename: str) -> None:\n game = Game()\n reader = Reader()\n armies = reader.read(filename)\n game.start_step = reader.start_from_step\n for army in armies:\n game.add_army(army)\n game.start()",
"def load_grain(grains, k):\n grain = -np.ones(dims)\n ind = grains[k][0]-1\n [x, y, z] = np.unravel_index(ind, dims, order='F')\n val = grains[k][1]\n grain[y,x,z] = val\n verts, faces = measure.marching_cubes_classic(grain, 0, spacing=(1, 1, 1))\n return verts, faces",
"def __init__(self, grains=None, filename=None, csym=None, ngrain=100,\n cdim=[1.,1.,1.], cang=[90.,90.,90.], ssym=False, epf=None):\n # The grain aggregte can be given either through a file or #\n # passing an array of them to the class directly. #\n # either grains or filename #\n # if none of them is given, a 500-grains file is generated #\n # and returns its grains to the global gr variable. #\n\n if grains==None and filename==None and epf==None:\n print(\" ****************************** \")\n print(\" Since no argument is passed,\")\n print(\" 100 random grains are created\")\n print(\" ****************************** \\n\")\n a = re(ngrain=ngrain)\n gr = np.array(a.euler).transpose()\n gr = np.array([gr[1],gr[2],gr[3]]).transpose()\n temp = []\n for i in range(len(gr)):\n temp.append([gr[i][0],gr[i][1],gr[i][2],0.01])\n self.gr = np.array(temp)\n\n self.epf = epf # global\n\n if grains!=None:\n self.gr = np.array(grains)\n elif filename!=None:\n self.gr = np.genfromtxt(fname=filename,skiprows=4)\n pass\n elif epf!=None: # None is the default for epf\n \"\"\"\n experimental pole figures..\n # available format:\n - UXD\n - steglich\n - bruker\n - epf*\n \"\"\"\n if type(epf).__name__=='list': self.epf_fn = epf\n elif type(epf).__name__=='str': self.epf_fn = [epf]\n elif epf==True:\n fn = [] # list of file names\n print('type the experimental pole figure file names')\n print(\"To finish input, press enter\")\n while True:\n dum = input(\">>> \")\n if len(dum)==0: break\n fn.append(dum)\n pass\n self.epf_fn = fn\n pass\n else: raise IOError('Unexpected epf type found')\n\n ## check if the file name is correct ##\n for i in range(len(self.epf_fn)):\n if not(os.path.isfile(self.epf_fn[i])):\n raise IOError(\"Could not find %s\"%self.epf_fn[i])\n pass\n ## --------------------------------- ##\n\n ## POLE FIGURE MODE --------------------------------------\n print(\"Type the experimental polfe figure mode\")\n print(\"Available options:\", end=' ') #continuation\n print(\"bruker, steglich, epf (default: %s)\"%'epf')\n epf_mode = input(\" >>>\" )\n if len(epf_mode)==0:\n epf_mode='steglich'\n pass\n ##---------------------------------------------------------\n\n self.grid = []; self.hkl = []\n ## more than one pf can be included.\n npole_per_file = []\n if epf_mode=='epf': self.max_khi = [] #Available only for epf_mode yet.\n\n for i in range(len(self.epf_fn)):\n if epf_mode=='epf':\n data, maxk, hkl = epfformat(\n mode=epf_mode,\n filename=self.epf_fn[i]\n )\n # one file may include multiple poles\n for i in range(len(data)):\n self.grid.append(data[i])\n self.max_khi.append(maxk[i])\n self.hkl.append(hkl)\n npole_per_file.append(len(data)) # of pole per a file\n\n else:\n data = epfformat(\n mode=epf_mode,\n filename=self.epf_fn[i]\n )\n self.grid.append(\n data\n )\n self.hkl.append(None)\n self.grid = np.array(self.grid)\n self.epf_mode=epf_mode\n\n ## EXPERIMENTAL POLE FIGURE\n ## ---------------------------------------------------------- ##\n ## POLE FIGURES BINNINGED FROM THE POLYCRYSTALLINE AGGREGATES ##\n\n if epf==None:\n dat = self.gr.transpose()\n phi1 = dat[0]; phi = dat[1]; phi2 = dat[2]\n\n print('phi1: %i ~ %i'%(\n int(round(min(dat[0]/90.)))*90, int(\n round(max(dat[0]/90.)))*90))\n print('phi: %i ~ %i'%(\n int(round(min(dat[1]/90.)))*90, int(\n round(max(dat[1]/90.)))*90))\n print('phi2: %i ~ %i'%(\n int(round(min(dat[2]/90.)))*90, int(\n round(max(dat[2]/90.)))*90))\n ph1min, ph1max= int(\n round(min(dat[0]/90.)))*90, int(\n round(max(dat[0]/90.)))*90\n phmin, phmax = int(\n 
round(min(dat[1]/90.)))*90, int(\n round(max(dat[1]/90.)))*90\n ph2min, ph2max= int(\n round(min(dat[2]/90.)))*90, int(\n round(max(dat[2]/90.)))*90\n\n ## symmetric multiplication over self.gr is performed unless ph1max==360\n \"\"\"\n Sample symmetry application is pending,\n because it is done over rve (refer to cmb.py)\n \"\"\"\n # nrot = int(round(360./ph1max))\n # if ssym==True:\n # if nrot==4: self.gr = planar_sym(gr=self.gr, nrot=2)\n # else: raise IOError, \"not ready for other nrot than 4\"\n # pass\n\n ### environments global variables\n #1 symmetry\n self.csym = csym\n self.ngr = len(self.gr)\n self.cdim = cdim\n self.cang = cang\n pass\n pass",
"def set_bg_file(self):\n # ic()\n fname, _ = QFileDialog().getOpenFileName()\n if fname != '':\n self.parameters.child('BG').child('File').setValue(fname)\n self.bg_file = fname",
"def set_grain_map(self, grain_map, voxel_size):\n self.grain_map = grain_map\n self.voxel_size = voxel_size",
"def load_set(self, path):\n \n basepath = os.path.split(path)[0]\n \n file = open(path, \"r\")\n for line in file:\n tokens = line.split()\n if len(tokens) > 0:#skip blank lines\n if len( tokens) != 3:\n raise Exception( \"Invalid asset line {\" + line + \"}\")\n\n type = tokens[0] \n tag = tokens[1]\n path = os.path.join( basepath, tokens[2])\n self._assets[ tag] = _Asset( path, type)",
"def read_txt_grains(fname):\n\n # Note: (21) fields named below with an underscore are not yet used\n #\n # Fields from grains.out header:\n \"\"\"grain ID completeness chi2\n xi[0] xi[1] xi[2]\n tVec_c[0] tVec_c[1] tVec_c[2]\n vInv_s[0] vInv_s[1] vInv_s[2] vInv_s[4]*sqrt(2) vInv_s[5]*sqrt(2) vInv_s[6]*sqrt(2)\n ln(V[0,0]) ln(V[1,1]) ln(V[2,2]) ln(V[1,2]) ln(V[0,2]) ln(V[0,1])\"\"\"\n\n # Use shortened names in construction of numpy data type.\n\n d = {'names': ('id', 'completeness', 'chisq',\n 'ori_0', 'ori_1', 'ori_2',\n 'cen_0', 'cen_1', 'cen_2',\n 'vi0', 'vi1', 'vi2', 'vi3', 'vi4', 'vi5',\n 'lnV00', 'lnV11', 'lnV22', 'lnV12', 'lnV02', 'lnV01'),\n 'formats': ('i4',) + 20*('f4',)}\n\n return np.loadtxt(fname, dtype=d)",
"def load(self, filename=None):\n if filename == None:\n default = f\"{os.getcwd()}/stacksmash\"\n path = get_filepath(\"of your saved exploit\", default=default)\n else:\n path = get_filepath(\"\", already_exists=True, path=filename)\n with open(path, \"r\") as file:\n lines = file.readlines()\n for line in lines:\n if re.search(r'\".+\" : \".+\"', line):\n words = line.split(\"\\\"\")\n file_key, file_val = words[1], translate_type(words[3])\n for key in list(self.__dict__.keys()):\n if key == file_key:\n setattr(self, key, file_val)\n break\n print(\"\\nI loaded the following settings:\\n\\n\", self, sep=\"\")\n input(\"\\n\\nPress ENTER to continue\")",
"def load_graphics_config(self):\n if os.path.exists(self.graphics_config_filename):\n try:\n logging.getLogger(\"HWR\").debug(\"GraphicsManager: Loading graphics \" + \\\n \"from configuration file %s\" % self.graphics_config_filename)\n graphics_config_file = open(self.graphics_config_filename)\n graphics_config = eval(graphics_config_file.read())\n for graphics_item in graphics_config:\n if graphics_item[\"type\"] == \"point\":\n point = self.create_centring_point(\\\n None, {\"motors\": graphics_item[\"cpos\"]})\n point.index = graphics_item[\"index\"]\n cpos = point.get_centred_position()\n cpos.set_index(graphics_item[\"cpos_index\"])\n for graphics_item in graphics_config:\n if graphics_item[\"type\"] == \"line\":\n start_point = self.get_point_by_index(\\\n graphics_item[\"start_point_index\"])\n end_point = self.get_point_by_index(\\\n graphics_item[\"end_point_index\"])\n self.create_line(start_point, end_point)\n self.de_select_all()\n graphics_config_file.close()\n except:\n logging.getLogger(\"HWR\").error(\"GraphicsManager: Unable to load \" + \\\n \"graphics from configuration file %s\" % self.graphics_config_filename)",
"def SetPregeneratedProfiles(self, files):\n logging.info('Using pregenerated profiles')\n self._pregenerated_profiles = files",
"def import_ica(self, fname):\n self.current[\"ica\"] = mne.preprocessing.read_ica(fname)",
"def _setup_skins( self ):\r\n self.skins = os.listdir( os.path.join( BASE_RESOURCE_PATH, \"skins\" ) )\r\n try: self.current_skin = self.skins.index( self.settings[ \"skin\" ] )\r\n except: self.current_skin = 0",
"def scene_setting_init():\n sce = bpy.context.scene.name\n bpy.data.scenes[sce].render.engine = 'CYCLES'\n bpy.data.scenes[sce].cycles.film_transparent = True\n\n #output\n bpy.data.scenes[sce].render.image_settings.color_mode = 'RGB'\n bpy.data.scenes[sce].render.image_settings.color_depth = '16'\n bpy.data.scenes[sce].render.image_settings.file_format = 'PNG'\n\n #dimensions\n #bpy.data.scenes[sce].render.resolution_x = g_resolution_x\n #bpy.data.scenes[sce].render.resolution_y = g_resolution_y\n #bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage",
"def _import(self):\n\t\tbpy.ops.import_scene.gltf(filepath=self.filename)",
"def __init__(self, filepath, filetype):\n\n try:\n self.filepath = filepath\n self.name, self.base, self.env, self.light, self.spots = filepath.replace(filetype, '').split('-')\n self.name = self.name.split('/')[-1]\n self.base = float(self.base)\n self.spots = not 'no' in self.spots\n self.z = ImageLayer.FOREGROUND if 'foreground' in filepath else ImageLayer.BACKGROUND\n\n except Exception as e:\n\n print('Error ', e)\n print('on file ', filepath, filetype)\n raise Exception('Error creating the image layer!')",
"def instanceFromFile(file):\n graphMode = True\n\n graph = dict()\n obligationsSet = []\n with open(file) as f :\n for line in f:\n\n if line == \"\\n\":\n graphMode = False\n\n\n elif graphMode:\n if line[-1] == '\\n':\n line = line[:-1]\n\n info = line.split(\":\")\n graph[info[0]] = set(info[1:])\n\n else:\n if line[-1] == '\\n':\n line = line[:-1]\n obligation = set(line.split(\",\"))\n obligationsSet.append(obligation)\n\n\n if obligationsSet == []:\n printW(\"Warning : No obligation set found. It will be automatically provide\")\n obligationsSet, n = obligationsGenerator(graph, minObligations = 1, maxObligations = len(graph.keys()))\n else :\n n = len(obligationsSet)\n\n meta = {\"type\" : \"From file\", \"vertices\" : len(graph.keys()), \"obligations\" : len(obligationsSet)}\n return (graph, obligationsSet, meta)",
"def load_class_grid(self,filepath):\r\n self.class_grid = np.load(filepath)",
"def set_experiment_file(self, filename):\n self.experiment_file = filename",
"def my_gsettings(\n picture_filename='/home/user/Wallpaper/good-art.jpg',\n mytheme='BlackMATE'\n):\n\n set_config(\n yaml.load(\n get_local_settings(\n picture_filename,\n mytheme)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function fixes timestamps for sample events and generates random ids for traces, spans, and the event id. It is largely based on sentry.utils.samples.load_data, but simpler
|
def fix_event_data(data):
timestamp = datetime.utcnow() - timedelta(minutes=1)
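    # Drop the sub-millisecond part so the timestamp is truncated to whole milliseconds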
timestamp = timestamp - timedelta(microseconds=timestamp.microsecond % 1000)
timestamp = timestamp.replace(tzinfo=pytz.utc)
data["timestamp"] = to_timestamp(timestamp)
start_timestamp = timestamp - timedelta(seconds=3)
data["start_timestamp"] = to_timestamp(start_timestamp)
trace = uuid4().hex
span_id = uuid4().hex[:16]
data["event_id"] = uuid4().hex
data["contexts"]["trace"]["trace_id"] = trace
data["contexts"]["trace"]["span_id"] = span_id
for span in data.get("spans", []):
# Use data to generate span timestamps consistently and based
# on event timestamp
duration = span.get("data", {}).get("duration", 10.0)
offset = span.get("data", {}).get("offset", 0)
span_start = data["start_timestamp"] + offset
span["start_timestamp"] = span_start
span["timestamp"] = span_start + duration
span["parent_span_id"] = span_id
span["span_id"] = uuid4().hex[:16]
span["trace_id"] = trace
return data
|
[
"def create_event(caseId_par,prev_event_dt_par,event_name_par,hrs_par):\n d=prev_event_dt_par+datetime.timedelta(days=random.uniform(0,(hrs_par+random.randint(0,int(hrs_par*2))))/24)\n return [str(d),caseId_par,event_name_par]",
"def sample_date_indices():\n observed = xr.open_dataset(settings.SMIPS_AGG, decode_times=False)\n max_date_index = len(observed.time.values) - 8 # to ensure we don't get the last value and don't have \"lead time\" values for it\n date_index_sample = random.sample(range(max_date_index), 1000)\n return date_index_sample",
"def test_random_data_down_the_pipe():\n records = [FakeDictEntryFactory() for _ in range(100)]\n record_gen = log_data_generator(records)\n start(record_gen, datetime_broadcaster)",
"def resample(self, sample_docs_id_pre):\n #sample_docs_id = sample_docs_id_pre[:int(self.num_doc/2)]\n #remain_index = list(set(range(len(self.t_a_k))) - set(sample_docs_id))\n #remain_index_sample = random.sample(remain_index, int(self.num_doc/2))\n #sample_docs_id += remain_index_sample\n self.get_samples(sample_docs_id_pre[:self.num_doc])",
"def regenerate_time_sample(d):\n try:\n # Trying to get Time coordinate. If not present regenerating it\n d.get_coordinate_object('Time')\n except ValueError:\n ct = d.get_coordinate_object('Start Time in int(Time)')\n c_shift = d.get_coordinate_object('Rel. Time in int(Time)')\n if (not ct.mode.equidistant):\n try:\n ct.values += np.nanmean(c_shift.values, axis=c_shift.dimension_list[0])\n except IndexError:\n ct.values += c_shift.values\n #check if new coordinate is equidistant\n if len(ct.dimension_list) == 1:\n steps = ct.values[1:]-ct.values[:-1]\n accuracy = np.max(steps)/np.min(steps)\n if accuracy-1 < 1e-10:\n ct.start = ct.values[0]\n ct.step = np.mean(steps)\n ct.mode.equidistant = True\n else:\n try:\n ct.start += c_shift.values[0]\n except IndexError:\n ct.start += c_shift.values\n ct.unit.name='Time'\n ct.unit.unit='Second'\n \n d.del_coordinate('Rel. Time in int(Time)')",
"def generate_trace_json(events: Iterable[TraceEvent]):\n json_lines = []\n for event in events:\n if event.module is None or event.timestamp_us is None or \\\n event.event_type is None or event.label is None:\n _LOG.error(\"Invalid sample\")\n continue\n\n line = {\n \"pid\": event.module,\n \"name\": (event.label),\n \"ts\": event.timestamp_us\n }\n if event.event_type == TraceType.DURATION_START:\n line[\"ph\"] = \"B\"\n line[\"tid\"] = event.label\n elif event.event_type == TraceType.DURATION_END:\n line[\"ph\"] = \"E\"\n line[\"tid\"] = event.label\n elif event.event_type == TraceType.DURATION_GROUP_START:\n line[\"ph\"] = \"B\"\n line[\"tid\"] = event.group\n elif event.event_type == TraceType.DURATION_GROUP_END:\n line[\"ph\"] = \"E\"\n line[\"tid\"] = event.group\n elif event.event_type == TraceType.INSTANTANEOUS:\n line[\"ph\"] = \"I\"\n line[\"s\"] = \"p\"\n elif event.event_type == TraceType.INSTANTANEOUS_GROUP:\n line[\"ph\"] = \"I\"\n line[\"s\"] = \"t\"\n line[\"tid\"] = event.group\n elif event.event_type == TraceType.ASYNC_START:\n line[\"ph\"] = \"b\"\n line[\"scope\"] = event.group\n line[\"tid\"] = event.group\n line[\"cat\"] = event.module\n line[\"id\"] = event.trace_id\n line[\"args\"] = {\"id\": line[\"id\"]}\n elif event.event_type == TraceType.ASYNC_STEP:\n line[\"ph\"] = \"n\"\n line[\"scope\"] = event.group\n line[\"tid\"] = event.group\n line[\"cat\"] = event.module\n line[\"id\"] = event.trace_id\n line[\"args\"] = {\"id\": line[\"id\"]}\n elif event.event_type == TraceType.ASYNC_END:\n line[\"ph\"] = \"e\"\n line[\"scope\"] = event.group\n line[\"tid\"] = event.group\n line[\"cat\"] = event.module\n line[\"id\"] = event.trace_id\n line[\"args\"] = {\"id\": line[\"id\"]}\n else:\n _LOG.error(\"Unknown event type, skipping\")\n continue\n\n # Handle Data\n if event.has_data:\n if event.data_fmt == \"@pw_arg_label\":\n line[\"name\"] = event.data.decode(\"utf-8\")\n elif event.data_fmt == \"@pw_arg_group\":\n line[\"tid\"] = event.data.decode(\"utf-8\")\n elif event.data_fmt == \"@pw_arg_counter\":\n line[\"ph\"] = \"C\"\n line[\"args\"] = {\n line[\"name\"]: int.from_bytes(event.data, \"little\")\n }\n elif event.data_fmt.startswith(\"@pw_py_struct_fmt:\"):\n items = struct.unpack_from(\n event.data_fmt[len(\"@pw_py_struct_fmt:\"):], event.data)\n args = {}\n for i, item in enumerate(items):\n args[\"data_\" + str(i)] = item\n line[\"args\"] = args\n else:\n line[\"args\"] = {\"data\": event.data.hex()}\n\n # Encode as JSON\n json_lines.append(json.dumps(line))\n\n return json_lines",
"def sample_log(log, no_traces=100):\r\n new_log = EventLog(attributes=log.attributes, extensions=log.extensions, globals=log._omni,\r\n classifiers=log.classifiers)\r\n new_log._list = random.sample(log, min(no_traces, len(log)))\r\n return new_log",
"def subsample_events(\n self,\n subsample_count=10000,\n random_seed=1\n ):\n # get raw event count as it might be less than original event count\n # due to filtered negative scatter events\n raw_event_count = self._raw_events.shape[0]\n shuffled_indices = np.arange(raw_event_count)\n\n self._subsample_seed = random_seed\n rng = np.random.RandomState(seed=self._subsample_seed)\n\n bad_idx = np.empty(0, dtype=int)\n\n if self.negative_scatter_indices is not None:\n bad_idx = self.negative_scatter_indices\n\n if self.flagged_indices is not None:\n bad_idx = np.unique(np.concatenate([bad_idx, self.flagged_indices]))\n\n bad_count = bad_idx.shape[0]\n if bad_count > 0:\n shuffled_indices = np.delete(shuffled_indices, bad_idx)\n\n if (raw_event_count - bad_count) < subsample_count:\n # if total event count is less than requested subsample count,\n # sub-sample will be all events (minus negative scatter if filter is True)\n self._subsample_count = self.event_count - bad_count\n else:\n self._subsample_count = subsample_count\n\n # generate random indices for subsample\n # using a new RandomState with given seed\n rng.shuffle(shuffled_indices)\n\n self.subsample_indices = shuffled_indices[:self._subsample_count]",
"def test_generate_sample_lending_intervals_non_repeating_timestamps():\n\tnum_entries = 12\n\tresult = utils.generate_sample_lending_intervals(10, num_entries, 1479123456, 1489123457)\n\tfor interval in result:\n\t\ttimestamps = set()\n\t\tfor lending_entry in interval.lending_entries:\n\t\t\ttimestamps.add(lending_entry.timestamp)\n\t\tassert len(timestamps) == num_entries",
"def assign_ids_const_delta(self, drop_samples=False) -> Optional[TSCDataFrame]:\n\n def split_irregular_time_series(local_tsc_df: TSCDataFrame, min_id: int):\n assert min_id >= 0\n\n if local_tsc_df.shape[0] == 1 and drop_samples:\n # degenerated time series are dropped\n return None\n elif local_tsc_df.shape[0] == 1 and not drop_samples:\n raise ValueError(\n \"There is a single-sampled time series present and at \"\n \"the same time 'drop_samples=False'.\"\n )\n\n if local_tsc_df.shape[0] == 2:\n # return early of special case of only 2 samples\n return local_tsc_df\n\n # time difference\n first_diff = np.diff(\n local_tsc_df.index.get_level_values(TSCDataFrame.tsc_time_idx_name)\n )\n\n if local_tsc_df.is_datetime_index():\n first_diff = first_diff.astype(int)\n\n first_diff = np.append(np.inf, first_diff)\n\n # change in time difference\n second_diff = np.diff(first_diff)\n second_diff = np.append(second_diff, 0)\n\n # Indicator for first case:\n # There is a gap in the sampling, e.g.\n # 1,2,3,10,11,12\n # This results into\n # first diff 1,1,7,1,1\n # second diff 0,6,-6,0\n # To identify this case, neighboring non-zero (with respect to the first\n # sample) are identified (the \"6\" identifies a new start of an ID)\n indicator = np.logical_and(second_diff[:-1], second_diff[1:])\n\n # remove the indentifications of the first kind from the second diff\n # from the example above remove the 6 and the neighboring -6\n second_diff[np.append(0, indicator).astype(bool)] = 0\n second_diff[np.append(indicator, 0).astype(bool)] = 0\n\n indicator = np.append(0, indicator)\n\n # Indicator for the second case:\n # There is a new sampling frequency\n # 1,2,3,5,7,9\n # This results into\n # first diff 1,1,2,2,2\n # second diff 0,1,0,0\n # I.e. there is a single difference (without a neighboring)\n # We simply take the second_diff (after removals of the first case) as\n # indicator for the start of a new time series ID).\n indicator = np.logical_or(indicator, second_diff.astype(bool))\n\n new_ids = np.cumsum(indicator)\n new_ids += min_id\n\n unique_ids, counts = np.unique(new_ids, return_counts=True)\n\n if drop_samples:\n remove_ids = unique_ids[counts == 1]\n\n mask_keep_elements = ~np.isin(new_ids, remove_ids)\n new_ids = new_ids[mask_keep_elements]\n local_tsc_df = local_tsc_df.loc[mask_keep_elements, :]\n else:\n if np.array(counts == 1).any():\n raise ValueError(\n \"The new time series collection is invalid because there are \"\n \"intervals of irregular time sampling frequency. Consider \"\n \"setting 'drop_samples=True'.\"\n )\n\n if local_tsc_df.shape[0] == 2 and drop_samples:\n return None\n else:\n # prepare df and assign new ids -\n # >> this creates new sub time series\n reassigned_ids_idx = pd.MultiIndex.from_arrays(\n arrays=(\n new_ids,\n local_tsc_df.index.get_level_values(\n TSCDataFrame.tsc_time_idx_name\n ),\n )\n )\n\n return local_tsc_df.set_index(reassigned_ids_idx)\n\n result_dfs = list()\n\n min_id = 0\n for _id, timeseries_df in self._tsc_df.groupby(by=TSCDataFrame.tsc_id_idx_name):\n\n if pd.isnull(timeseries_df.delta_time):\n new_df = split_irregular_time_series(timeseries_df, min_id=min_id)\n else:\n # reset time series ID\n new_df = timeseries_df\n new_df.index = new_df.index.set_levels([min_id], 0)\n\n if new_df is not None:\n min_id = max(new_df.ids) + 1\n result_dfs.append(new_df)\n else:\n if not drop_samples:\n raise RuntimeError(\n \"BUG: DataFrame is None while drop_samples=False. 
Please report.\"\n )\n\n if result_dfs:\n self._tsc_df = pd.concat(result_dfs, axis=0)\n self._tsc_df = self._tsc_df.tsc.assign_ids_sequential()\n\n if np.isnan(np.asarray(self._tsc_df.delta_time)).any():\n warnings.warn(\n \"The function 'assign_ids_const_delta' was unsuccessful \"\n \"to remove all irregular time series. Please \"\n \"consider to report case.\"\n )\n\n return self._tsc_df\n else:\n return None",
"def prepare_testIDs():\n message_body= []\n\n today= open(clientPath+\"todays_testIDs.log\", 'r')\n yesterday= open(clientPath+\"yesterdays_testIDs.log\", 'r')\n \n for log_file in [today, yesterday]:\n for line in log_file: \n if \"/\" not in line: \n print len(line)\n message_body.append(line[:-1])\n log_file.close()\n\n return create_JSON_message(\"testId\", message_body)",
"def _generate_random_example_for_one_session_and_one_marker(\n rng: np.random.Generator,\n) -> Tuple[List[EventMetaData], List[int]]:\n applied = int(rng.choice(10))\n all_preceding_user_turn_numbers = [int(rng.choice(20)) for _ in range(applied)]\n event_list = [\n EventMetaData(\n idx=int(rng.choice(100)), preceding_user_turns=preceding_user_turns\n )\n for preceding_user_turns in all_preceding_user_turn_numbers\n ]\n return event_list, all_preceding_user_turn_numbers",
"def generate_trace_id(self) -> int:",
"def test_random_sample_1962(self):\n\t\t#-Load Random Sample From RAW DATASET-#\n\t\tyears = [1962]\n\t\tobj = self.obj\n\t\trs = import_csv_as_statatypes(TEST_DATA_DIR+\"nberfeenstra_wtf62_random_sample.csv\") \t\t#random sample\n\t\tdel rs['obs']\n\t\tassert_rows_in_df(df=self.obj.raw_data, rows=rs)\n\t\tassert_unique_rows_in_df(df=self.obj.raw_data, rows=rs)",
"def generate_random_entries(self, number_of_entries):\n counter = 1\n for i in range(number_of_entries):\n self.mongo_db_service.add_entry(\n {\n 'id': counter,\n 'is_modified': False,\n 'status': random.randint(1, 1000000),\n 'data': Utils.generate_random_string(length=random.randint(8, 15)),\n 'timestamp': int(time.time())\n })\n counter += 1",
"def timestamp_patterns( sample):\n\t# All timestamps variations\n\tday_name = ''\n\tif len(sample) > 0:\n\t\tif sample[0] in string.ascii_letters:\n\t\t\tday_name = '%a '\n\tc_cols = sample.count( ':')\n\tfor zone in ['',' %Z',' %z']:\n\t\tfor dt in datetime_patterns( c_cols):\n\t\t\tyield ['%s%s%s'%(day_name, dt[0], zone), dt[1], dt[2]]",
"def get_segment_sample_timestamps(self, segment_id, flat=False, idx_start=None, idx_end=None):\r\n if segment_id in self.info.source_channel_of_segment.keys():\r\n idx_start, idx_end = self.__handle_indices(idx_start, idx_end)\r\n data_ts = self.data_ts[idx_start:idx_end]\r\n source_channel = self.info.source_channel_of_segment[segment_id]\r\n signal_ts = np.zeros((self.data.shape[0], data_ts.shape[1]), dtype=np.long)\r\n segment_ts = np.zeros(self.data.shape[0], dtype=np.long) + source_channel.sampling_tick.magnitude\r\n segment_ts[0] = 0\r\n segment_ts = np.cumsum(segment_ts)\r\n for i in range(data_ts.shape[1]):\r\n col = (data_ts[0, i] - self.info.pre_interval.magnitude) + segment_ts\r\n signal_ts[:, i] = col\r\n if flat:\r\n signal_ts = np.reshape(signal_ts, -1, 'F')\r\n return (signal_ts, source_channel.sampling_tick.units)",
"def _fill_in_sample_ids(samples: List[dict], lims_map: dict, id_key: str = \"internal_id\"):\n for sample in samples:\n LOG.debug(f\"{sample['name']}: link sample to LIMS\")\n if not sample.get(id_key):\n internal_id = lims_map[sample[\"name\"]]\n LOG.info(f\"{sample['name']} -> {internal_id}: connect sample to LIMS\")\n sample[id_key] = internal_id",
"def get_random_sensor_id():\n return \"\".join(random.choice(\"0123456789abcdef\") for i in range(12))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fetch & return a new `Tag` object representing the tag's current state
|
def fetch(self):
api = self.doapi_manager
return api._tag(api.request(self.url)["tag"])
|
[
"async def fetch_tags(self) -> dict:\n self.cur.execute('select type from tags where tag=?', (self.tag,))\n result = self.cur.fetchone()\n if result:\n return {\n 'name': self.tag,\n 'tag_type': result[0]\n }\n\n # since our cache missed the current tag,\n # we query from the API for it.\n route = Route('GET', '/tag/index.json?name='\n f'{self.tag}&limit=0')\n try:\n results = await self.succ.hh_req(route)\n except (aiohttp.ClientError, HHApiError) as err:\n retry = round(random.uniform(0.5, 2.5), 3)\n log.info(f'[tagfetch {self.tag}] {err!r}, retrying in {retry}s.')\n await asyncio.sleep(retry)\n return await self.fetch_tags()\n\n learned, already_in = 0, 0\n\n # we get a list of tag information from a tag\n # we can get 1 tag information or N tag information.\n for tag_data in results:\n tag_name = tag_data['name']\n tag_type = tag_data['tag_type']\n\n # insert to our tag knowledge db\n try:\n self.cur.execute('insert into tags (tag, type) values (?, ?)',\n (tag_name, tag_type))\n learned += 1\n except sqlite3.IntegrityError:\n already_in += 1\n\n log.info(f'[tagfetch] learned {learned} tags,'\n f' {already_in} already learned')\n\n # reiterate again, to get our *actual tag* information\n for tag_data in results:\n tag_name = tag_data['name']\n tag_type = tag_data['tag_type']\n\n if tag_name == self.tag:\n return _wrap(tag_name, tag_type)\n\n # if we didn't find our tag inside those tag information data,\n # mark it as a general tag\n\n # this happens when the tag exists inside a post,\n # but the tag information route doesn't give us\n # anything meaningful about the tag.\n self.cur.execute('insert into tags (tag, type) values (?, ?)',\n (self.tag, TagType.GENERAL))\n\n log.debug(f'{self.tag!r} is a no-match from API')\n\n return _wrap(self.tag, TagType.GENERAL)",
"def from_tag(cls, tag: str) -> \"Release\":\n resp = requests.get(f\"{cls.class_url}/{tag}\")\n if resp.status_code != 200:\n error(f\"Failed to fetch release {tag}: {resp.status_code} - {resp.json()['message']}\")\n breakpoint()\n exit(1)\n return cls.from_api(resp.json())",
"def get_or_create_tag(cls, session, tagname):\n tag = cls.get_tag(tagname)\n if tag:\n return tag\n tag = cls(tag_string=tagname)\n session.add(tag)\n session.commit()\n return tag",
"def ex_get_tag(self, tag_id):\r\n action = '/tags/%s/' % (tag_id)\r\n response = self.connection.request(action=action, method='GET').object\r\n tag = self._to_tag(data=response)\r\n return tag",
"def _new_tag_object(self, tag_name):\n return Reference(tag_name, self)",
"def get_tag(self, id, create_on_fail=False):\n return self.manager.get_tag(id, create_on_fail)",
"def tag_object(tag):\n # We can't use ar.get_tags because that returns the commit's hexsha,\n # not the tag's, and ar.get_hexsha is limited to commit objects.\n return ar.call_git_oneline(\n [\"rev-parse\", \"refs/tags/{}\".format(tag)], read_only=True)",
"def get_tag(self, bag):\n return self.tag_set.get(bag=bag)",
"def get_tag_detail(self, tag_name):\n return Tag.objects(name=tag_name).first()",
"def get_tag(self, tag):\n if tag not in self.tags: return\n return self.tags[tag]",
"def tag(self):\n return self._tag",
"def get_most_recent_tag_indefinitely(self):\n self._issue_command('t6')\n while True:\n data = self.serial.read(self.serial.inWaiting())\n if data:\n init_kwargs = {}\n if self.tag_id_prefix:\n init_kwargs.update({'id_prefix': self.tag_id_prefix})\n\n # Try to initialize and return a valid Tag object w/ the epc_id read\n try:\n epc_id = data.split('TAG=')[1].split(' ')[0]\n self._issue_command(' \\r')\n self.serial.flushInput()\n return Tag(epc_id, **init_kwargs)\n except IndexError:\n print 'IndexError', data\n self._issue_command(' \\r')\n self.serial.flushInput()\n return None",
"def of_tag_candidate(cls, d: Dict[str, Any]) -> 'Tag':\n return Tag(\n d['name'],\n d['filename'],\n d['cmd'],\n d['kind'],\n )",
"def _retrieveNewTagsFromFeedEntry(jobId, entry):\n\n newTags = {};\n\n # add title\n newTags[LINKTAG_TITLE] = entry.title\n\n # add summary and image tags\n processingResult = hp.processHtml(\n jobId,\n entry.summary,\n \":not(script)\",\n [\"img\"]);\n newTags[LINKTAG_SUMMARY] = entry.summary;\n newTags[LINKTAG_SUMMARYTEXT] = processingResult[0];\n newTags[LINKTAG_SUMMARYIMAGES] = processingResult[1];\n\n if entry.published_parsed:\n newTags[LINKTAG_PUBTIME] = calendar.timegm(entry.published_parsed);\n else:\n newTags[LINKTAG_PUBTIME] = int(time.time())\n\n newTags[LINKTAG_ISPROCESSED] = 'false'\n return newTags",
"def get(self, uuid):\n b = Branch()\n b.branch_name = \"Foo\"\n return b",
"def get_fake_tag():\n\n return {\n \"ref\": \"refs/tags/v1.0\",\n \"revision\": \"49ce77fdcfd3398dc0dedbe016d1a425fd52d666\",\n \"object\": \"1624f5af8ae89148d1a3730df8c290413e3dcf30\",\n \"message\": \"Annotated tag\",\n \"tagger\": {\n \"name\": \"John Doe\",\n \"email\": \"j.doe@example.com\",\n \"date\": \"2014-10-06 07:35:03.000000000\",\n \"tz\": 540\n }\n }",
"def __get_data(self):\n ent = self.__entity_ref()\n return self.get_state_data(ent)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Tag':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = TagArgs.__new__(TagArgs)\n\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"package_id\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"repository_id\"] = None\n __props__.__dict__[\"tag_id\"] = None\n __props__.__dict__[\"version\"] = None\n return Tag(resource_name, opts=opts, __props__=__props__)",
"def get_latest_tag():\n url = \"https://github.com/adafruit/Adafruit_CircuitPython_Bundle/releases/latest\"\n logger.info(\"Requesting tag information: %s\", url)\n response = requests.get(url)\n logger.info(\"Response url: %s\", response.url)\n tag = response.url.rsplit(\"/\", 1)[-1]\n logger.info(\"Tag: '%s'\", tag)\n return tag"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Remove the tag from one or more resources
|
def remove(self, *resources):
self.doapi_manager.request(self.url + '/resources', method='DELETE',
data={"resources": _to_taggable(resources)})
|
[
"def remove_resource_tags(req, resource):",
"def unlink(self, tag, glob=None, resources=None):\n query = Q(project__in=self.projects) if self.projects else Q()\n if glob is not None:\n resources = list(self.find(glob, include=tag))\n self.tag_manager.filter(query).get(slug=tag).resources.remove(*resources)\n return resources\n if resources is not None:\n _resources = self.resource_manager.none()\n for resource in resources:\n _resources |= self.resource_manager.filter(\n project=resource[\"project\"],\n path=resource[\"path\"])\n self.tag_manager.filter(query).get(slug=tag).resources.remove(*list(_resources))",
"def delete_tags(self, req, resource, tags=None):\n provider = self._get_provider(resource.realm)\n if tags is None:\n provider.remove_resource_tags(req, resource)\n else:\n tags = set(tags)\n current_tags = provider.get_resource_tags(req, resource)\n current_tags.remove(tags)\n provider.set_resource_tags(req, resource, tags)",
"def remove_resource(self, rm):\n pass",
"def remove(tag: AnyTag, *, file: str) -> None:\n tag = _create_tag(tag)\n tags = get_all(file)\n if tag in tags:\n tags.pop(tags.index(tag))\n set_all(tags, file=file)",
"def remove(self, *tags):\n with self._treant._write:\n # remove redundant tags from given list if present\n tags = set([str(tag) for tag in tags])\n for tag in tags:\n # remove tag; if not present, continue anyway\n try:\n self._treant._state['tags'].remove(tag)\n except ValueError:\n pass",
"def remove_tag(self, tag):\n self.tags = list(set(self.tags or []) - set([tag]))",
"def delete_tag(self):\n self.delete()",
"def remove_tags(self, *tags):\n\n try:\n tag_list = self.data[\"tags\"]\n except KeyError:\n return\n\n self.data[\"tags\"] = [t for t in tag_list if t not in tags]",
"def remove_tags_from_picture(picture, tags):\n for tag in tags:\n remove_tag_from_picture(picture, tag)",
"def clearTagImages():\n files = glob.glob('/home/sewerbot/repo/SeniorDesign/site/backend/data_management/temp/tag*.jpg')\n remove(files)",
"def remove(self, uri):\n try:\n del self._resources[uri]\n except KeyError:\n pass",
"def test_remove_asset_tag(self):\n pass",
"def on_remove_resource(self, event):\r\n resource_index = self.listbox_resources.GetSelection()\r\n if resource_index != wx.NOT_FOUND:\r\n resource_type = self.choice_type.GetSelection()\r\n self.resource_lists[ resource_type ].pop( resource_index )\r\n self.listbox_resources.Delete( resource_index )",
"def delAsset(self):\n pass",
"def delete(self, resource, keys):\n i = 0\n keyN = len(keys)\n while i < keyN:\n resource.attrs.__delitem__(keys[i])\n i = i+1\n print('Done deleting.')",
"def ex_tag_resources(self, resources, tag):\r\n\r\n resources = tag.resources[:]\r\n\r\n for resource in resources:\r\n if not hasattr(resource, 'id'):\r\n raise ValueError('Resource doesn\\'t have id attribute')\r\n\r\n resources.append(resource.id)\r\n\r\n resources = list(set(resources))\r\n\r\n data = {\r\n 'name': tag.name,\r\n 'resources': resources\r\n }\r\n\r\n action = '/tags/%s/' % (tag.id)\r\n response = self.connection.request(action=action, method='PUT',\r\n data=data).object\r\n tag = self._to_tag(data=response)\r\n return tag",
"def __del__(self):\n self.__tag_registration.stop_tag_reading()",
"def delete_image_tag(self, img, tag):\r\n return img.delete_tag(tag)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
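The row above pairs the untagging description with a doapi method that issues a DELETE against the tag's /resources endpoint. Below is a minimal standalone sketch of the same request made directly with the requests library; the token, tag name, and droplet ID are placeholders, and the {"resource_id", "resource_type"} payload shape is an assumption about what the _to_taggable helper produces.

# Hedged sketch: untag one droplet by calling the DigitalOcean API directly.
# TOKEN, TAG_NAME, and DROPLET_ID are placeholder values, not data from this row.
import requests

TOKEN = "your-api-token"
TAG_NAME = "frontend"
DROPLET_ID = 3164444

resp = requests.delete(
    f"https://api.digitalocean.com/v2/tags/{TAG_NAME}/resources",
    headers={"Authorization": f"Bearer {TOKEN}"},
    # assumed payload shape: a list of {"resource_id", "resource_type"} objects
    json={"resources": [{"resource_id": str(DROPLET_ID), "resource_type": "droplet"}]},
)
resp.raise_for_status()  # the endpoint is expected to answer 204 No Content on success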
r""" Returns a generator that yields all of the droplets to which the tag is currently applied
|
def fetch_all_droplets(self):
return self.doapi_manager.fetch_all_droplets(tag_name=self.name)
|
[
"def get_all_droplets(self):\n self.mock_data = \"droplets/all.json\"\n data = self.get_data(\"droplets/\")\n droplets = list()\n for jsoned in data['droplets']:\n droplet = Droplet(**jsoned)\n droplet.token = self.token\n droplet.mocked = self.mocked\n\n for net in droplet.networks['v4']:\n if net['type'] == 'private':\n droplet.private_ip_address = net['ip_address']\n if net['type'] == 'public':\n droplet.ip_address = net['ip_address']\n if droplet.networks['v6']:\n droplet.ip_v6_address = droplet.networks['v6'][0]['ip_address']\n droplets.append(droplet)\n return droplets",
"def tag_iterator(self):\n return iter(self._tags)",
"def tags(self) -> List:",
"def get_drop_features(self):\n\n self.dropletAnalysis = True\n self.beginDropAnalysisButton.setEnabled(False)\n self.runDippingTestButton.setEnabled(True)",
"def droplets():\n return [\n {\n 'id': 110,\n 'name': 'droplet-1',\n 'ip_address': '45.0.0.2',\n 'private_ip_address': '10.0.0.2',\n 'created_at': '2015-10-01T14:17:36Z',\n 'distro': 'Ubuntu',\n 'image': 'ubuntu-14',\n 'image_id': 12658446,\n 'region': 'nyc3',\n 'size': '512mb',\n 'backups_active': False,\n 'locked': False,\n 'status': 'active'\n },\n {\n 'id': 111,\n 'name': 'droplet-2',\n 'ip_address': '45.0.0.3',\n 'private_ip_address': '10.0.0.3',\n 'created_at': '2015-10-01T14:17:36Z',\n 'distro': 'Ubuntu',\n 'image': 'ubuntu-14',\n 'image_id': 12658446,\n 'region': 'nyc3',\n 'size': '512mb',\n 'backups_active': False,\n 'locked': False,\n 'status': 'active'\n },\n {\n 'id': 112,\n 'name': 'droplet-3',\n 'ip_address': '45.0.0.4',\n 'private_ip_address': '10.0.0.4',\n 'created_at': '2015-10-01T14:17:36Z',\n 'distro': 'Ubuntu',\n 'image': 'ubuntu-14',\n 'image_id': 12658446,\n 'region': 'nyc3',\n 'size': '1gb',\n 'backups_active': False,\n 'locked': False,\n 'status': 'active'\n }\n ]",
"def chunk(self, tags: Iterable[FSTTag]) -> Iterable[tuple[FSTTag, ...]]:\n tag_set = tuple(tags)\n while tag_set:\n unmatched, _ = self._get_longest(tag_set)\n prefix_length = len(tag_set) - len(unmatched)\n if prefix_length == 0:\n # There was no relabelling found, but we can just return the first tag.\n prefix_length = 1\n\n yield tag_set[:prefix_length]\n tag_set = tag_set[prefix_length:]",
"def delete_all_droplets(self):\n self.doapi_manager.request('/v2/droplets', method='DELETE',\n params={\"tag_name\": self.name})",
"def dxftags(self) -> Iterable[DXFTag]:\n pass",
"def tags(self):\n return self._named_trees('tag')",
"def items(self):\n return zip(self.times, self.droplets)",
"def ttt_player_gen(tags=['CK', 'NK']):\n for item in tags:\n yield item",
"def findTagsIter(self, wild):\n nid=_C.c_int32(0)\n tagctx=_C.c_void_p(0)\n _TreeShr._TreeFindTagWild.restype=_C.c_char_p\n try:\n while True:\n tag_ptr = _TreeShr._TreeFindTagWild(self.ctx,\n _C.c_char_p(_ver.tobytes(wild)),\n _C.byref(nid),\n _C.byref(tagctx))\n if tag_ptr is None:\n break\n yield tag_ptr.rstrip()\n except GeneratorExit:\n pass\n _TreeShr.TreeFindTagEnd(_C.byref(tagctx))",
"def blobs(self, tag, ignore_missing=True):\n for path, tags, blobs in self.walk(tag, ignore_missing=ignore_missing):\n if tags != blobs:\n for replicas in blobs:\n yield replicas",
"def print_droplets(self):\n for drop in self.cloud:\n print(drop)",
"def get_drops(self):\n return []",
"def walk(self, tag, ignore_missing=True, tagpath=()):\n tagpath += (tagname(tag),)\n\n try:\n urls = self.get(tag).get('urls', [])\n tags, blobs = partition(urls, tagname)\n tags = canonizetags(tags)\n yield tagpath, tags, blobs\n except CommError, e:\n if ignore_missing and e.code == 404:\n tags = blobs = ()\n yield tagpath, tags, blobs\n else:\n yield tagpath, None, None\n raise e\n\n for next_tag in tags:\n for child in self.walk(next_tag,\n ignore_missing=ignore_missing,\n tagpath=tagpath):\n yield child",
"def targets(self) -> Iterator[Slot[ItemT]]:\n for slot in self._slots:\n if slot.is_target:\n yield slot",
"def gen_alternates(pkgdesc):\n pkgdesc = parse_alternates(pkgdesc)\n for x in gen_alternates_recurse(pkgdesc):\n yield x",
"def wire_iter(self, grid):\n tr_w = self.track_id.width\n layer_id = self.layer_id\n for tr_idx in self.track_id:\n layer_name = grid.get_layer_name(layer_id, tr_idx)\n bbox = grid.get_bbox(layer_id, tr_idx, self._lower_unit, self._upper_unit,\n width=tr_w, unit_mode=True)\n yield layer_name, bbox"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
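The row above describes fetching every droplet that carries the tag; the document delegates to the manager's fetch_all_droplets(tag_name=...). A hedged sketch of the equivalent raw call, paging through GET /v2/droplets with a tag_name filter, is shown below; the token, tag name, and page size are placeholders, and the pagination handling follows the API's links.pages.next convention.

# Hedged sketch: yield droplets carrying a tag by paging through GET /v2/droplets.
import requests

def iter_tagged_droplets(token, tag_name):
    url = "https://api.digitalocean.com/v2/droplets"
    headers = {"Authorization": f"Bearer {token}"}
    params = {"tag_name": tag_name, "per_page": 200, "page": 1}
    while True:
        payload = requests.get(url, headers=headers, params=params).json()
        for droplet in payload.get("droplets", []):
            yield droplet
        # stop when the response no longer advertises a next page
        if not payload.get("links", {}).get("pages", {}).get("next"):
            break
        params["page"] += 1

# usage (placeholder values):
# for d in iter_tagged_droplets("your-api-token", "frontend"):
#     print(d["id"], d["name"])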
Delete all of the droplets to which the tag is applied
|
def delete_all_droplets(self):
self.doapi_manager.request('/v2/droplets', method='DELETE',
params={"tag_name": self.name})
|
[
"def delete_tag(session,taglist):\r\n for t in taglist:\r\n session.query(Tag.name==t).delete()\r\n session.commit()",
"def cleanup(self):\n\n # Call all trashed posts, active metadata tags, and photos\n trash_posts = self.connection.call(\n GetPosts(\n {\n 'post_status': ['trash'],\n 'number': 1000\n }\n )\n )\n\n active_tag_ids = set(\n [term.id for term in\n [term for term in\n [post.terms for post in self.connection.call(GetPosts())]\n for term in term]\n ]\n )\n\n active_media_library = self.connection.call(\n GetMediaLibrary(\n {'number': 1000}\n )\n )\n\n # Drop old tags from database\n tag_ids_to_drop = []\n\n for post in reversed(trash_posts):\n # Bypass Active tags, 'Uncategorized' tags, and cl_id tags\n # Append all other tags to list of terms_to_drop\n for term in post.terms:\n if term.id in active_tag_ids:\n continue\n # Bypass 'Uncategorized' tags with id='1'\n elif term.id == '1':\n continue\n elif term.name == [meta['value'] for meta in post.custom_fields if meta['key'] == 'cl_id'][0]:\n continue\n else:\n tag_ids_to_drop.append(term)\n\n for tag in set(tag_ids_to_drop):\n self.connection.call(DeleteTerm('post_tag', tag.id))\n logging.info(f'Deleted this tag from database:\\t\\tID: {tag.id}\\tTag: {tag}')\n\n\n # Drop old photos from database\n media_to_drop = []\n\n for photo in active_media_library:\n if photo.title[:-7] in [term.name for terms in [posting.terms for posting in trash_posts] for term in terms]:\n media_to_drop.append(photo)\n logging.info(f'Staged this photo for deletion:\\t{photo}')\n\n for photo in reversed(media_to_drop):\n self.connection.call(DeletePost(f'{photo.id}'))\n logging.info(f'Deleted this photo from database:\\t{photo}')\n\n\n # Delete trashed posts from database\n posts_to_drop = []\n\n for post in trash_posts:\n # Record with id == 1 is a default WP record that must stay put.\n if post.id == '1':\n continue\n else:\n posts_to_drop.append(post)\n\n for post in posts_to_drop:\n self.connection.call(DeletePost(post.id))\n logging.info(f'Deleted this post from database:\\t{post.title}')\n\n logging.info(f'\\nCleanup is done as of:\\t\\t\\t{datetime.datetime.now().strftime(\"%c\")}\\n')",
"def delete(self):\n\t\t[ n.delete() for n in self.nodes ]",
"def delete_tag(self):\n self.delete()",
"def remove_unused_tags():\n return (\n Tag.objects.all()\n .annotate(num=Count(\"taggit_taggeditem_items\"))\n .filter(num=0)\n .delete()\n )",
"def clear(self):\n with self._treant._write:\n self._treant._state['tags'] = list()",
"def clearTagImages():\n files = glob.glob('/home/sewerbot/repo/SeniorDesign/site/backend/data_management/temp/tag*.jpg')\n remove(files)",
"def test_portals_id_designs_nk_tags_delete(self):\n pass",
"def delete_all_on_layer(self):\n bpy.ops.object.select_by_layer()\n bpy.ops.object.delete(use_global=False)",
"def delete_unused(self, tags=None):\n tags_ids = [x.id for x in tags] if tags else None\n tags = self.all() if tags_ids is None else self.filter(id__in=tags_ids)\n tags.filter(items__isnull=True).delete()",
"def go_through_and_delete(deletion_script, docker_u,\n docker_p, list_of_unactive,\n working_dir_path):\n print(\"Going through each unused namespace and running {} to delete\"\n \" their unused resources.\".format(deletion_script))\n\n for i in range(len(list_of_unactive)):\n team_name = list_of_unactive[i]\n delete_unused_resources.delete_all(team_name, docker_u,\n docker_p, working_dir_path)",
"def destroyTreeItems(self):\n self.jvFigureCanvas.get_tk_widget().pack_forget() # removes prexisting JV plot #NEEDS TO MOVE TO OWN BUTTON\n self.ax.cla()\n self.selectedItems = self.viewDataTree.selection()\n self.attributeList = []\n columnvalues = self.viewDataTree['columns']\n counter = 0\n for i in self.selectedItems:\n children = self.viewDataTree.get_children(i)\n if len(children) > 0:\n for j in children:\n self.attributeList = []\n deviceValues = self.viewDataTree.item(j, 'values')\n for k, value in enumerate(deviceValues,start=0):\n self.attributeList.append((columnvalues[k], value))\n DataMethods.dataFrameAdjusted_removeItem(self,self.attributeList[0:5])\n self.viewDataTree.delete(j)\n counter += 1\n # self.viewDataTree.delete(self.viewDataTree.parent(children[0]))\n else:\n self.attributeList = []\n deviceValues = self.viewDataTree.item(i, 'values')\n for k, value in enumerate(deviceValues,start=0):\n self.attributeList.append((columnvalues[k], value))\n DataMethods.dataFrameAdjusted_removeItem(self,self.attributeList[0:5])\n self.viewDataTree.delete(i)\n # self.viewDataTree.delete(self.viewDataTree.parent(i))\n counter += 1\n \n # print(self.attributeList)\n # self.viewDataTree.delete(*)\n CleanDataModule.cleanDataTree(self)\n CleanDataModule.populateDataTree(self)\n CleanDataModule.cleanLogFill(self,f'{counter} devices deleted from dataframe')",
"def finalizer():\n\n for instance in instances:\n try:\n instance.get()\n except CommandFailed:\n log.warning(\"Pool is already deleted\")\n continue\n blockpool_ui_obj = BlockPoolUI()\n if not blockpool_ui_obj.delete_pool(instance.name):\n instance.delete()\n raise PoolNotDeletedFromUI(\n f\"Could not delete block pool {instances.name} from UI.\"\n \" Deleted from CLI\"\n )",
"def delete_all(self):\n self.db_tool.session.query(Plot).delete()\n self.db_tool.session.query(Parameter).delete()\n self.db_tool.session.query(Signal).delete()\n self.db_tool.commit()",
"def remove(self, *tags):\n with self._treant._write:\n # remove redundant tags from given list if present\n tags = set([str(tag) for tag in tags])\n for tag in tags:\n # remove tag; if not present, continue anyway\n try:\n self._treant._state['tags'].remove(tag)\n except ValueError:\n pass",
"def delete_all_busy_box_deployments(self):\n for depl in WorkloadUi().deployment_list:\n self.delete_busybox(depl.name, force=True)",
"def delete(self):\n for subset in self.category.project.subsets.all():\n subset.remove_filter_field(self)\n\n for usergroup in self.category.project.usergroups.all():\n usergroup.remove_filter_field(self)\n\n super(Field, self).delete()",
"def undeleteComponents(*args, **kwargs):\n \n pass",
"def fetch_all_droplets(self):\n return self.doapi_manager.fetch_all_droplets(tag_name=self.name)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
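The document in the row above shows that deleting all tagged droplets is a single DELETE against /v2/droplets with a tag_name query parameter. A direct requests equivalent is sketched below; the token and tag name are placeholders.

# Hedged sketch: delete every droplet carrying a tag in one request,
# mirroring DELETE /v2/droplets?tag_name=... from the row above.
import requests

resp = requests.delete(
    "https://api.digitalocean.com/v2/droplets",
    headers={"Authorization": "Bearer your-api-token"},
    params={"tag_name": "frontend"},
)
resp.raise_for_status()  # expected to return 204 No Content when accepted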
r""" Perform an arbitrary action on all of the droplets to which the tag is applied. ``data`` will be serialized as JSON and POSTed to the proper API endpoint. All currentlydocumented actions require the POST body to be a JSON object containing, at a minimum, a ``"type"`` field.
|
def act_on_droplets(self, **data):
api = self.doapi_manager
return map(api._action, api.request('/v2/droplets/actions', method='POST', params={"tag_name": self.name}, data=data)["actions"])
|
[
"def act(self, **data):\n api = self.doapi_manager\n return api._action(api.request(self.action_url, method='POST',\n data=data)[\"action\"])",
"def droplet_actions(ctx, disable_backups, reboot, power_cycle, shutdown, power_off,\n\t\t\t\t\tpower_on, password_reset, ipv6, private_networking, upgrade,\n\t\t\t\t\trestore, backup_id, resize, size, rebuild, image, rename, name,\n\t\t\t\t\tchange_kernel, kernel, snapshot, sname, token, tablefmt, proxy):\n\n\tif (not ctx.params['disable_backups'] and not ctx.params['reboot'] \n\t\tand not ctx.params['power_cycle'] and not ctx.params['shutdown'] \n\t\tand not ctx.params['power_off'] and not ctx.params['power_on'] \n\t\tand not ctx.params['password_reset'] and not ctx.params['ipv6'] \n\t\tand not ctx.params['private_networking'] and not ctx.params['upgrade'] \n\t\tand not ctx.params['restore'] and not ctx.params['backup_id'] \n\t\tand not ctx.params['resize'] and not ctx.params['size'] \n\t\tand not ctx.params['rebuild'] and not ctx.params['image'] \n\t\tand not ctx.params['rename'] and not ctx.params['name'] \n\t\tand not ctx.params['change_kernel'] and not ctx.params['kernel'] \n\t\tand not ctx.params['snapshot'] and not ctx.params['sname']):\n\t\treturn click.echo(ctx.get_help())\n\n\toption_list = ['disable_backups', 'reboot', 'power_cycle', 'shutdown', 'power_off',\n\t 'power_on', 'password_reset', 'ipv6', 'private_networking', 'upgrade', 'restore', \n\t 'resize', 'rebuild', 'rename', 'change_kernel', 'snapshot']\n\n\tif validate(ctx.params, option_list):\n\t\tif disable_backups:\n\t\t\tparams = {'type':'disable_backups'}\n\t\t\trecord = 'droplet disable backups'\n\t\t\treturn run_command(disable_backups, params, record, token, proxy, tablefmt)\n\n\t\tif reboot:\n\t\t\tparams = {'type':'reboot'}\n\t\t\trecord = 'droplet reboot'\n\t\t\treturn run_command(reboot, params, record, token, proxy, tablefmt)\n\t\n\t\tif power_cycle:\n\t\t\tparams = {'type':'power_cycle'}\n\t\t\trecord = 'droplet power_cycle'\n\t\t\treturn run_command(power_cycle, params, record, token, proxy, tablefmt)\n\n\t\tif shutdown:\n\t\t\tparams = {'type':'shutdown'}\n\t\t\trecord = 'droplet shutdown'\n\t\t\treturn run_command(shutdown, params, record, token, proxy, tablefmt)\n\n\t\tif power_off:\n\t\t\tparams = {'type':'power_off'}\n\t\t\trecord = 'droplet power_off'\n\t\t\treturn run_command(power_off, params, record, token, proxy, tablefmt)\n\n\t\tif power_on:\n\t\t\tparams = {'type':'power_on'}\n\t\t\trecord = 'droplet power_on'\n\t\t\treturn run_command(power_on, params, record, token, proxy, tablefmt)\n\n\t\tif password_reset:\n\t\t\tparams = {'type':'password_reset'}\n\t\t\trecord = 'droplet password_reset'\n\t\t\treturn run_command(password_reset, params, record, token, proxy, tablefmt)\n\n\t\tif ipv6:\n\t\t\tparams = {'type':'enable_ipv6'}\n\t\t\trecord = 'droplet ipv6'\n\t\t\treturn run_command(ipv6, params, record, token, proxy, tablefmt)\n\n\t\tif private_networking:\n\t\t\tparams = {'type':'enable_private_networking'}\n\t\t\trecord = 'droplet private_networking'\n\t\t\treturn run_command(private_networking, params, record, token, proxy, tablefmt)\n\n\t\tif upgrade:\n\t\t\tparams = {'type':'upgrade'}\n\t\t\trecord = 'droplet upgrade'\n\t\t\treturn run_command(upgrade, params, record, token, proxy, tablefmt)\n\n\t\tif restore:\n\t\t\tparams = {'type':'restore', 'image':backup_id}\n\t\t\trecord = 'droplet restore'\n\t\t\treturn run_command(restore, params, record, token, proxy, tablefmt)\n\n\t\tif resize:\n\t\t\tparams = {'type':'resize', 'size':size}\n\t\t\trecord = 'droplet resize'\n\t\t\treturn run_command(resize, params, record, token, proxy, tablefmt)\n\n\t\tif rebuild:\n\t\t\tparams = {'type':'rebuild', 
'image':image}\n\t\t\trecord = 'droplet rebuild'\n\t\t\treturn run_command(rebuild, params, record, token, proxy, tablefmt)\n\n\t\tif rename:\n\t\t\tparams = {'type':'rename', 'name':name}\n\t\t\trecord = 'droplet rename'\n\t\t\treturn run_command(rename, params, record, token, proxy, tablefmt)\n\n\t\tif change_kernel:\n\t\t\tparams = {'type':'change_kernel', 'kernel':kernel}\n\t\t\trecord = 'droplet change_kernel'\n\t\t\treturn run_command(change_kernel, params, record, token, proxy, tablefmt)\n\n\t\tif snapshot:\n\t\t\tparams = {'type':'snapshot', 'name':sname}\n\t\t\trecord = 'droplet snapshot'\n\t\t\treturn run_command(snapshot, params, record, token, proxy, tablefmt)",
"def delete_all_droplets(self):\n self.doapi_manager.request('/v2/droplets', method='DELETE',\n params={\"tag_name\": self.name})",
"def droplet_actions_group():\n\tpass",
"def do_post(self,data=None,params={}):\n if data and params:\n raise ValueError('Either data or params can be submitted to be the POST body, but not both.')\n \n post_data = json.dumps(data) if data else params\n \n response = requests.post('%s/%s.json' % (self.service_url,self.descriptor['slug']),\n data=post_data,\n auth=(self.user,self.password))\n \n return self.process_response(response)",
"def trigger_action_on_multi_resource(data):\n for item in data:\n trigger_action_on_a_resource(item['resource_url'],item['action'],item['provider'][0])\n return \"\",return_code['OK']",
"def tagAPost( self, action_param ):\n self.doc['tags'] = self.doc.get('tags', [])\n self.doc['tags'].append(action_param)",
"def GetDropletActions(self, id):\n Actions = r.get(self.APIURL + \"/droplets/\" + id + \"/actions\",\n auth=self.BasicAuth)\n DropletActions = Actions.json()\n return DropletActions",
"def post(self):\n json_data = request.get_json(force=True)\n action = dict_get(json_data, 'action', '')\n\n success = True\n error = ''\n action_name = ''\n try:\n if action == 'drop_database':\n action_name = 'dropping MongoDB database'\n MongoUtil.drop_db()\n else:\n success = False\n error = '[%s] action is not supported' % action\n except Exception as e:\n success = False\n error = 'Error when %s : %s' % (action_name, e)\n\n return {'success': success, 'error': error}",
"def post(self):\n # get parameters\n type = self.get_argument('type')\n # get trade list from body content\n trades = json.loads(self.request.body.decode())\n # set trade list to risker\n risker.set(type, trades)\n self.write(protocol.success(msg='success'))",
"def fetch_all_droplets(self):\n return self.doapi_manager.fetch_all_droplets(tag_name=self.name)",
"def post(self, path, data):\n return(self._request('POST', path, json=data))",
"def _post(self, data):\n if not isinstance(data, list):\n data = [data]\n headers = {\"Content-Type\": \"application/json\", }\n return self.client.post(\"/query\", data=json.dumps(data), headers=headers)",
"def add_raw_data(self, data):\r\n bulk_action = None\r\n for row in data:\r\n if isinstance(row, dict):\r\n op_type = row.keys()[0]\r\n metadata = row[op_type]\r\n if pylastica.bulk.action.Action.is_valid_op_type(op_type):\r\n #add previous action\r\n if bulk_action is not None:\r\n self.add_action(bulk_action)\r\n bulk_action = pylastica.bulk.action.Action(op_type, metadata)\r\n elif isinstance(bulk_action, pylastica.bulk.action.Action):\r\n bulk_action.source = row\r\n self.add_action(bulk_action)\r\n bulk_action = None\r\n else:\r\n raise pylastica.exception.InvalidException(\"Invalid bulk data. Source must follow action metadata.\")\r\n else:\r\n raise pylastica.exception.InvalidException(\"Invalid bulk data. Should be list of dict, Document, or Bulk.Action\")\r\n #add last action if available\r\n if bulk_action is not None:\r\n self.add_action(bulk_action)\r\n return self",
"def submit_all(self: SubmitApp) -> None:\n self.count = submit_from(self.source, template=self.template,\n bundlesize=self.bundlesize, bundlewait=self.bundlewait,\n tags=Tag.parse_cmdline_list(self.taglist))",
"def post(self):\n json_data= request.get_json(force=True) \n product = json_data['product']\n urls = json_data['dataset_definition_urls']\n\n statuses = list(add_datasets([urls], product))\n\n return statuses",
"def dispatch_hook(key, hooks, hook_data, **kwargs):\n ...",
"def post(self, *args, **kw):\n return self.custom_dispatch(*args, **kw)",
"def DropletPowerControl(self, id, action):\n ValidActions = [\"power_cycle\", \"shutdown\", \"power_off\", \"power_on\",\n \"reboot\"]\n if action not in ValidActions:\n raise DopyError(\"%s is not a valid action\" % action)\n command = {\"type\": action}\n Dpower = r.post(self.APIURL + \"/droplets/\" + id + \"/actions\",\n params=command, auth=self.BasicAuth)\n DropPower = Dpower.json()\n return DropPower"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
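The query in the row above stresses that every documented action is a JSON object with at least a "type" field, POSTed to /v2/droplets/actions and filtered by tag_name. The sketch below wraps that request in a small helper; the token, tag name, and example action are placeholders, not part of the library.

# Hedged sketch: run one action on every droplet carrying a tag.
# The endpoint, tag_name parameter, and {"type": ...} body come from the row above.
import requests

def act_on_tagged_droplets(token, tag_name, **body):
    resp = requests.post(
        "https://api.digitalocean.com/v2/droplets/actions",
        headers={"Authorization": f"Bearer {token}"},
        params={"tag_name": tag_name},
        json=body,  # must contain at least a "type" key
    )
    resp.raise_for_status()
    return resp.json()["actions"]  # one action object per tagged droplet

# usage (placeholder values):
# act_on_tagged_droplets("your-api-token", "frontend", type="snapshot", name="nightly")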
r""" Power on all of the droplets to which the tag is applied
|
def power_on(self):
return self.act_on_droplets(type='power_on')
|
[
"def get_drop_features(self):\n\n self.dropletAnalysis = True\n self.beginDropAnalysisButton.setEnabled(False)\n self.runDippingTestButton.setEnabled(True)",
"def act_on_droplets(self, **data):\n api = self.doapi_manager\n return map(api._action, api.request('/v2/droplets/actions', method='POST', params={\"tag_name\": self.name}, data=data)[\"actions\"])",
"def delete_all_droplets(self):\n self.doapi_manager.request('/v2/droplets', method='DELETE',\n params={\"tag_name\": self.name})",
"def fetch_all_droplets(self):\n return self.doapi_manager.fetch_all_droplets(tag_name=self.name)",
"def _drop_tip(\n self,\n command: models.PickUpDropTipCommand) -> ReturnType:\n return [\n DropTipRequest(\n pipetteId=command.params.pipette,\n labwareId=command.params.labware,\n wellName=command.params.well\n )\n ]",
"def print_droplets(self):\n for drop in self.cloud:\n print(drop)",
"def add_water(self):",
"def items(self):\n return zip(self.times, self.droplets)",
"def power_on(self):",
"def get_drops(self):\n self._sheep_wool = WoolBlock\n return self._sheep_wool.get_drops",
"def drop_item_multiple(self, key):\n\t\tself.player.begin_drop_item(True)",
"def at_drop(self, dropper):\r\n pass",
"def weightedTweakUsing(*args, **kwargs):\n \n pass",
"def strip_from_power(self):\n monsters = [\"Zombie\", \"Zombie Fighter\", \"Zombie Druid\", \"Zombie Paladin\", \"Zombie Wizard\"]\n double = False\n for i in self.activemonsters:\n if i.mon_type in monsters:\n double = True\n break\n if double:\n for i in self.activeadventurers:\n if i.class_type == \"Paladin\":\n i.power = i.power // 2",
"def fuse_innerproduct_and_bias_callback(self, op, label_map_op_list):\n for (label_map, op) in label_map_op_list:\n bias = label_map[self.bias_label]\n map_roles = label_map[self.map_roles_label]\n if isinstance(map_roles.args[0], DotOp):\n x = map_roles.args[0].args[0]\n y = map_roles.args[0].args[1]\n map_roles_op = MapRolesOp(DotOp(x, y, bias), map_roles.axes_map)\n self.replace_op(op, map_roles_op)",
"def droplet_actions_group():\n\tpass",
"def droplet_actions(ctx, disable_backups, reboot, power_cycle, shutdown, power_off,\n\t\t\t\t\tpower_on, password_reset, ipv6, private_networking, upgrade,\n\t\t\t\t\trestore, backup_id, resize, size, rebuild, image, rename, name,\n\t\t\t\t\tchange_kernel, kernel, snapshot, sname, token, tablefmt, proxy):\n\n\tif (not ctx.params['disable_backups'] and not ctx.params['reboot'] \n\t\tand not ctx.params['power_cycle'] and not ctx.params['shutdown'] \n\t\tand not ctx.params['power_off'] and not ctx.params['power_on'] \n\t\tand not ctx.params['password_reset'] and not ctx.params['ipv6'] \n\t\tand not ctx.params['private_networking'] and not ctx.params['upgrade'] \n\t\tand not ctx.params['restore'] and not ctx.params['backup_id'] \n\t\tand not ctx.params['resize'] and not ctx.params['size'] \n\t\tand not ctx.params['rebuild'] and not ctx.params['image'] \n\t\tand not ctx.params['rename'] and not ctx.params['name'] \n\t\tand not ctx.params['change_kernel'] and not ctx.params['kernel'] \n\t\tand not ctx.params['snapshot'] and not ctx.params['sname']):\n\t\treturn click.echo(ctx.get_help())\n\n\toption_list = ['disable_backups', 'reboot', 'power_cycle', 'shutdown', 'power_off',\n\t 'power_on', 'password_reset', 'ipv6', 'private_networking', 'upgrade', 'restore', \n\t 'resize', 'rebuild', 'rename', 'change_kernel', 'snapshot']\n\n\tif validate(ctx.params, option_list):\n\t\tif disable_backups:\n\t\t\tparams = {'type':'disable_backups'}\n\t\t\trecord = 'droplet disable backups'\n\t\t\treturn run_command(disable_backups, params, record, token, proxy, tablefmt)\n\n\t\tif reboot:\n\t\t\tparams = {'type':'reboot'}\n\t\t\trecord = 'droplet reboot'\n\t\t\treturn run_command(reboot, params, record, token, proxy, tablefmt)\n\t\n\t\tif power_cycle:\n\t\t\tparams = {'type':'power_cycle'}\n\t\t\trecord = 'droplet power_cycle'\n\t\t\treturn run_command(power_cycle, params, record, token, proxy, tablefmt)\n\n\t\tif shutdown:\n\t\t\tparams = {'type':'shutdown'}\n\t\t\trecord = 'droplet shutdown'\n\t\t\treturn run_command(shutdown, params, record, token, proxy, tablefmt)\n\n\t\tif power_off:\n\t\t\tparams = {'type':'power_off'}\n\t\t\trecord = 'droplet power_off'\n\t\t\treturn run_command(power_off, params, record, token, proxy, tablefmt)\n\n\t\tif power_on:\n\t\t\tparams = {'type':'power_on'}\n\t\t\trecord = 'droplet power_on'\n\t\t\treturn run_command(power_on, params, record, token, proxy, tablefmt)\n\n\t\tif password_reset:\n\t\t\tparams = {'type':'password_reset'}\n\t\t\trecord = 'droplet password_reset'\n\t\t\treturn run_command(password_reset, params, record, token, proxy, tablefmt)\n\n\t\tif ipv6:\n\t\t\tparams = {'type':'enable_ipv6'}\n\t\t\trecord = 'droplet ipv6'\n\t\t\treturn run_command(ipv6, params, record, token, proxy, tablefmt)\n\n\t\tif private_networking:\n\t\t\tparams = {'type':'enable_private_networking'}\n\t\t\trecord = 'droplet private_networking'\n\t\t\treturn run_command(private_networking, params, record, token, proxy, tablefmt)\n\n\t\tif upgrade:\n\t\t\tparams = {'type':'upgrade'}\n\t\t\trecord = 'droplet upgrade'\n\t\t\treturn run_command(upgrade, params, record, token, proxy, tablefmt)\n\n\t\tif restore:\n\t\t\tparams = {'type':'restore', 'image':backup_id}\n\t\t\trecord = 'droplet restore'\n\t\t\treturn run_command(restore, params, record, token, proxy, tablefmt)\n\n\t\tif resize:\n\t\t\tparams = {'type':'resize', 'size':size}\n\t\t\trecord = 'droplet resize'\n\t\t\treturn run_command(resize, params, record, token, proxy, tablefmt)\n\n\t\tif rebuild:\n\t\t\tparams = {'type':'rebuild', 
'image':image}\n\t\t\trecord = 'droplet rebuild'\n\t\t\treturn run_command(rebuild, params, record, token, proxy, tablefmt)\n\n\t\tif rename:\n\t\t\tparams = {'type':'rename', 'name':name}\n\t\t\trecord = 'droplet rename'\n\t\t\treturn run_command(rename, params, record, token, proxy, tablefmt)\n\n\t\tif change_kernel:\n\t\t\tparams = {'type':'change_kernel', 'kernel':kernel}\n\t\t\trecord = 'droplet change_kernel'\n\t\t\treturn run_command(change_kernel, params, record, token, proxy, tablefmt)\n\n\t\tif snapshot:\n\t\t\tparams = {'type':'snapshot', 'name':sname}\n\t\t\trecord = 'droplet snapshot'\n\t\t\treturn run_command(snapshot, params, record, token, proxy, tablefmt)",
"def random_feature_drop_multi_narrow_chunk_both(power_data, phase_data, ClassObj, drop_temps, k_folds=5, seed=None,\n verbose=False):\n\n # 1.) Make Array for Holding all of the feature dropping curves\n nested_dropping_curves = [] # np.zeros([])\n\n # 2.) Create INDEX of all instances of interests : create_discrete_index()\n label_identities, label_index = cat.create_discrete_index(event_data=power_data)\n identity_index = np.arange(len(label_index))\n sss = cat.StratifiedShuffleSplit(n_splits=k_folds, random_state=seed)\n sss.get_n_splits(identity_index, label_index)\n\n if verbose:\n print(sss)\n fold_number = 0\n\n # --------- For Loop over possible Training Sets---------\n for train_index, test_index in sss.split(identity_index, label_index):\n if verbose:\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n fold_number += 1\n print(\"On Fold #\" + str(fold_number) + ' of ' + str(k_folds))\n\n X_train, X_test = identity_index[train_index], identity_index[test_index]\n y_train, y_test = label_index[train_index], label_index[test_index]\n\n # 4.) Use INDEX to Break into corresponding [template/training set| test set] : ml_selector()\n # 4.1) Get template set/training : ml_selector(power_data, identity_index, label_index, sel_instances)\n sel_train_pow = cat.ml_selector(event_data=power_data, identity_index=label_identities, label_index=label_index,\n sel_instances=X_train, )\n sel_train_phas = cat.ml_selector(event_data=phase_data, identity_index=label_identities,\n label_index=label_index,\n sel_instances=X_train, )\n\n # 4.1) Get test set : ml_selector()\n sel_test_pow = cat.ml_selector(event_data=power_data, identity_index=label_identities, label_index=label_index,\n sel_instances=X_test)\n sel_test_phas = cat.ml_selector(event_data=phase_data, identity_index=label_identities, label_index=label_index,\n sel_instances=X_test)\n\n # 5.) 
Use template/training set to make template : make_templates(power_data)\n templates_pow = cat.make_templates(event_data=sel_train_pow)\n templates_phas = cat.make_templates(event_data=sel_train_phas)\n\n ### 5.2) Remove Template that aren't needed from train\n templates_pow = np.delete(templates_pow, drop_temps, axis=0)\n templates_phas = np.delete(templates_phas, drop_temps, axis=0)\n\n # 6.1) Use template/training INDEX and template to create Training Pearson Features : pearson_extraction()\n train_pearson_features_pow = cat.pearson_extraction(event_data=sel_train_pow, templates=templates_pow)\n train_pearson_features_phas = cat.pearson_extraction(event_data=sel_train_phas, templates=templates_phas)\n\n # 6.2) Use test INDEX and template to create Test Pearson Features : pearson_extraction()\n test_pearson_features_pow = cat.pearson_extraction(event_data=sel_test_pow, templates=templates_pow)\n test_pearson_features_phas = cat.pearson_extraction(event_data=sel_test_phas, templates=templates_phas)\n\n # 7.1) Reorganize Test Set into Machine Learning Format : ml_order_pearson()\n ml_trials_train_pow, ml_labels_train = cat.ml_order(extracted_features_array=train_pearson_features_pow)\n ml_trials_train_phas, _ = cat.ml_order(extracted_features_array=train_pearson_features_phas)\n ml_trials_train = np.concatenate([ml_trials_train_pow, ml_trials_train_phas], axis=-1)\n\n # 7.2) Get Ledger of the Features\n num_freqs, num_chans, num_temps = np.shape(\n train_pearson_features_pow[0][0]) # Get the shape of the Feature data\n ordered_index = cat.make_feature_id_ledger(num_freqs=num_freqs, num_chans=num_chans, num_temps=num_temps)\n ordered_index = np.concatenate([ordered_index, ordered_index], axis=0)\n\n # 7.3) Reorganize Training Set into Machine Learning Format : ml_order_pearson()\n ml_trials_test_pow, ml_labels_test = cat.ml_order(extracted_features_array=test_pearson_features_pow)\n ml_trials_test_phas, _ = cat.ml_order(extracted_features_array=test_pearson_features_phas)\n ml_trials_test = np.concatenate([ml_trials_test_pow, ml_trials_test_phas], axis=-1)\n\n repeated_freq_curves = []\n test_list = list(np.arange(num_chans))\n random.seed(0)\n for index in range(5000):\n drop_order = random.sample(test_list, k=len(test_list))\n fold_frequency_curves = []\n for freq in range(num_freqs):\n # if verbose:\n # print(\"On Frequency Band:\", freq, \" of:\", num_freqs)\n\n ml_trials_train_cp = ml_trials_train.copy() # make a copy of the feature extracted Train data\n ml_trials_test_cp = ml_trials_test.copy() # make a copy of the feature extracted Test data\n ordered_index_cp = ordered_index.copy() # make a copy of the ordered_index\n all_other_freqs = list(np.delete(np.arange(num_freqs), [freq])) # Make a index of the other frequencies\n temp_feature_dict = cat.make_feature_dict(ordered_index=ordered_index_cp,\n drop_type='frequency') # Feature Dict\n # reduce to selected frequency from the COPY of the training data\n ml_trials_train_freq, full_drop = cat.drop_features(features=ml_trials_train_cp, keys=temp_feature_dict,\n desig_drop_list=all_other_freqs)\n # reduce to but the selected frequency from the COPY of test data\n ml_trials_test_freq, _ = cat.drop_features(features=ml_trials_test_cp, keys=temp_feature_dict,\n desig_drop_list=all_other_freqs)\n ordered_index_cp = np.delete(ordered_index_cp, full_drop,\n axis=0) # Remove features from other frequencies\n\n # 8.) 
Perform Nested Feature Dropping with K-Fold Cross Validation\n nested_drop_curve = cat.ordered_feature_dropping(train_set=ml_trials_train_freq,\n train_labels=ml_labels_train,\n test_set=ml_trials_test_freq,\n test_labels=ml_labels_test,\n ordered_index=ordered_index_cp, drop_type='channel',\n Class_Obj=ClassObj, order=drop_order, verbose=False)\n fold_frequency_curves.append(nested_drop_curve) # For each Individual Frequency Band\n\n if verbose:\n if index % 100 == 0:\n print('on loop' + str(index))\n\n repeated_freq_curves.append(fold_frequency_curves) # Exhaustive Feature Dropping\n nested_dropping_curves.append(repeated_freq_curves) # All of the Curves\n\n # 9.) Combine all curve arrays to one array\n all_drop_curves = np.array(nested_dropping_curves) # (folds, frequencies, num_dropped, 1)\n\n # 10.) Calculate curve metrics\n fold_mean_curve = np.mean(all_drop_curves, axis=0)\n mean_curve = np.mean(fold_mean_curve, axis=0)\n # std_curve = np.std(all_drop_curves, axis=0, ddof=1) # ddof parameter is set to 1 to return the sample std\n std_curve = scipy.stats.sem(fold_mean_curve, axis=0)\n\n return mean_curve, std_curve",
"def addOperators(self):\n # Visibilities -------------------------------------\n # fk\n fkvis_node = node.createReverseNode(self.blend_att)\n\n for shp in self.fk0_ctl.getShapes():\n pm.connectAttr(fkvis_node + \".outputX\", shp.attr(\"visibility\"))\n for shp in self.fk0_roll_ctl.getShapes():\n pm.connectAttr(fkvis_node + \".outputX\", shp.attr(\"visibility\"))\n for shp in self.fk1_ctl.getShapes():\n pm.connectAttr(fkvis_node + \".outputX\", shp.attr(\"visibility\"))\n for shp in self.fk1_roll_ctl.getShapes():\n pm.connectAttr(fkvis_node + \".outputX\", shp.attr(\"visibility\"))\n\n fkvis2_node = node.createReverseNode(self.blend2_att)\n for shp in self.fk2_ctl.getShapes():\n pm.connectAttr(fkvis2_node + \".outputX\", shp.attr(\"visibility\"))\n\n # ik\n for shp in self.upv_ctl.getShapes():\n pm.connectAttr(self.blend_att, shp.attr(\"visibility\"))\n for shp in self.ikcns_ctl.getShapes():\n pm.connectAttr(self.blend_att, shp.attr(\"visibility\"))\n for shp in self.ik_ctl.getShapes():\n pm.connectAttr(self.blend_att, shp.attr(\"visibility\"))\n for shp in self.line_ref.getShapes():\n pm.connectAttr(self.blend_att, shp.attr(\"visibility\"))\n\n # jnt ctl\n for ctl in (self.div_ctls):\n for shp in ctl.getShapes():\n pm.connectAttr(self.jntctl_vis_att, shp.attr(\"visibility\"))\n\n # Controls ROT order -----------------------------------\n attribute.setRotOrder(self.ik_ctl, \"XZY\")\n\n # IK Solver -----------------------------------------\n out = [self.bone0, self.bone1, self.ctrn_loc, self.eff_npo]\n\n o_node = applyop.gear_ikfk2bone_op(out,\n self.root,\n self.ik_ref,\n self.upv_ctl,\n self.fk0_mtx,\n self.fk1_mtx,\n self.fk2_mtx,\n self.length0,\n self.length1,\n self.negate)\n\n pm.connectAttr(self.blend_att, o_node + \".blend\")\n if self.negate:\n mulVal = -1\n else:\n mulVal = 1\n node.createMulNode(self.roll_att, mulVal, o_node + \".roll\")\n pm.connectAttr(self.scale_att, o_node + \".scaleA\")\n pm.connectAttr(self.scale_att, o_node + \".scaleB\")\n pm.connectAttr(self.maxstretch_att, o_node + \".maxstretch\")\n pm.connectAttr(self.slide_att, o_node + \".slide\")\n pm.connectAttr(self.softness_att, o_node + \".softness\")\n pm.connectAttr(self.reverse_att, o_node + \".reverse\")\n # update issue on effector scale interpolation, disconnect\n # for stability\n pm.disconnectAttr(self.eff_npo.scale)\n\n # auto upvector -------------------------------------\n # leg aim\n o_node = applyop.aimCns(self.upv_auv,\n self.ik_ctl,\n axis=\"-yz\",\n wupType=1,\n wupVector=[0, 1, 0],\n wupObject=self.upv2_auv,\n maintainOffset=False)\n\n # foot aim\n o_node = applyop.aimCns(self.upv1_auv,\n self.root,\n axis=\"yz\",\n wupType=4,\n wupVector=[0, 1, 0],\n wupObject=self.root,\n maintainOffset=False)\n\n # auto upvector connection\n o_node = applyop.gear_mulmatrix_op(\n self.upv_auv.attr(\"worldMatrix\"),\n self.upv_mtx.attr(\"parentInverseMatrix\"))\n\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(o_node + \".output\", dm_node + \".inputMatrix\")\n pb_node = pm.createNode(\"pairBlend\")\n pb_node.attr(\"rotInterpolation\").set(1)\n pm.connectAttr(dm_node + \".outputTranslate\", pb_node + \".inTranslate2\")\n pm.connectAttr(dm_node + \".outputRotate\", pb_node + \".inRotate2\")\n pm.connectAttr(pb_node + \".outRotate\", self.upv_mtx.attr(\"rotate\"))\n pm.connectAttr(pb_node + \".outTranslate\",\n self.upv_mtx.attr(\"translate\"))\n pm.connectAttr(self.auv_att, pb_node + \".weight\")\n\n # fk0 mtx parent constraint\n o_node = applyop.gear_mulmatrix_op(\n 
self.fk0_roll_ctl.attr(\"worldMatrix\"),\n self.fk0_mtx.attr(\"parentInverseMatrix\"))\n\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(o_node + \".output\", dm_node + \".inputMatrix\")\n pm.connectAttr(dm_node + \".outputTranslate\",\n self.fk0_mtx.attr(\"translate\"))\n\n pm.connectAttr(dm_node + \".outputRotate\", self.fk0_mtx.attr(\"rotate\"))\n # fk1 loc to fk1 ref parent constraint\n o_node = applyop.gear_mulmatrix_op(\n self.fk1_ref.attr(\"worldMatrix\"),\n self.fk1_loc.attr(\"parentInverseMatrix\"))\n\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(o_node + \".output\", dm_node + \".inputMatrix\")\n pm.connectAttr(dm_node + \".outputTranslate\",\n self.fk1_loc.attr(\"translate\"))\n\n pm.connectAttr(dm_node + \".outputRotate\", self.fk1_loc.attr(\"rotate\"))\n # fk1 mtx orient cns to fk1 roll\n pm.connectAttr(self.fk1_roll_ctl.attr(\"rotate\"),\n self.fk1_mtx.attr(\"rotate\"))\n\n # fk2_loc position constraint to effector------------------------\n o_node = applyop.gear_mulmatrix_op(\n self.eff_npo.attr(\"worldMatrix\"),\n self.fk2_loc.attr(\"parentInverseMatrix\"))\n\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(o_node + \".output\", dm_node + \".inputMatrix\")\n pm.connectAttr(dm_node + \".outputTranslate\",\n self.fk2_loc.attr(\"translate\"))\n # fk2_loc rotation constraint to bone1 ------------------------\n\n o_node = applyop.gear_mulmatrix_op(\n self.bone1.attr(\"worldMatrix\"),\n self.fk2_loc.attr(\"parentInverseMatrix\"))\n\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(o_node + \".output\", dm_node + \".inputMatrix\")\n pm.connectAttr(dm_node + \".outputRotate\", self.fk2_loc.attr(\"rotate\"))\n\n # foot ikfk blending from fk ref to ik ref (serious bugfix)----\n o_node = applyop.gear_mulmatrix_op(\n self.fk_ref.attr(\"worldMatrix\"),\n self.eff_loc.attr(\"parentInverseMatrix\"))\n\n dm_node = pm.createNode(\"decomposeMatrix\")\n pb_node = pm.createNode(\"pairBlend\")\n pb_node.attr(\"rotInterpolation\").set(1)\n pm.connectAttr(o_node + \".output\", dm_node + \".inputMatrix\")\n pm.connectAttr(dm_node + \".outputRotate\", pb_node + \".inRotate1\")\n pm.connectAttr(self.blend2_att, pb_node + \".weight\")\n pm.connectAttr(pb_node + \".outRotate\", self.eff_loc.attr(\"rotate\"))\n\n o_node = applyop.gear_mulmatrix_op(\n self.ik_ref.attr(\"worldMatrix\"),\n self.eff_loc.attr(\"parentInverseMatrix\"))\n\n dm_node1 = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(o_node + \".output\", dm_node1 + \".inputMatrix\")\n pm.connectAttr(dm_node1 + \".outputRotate\", pb_node + \".inRotate2\")\n # use blendcolors to blend scale\n bc_node = pm.createNode(\"blendColors\")\n pm.connectAttr(self.blend_att, bc_node + \".blender\")\n pm.connectAttr(dm_node + \".outputScale\", bc_node + \".color2\")\n pm.connectAttr(dm_node1 + \".outputScale\", bc_node + \".color1\")\n pm.connectAttr(bc_node + \".output\", self.eff_loc.attr(\"scale\"))\n\n # Twist references ---------------------------------\n pm.connectAttr(self.mid_ctl.attr(\"translate\"),\n self.tws1_npo.attr(\"translate\"))\n pm.connectAttr(self.mid_ctl.attr(\"rotate\"),\n self.tws1_npo.attr(\"rotate\"))\n pm.connectAttr(self.mid_ctl.attr(\"scale\"),\n self.tws1_npo.attr(\"scale\"))\n\n o_node = applyop.gear_mulmatrix_op(\n self.eff_loc.attr(\"worldMatrix\"),\n self.tws3_npo.attr(\"parentInverseMatrix\"))\n\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(o_node + \".output\", dm_node + \".inputMatrix\")\n\n pm.connectAttr(dm_node + \".outputTranslate\",\n 
self.tws3_npo.attr(\"translate\"))\n\n o_node = applyop.gear_mulmatrix_op(\n self.bone1.attr(\"worldMatrix\"),\n self.tws3_npo.attr(\"parentInverseMatrix\"))\n\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(o_node + \".output\", dm_node + \".inputMatrix\")\n pm.connectAttr(dm_node + \".outputRotate\", self.tws3_npo.attr(\"rotate\"))\n\n o_node = applyop.gear_mulmatrix_op(\n self.tws_ref.attr(\"worldMatrix\"),\n self.tws3_rot.attr(\"parentInverseMatrix\"))\n\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(o_node + \".output\", dm_node + \".inputMatrix\")\n pm.connectAttr(dm_node + \".outputRotate\", self.tws3_rot.attr(\"rotate\"))\n\n # knee thickness connection\n if self.negate:\n o_node = node.createMulNode(\n [self.knee_thickness_att, self.knee_thickness_att],\n [0.5, -0.5, 0],\n [self.tws1_loc + \".translateX\", self.tws2_loc + \".translateX\"])\n else:\n o_node = node.createMulNode(\n [self.knee_thickness_att, self.knee_thickness_att],\n [-0.5, 0.5, 0],\n [self.tws1_loc + \".translateX\", self.tws2_loc + \".translateX\"])\n\n # connect both tws1 and tws2 (mid tws)\n self.tws0_rot.setAttr(\"sx\", .001)\n self.tws3_rot.setAttr(\"sx\", .001)\n\n add_node = node.createAddNode(self.roundness0_att, .001)\n pm.connectAttr(add_node + \".output\", self.tws1_rot.attr(\"sx\"))\n\n add_node = node.createAddNode(self.roundness1_att, .001)\n pm.connectAttr(add_node + \".output\", self.tws2_rot.attr(\"sx\"))\n\n # Roll Shoulder--use aimconstraint withour uovwctor to solve the\n # stable twist\n\n if self.negate:\n o_node = applyop.aimCns(self.tws0_loc,\n self.mid_ctl,\n axis=\"-xy\",\n wupType=4,\n wupVector=[0, 1, 0],\n wupObject=self.tws0_npo,\n maintainOffset=False)\n else:\n o_node = applyop.aimCns(self.tws0_loc,\n self.mid_ctl,\n axis=\"xy\",\n wupType=4,\n wupVector=[0, 1, 0],\n wupObject=self.tws0_npo,\n maintainOffset=False)\n\n # Volume -------------------------------------------\n distA_node = node.createDistNode(self.tws0_loc, self.tws1_npo)\n distB_node = node.createDistNode(self.tws1_npo, self.tws3_loc)\n add_node = node.createAddNode(distA_node + \".distance\",\n distB_node + \".distance\")\n div_node = node.createDivNode(add_node + \".output\",\n self.root.attr(\"sx\"))\n\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(self.root.attr(\"worldMatrix\"), dm_node + \".inputMatrix\")\n\n div_node2 = node.createDivNode(div_node + \".outputX\",\n dm_node + \".outputScaleX\")\n self.volDriver_att = div_node2 + \".outputX\"\n\n # Divisions ----------------------------------------\n # div mid constraint to mid ctl\n o_node = applyop.gear_mulmatrix_op(\n self.mid_ctl.attr(\"worldMatrix\"),\n self.div_mid.attr(\"parentInverseMatrix\"))\n\n dm_node = pm.createNode(\"decomposeMatrix\")\n pm.connectAttr(o_node + \".output\", dm_node + \".inputMatrix\")\n\n pm.connectAttr(dm_node + \".outputTranslate\",\n self.div_mid.attr(\"translate\"))\n\n pm.connectAttr(dm_node + \".outputRotate\",\n self.div_mid.attr(\"rotate\"))\n\n # at 0 or 1 the division will follow exactly the rotation of the\n # controler.. 
and we wont have this nice tangent + roll\n scl_1_perc = []\n scl_2_perc = []\n\n for i, div_cnsUp in enumerate(self.div_cnsUp):\n\n if i < (self.settings[\"div0\"] + 1):\n perc = i / (self.settings[\"div0\"] + 1.0)\n elif i < (self.settings[\"div0\"] + 2):\n perc = .95\n\n perc = max(.001, min(.99, perc))\n\n # Roll\n if self.negate:\n o_node = applyop.gear_rollsplinekine_op(\n div_cnsUp, [self.tws1_rot, self.tws0_rot], 1 - perc, 20)\n\n else:\n o_node = applyop.gear_rollsplinekine_op(\n div_cnsUp, [self.tws0_rot, self.tws1_rot], perc, 20)\n pm.connectAttr(self.resample_att, o_node + \".resample\")\n pm.connectAttr(self.absolute_att, o_node + \".absolute\")\n\n scl_1_perc.append(perc / 2)\n scl_2_perc.append(perc)\n scl_1_perc.append(0.5)\n scl_2_perc.append(1)\n for i, div_cnsDn in enumerate(self.div_cnsDn):\n\n if i == (0):\n perc = .05\n elif i < (self.settings[\"div1\"] + 1):\n perc = i / (self.settings[\"div1\"] + 1.0)\n elif i < (self.settings[\"div1\"] + 2):\n perc = .95\n\n perc = max(.001, min(.990, perc))\n\n # Roll\n if self.negate:\n o_node = applyop.gear_rollsplinekine_op(\n div_cnsDn, [self.tws3_rot, self.tws2_rot], 1 - perc, 20)\n\n else:\n o_node = applyop.gear_rollsplinekine_op(\n div_cnsDn, [self.tws2_rot, self.tws3_rot], perc, 20)\n pm.connectAttr(self.resample_att, o_node + \".resample\")\n pm.connectAttr(self.absolute_att, o_node + \".absolute\")\n\n scl_1_perc.append(perc / 2 + 0.5)\n scl_2_perc.append(1 - perc)\n # Squash n Stretch\n for i, div_cns in enumerate(self.div_cns):\n o_node = applyop.gear_squashstretch2_op(\n div_cns, None, pm.getAttr(self.volDriver_att), \"x\")\n pm.connectAttr(self.volume_att, o_node + \".blend\")\n pm.connectAttr(self.volDriver_att, o_node + \".driver\")\n pm.connectAttr(self.st_att[i], o_node + \".stretch\")\n pm.connectAttr(self.sq_att[i], o_node + \".squash\")\n # get the first mult_node after sq op\n mult_node = pm.listHistory(o_node, future=True)[1]\n # linear blend effector scale\n bc_node = pm.createNode(\"blendColors\")\n bc_node.setAttr(\"color2R\", 1)\n bc_node.setAttr(\"color2G\", 1)\n bc_node.setAttr(\"blender\", scl_1_perc[i])\n pm.connectAttr(self.eff_loc.attr(\"scale\"), bc_node + \".color1\")\n # linear blend mid scale\n bc_node2 = pm.createNode(\"blendColors\")\n bc_node2.setAttr(\"color2R\", 1)\n bc_node2.setAttr(\"color2G\", 1)\n bc_node2.setAttr(\"blender\", scl_2_perc[i])\n pm.connectAttr(self.mid_ctl.attr(\"scale\"), bc_node2 + \".color1\")\n # mid_ctl scale * effector scale\n mult_node2 = pm.createNode(\"multiplyDivide\")\n pm.connectAttr(bc_node2 + \".output\", mult_node2 + \".input1\")\n pm.connectAttr(bc_node + \".output\", mult_node2 + \".input2\")\n # plug to sq scale\n pm.connectAttr(mult_node2 + \".output\", mult_node + \".input2\")\n\n # match IK/FK ref\n pm.connectAttr(self.bone0.attr(\"rotate\"),\n self.match_fk0.attr(\"rotate\"))\n pm.connectAttr(self.bone0.attr(\"translate\"),\n self.match_fk0.attr(\"translate\"))\n pm.connectAttr(self.bone1.attr(\"rotate\"),\n self.match_fk1.attr(\"rotate\"))\n pm.connectAttr(self.bone1.attr(\"translate\"),\n self.match_fk1.attr(\"translate\"))\n\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
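The power_on row above is a convenience wrapper: it calls act_on_droplets with type='power_on' and nothing else. A hedged standalone sketch of the same request is shown below, with placeholder token and tag name.

# Hedged sketch: the power_on convenience call reduces to the generic
# tag-action POST with a {"type": "power_on"} body.
import requests

resp = requests.post(
    "https://api.digitalocean.com/v2/droplets/actions",
    headers={"Authorization": "Bearer your-api-token"},
    params={"tag_name": "frontend"},
    json={"type": "power_on"},
)
resp.raise_for_status()
actions = resp.json()["actions"]  # one in-progress action per tagged droplet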
r""" Enable private networking on all of the droplets to which the tag is applied
|
def enable_private_networking(self):
return self.act_on_droplets(type='enable_private_networking')
|
[
"def enable_ipv6(self):\n return self.act_on_droplets(type='enable_ipv6')",
"def setIptables(dev):\n logging.debugv(\"functions/linux.py->setIptables(dev)\", [dev])\n try:\n runWrapper([locations.IPTABLES, \"-A\", \"OUTPUT\", \"-p\", \"TCP\", \"-m\", \"physdev\", \"--physdev-out\", dev, \"--dport\", \"1194\", \"-j\", \"DROP\"], True)\n except:\n logging.error(\"Setting up loop protection with iptables failed\")",
"def advertise_tunnel_ips(self, tunnel_ips):\n raise NotImplementedError()",
"def assign_private_ip_addresses(NetworkInterfaceId=None, PrivateIpAddresses=None, SecondaryPrivateIpAddressCount=None, AllowReassignment=None):\n pass",
"def enable_ports(self):\n pass",
"def private_network_setup(self):\n key_pair = self.create_keypair()\n security_group = self._create_security_group()\n security_groups = [{'name': security_group['name']}]\n inst1 = self._create_vm(key_pair=key_pair,\n security_groups=security_groups)\n host_name = inst1[\"OS-EXT-SRV-ATTR:hypervisor_hostname\"]\n host_zone = inst1['OS-EXT-AZ:availability_zone']\n av_zone = host_zone + ':' + host_name\n inst2 = self._create_vm(key_pair=key_pair,\n security_groups=security_groups,\n av_zone=av_zone)\n\n host_client, sw_names = self._create_vswitch(host_name, private_sw=True)\n\n ip1 = '22.22.22.2'\n net_mask = '24'\n inst1_nic_args = self._add_nic_to_vm(inst1, sw_names['privateSwitch'],\n host_client)\n linux_client1, inst1_new_nic_name = self._set_vm_ip(\n inst1, key_pair, inst1_nic_args['MAC'], ip1, net_mask)\n ip2 = '22.22.22.3'\n inst2_nic_args = self._add_nic_to_vm(inst2, sw_names['privateSwitch'],\n host_client)\n linux_client2, inst2_new_nic_name = self._set_vm_ip(\n inst2, key_pair, inst2_nic_args['MAC'], ip2, net_mask)\n private_setup = dict()\n private_setup['instances'] = [inst1, inst2]\n private_setup['linux_clients'] = [linux_client1, linux_client2]\n private_setup['new_nics'] = [inst1_new_nic_name, inst2_new_nic_name]\n private_setup['linux_ips'] = [ip1, ip2]\n private_setup['key_pair'] = key_pair\n\n return private_setup",
"def configNetworks(self):\n self.configPublicNet()\n self.configStorageNet()\n self.configManagementNet()",
"def configPublicNet(self):\n networks = self.handler.getNetworks(self.osid)\n for net in networks['networks']:\n if net['name'] == \"public\":\n net[\"ip_ranges\"] = [[\"10.20.1.10\", \"10.20.1.126\"]]\n net['cidr'] = \"10.20.1.0/24\"\n net['gateway'] = \"10.20.1.1\"\n\n # updates the floating ranges\n rng = [[\"10.20.1.130\", \"10.20.1.254\"]]\n networks['networking_parameters']['floating_ranges'] = rng\n self.handler.uploadNetworks(networks, self.osid)",
"def enable_IPV6_grub_level(self):\n for server in self.servers:\n shell = RemoteMachineShellConnection(server)\n shell.execute_command(\"sed -i 's/ipv6.disable=1/ipv6.disable=0/' /etc/default/grub\")\n shell.execute_command(\"grub2-mkconfig -o /boot/grub2/grub.cfg\")\n shell.reboot_node()\n time.sleep(10)\n shell = RemoteMachineShellConnection(server)\n output, error = shell.execute_command(\"ifconfig | grep inet6\")\n if output == []:\n log.info(\"Cant enable IPv6\")\n log.info(\"Output message is {0} and error message is {1}\".format(output, error))\n elif output != []:\n log.info(\"IPv6 Successfully Enabled for {0}\".format(server.ip))\n output, error = shell.execute_command(\"iptables -F\")\n shell.disconnect()",
"def make_network_private(self, network_id):\n if self.version.startswith('1.'):\n return self.update_network_profile(network_id,\n {'visibility': 'PRIVATE'})\n\n return self.set_network_system_properties(network_id,\n {'visibility': 'PRIVATE'})",
"def connect_private():\n return connect(\"private\")",
"async def _allowlist_add(self, ctx: commands.Context, *servers: int):\n async with self.config.allowed() as settings:\n for server in servers:\n if server not in settings:\n settings.append(server)\n return await ctx.tick()",
"def allowInternetConnection(network, bridge):\n\n cmds = []\n cmds.append('ip -4 route add dev {} {} proto static'.format(bridge, network))\n cmds.append(\n 'iptables -A FORWARD -o {} -t filter -m comment --comment \"generated for Distrinet Admin Network\" -j ACCEPT'.format(\n bridge))\n cmds.append(\n 'iptables -A FORWARD -i {} -t filter -m comment --comment \"generated for Distrinet Admin Network\" -j ACCEPT'.format(\n bridge))\n cmds.append(\n 'iptables -A POSTROUTING -t nat -m comment --comment \"generated for Distrinet Admin Network\" -s {} ! -d {} -j MASQUERADE'.format(\n network, network))\n cmds.append('sysctl -w net.ipv4.ip_forward=1')\n return cmds",
"def modify_vpc_attribute(VpcId=None, EnableDnsSupport=None, EnableDnsHostnames=None):\n pass",
"def get_all_droplets(self):\n self.mock_data = \"droplets/all.json\"\n data = self.get_data(\"droplets/\")\n droplets = list()\n for jsoned in data['droplets']:\n droplet = Droplet(**jsoned)\n droplet.token = self.token\n droplet.mocked = self.mocked\n\n for net in droplet.networks['v4']:\n if net['type'] == 'private':\n droplet.private_ip_address = net['ip_address']\n if net['type'] == 'public':\n droplet.ip_address = net['ip_address']\n if droplet.networks['v6']:\n droplet.ip_v6_address = droplet.networks['v6'][0]['ip_address']\n droplets.append(droplet)\n return droplets",
"def enable_network(self):\n if self._is_admin():\n completed = subprocess.run(args=['netsh', 'interface', 'set', 'interface', '\"Wi-Fi\"', 'enable'])\n print(\"Enable Wi-Fi \", completed.returncode)\n completed = subprocess.run(args=['netsh', 'interface', 'set', 'interface', '\"Ethernet\"', 'enable'])\n print(\"Enable Ethernet\", completed.returncode)\n else:\n # Re-run the program with admin rights\n ctypes.windll.shell32.ShellExecuteW(None, \"runas\", 'netsh', ' interface set interface \"Ethernet\" enable', None, 1)\n ctypes.windll.shell32.ShellExecuteW(None, \"runas\", 'netsh', ' interface set interface \"Wi-Fi\" enable', None, 1)",
"def enableDHCPClick():\n os.system(\"mount -o rw,remount /\")\n os.system(\"cp netctl/ethernet-dhcp /etc/netctl/eth0\")\n os.system(\"mount -o ro,remount /\")\n lcdPrint(\"Obtaining IP...\")\n lcd.setCursor(15,0)\n lcd.ToggleBlink()\n os.system(\"ip link set eth0 down\")\n os.system(\"netctl restart eth0\")\n ip = socket.gethostbyname(socket.getfqdn())\n lcd.ToggleBlink()\n lcdPrint(\"Enabled DHCP:\\n\"+ip, 2)",
"def block_portlets(ob, *args, **kw):\n pl_managers = kw['managers']\n blockstatus = kw['blockstatus']\n for pl_managername, pl_manager in pl_managers.items():\n portletManager = getUtility(IPortletManager, name=pl_managername)\n assignable = getMultiAdapter(\n (ob, portletManager, ), ILocalPortletAssignmentManager)\n assignable.setBlacklistStatus(CONTEXT_CATEGORY, blockstatus)",
"def runTest(self):\n try:\n print(\"Lag disable ingress lag member test\")\n \n pkts_num = 10\n begin_port = 2000\n for i in range(0, pkts_num):\n src_port = begin_port + i\n pkt = simple_tcp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[11][1].l3_lag_obj.neighbor_mac,\n ip_dst=self.servers[1][1].ipv4,\n ip_src=self.servers[11][1].ipv4,\n tcp_sport=src_port,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(eth_dst=self.servers[1][1].mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[1][1].ipv4,\n ip_src=self.servers[11][1].ipv4,\n tcp_sport=src_port,\n ip_id=105,\n ip_ttl=63)\n self.dataplane.flush()\n send_packet(self, self.dut.port_obj_list[18].dev_port_index, pkt)\n verify_packet(self, exp_pkt, self.dut.port_obj_list[1].dev_port_index)\n # git disable ingress of lag member: port18\n print(\"disable port18 ingress\")\n status = sai_thrift_set_lag_member_attribute(\n self.client, self.lag_list[0].lag_members[1], ingress_disable=True)\n self.assertEqual(status, SAI_STATUS_SUCCESS)\n\n for i in range(0, pkts_num):\n src_port = begin_port + i\n pkt = simple_tcp_packet(eth_dst=ROUTER_MAC,\n eth_src=self.servers[11][1].l3_lag_obj.neighbor_mac,\n ip_dst=self.servers[1][1].ipv4,\n ip_src=self.servers[11][1].ipv4,\n tcp_sport=src_port,\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(eth_dst=self.servers[1][1].mac,\n eth_src=ROUTER_MAC,\n ip_dst=self.servers[1][1].ipv4,\n ip_src=self.servers[11][1].ipv4,\n tcp_sport=src_port,\n ip_id=105,\n ip_ttl=63)\n self.dataplane.flush()\n send_packet(self, self.dut.port_obj_list[18].dev_port_index, pkt)\n verify_no_packet(self, exp_pkt, self.dut.port_obj_list[1].dev_port_index)\n finally:\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
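As with power_on, the enable_private_networking row above only changes the "type" value sent to the tag-action endpoint. The sketch below makes that explicit; token and tag name remain placeholders.

# Hedged sketch: enable_private_networking sends the same tag-action POST,
# differing from power_on only in the "type" value.
import requests

resp = requests.post(
    "https://api.digitalocean.com/v2/droplets/actions",
    headers={"Authorization": "Bearer your-api-token"},
    params={"tag_name": "frontend"},
    json={"type": "enable_private_networking"},
)
resp.raise_for_status()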
r""" Enable IPv6 networking on all of the droplets to which the tag is applied
|
def enable_ipv6(self):
return self.act_on_droplets(type='enable_ipv6')
|
[
"def enable_IPV6_grub_level(self):\n for server in self.servers:\n shell = RemoteMachineShellConnection(server)\n shell.execute_command(\"sed -i 's/ipv6.disable=1/ipv6.disable=0/' /etc/default/grub\")\n shell.execute_command(\"grub2-mkconfig -o /boot/grub2/grub.cfg\")\n shell.reboot_node()\n time.sleep(10)\n shell = RemoteMachineShellConnection(server)\n output, error = shell.execute_command(\"ifconfig | grep inet6\")\n if output == []:\n log.info(\"Cant enable IPv6\")\n log.info(\"Output message is {0} and error message is {1}\".format(output, error))\n elif output != []:\n log.info(\"IPv6 Successfully Enabled for {0}\".format(server.ip))\n output, error = shell.execute_command(\"iptables -F\")\n shell.disconnect()",
"def add_ipv6(self):\n for host in self.hosts_matrix:\n h = self.net.getNodeByName(host[0])\n h.cmd(f'ip -6 addr flush dev {host[0]}-eth1')\n h.cmd(f'ip -6 addr add dev {host[0]}-eth1 {host[2]}')",
"def disable_IPV6_grub_level(self):\n for server in self.servers:\n shell = RemoteMachineShellConnection(server)\n shell.execute_command(\n '''sed -i 's/ipv6.disable=0 //; s/ipv6.disable=1 //; s/GRUB_CMDLINE_LINUX=\"/GRUB_CMDLINE_LINUX=\"ipv6.disable=1 /' /etc/default/grub''')\n shell.execute_command(\"grub2-mkconfig -o /boot/grub2/grub.cfg\")\n shell.reboot_node()\n time.sleep(10)\n shell = RemoteMachineShellConnection(server)\n output, error = shell.execute_command(\"ifconfig | grep inet6\")\n if output == [] and error == []:\n log.info(\"IPv6 Successfully Disabled for {0}\".format(server.ip))\n else:\n log.info(\"Cant disable IPv6\")\n log.info(\"Output message is {0} and error message is {1}\".format(output, error))\n output, error = shell.execute_command(\"iptables -F\")\n shell.disconnect()",
"def enable_disable_ipv6_iface(self, cmd):\n self._magmad_util.config_ipv6_iface(cmd)",
"def manual_ipv6_infrastructure_allocation(anm):\n\n import netaddr\n g_ipv6 = anm['ipv6']\n log.info('Using specified IPv6 infrastructure allocation')\n\n for node in g_ipv6.l3devices():\n for interface in node.physical_interfaces:\n if not interface['input'].is_bound:\n continue # unbound interface\n ip_address = netaddr.IPAddress(interface['input'\n ].ipv6_address)\n prefixlen = interface['input'].ipv6_prefixlen\n interface.ip_address = ip_address\n interface.prefixlen = prefixlen\n cidr_string = '%s/%s' % (ip_address, prefixlen)\n interface.subnet = netaddr.IPNetwork(cidr_string)\n\n broadcast_domains = [d for d in g_ipv6 if d.broadcast_domain]\n\n # TODO: allow this to work with specified ip_address/subnet as well as ip_address/prefixlen\n\n from netaddr import IPNetwork\n for coll_dom in broadcast_domains:\n connected_interfaces = [edge.dst_int for edge in\n coll_dom.edges()]\n cd_subnets = [IPNetwork('%s/%s' % (i.subnet.network,\n i.prefixlen)) for i in connected_interfaces]\n\n\n if len(cd_subnets) == 0:\n log.warning(\"Collision domain %s is not connected to any nodes\" % coll_dom)\n continue\n\n try:\n assert len(set(cd_subnets)) == 1\n except AssertionError:\n mismatch_subnets = '; '.join('%s: %s/%s' % (i,\n i.subnet.network, i.prefixlen) for i in\n connected_interfaces)\n log.warning('Non matching subnets from collision domain %s: %s'\n % (coll_dom, mismatch_subnets))\n else:\n coll_dom.subnet = cd_subnets[0] # take first entry\n\n # apply to remote interfaces\n\n for edge in coll_dom.edges():\n edge.dst_int.subnet = coll_dom.subnet\n\n # also need to form aggregated IP blocks (used for e.g. routing prefix\n # advertisement)\n # import autonetkit\n # autonetkit.update_http(anm)\n\n infra_blocks = {}\n for (asn, devices) in g_ipv6.groupby('asn').items():\n broadcast_domains = [d for d in devices if d.broadcast_domain]\n subnets = [cd.subnet for cd in broadcast_domains\n if cd.subnet is not None] # only if subnet is set\n infra_blocks[asn] = netaddr.cidr_merge(subnets)\n\n g_ipv6.data.infra_blocks = infra_blocks",
"def streamingbypass_ipv6_set(ipv6_addrs: str):\n return _run_speedify_cmd([\"streamingbypass\", \"ipv6\", \"set\", ipv6_addrs])",
"def streamingbypass_ipv6_add(ipv6_addrs: str):\n return _run_speedify_cmd([\"streamingbypass\", \"ipv6\", \"add\", ipv6_addrs])",
"def assign_ipv6_addresses(NetworkInterfaceId=None, Ipv6Addresses=None, Ipv6AddressCount=None):\n pass",
"def ipv6(self, ipv6: SubUnnumberedTop):\n\n self._ipv6 = ipv6",
"def ensure_ipv6_enabled():\n log.info(\"Ensuring IPv6 is enabled at the kernel level\")\n ensure_ipv6_command = \"/usr/bin/env sysctl net.ipv6.conf.all.disable_ipv6=0\"\n run_command_print_ready(\n ensure_ipv6_command,\n failure_callback=log_failure_factory(\n \"Failed to ensure IPv6 was enabled at the kernel level. Assuming OK. \"\n \"If not, cjdroute will later fail to configure the tunnel.\"\n ),\n shell=True,\n buffered=False\n )",
"def build_ipv6(anm):\n import netaddr\n import autonetkit.plugins.ipv6 as ipv6\n\n # uses the nodes and edges from ipv4\n\n g_ipv6 = anm.add_overlay('ipv6')\n g_ip = anm['ip']\n g_in = anm['input']\n g_ipv6.add_nodes_from(g_ip, retain=['label', 'asn', 'broadcast_domain']) # retain if collision domain or not\n g_ipv6.add_edges_from(g_ip.edges())\n\n #TODO: tidy up naming consitency of secondary_loopback_block and vrf_loopback_block\n (infra_block, loopback_block, secondary_loopback_block) = \\\n extract_ipv6_blocks(anm)\n\n block_message = \"IPv6 allocations: Infrastructure: %s, Loopback: %s\" % (infra_block, loopback_block)\n if any(i for n in g_ip.nodes() for i in\n n.loopback_interfaces if not i.is_loopback_zero):\n block_message += \" Secondary Loopbacks: %s\" % secondary_loopback_block\n log.info(block_message)\n\n # TODO: replace this with direct allocation to interfaces in ip alloc plugin\n allocated = sorted([n for n in g_ip if n['input'].loopback_v6])\n if len(allocated) == len(g_ip.l3devices()):\n # all allocated\n #TODO: need to infer subnetomanual_ipv6_loopback_allocation\n log.info(\"Using user-specified IPv6 loopback addresses\")\n manual_ipv6_loopback_allocation(anm)\n else:\n if len(allocated):\n log.warning(\"Using automatic IPv6 loopback allocation. IPv6 loopback addresses specified on nodes %s will be ignored.\" % allocated)\n else:\n log.info(\"Automatically assigning IPv6 loopback addresses\")\n\n ipv6.allocate_loopbacks(g_ipv6, loopback_block)\n\n l3_devices = [d for d in g_in if d.device_type in ('router', 'server')]\n\n manual_alloc_devices = set()\n for device in l3_devices:\n physical_interfaces = list(device.physical_interfaces)\n allocated = list(interface.ipv6_address for interface in physical_interfaces if interface.is_bound)\n if all(interface.ipv6_address for interface in\n physical_interfaces if interface.is_bound):\n manual_alloc_devices.add(device) # add as a manual allocated device\n\n if manual_alloc_devices == set(l3_devices):\n log.info(\"Using user-specified IPv6 infrastructure addresses\")\n manual_alloc_ipv6_infrastructure = True\n else:\n manual_alloc_ipv6_infrastructure = False\n # warn if any set\n allocated = []\n unallocated = []\n for node in l3_devices:\n allocated += sorted([i for i in node.physical_interfaces if i.is_bound and i.ipv6_address])\n unallocated += sorted([i for i in node.physical_interfaces if i.is_bound and not i.ipv6_address])\n\n #TODO: what if IP is set but not a prefix?\n if len(allocated):\n #TODO: if set is > 50% of nodes then list those that are NOT set\n log.warning(\"Using automatic IPv6 interface allocation. 
IPv6 interface addresses specified on interfaces %s will be ignored.\" % allocated)\n else:\n log.info(\"Automatically assigning IPv6 infrastructure addresses\")\n\n if manual_alloc_ipv6_infrastructure:\n manual_ipv6_infrastructure_allocation(anm)\n else:\n ipv6.allocate_infra(g_ipv6, infra_block)\n #TODO: see if this is still needed or if can allocate direct from the ipv6 allocation plugin\n for node in g_ipv6.l3devices():\n for interface in node:\n edges = list(interface.edges())\n if len(edges):\n edge = edges[0] # first (only) edge\n interface.ip_address = edge.ip # TODO: make this consistent\n interface.subnet = edge.dst.subnet # from collision domain\n\n ipv6.allocate_vrf_loopbacks(g_ipv6, secondary_loopback_block)\n\n for node in g_ipv6.routers():\n #TODO: test this code\n node.loopback_zero.ip_address = node.loopback\n node.loopback_zero.subnet = netaddr.IPNetwork(\"%s/32\" % node.loopback)\n for interface in node.loopback_interfaces:\n if not interface.is_loopback_zero:\n interface.ip_address = interface.loopback #TODO: fix this inconsistency elsewhere",
"def run_ipv6_multi_host(self, default_as=None, per_node_as=None):\n pass",
"def enable_private_networking(self):\n return self.act_on_droplets(type='enable_private_networking')",
"def streaming_ipv6_set(ipv6_addrs: str):\n return _run_speedify_cmd([\"streaming\", \"ipv6\", \"set\", ipv6_addrs])",
"def loopback_ip6(self):\n ret = self._get_attr(\"loopbackIp6\")\n return ret",
"def vpnglobal_intranetip6_bindings(self) :\n\t\ttry :\n\t\t\treturn self._vpnglobal_intranetip6_binding\n\t\texcept Exception as e:\n\t\t\traise e",
"def create_ipv6(self):\n int1 = Interface('eth1/1')\n int2 = Interface('eth1/2')\n pc1 = PortChannel('211')\n ipv6 = IPV6()\n ipv6.add_interface_address(int1, '2004:0DB8::1/10', link_local='FE83::1')\n ipv6.add_interface_address(int2, '2104:0DB8::1/11')\n ipv6.add_interface_address(int2, '2002:0DB8::1/12')\n ipv6.add_interface_address(pc1, '2022:0DB8::1/13')\n return ipv6",
"def enable_static_ip_config_v6(self, ipv6_address, ipv6_network_mask_prefix_length):\n if not isinstance(ipv6_address, basestring):\n raise TypeError(\"ipv6_address can only be an instance of type basestring\")\n if not isinstance(ipv6_network_mask_prefix_length, baseinteger):\n raise TypeError(\"ipv6_network_mask_prefix_length can only be an instance of type baseinteger\")\n self._call(\"enableStaticIPConfigV6\",\n in_p=[ipv6_address, ipv6_network_mask_prefix_length])",
"def _supports_ipv6_tethering(self, dut):\n # Currently only Verizon support IPv6 tethering\n carrier_supports_tethering = [\"vzw\"]\n operator = get_operator_name(self.log, dut)\n return operator in carrier_supports_tethering"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Enable backups on all of the droplets to which the tag is applied
|
def enable_backups(self):
return self.act_on_droplets(type='enable_backups')
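At the HTTP level, such a tag-wide action presumably boils down to a single bulk-action request. The sketch below is a hedged guess based on the general shape of the DigitalOcean v2 API; the endpoint, query parameter and auth handling are assumptions, so verify them against the current API documentation before relying on this.

# Hedged sketch only -- endpoint, parameter names and auth handling are assumptions.
import requests

def enable_backups_for_tag(token, tag_name):
    resp = requests.post(
        'https://api.digitalocean.com/v2/droplets/actions',
        params={'tag_name': tag_name},
        headers={'Authorization': 'Bearer ' + token},
        json={'type': 'enable_backups'},
    )
    resp.raise_for_status()
    return resp.json()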
|
[
"def disable_backups(self):\n return self.act_on_droplets(type='disable_backups')",
"def mongo_backup(event, context):\n\n\n ec2 = ec2connect()\n\n #get list of instances with `Backup` tag\n inst = list_instances(ec2)\n backup_volume(ec2,inst)\n remove_old_snapshots(ec2)\n print \"Backup finished successfully!\"\n return True",
"def delete_all_droplets(self):\n self.doapi_manager.request('/v2/droplets', method='DELETE',\n params={\"tag_name\": self.name})",
"def auto_backup_enabled(self, auto_backup_enabled):\n self._auto_backup_enabled = auto_backup_enabled",
"def allow_backup(self):\n return self._root.find(\"application\").get(\n \"allowBackup\", \"false\") == \"true\"",
"def onBackupVolume(self, widget):\n self.useBackup = self.chk1.get_active()\n self.updateScreens()",
"def backupZenPacks(self):\n #can only copy zenpacks backups if ZEO is backed up\n if not self.options.noZopeDb and os.path.isdir(zenPath('ZenPacks')):\n # Copy /ZenPacks to backup dir\n self.log.info('Backing up ZenPacks.')\n etcTar = tarfile.open(os.path.join(self.tempDir, 'ZenPacks.tar'), 'w')\n etcTar.dereference = True\n etcTar.add(zenPath('ZenPacks'), 'ZenPacks')\n etcTar.close()\n self.log.info(\"Backup of ZenPacks completed.\")\n # add /bin dir if backing up zenpacks\n # Copy /bin to backup dir \n self.log.info('Backing up bin dir.')\n etcTar = tarfile.open(os.path.join(self.tempDir, 'bin.tar'), 'w')\n etcTar.dereference = True\n etcTar.add(zenPath('bin'), 'bin')\n etcTar.close()\n self.log.info(\"Backup of bin completed.\")",
"def backup_volume(ec2,instances):\n\n for instance in instances:\n retention = get_retention(instance)\n if not is_master(instance['PrivateIpAddress']):\n #make snapshot only on primary\n continue\n\n for dev in instance['BlockDeviceMappings']:\n if dev.get('Ebs', None) is None:\n # skip non-EBS volumes\n continue\n\n retention = get_retention(instance)\n now = datetime.today()\n delete_date_days = (now + timedelta(days=retention['days'])).strftime('%Y-%m-%d')\n delete_date_weeks = (now + timedelta(weeks=retention['weeks'])).strftime('%Y-%m-%d')\n delete_date_months = (now + relativedelta(months=retention['months'])).strftime('%Y-%m-%d')\n desc_date = now.strftime('%Y-%m-%d.%H:%M:%S')\n\n\n # all mongo disks are sdf\n if dev['DeviceName'] == '/dev/sdf':\n vol_id = dev['Ebs']['VolumeId']\n\n # Make sure that only one snapshot is taken, whether daily, weekly or monthly.\n if now.strftime('%d') == '01':\n print \"Creating snapshot of %s volume that will be retain for %d months\" % (vol_id, retention['months'])\n snap = make_snapshot(ec2,vol_id, retention['months'], \"MongoMonthlyBackupSnapshot-\"+desc_date)\n tag_snapshot(ec2, snap['SnapshotId'], delete_date_months)\n elif now.strftime('%a') == 'Sun':\n print \"Creating snapshot of %s volume that will be retain for %d weeks\" % (vol_id, retention['weeks'])\n snap = make_snapshot(ec2,vol_id, retention['weeks'], \"MongoWeeklyBackupSnapshot-\"+desc_date)\n tag_snapshot(ec2, snap['SnapshotId'], delete_date_weeks)\n else:\n print \"Creating snapshot of %s volume that will be retain for %d days\" % (vol_id, retention['days'])\n snap = make_snapshot(ec2,vol_id, retention['days'], \"MongoDailyBackupSnapshot-\"+desc_date)\n tag_snapshot(ec2, snap['SnapshotId'], delete_date_days)\n\n return True",
"def post_stop_backup():\n Logger.info('Backing up Falcon directories before upgrade...')\n directoryMappings = _get_directory_mappings()\n\n absolute_backup_dir = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)\n if not os.path.isdir(absolute_backup_dir):\n os.makedirs(absolute_backup_dir)\n\n for directory in directoryMappings:\n if not os.path.isdir(directory):\n raise Fail(\"Unable to backup missing directory {0}\".format(directory))\n\n archive = os.path.join(absolute_backup_dir, directoryMappings[directory])\n Logger.info('Compressing {0} to {1}'.format(directory, archive))\n\n if os.path.exists(archive):\n os.remove(archive)\n\n # backup the directory, following symlinks instead of including them\n tar_archive.archive_directory_dereference(archive, directory)",
"def runBackup(self):\n with tarfile.open(self.BFILE, 'w:bz2') as tar:\n tar.add(self.DIR, arcname=os.path.basename(self.DIR))",
"def droplet_actions(ctx, disable_backups, reboot, power_cycle, shutdown, power_off,\n\t\t\t\t\tpower_on, password_reset, ipv6, private_networking, upgrade,\n\t\t\t\t\trestore, backup_id, resize, size, rebuild, image, rename, name,\n\t\t\t\t\tchange_kernel, kernel, snapshot, sname, token, tablefmt, proxy):\n\n\tif (not ctx.params['disable_backups'] and not ctx.params['reboot'] \n\t\tand not ctx.params['power_cycle'] and not ctx.params['shutdown'] \n\t\tand not ctx.params['power_off'] and not ctx.params['power_on'] \n\t\tand not ctx.params['password_reset'] and not ctx.params['ipv6'] \n\t\tand not ctx.params['private_networking'] and not ctx.params['upgrade'] \n\t\tand not ctx.params['restore'] and not ctx.params['backup_id'] \n\t\tand not ctx.params['resize'] and not ctx.params['size'] \n\t\tand not ctx.params['rebuild'] and not ctx.params['image'] \n\t\tand not ctx.params['rename'] and not ctx.params['name'] \n\t\tand not ctx.params['change_kernel'] and not ctx.params['kernel'] \n\t\tand not ctx.params['snapshot'] and not ctx.params['sname']):\n\t\treturn click.echo(ctx.get_help())\n\n\toption_list = ['disable_backups', 'reboot', 'power_cycle', 'shutdown', 'power_off',\n\t 'power_on', 'password_reset', 'ipv6', 'private_networking', 'upgrade', 'restore', \n\t 'resize', 'rebuild', 'rename', 'change_kernel', 'snapshot']\n\n\tif validate(ctx.params, option_list):\n\t\tif disable_backups:\n\t\t\tparams = {'type':'disable_backups'}\n\t\t\trecord = 'droplet disable backups'\n\t\t\treturn run_command(disable_backups, params, record, token, proxy, tablefmt)\n\n\t\tif reboot:\n\t\t\tparams = {'type':'reboot'}\n\t\t\trecord = 'droplet reboot'\n\t\t\treturn run_command(reboot, params, record, token, proxy, tablefmt)\n\t\n\t\tif power_cycle:\n\t\t\tparams = {'type':'power_cycle'}\n\t\t\trecord = 'droplet power_cycle'\n\t\t\treturn run_command(power_cycle, params, record, token, proxy, tablefmt)\n\n\t\tif shutdown:\n\t\t\tparams = {'type':'shutdown'}\n\t\t\trecord = 'droplet shutdown'\n\t\t\treturn run_command(shutdown, params, record, token, proxy, tablefmt)\n\n\t\tif power_off:\n\t\t\tparams = {'type':'power_off'}\n\t\t\trecord = 'droplet power_off'\n\t\t\treturn run_command(power_off, params, record, token, proxy, tablefmt)\n\n\t\tif power_on:\n\t\t\tparams = {'type':'power_on'}\n\t\t\trecord = 'droplet power_on'\n\t\t\treturn run_command(power_on, params, record, token, proxy, tablefmt)\n\n\t\tif password_reset:\n\t\t\tparams = {'type':'password_reset'}\n\t\t\trecord = 'droplet password_reset'\n\t\t\treturn run_command(password_reset, params, record, token, proxy, tablefmt)\n\n\t\tif ipv6:\n\t\t\tparams = {'type':'enable_ipv6'}\n\t\t\trecord = 'droplet ipv6'\n\t\t\treturn run_command(ipv6, params, record, token, proxy, tablefmt)\n\n\t\tif private_networking:\n\t\t\tparams = {'type':'enable_private_networking'}\n\t\t\trecord = 'droplet private_networking'\n\t\t\treturn run_command(private_networking, params, record, token, proxy, tablefmt)\n\n\t\tif upgrade:\n\t\t\tparams = {'type':'upgrade'}\n\t\t\trecord = 'droplet upgrade'\n\t\t\treturn run_command(upgrade, params, record, token, proxy, tablefmt)\n\n\t\tif restore:\n\t\t\tparams = {'type':'restore', 'image':backup_id}\n\t\t\trecord = 'droplet restore'\n\t\t\treturn run_command(restore, params, record, token, proxy, tablefmt)\n\n\t\tif resize:\n\t\t\tparams = {'type':'resize', 'size':size}\n\t\t\trecord = 'droplet resize'\n\t\t\treturn run_command(resize, params, record, token, proxy, tablefmt)\n\n\t\tif rebuild:\n\t\t\tparams = {'type':'rebuild', 
'image':image}\n\t\t\trecord = 'droplet rebuild'\n\t\t\treturn run_command(rebuild, params, record, token, proxy, tablefmt)\n\n\t\tif rename:\n\t\t\tparams = {'type':'rename', 'name':name}\n\t\t\trecord = 'droplet rename'\n\t\t\treturn run_command(rename, params, record, token, proxy, tablefmt)\n\n\t\tif change_kernel:\n\t\t\tparams = {'type':'change_kernel', 'kernel':kernel}\n\t\t\trecord = 'droplet change_kernel'\n\t\t\treturn run_command(change_kernel, params, record, token, proxy, tablefmt)\n\n\t\tif snapshot:\n\t\t\tparams = {'type':'snapshot', 'name':sname}\n\t\t\trecord = 'droplet snapshot'\n\t\t\treturn run_command(snapshot, params, record, token, proxy, tablefmt)",
"def refresh(self):\r\n self._backupsets = self._get_backupsets()",
"def ListDropletBackups(self, id):\n Backups = r.get(self.APIURL + \"/droplets/\" + id + \"/Backups\",\n auth=self.BasicAuth)\n DropletBackups = Backups.json()\n return DropletBackups",
"def start(self):\n logger.info(\"Starting backup run for %s backups\", self.backup_type)\n instance_list = self.instances_for_backup()\n\n for instance in instance_list:\n instance_id = unicodedata.normalize('NFKD', instance.id).encode('ascii','ignore')\n\n try:\n instance_name = instance.tags['Name']\n except:\n instance_name=None\n\n logger.info(\"Instance-ID [%s] - Instance Name [%s]\" % (instance_id, instance_name))\n\n self.create_ami(instance_id, instance_name) # we create the ami for each instance",
"def test_backup_bin_list():\n\tbackup_and_restore(\n\t\tput_data,\n\t\tNone,\n\t\tlambda context: check_data(context,\n\t\t\tTrue, True, True,\n\t\t\tTrue, False,\n\t\t\tTrue, True, True,\n\t\t\tTrue),\n\t\tbackup_opts=[\"--bin-list\", BIN_NAME_1],\n\t\trestore_opts=[\"--wait\"],\n\t\trestore_delay=1\n\t)",
"def auto_backup_enabled(self):\n return self._auto_backup_enabled",
"def pg_backups(ctx, app_name, database_id):\n gigalixir_database.backups(ctx.obj['host'], app_name, database_id)",
"def enable_all(self) -> None:\n self.permanently_disabled.clear()",
"def volume_backup_supported(request):\n # TODO(lcheng) Cinder does not expose the information if cinder\n # backup is configured yet. This is a workaround until that\n # capability is available.\n # https://bugs.launchpad.net/cinder/+bug/1334856\n return utils.get_dict_config('OPENSTACK_CINDER_FEATURES', 'enable_backup')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Disable backups on all of the droplets to which the tag is applied
|
def disable_backups(self):
return self.act_on_droplets(type='disable_backups')
|
[
"def enable_backups(self):\n return self.act_on_droplets(type='enable_backups')",
"def delete_all_droplets(self):\n self.doapi_manager.request('/v2/droplets', method='DELETE',\n params={\"tag_name\": self.name})",
"def disable_all(self) -> None:\n raise NotImplementedError()",
"def disable_backup(self):\r\n request_json = self._request_json_('Backup', False)\r\n\r\n flag, response = self._cvpysdk_object.make_request('POST', self._AGENT, request_json)\r\n\r\n if flag:\r\n if response.json() and 'response' in response.json():\r\n error_code = response.json()['response'][0]['errorCode']\r\n\r\n if error_code == 0:\r\n return\r\n elif 'errorString' in response.json()['response'][0]:\r\n error_message = response.json()['response'][0]['errorString']\r\n\r\n o_str = 'Failed to disable Backup\\nError: \"{0}\"'.format(error_message)\r\n raise SDKException('Agent', '102', o_str)\r\n else:\r\n raise SDKException('Response', '102')\r\n else:\r\n raise SDKException('Response', '101', self._update_response_(response.text))",
"def allow_backup(self):\n return self._root.find(\"application\").get(\n \"allowBackup\", \"false\") == \"true\"",
"def dropBlockers():\n\n ga = localAvatar.getParentObj()\n blockers = ga.findAllMatches('**/blocker_*')\n blockers.stash()",
"def disable(self):\n for volume in self.volumes:\n try:\n self._renderer.RemoveVolume(volume)\n except:\n pass # TBD: any error logging.",
"def instances_for_backup(self):\n instance = None\n excluded_instances= []\n for excluded in EXCLUDED_INSTANCES:\n try:\n instance = self.instance_id_by_name(excluded)\n except NameError as error:\n logger.error(error)\n exit(2)\n excluded_instances.append(instance)\n\n reservations = conn.get_all_instances()\n all_instances = [i for r in reservations for i in r.instances]\n \n for exc in excluded_instances:\n for instance in all_instances:\n if instance.id == exc.id:\n all_instances.remove(instance)\n return all_instances",
"def delete_all_busy_box_deployments(self):\n for depl in WorkloadUi().deployment_list:\n self.delete_busybox(depl.name, force=True)",
"def kdump_disable(db):\n kdump_table = db.cfgdb.get_table(\"KDUMP\")\n check_kdump_table_existence(kdump_table)\n\n db.cfgdb.mod_entry(\"KDUMP\", \"config\", {\"enabled\": \"false\"})\n click.echo(\"KDUMP configuration changes may require a reboot to take effect.\")\n click.echo(\"Save SONiC configuration using 'config save' before issuing the reboot command.\")",
"def test_disable_tags(self):\n tags_url = reverse(\"settings-tags\")\n tags_disable_url = reverse(\"tags-disable\")\n slice_size = 5\n\n with schema_context(self.schema_name):\n client = rest_framework.test.APIClient()\n ids_to_disable = [str(obj.uuid) for obj in self.enabled_objs[:slice_size]]\n disable_response = client.put(tags_disable_url, {\"ids\": ids_to_disable}, format=\"json\", **self.headers)\n get_response = client.get(tags_url, {\"filter[enabled]\": False, \"limit\": 100}, **self.headers)\n\n disabled_uuids = {item[\"uuid\"] for item in get_response.data[\"data\"]}\n self.assertEqual(disable_response.status_code, status.HTTP_204_NO_CONTENT, disable_response.data)\n self.assertEqual(get_response.data[\"meta\"][\"count\"], len(self.enabled_objs) + slice_size)\n self.assertTrue(set(ids_to_disable).issubset(disabled_uuids))",
"def fetch_all_droplets(self):\n return self.doapi_manager.fetch_all_droplets(tag_name=self.name)",
"def disable_all_buttons():\n all_buttons = [B1, B2, B3, B4, B5, B6, B7, B8, B9]\n for button in all_buttons: \t\t\t\n button.config(state = DISABLED)\t\t\t\t#disables all the buttons",
"def CleanupDropbox(self):\n \n \n #loop over directories and check if anyone has something older than one week\n temp = 'LC'\n dirlist = []\n for k in range(self.low_range,self.hi_range):\n if (k<10):\n temp1 = temp+'0'+str(k)+'_'\n else:\n temp1 = temp+str(k)+'_'\n \n dirlist.append(temp1)\n \n for k in range(len(dirlist)):\n temp = '/LCWA/'+dirlist[k] # file on dropbox\n #print('now checking ',temp)\n\n \n MyDir = self.PA.dbx.files_list_folder(temp) #do NOT use recursive, since that does not work for shared folders\n \n for item in MyDir.entries:\n #print(\"item\",item,' ',MyDir.entries)\n if isinstance(item, dropbox.files.FileMetadata):\n now = datetime.datetime.now() #determine how old a file is\n #print('hallelujah',temp,' ',item.name, ' ',item.server_modified)\n diff = now - item.server_modified #take the difference\n #print('difference in days',diff.days)\n #if diff.days == 1 or diff.days == 2 or diff.days == 3: # changed to or so that we backup the last 2 days\n if diff.days >= 0: # changed to or so that we backup the last 2 days\n print ('name = ' , item.name)\n print ('path = ', item.path_display )\n print ('fileID = ' , item.id)\n print ('date = ', item.server_modified)\n # here we backup and delete the files\n backupfile = self.backupdir+item.name\n #print(\"backing up file \",item.path_display, ' to',backupfile)\n try:\n a = self.PA.dbx.files_download_to_file(backupfile,item.path_display)\n #print(\"return type \",a)\n except:\n print(\"problems with backing up \",item.path_display )\n if(diff.days > 4 ): # changed to -1 so that we backup every day\n \n #print(\"deleting file \",item.path_display )\n self.PA.dbx.files_delete(item.path_display)",
"def droplet_actions(ctx, disable_backups, reboot, power_cycle, shutdown, power_off,\n\t\t\t\t\tpower_on, password_reset, ipv6, private_networking, upgrade,\n\t\t\t\t\trestore, backup_id, resize, size, rebuild, image, rename, name,\n\t\t\t\t\tchange_kernel, kernel, snapshot, sname, token, tablefmt, proxy):\n\n\tif (not ctx.params['disable_backups'] and not ctx.params['reboot'] \n\t\tand not ctx.params['power_cycle'] and not ctx.params['shutdown'] \n\t\tand not ctx.params['power_off'] and not ctx.params['power_on'] \n\t\tand not ctx.params['password_reset'] and not ctx.params['ipv6'] \n\t\tand not ctx.params['private_networking'] and not ctx.params['upgrade'] \n\t\tand not ctx.params['restore'] and not ctx.params['backup_id'] \n\t\tand not ctx.params['resize'] and not ctx.params['size'] \n\t\tand not ctx.params['rebuild'] and not ctx.params['image'] \n\t\tand not ctx.params['rename'] and not ctx.params['name'] \n\t\tand not ctx.params['change_kernel'] and not ctx.params['kernel'] \n\t\tand not ctx.params['snapshot'] and not ctx.params['sname']):\n\t\treturn click.echo(ctx.get_help())\n\n\toption_list = ['disable_backups', 'reboot', 'power_cycle', 'shutdown', 'power_off',\n\t 'power_on', 'password_reset', 'ipv6', 'private_networking', 'upgrade', 'restore', \n\t 'resize', 'rebuild', 'rename', 'change_kernel', 'snapshot']\n\n\tif validate(ctx.params, option_list):\n\t\tif disable_backups:\n\t\t\tparams = {'type':'disable_backups'}\n\t\t\trecord = 'droplet disable backups'\n\t\t\treturn run_command(disable_backups, params, record, token, proxy, tablefmt)\n\n\t\tif reboot:\n\t\t\tparams = {'type':'reboot'}\n\t\t\trecord = 'droplet reboot'\n\t\t\treturn run_command(reboot, params, record, token, proxy, tablefmt)\n\t\n\t\tif power_cycle:\n\t\t\tparams = {'type':'power_cycle'}\n\t\t\trecord = 'droplet power_cycle'\n\t\t\treturn run_command(power_cycle, params, record, token, proxy, tablefmt)\n\n\t\tif shutdown:\n\t\t\tparams = {'type':'shutdown'}\n\t\t\trecord = 'droplet shutdown'\n\t\t\treturn run_command(shutdown, params, record, token, proxy, tablefmt)\n\n\t\tif power_off:\n\t\t\tparams = {'type':'power_off'}\n\t\t\trecord = 'droplet power_off'\n\t\t\treturn run_command(power_off, params, record, token, proxy, tablefmt)\n\n\t\tif power_on:\n\t\t\tparams = {'type':'power_on'}\n\t\t\trecord = 'droplet power_on'\n\t\t\treturn run_command(power_on, params, record, token, proxy, tablefmt)\n\n\t\tif password_reset:\n\t\t\tparams = {'type':'password_reset'}\n\t\t\trecord = 'droplet password_reset'\n\t\t\treturn run_command(password_reset, params, record, token, proxy, tablefmt)\n\n\t\tif ipv6:\n\t\t\tparams = {'type':'enable_ipv6'}\n\t\t\trecord = 'droplet ipv6'\n\t\t\treturn run_command(ipv6, params, record, token, proxy, tablefmt)\n\n\t\tif private_networking:\n\t\t\tparams = {'type':'enable_private_networking'}\n\t\t\trecord = 'droplet private_networking'\n\t\t\treturn run_command(private_networking, params, record, token, proxy, tablefmt)\n\n\t\tif upgrade:\n\t\t\tparams = {'type':'upgrade'}\n\t\t\trecord = 'droplet upgrade'\n\t\t\treturn run_command(upgrade, params, record, token, proxy, tablefmt)\n\n\t\tif restore:\n\t\t\tparams = {'type':'restore', 'image':backup_id}\n\t\t\trecord = 'droplet restore'\n\t\t\treturn run_command(restore, params, record, token, proxy, tablefmt)\n\n\t\tif resize:\n\t\t\tparams = {'type':'resize', 'size':size}\n\t\t\trecord = 'droplet resize'\n\t\t\treturn run_command(resize, params, record, token, proxy, tablefmt)\n\n\t\tif rebuild:\n\t\t\tparams = {'type':'rebuild', 
'image':image}\n\t\t\trecord = 'droplet rebuild'\n\t\t\treturn run_command(rebuild, params, record, token, proxy, tablefmt)\n\n\t\tif rename:\n\t\t\tparams = {'type':'rename', 'name':name}\n\t\t\trecord = 'droplet rename'\n\t\t\treturn run_command(rename, params, record, token, proxy, tablefmt)\n\n\t\tif change_kernel:\n\t\t\tparams = {'type':'change_kernel', 'kernel':kernel}\n\t\t\trecord = 'droplet change_kernel'\n\t\t\treturn run_command(change_kernel, params, record, token, proxy, tablefmt)\n\n\t\tif snapshot:\n\t\t\tparams = {'type':'snapshot', 'name':sname}\n\t\t\trecord = 'droplet snapshot'\n\t\t\treturn run_command(snapshot, params, record, token, proxy, tablefmt)",
"def mongo_backup(event, context):\n\n\n ec2 = ec2connect()\n\n #get list of instances with `Backup` tag\n inst = list_instances(ec2)\n backup_volume(ec2,inst)\n remove_old_snapshots(ec2)\n print \"Backup finished successfully!\"\n return True",
"def enable_all(self) -> None:\n self.permanently_disabled.clear()",
"def disable_restore(self):\r\n request_json = self._request_json_('Restore', False)\r\n\r\n flag, response = self._cvpysdk_object.make_request('POST', self._AGENT, request_json)\r\n\r\n if flag:\r\n if response.json() and 'response' in response.json():\r\n error_code = response.json()['response'][0]['errorCode']\r\n\r\n if error_code == 0:\r\n return\r\n elif 'errorString' in response.json()['response'][0]:\r\n error_message = response.json()['response'][0]['errorString']\r\n o_str = 'Failed to disable Backup\\nError: \"{0}\"'.format(error_message)\r\n raise SDKException('Agent', '102', o_str)\r\n else:\r\n raise SDKException('Response', '102')\r\n else:\r\n raise SDKException('Response', '101', self._update_response_(response.text))",
"def backup_volume(ec2,instances):\n\n for instance in instances:\n retention = get_retention(instance)\n if not is_master(instance['PrivateIpAddress']):\n #make snapshot only on primary\n continue\n\n for dev in instance['BlockDeviceMappings']:\n if dev.get('Ebs', None) is None:\n # skip non-EBS volumes\n continue\n\n retention = get_retention(instance)\n now = datetime.today()\n delete_date_days = (now + timedelta(days=retention['days'])).strftime('%Y-%m-%d')\n delete_date_weeks = (now + timedelta(weeks=retention['weeks'])).strftime('%Y-%m-%d')\n delete_date_months = (now + relativedelta(months=retention['months'])).strftime('%Y-%m-%d')\n desc_date = now.strftime('%Y-%m-%d.%H:%M:%S')\n\n\n # all mongo disks are sdf\n if dev['DeviceName'] == '/dev/sdf':\n vol_id = dev['Ebs']['VolumeId']\n\n # Make sure that only one snapshot is taken, whether daily, weekly or monthly.\n if now.strftime('%d') == '01':\n print \"Creating snapshot of %s volume that will be retain for %d months\" % (vol_id, retention['months'])\n snap = make_snapshot(ec2,vol_id, retention['months'], \"MongoMonthlyBackupSnapshot-\"+desc_date)\n tag_snapshot(ec2, snap['SnapshotId'], delete_date_months)\n elif now.strftime('%a') == 'Sun':\n print \"Creating snapshot of %s volume that will be retain for %d weeks\" % (vol_id, retention['weeks'])\n snap = make_snapshot(ec2,vol_id, retention['weeks'], \"MongoWeeklyBackupSnapshot-\"+desc_date)\n tag_snapshot(ec2, snap['SnapshotId'], delete_date_weeks)\n else:\n print \"Creating snapshot of %s volume that will be retain for %d days\" % (vol_id, retention['days'])\n snap = make_snapshot(ec2,vol_id, retention['days'], \"MongoDailyBackupSnapshot-\"+desc_date)\n tag_snapshot(ec2, snap['SnapshotId'], delete_date_days)\n\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns true if client_id and client_secret are set in the file client_secrets
|
def has_client_secrets(client_secrets):
with open(client_secrets) as json_data:
secrets = json.load(json_data)['installed']
client_id = secrets['client_id']
client_secret = secrets['client_secret']
return not client_id.startswith('<GET') and not client_secret.startswith('<GET')
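A short self-contained check of the helper. The file layout mirrors the 'installed'-style client_secrets structure the function reads; the placeholder values are illustrative and deliberately still carry the '<GET ...>' markers, so the call returns False.

# Self-contained example; the secrets values are illustrative placeholders.
import json, os, tempfile

secrets = {'installed': {'client_id': '<GET CLIENT ID>',
                         'client_secret': '<GET CLIENT SECRET>'}}
path = os.path.join(tempfile.mkdtemp(), 'client_secrets.json')
with open(path, 'w') as fh:
    json.dump(secrets, fh)

print(has_client_secrets(path))   # False until real credentials replace the placeholders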
|
[
"def Check():\n try:\n credentials = json.loads(os.environ.get(Varname()))\n except json.decoder.JSONDecodeError as jderr:\n logging.warning(f\"CMCREDENTIALS not found in Check. {datetime.now()}.\")\n DefaultCredentials()\n return False\n\n if credentials[\"refreshtoken\"] != \"\":\n return True\n return False",
"def is_aws_cred_set():\n keys = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']\n return all(len(os.environ.get(k, '')) > 0 for k in keys)",
"def is_config_secret(k: str) -> bool:\n return k in _SECRET_KEYS.get()",
"def calc_env_var(client_secrets_path):\n with open(client_secrets_path, \"rb\") as fobj:\n client_secrets = json.load(fobj)\n with AuthFileManager() as afm:\n afm.set_data(\"client_secrets.json\", client_secrets)\n open_client(afm)\n return afm.to_env_var()",
"def _validate_clientsecrets(clientsecrets_dict):\n _INVALID_FILE_FORMAT_MSG = (\n 'Invalid file format. See '\n 'https://developers.google.com/api-client-library/'\n 'python/guide/aaa_client_secrets')\n\n if clientsecrets_dict is None:\n raise InvalidClientSecretsError(_INVALID_FILE_FORMAT_MSG)\n try:\n (client_type, client_info), = clientsecrets_dict.items()\n except (ValueError, AttributeError):\n raise InvalidClientSecretsError(\n _INVALID_FILE_FORMAT_MSG + ' '\n 'Expected a JSON object with a single property for a \"web\" or '\n '\"installed\" application')\n\n if client_type not in VALID_CLIENT:\n raise InvalidClientSecretsError(\n 'Unknown client type: {0}.'.format(client_type))\n\n for prop_name in VALID_CLIENT[client_type]['required']:\n if prop_name not in client_info:\n raise InvalidClientSecretsError(\n 'Missing property \"{0}\" in a client type of \"{1}\".'.format(\n prop_name, client_type))\n for prop_name in VALID_CLIENT[client_type]['string']:\n if client_info[prop_name].startswith('[['):\n raise InvalidClientSecretsError(\n 'Property \"{0}\" is not configured.'.format(prop_name))\n return client_type, client_info",
"def validate_client_id_and_secret(self, http_client, url_scheme_and_hostname):\n pass",
"def bq_token_file_valid():\n token_path = bq_token_file_path()\n if token_path == '':\n raise ValueError(\n \"Please set GOOGLE_APPLICATION_CREDENTIALS to the path to the access token.\"\n )\n elif bq_token_file_path_exists(token_path) is False:\n raise ValueError(\n \"Token file could not be found. Please reset your GOOGLE_APPLICATION_CREDENTIALS env var. Current:\",\n token_path\n )\n else:\n return True",
"def use_only_authd(self):\n try:\n with open(common.api_config_path) as f:\n data = f.readlines()\n\n use_only_authd = list(filter(lambda x: x.strip().startswith('config.use_only_authd'), data))\n\n return loads(use_only_authd[0][:-2].strip().split(' = ')[1]) if use_only_authd != [] else False\n except IOError:\n return False",
"def is_credentials_available() -> bool:\n return all([v is not None for v in CONFIG.values()])",
"def has_secret_file(filename):\n secret_file = filename + secret_extension\n if os.path.isfile(secret_file):\n return True\n\n return False",
"def _credfile_exists(self):\n return os.path.exists(self.credfile_loc)",
"def check_dropbox():\n cfg = utils.get_project_configuration()\n if 'dropbox_app_key' not in cfg:\n logging.error(\"'dropbox_app_key' was not found.\")\n return False\n elif 'dropbox_app_secret' not in cfg:\n logging.error(\"'dropbox_app_key' was not found.\")\n return False\n else:\n return True",
"def secret_passed(self, digestor):\n if not self.a1:\n try:\n self.get_a1(digestor=digestor)\n except ValueError:\n return False\n \n assert self.a1 is not None\n \n client_secret = digestor.get_client_secret()\n server_secret = digestor.get_server_secret(a1=self.a1)\n return client_secret == server_secret",
"def get_client_credentials(self):\n if self.client_id == None or self.client_secret == None:\n raise Exception(\"You must set client_id and client_secret.\")\n else: \n client_creds = f\"{self.client_id}:{self.client_secret}\"\n client_creds_b64 = base64.b64encode(client_creds.encode())\n return client_creds_b64.decode()",
"def get_aws_client_id_and_secret(prod, test=False):\n if test:\n cur = get_db().cursor()\n stmt = 'SELECT api_key FROM credentials WHERE provider=?'\n client_id = cur.execute(stmt, ('aws_client_id', )).fetchone()[0]\n client_secret = cur.execute(\n stmt, ('aws_client_secret', )).fetchone()[0]\n return client_id, client_secret\n if prod:\n return (os.environ.get('AWS_CLIENT_ID', None),\n os.environ.get('AWS_CLIENT_SECRET', None))\n\n cur = get_db().cursor()\n stmt = \"SELECT api_key FROM credentials WHERE provider=%s\"\n cur.execute(stmt, ('aws_client_id', ))\n client_id = cur.fetchone()[0]\n cur.execute(stmt, ('aws_client_secret', ))\n client_secret = cur.fetchone()[0]\n return client_id, client_secret",
"def has_credentials (self):\n return True",
"def is_ipa_client_configured():\n return all(\n (\n os.path.isfile(\"/etc/ipa/default.conf\"),\n os.path.isfile(\"/var/lib/ipa-client/sysrestore/sysrestore.state\"),\n )\n )",
"def is_config(filename):\n filename = os.path.basename(filename)\n if filename in [\"server_config\"]:\n return True\n return False",
"def is_oauth(self):\n return self.app_id is not None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a directory of temporary files, each holding its file_id, for virtualization of drive contents
|
def create_temp_files(temp_dir, files):
for drive_type, drive_files in files.items():
folder_path = os.path.join(temp_dir, drive_type + '/')
os.mkdir(folder_path)
for file_ in drive_files:
# replace reserved characters in title to assure valid filename
filename = KIOutils.strip_invalid_characters(file_['title'])
filename = '{}.{}'.format(os.path.join(temp_dir, folder_path, filename), drive_type)
with open(filename, 'w') as f:
f.write(file_['id'])
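A hedged usage sketch. KIOutils is external to the snippet above, so a trivial stand-in for strip_invalid_characters is stubbed in (the real helper may sanitize differently); the drive types, titles and ids are made up for illustration.

# KIOutils stand-in and sample data are assumptions for illustration only.
import os, re, tempfile, types

KIOutils = types.SimpleNamespace(
    strip_invalid_characters=lambda title: re.sub(r'[\\/:*?"<>|]', '_', title))

files = {
    'gdoc': [{'title': 'Design: notes?', 'id': 'abc123'}],
    'gsheet': [{'title': 'Budget 2020', 'id': 'def456'}],
}
temp_dir = tempfile.mkdtemp()
create_temp_files(temp_dir, files)
print(sorted(os.listdir(os.path.join(temp_dir, 'gdoc'))))   # ['Design_ notes_.gdoc']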
|
[
"def _make_temp_dir(self):\n temp_dir = Path(self.file_path.parent, self.file_path.name + '__tmp')\n temp_dir.mkdir(exist_ok=True, parents=True)\n self.temp_dir = temp_dir",
"def make_temp_dir():\n return tempfile.mkdtemp()",
"def create_temp_folder(self):\n return tempfile.mkdtemp()",
"def create_tmp_dir(self):\n return mkdtemp(dir=os.getcwd())",
"def create_temp_dir(self):\n path = tempfile.mkdtemp()\n if not isinstance(path, bytes):\n path = path.encode(\"utf8\")\n self.temp_dir = path",
"def _create_temp_dir():\n _temp_dir = os.getcwd() + \"/temp\"\n if not os.path.isdir(_temp_dir):\n os.makedirs(_temp_dir)\n return _temp_dir",
"def _create_working_folder(self):\n if self.working_folder is None:\n self.working_folder = tempfile.mkdtemp(\n dir=self.temp_dir\n )",
"def _create_tempdir(self):\n tempname = self.runner.kim_code_name+\"_running\"+self.result_code+\"__\"+self.runner.kim_code_id\n self.runner_temp = kimobjects.kim_obj(self.runner.kim_code, search=False, subdir=tempname)\n shutil.copytree(self.runner.path, self.runner_temp.path)",
"def _getTempSharedDir(self):\n tempDir = self.tempFilesDir\n for i in xrange(self.levels):\n tempDir = os.path.join(tempDir, random.choice(self.validDirs))\n if not os.path.exists(tempDir):\n try:\n os.mkdir(tempDir)\n except os.error:\n if not os.path.exists(tempDir): #In the case that a collision occurs and\n #it is created while we wait then we ignore\n raise\n return tempDir",
"def createTemp(self):\n try:\n self.tempDir = tempfile.mkdtemp('RAD')\n except IOError as e:\n self.raise_on_error(\"Create a temp folder\", e)\n\n createInTemp = lambda fileName: os.path.join(self.tempDir, fileName)\n self.inputRad = createInTemp('input.rad')\n self.octree = createInTemp('octree.oct')\n self.testRoom = createInTemp('testRoom.rad')\n\n with open(self.testRoom, 'w') as testRoom:\n testRoom.write(contextScene)",
"def create_file_in_persistent_dir(self, template_name, template):\n if not os.path.exists(self.persistent_dir):\n print('Creating docker volume dir')\n os.makedirs(self.persistent_dir)\n\n print('Using storage_dir of: %s' % self.storage_dir)\n\n # ensure only relative path here, replace all leading '/' with nothing\n if self.storage_dir.startswith('/'):\n self.storage_dir = re.sub('^/+', '', self.storage_dir)\n\n if len(self.storage_dir) == 0:\n self.storage_dir = 'docker_container_action'\n\n instance_path = os.path.join(self.persistent_dir, self.storage_dir)\n print('Using instance_dir of: %s' % instance_path)\n\n if not os.path.exists(instance_path):\n os.makedirs(instance_path)\n\n try:\n # if a template was specified then write it out into the working directory\n cleaned_template = template.replace('\\r\\n', '\\n')\n path = os.path.join(instance_path, template_name)\n with open(path, 'w+') as f:\n f.write(cleaned_template)\n\n except OSError as oe:\n print('Could not write file into docker container persistent dir')\n return",
"def create_testfile(remove_testdir, tmpdir, request):\n filename = getattr(request, 'param', generate_random_string())\n p = tmpdir.join(filename)\n p.write(generate_random_string(random.randint(1, 100)))\n\n yield tmpdir, filename",
"def create_tmp():\r\n\r\n return tempfile.mkstemp()[1]",
"def get_new_tmpdir(tmpdir_writingFiles):\n\n # define the already existing tmpdirs\n already_existing_files = set(os.listdir(tmpdir_writingFiles))\n\n # get the ID of the folder\n tmpdir_name = id_generator(15, chars=string.ascii_uppercase, already_existing_ids=already_existing_files)\n\n return \"%s/%s\"%(tmpdir_writingFiles, tmpdir_name)",
"def prepare_tmp_files(tmp_dir, gp, target_genome_fasta):\n tmp_tgt = os.path.join(tmp_dir, \"tmp_cgp\")\n tmp_ref = os.path.join(tmp_dir, \"tmp_ref\")\n tmp_psl = os.path.join(tmp_dir, \"tmp_psl\")\n cds = gp.get_cds(target_genome_fasta)\n with open(tmp_tgt, \"w\") as outf:\n outf.write(\">{}\\n{}\\n\".format(gp.name, cds))\n return tmp_tgt, tmp_ref, tmp_psl",
"def mount_ephemeral_storage():\r\n sudo_as(\"if [ ! $(mount | grep -i 'mnt') ];then mkfs.ext3 /dev/sdf && mount /dev/sdf /mnt; mkdir -p /mnt/%(application)s/var; chmod -R a+rw /mnt/%(application)s; fi\" % env)\r\n run('mkdir -p %(script_working_path)s' % env)",
"def make_tmp_file():\n with open(TMP_FILE, 'w') as f:\n pass\n yield\n os.remove(TMP_FILE)",
"def mkdtemp(suffix=\"\", prefix=template, dir=None):\r\n\r\n if dir is None:\r\n dir = gettempdir()\r\n\r\n names = _get_candidate_names()\r\n\r\n for seq in xrange(TMP_MAX):\r\n name = names.next()\r\n file = _os.path.join(dir, prefix + name + suffix)\r\n try:\r\n _os.mkdir(file, 0700)\r\n return file\r\n except OSError, e:\r\n if e.errno == _errno.EEXIST:\r\n continue # try again\r\n raise\r\n\r\n raise IOError, (_errno.EEXIST, \"No usable temporary directory name found\")",
"def _write_new_temp(self, d):\n if d:\n file_utils.safe_create_dir(d)\n ext = MIMETYPES.guess_extension(self.content_type() or '')\n # Exceptions because mimetypes is apparently REALLY OLD\n if ext in {'.jpe', '.jfif'}:\n ext = '.jpg'\n fd, fp = tempfile.mkstemp(\n suffix=ext or '',\n dir=d\n )\n os.close(fd)\n with open(fp, 'wb') as f:\n f.write(self.get_bytes())\n return fp"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a list of n numbers in logx scale from x1 to x2.
|
def logx_grid(x1, x2, n):
    # the shape is a*x^n: if n=0 => a=x1, if n=N => x1*x^N=x2
if x1 > 0:
xx = (x2 / x1)**(1.0 / n)
return [x1] + [x1 * xx**(i+1) for i in range(1, n)]
else:
xx = x2**(1.0/n)
return [x1] + [xx**(i+1) - 1 for i in range(1, n)]
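A small worked example of the x1 > 0 branch: four points from 1 to 1000 share the fixed ratio xx = (x2/x1)**(1.0/n) per step, and the last entry lands on x2 (up to floating point). Note that, as written, the first gap spans xx**2, so it is wider than the later ones.

# Worked example of the positive-x1 branch.
grid = logx_grid(1.0, 1000.0, 4)
print(['%.3g' % g for g in grid])   # ['1', '31.6', '178', '1e+03']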
|
[
"def logrange(first=1.0, times=10, multiplier=0.1):\n return [first * multiplier**i for i in range(times)]",
"def _log2(n):\n while len(_logtable) <= n:\n _logtable.extend([1 + _logtable[-1]] * len(_logtable))\n return _logtable[n]",
"def logrange(start: float, stop: float, num=50, base=10) -> np.ndarray:\n log = np.log\n if start == 0:\n start = 0.000000000001\n return np.logspace(log(start, base), log(stop, base), num, base=base)",
"def logspace(low, high, fineness):\n # Note: lambdas don't play nice with pickling (and thence ipyparallel)\n # def pow10(x):\n # return 10**x\n return AxisStops(np.logspace(np.log10(low), np.log10(high),\n num=abs(int((np.log10(high)-np.log10(low))\n * 5*fineness/np.log10(10))),\n #5*fineness per decade\n base=10,\n dtype=sinn.config.floatX),\n 'log',\n 'x -> np.log10(x)', 'x -> 10**x')",
"def add_logs(log1, log2):\n return log1 + math.log(1 + pow(2, (log2 - log1)), 2)",
"def log2(x):\n pass",
"def log2(x):\n ln2 = torch.log(torch.FloatTensor([2.0]))\n if x.is_cuda:\n ln2 = ln2\n return torch.log(x) / ln2",
"def logs(x, eps=1e-30):\n return sympy.log(x + eps)",
"def loglinspace(Ntot, Nlog, Nlin):\n raise NotImplementedError(\"Doesn't do as advertised....docstring is out of sync.\")\n end = np.log10(Ntot)\n log_step = end/Nlog\n # e.g. np.log10(1e8)/4 -> step=2, ends=(2, 4, 6, 8)\n log_ends = np.arange(log_step, end+1, log_step)\n save_time_arrs = [np.linspace(0, np.power(10, e), Nlin) for e in log_ends]\n t_save = reduce(np.union1d, save_time_arrs)\n save_i = [where(t, t_save) for t in save_time_arrs]\n return t_save, save_i",
"def log(self, base: float = math.e) -> Series:",
"def Lin2Log(x, ratio=1.0, basis=1e3):\n import math\n level = abs(log10(x/basis))*ratio\n return level",
"def gen_vars_loguniform(nsets, min_, max_, n, round_to_int=False):\r\n periods = np.exp(np.random.uniform(low=np.log(min_), high=np.log(max_),\r\n size=(nsets, n)))\r\n if round_to_int:\r\n return np.rint(periods).tolist()\r\n else:\r\n return periods.tolist()",
"def logLinear(self, x, xo, yo):\n logX = np.log(x)\n logXo = np.log(xo)\n logYo = np.log(yo)\n return np.exp(np.interp(logX, logXo, logYo))",
"def log2(s: Series):\n return np.log2(s)",
"def scale_log(self) -> None:\n # Problem are probabilities below 1\n self.values = [log(1.01 + x, 2) for x in self.values]",
"def sample_log(min_value, max_value, n):\n a = np.log10(min_value)\n b = np.log10(max_value)\n\n log_r = np.random.random(size=n) # between [0, 1)\n log_r = (b - a) * log_r + a # scale up\n\n r = np.power(10, log_r)\n\n return r",
"def log_prob(list):\n p=0\n for i in list:\n p += math.log10(i)\n return math.exp(p)",
"def lnprob22Nlog(x):\n global resid_f,alphaab,gmat,meta,cpn\n\n log10Ared,alphared = x[2::2],x[3::2]\n\n if x[0] > 0 and (alpha_min < x[1] < alpha_max) and N.all((alphared > alphared_min) & (alphared < alphared_max)):\n return (logL2(resid_f,alphaab,times_f,gmat,meta,cpn,A=x[0],alpha=x[1],Ared=10**log10Ared,alphared=alphared)\n - math.log(alpha_max - alpha_min)\n - len(alphared) * math.log(alphared_max - alphared_min))\n else:\n return -N.inf",
"def base_two_log(n):\n if n < 2:\n return 0\n else:\n return 1+base_two_log(n//2)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the vertex (x, y) of a parabola of the form ax^2 + bx + c.
|
def _vertex_parabola(a, b, c):
return -b/(2*a), - (b**2 - 4*a*c) / (4*a)
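Completing the square gives the vertex at x = -b/(2a) and y = c - b**2/(4a); the second return value, -(b**2 - 4ac)/(4a), is exactly that y value. Quick numeric check:

# y = x**2 - 4*x + 3 reaches its minimum at the vertex (2, -1).
print(_vertex_parabola(1, -4, 3))   # (2.0, -1.0)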
|
[
"def _parabola_3points(x1, y1, x2, y2, x3, y3):\n delta = (x1 - x2)*(x1 - x3)*(x2 - x3)\n a = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / delta\n b = (x3**2 * (y1 - y2) + x2**2 * (y3 - y1) + x1**2 * (y2 - y3)) / delta\n c = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / delta\n return a, b, c",
"def quadratic_vertex(x, y):\n q = _quadratic(x, y)\n return -q.c[1] / (2 * q.c[0])",
"def _parabola(data):\n y = np.asarray(data)\n x = np.linspace(-1, 1, len(y))\n # use only the endpoints; when trying to use the mean of the last few values, the\n # fit is usually not as good since beads expects the endpoints to be 0; may allow\n # setting mean_width as a parameter later\n A = y.min()\n y1 = y[0] - A\n y2 = y[-1] - A\n # mean_width = 5\n # y1 = y[:mean_width].mean() - A\n # y2 = y[-mean_width:].mean() - A\n\n # if parabola == p(x) = A + B * x + C * x**2, find coefficients such that\n # p(x[0]==x1) = y[0] - min(y)==y1, p(x[-1]==x2) = y[-1] - min(y)==y2, and p(x_middle==0) = 0:\n # A = min(y)\n # C = (x1 * y2 - x2 * y1) / (x1 * x2**2 - x2 * x1**2)\n # B = (y1 - C) / x1\n # then replace x1 with -1, x2 with 1, and simplify\n C = (y2 + y1) / 2\n B = C - y1\n\n return A + B * x + C * x**2",
"def vertex_coordinates(self): \n v1,v2 = self.__vertices\n return [v1.coordinate(), v2.coordinate()]",
"def xy_polytope(self):\n V = lcon_to_vert(self.A, self.b)\n if V is not None and V.size > 0:\n hull = ConvexHull(V[:2,:].T)\n return V[:2,hull.vertices]\n else:\n # print \"Infeasible polytope\"\n return np.zeros((2,0))",
"def to_parabola(self) -> None:\n if not self.is_parabola:\n raise ValueError(f\"The underlying geometry is not a parabola: {self.type}\")\n raise NotImplementedError",
"def barycentric_coords(p, simplex):\n return np.linalg.solve(pad1(simplex).T, pad1(p).T).T",
"def graph_point(self, x, y):\n \n return (self.graph_x(x), self.graph_y(y))",
"def triangle(A,B,a,b,c):\n \n x = (a**2-b**2-c**2)/(-2*c)\n \n y1 =np.sqrt(b**2-x**2) #np.sqrt(a**2-(x-b)**2) #\n y2 = -y1\n \n # Transformation into actual coordinate system\n \n AB = B-A\n ABperp = np.array([-AB[1], AB[0], np.zeros(len(A[2]))])\n C1 = A + x/c * AB + y2/c * ABperp\n C2 = A + x/c * AB - y2/c * ABperp\n \n return C1, C2",
"def getPoint(self) -> \"SbVec3f const &\":\n return _coin.SoPrimitiveVertex_getPoint(self)",
"def reconstruct_vertex(x1,x2,x3,x4,\n y1,y2,y3,y4): \n x_part1 = (x1*x3*(y2 - y4) + x1*x4*(y3 - y2) + x2*x3*(y4 - y1) + x2*x4*(y1 - y3))\n x_part2 = ((x1 - x2)*(y3 - y4) + x3*(y2 - y1) + x4*(y1 - y2)) \n x = x_part1 / x_part2\n y_part1 = (x1*y2*y3 - x1*y2*y4 + x2*y1*(y4 - y3) - x3*y1*y4 + x3*y2*y4 + x4*y3*(y1 - y2))\n y_part2 = ((x1 - x2)*(y3 - y4) + x3*(y2 - y1) + x4*(y1 - y2))\n y = y_part1 / y_part2\n\n return np.array(x) , np.array(y)",
"def create_point(point_x, point_y, vertex_x, edge_y, scaling_factor=2):\n # TODO: Geometric mean??\n return (point_x + vertex_x) / scaling_factor, (point_y + edge_y) / scaling_factor",
"def parabola(list1, list2, list3, plo=False, pri=False, **kwargs):\n import matplotlib.pyplot as mp\n import numpy as np\n [x1, y1] = list1\n [x2, y2] = list2\n [x3, y3] = list3\n D = x1**2 * (x2 - x3) + x2**2 * (x3 - x1) + x3**2 * (x1 - x2)\n C = np.array([x2 - x3, x3**2 - x2**2, x2 * x3 * (x2 - x3),\n x3 - x1, x1**2 - x3**2, x3 * x1 * (x3 - x1),\n x1 - x2, x2**2 - x1**2, x1 * x2 * (x1 - x2)]\n ).reshape(3, 3)\n yarr = np.array([y1, y2, y3])\n I = C.T / D\n [a, b, c] = np.dot(I, yarr)\n label = str(a) + 'x^2 + ' + str(b) + 'x + ' + str(c)\n if plo:\n x = np.linspace(x1, x3, 101)\n y = a * x**2 + b * x + c\n mp.plot(x, y, label=label, **kwargs)\n if pri:\n print label\n return a, b, c",
"def get_xy_velocity(posa,posb,v):\n rest = posa-posb\n m = magnitude(rest)\n vx = (v * rest[0])/m\n vy = (v * rest[1])/m\n if m < scout_near:\n return vx * scout_velocity_decay*m/scout_near,vy * scout_velocity_decay*m/scout_near\n return vx,vy",
"def parabolaconstant(self):\n if self.g1 and self.g2 and self.curvecheck:\n return ((self.g2-self.g1)/(2*self.curvecheck()))",
"def calculate_vertices(self):\n scale = self.__scale\n array = [\n (0 * scale, -1 / math.sqrt(3) * scale, 0 * scale),\n (0.5 * scale, 1 / (2 * math.sqrt(3)) * scale, 0 * scale),\n (-0.5 * scale, 1 / (2 * math.sqrt(3)) * scale, 0 * scale),\n (0 * scale, 0 * scale, math.sqrt(2 / 3) * scale)\n ]\n return array",
"def get_c(a, b):\r\n return np.sqrt(a * a + b * b)",
"def in_triangleplane_coords(vertices, v):\n b = basis(vertices)\n v2 = numpy.zeros(2)\n for i in range(2):\n v2[i] = numpy.dot(v, b[i])\n return v2",
"def point(x=0.,y=0.,z=0.):\n return Formex([[[x,y,z]]])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parabola through 3 points.
|
def _parabola_3points(x1, y1, x2, y2, x3, y3):
delta = (x1 - x2)*(x1 - x3)*(x2 - x3)
a = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / delta
b = (x3**2 * (y1 - y2) + x2**2 * (y3 - y1) + x1**2 * (y2 - y3)) / delta
c = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / delta
return a, b, c
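Sanity check: sampling a known parabola at three points recovers its coefficients, and feeding them to _vertex_parabola (defined just above in this section) gives the interpolated extremum; this pairing is the usual three-point peak-interpolation pattern.

# Three samples of y = 2*x**2 + 3*x + 1 give back (a, b, c) = (2, 3, 1).
a, b, c = _parabola_3points(0, 1, 1, 6, 2, 15)
print(a, b, c)                      # 2.0 3.0 1.0
print(_vertex_parabola(a, b, c))    # (-0.75, -0.125)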
|
[
"def _vertex_parabola(a, b, c):\n return -b/(2*a), - (b**2 - 4*a*c) / (4*a)",
"def _parabola(data):\n y = np.asarray(data)\n x = np.linspace(-1, 1, len(y))\n # use only the endpoints; when trying to use the mean of the last few values, the\n # fit is usually not as good since beads expects the endpoints to be 0; may allow\n # setting mean_width as a parameter later\n A = y.min()\n y1 = y[0] - A\n y2 = y[-1] - A\n # mean_width = 5\n # y1 = y[:mean_width].mean() - A\n # y2 = y[-mean_width:].mean() - A\n\n # if parabola == p(x) = A + B * x + C * x**2, find coefficients such that\n # p(x[0]==x1) = y[0] - min(y)==y1, p(x[-1]==x2) = y[-1] - min(y)==y2, and p(x_middle==0) = 0:\n # A = min(y)\n # C = (x1 * y2 - x2 * y1) / (x1 * x2**2 - x2 * x1**2)\n # B = (y1 - C) / x1\n # then replace x1 with -1, x2 with 1, and simplify\n C = (y2 + y1) / 2\n B = C - y1\n\n return A + B * x + C * x**2",
"def parabola(list1, list2, list3, plo=False, pri=False, **kwargs):\n import matplotlib.pyplot as mp\n import numpy as np\n [x1, y1] = list1\n [x2, y2] = list2\n [x3, y3] = list3\n D = x1**2 * (x2 - x3) + x2**2 * (x3 - x1) + x3**2 * (x1 - x2)\n C = np.array([x2 - x3, x3**2 - x2**2, x2 * x3 * (x2 - x3),\n x3 - x1, x1**2 - x3**2, x3 * x1 * (x3 - x1),\n x1 - x2, x2**2 - x1**2, x1 * x2 * (x1 - x2)]\n ).reshape(3, 3)\n yarr = np.array([y1, y2, y3])\n I = C.T / D\n [a, b, c] = np.dot(I, yarr)\n label = str(a) + 'x^2 + ' + str(b) + 'x + ' + str(c)\n if plo:\n x = np.linspace(x1, x3, 101)\n y = a * x**2 + b * x + c\n mp.plot(x, y, label=label, **kwargs)\n if pri:\n print label\n return a, b, c",
"def parabolaconstant(self):\n if self.g1 and self.g2 and self.curvecheck:\n return ((self.g2-self.g1)/(2*self.curvecheck()))",
"def polynomiale_carre(a:int, b:int, c:int, x: float) -> float:\n return a * pow(x, 4) + b * pow(x, 2) + c",
"def He3_cross(v,P,T,L): \n PdT=P/T\n return PdT*L*8606.3/v",
"def to_parabola(self) -> None:\n if not self.is_parabola:\n raise ValueError(f\"The underlying geometry is not a parabola: {self.type}\")\n raise NotImplementedError",
"def cross_product(self, p1, p2, p3):\n x1 = p2[0] - p1[0]\n y1 = p2[1] - p1[1]\n x2 = p3[0] - p1[0]\n y2 = p3[1] - p1[1]\n return x1 * y2 - x2 * y1",
"def polynomiale(a: float, b: float, c: float, d: float, x: float) -> float:\n return a*x*x*x + b*x*x + c*x + d",
"def lj_p(r_a):\r\n \r\n func = ((r_a)**(-12)-(r_a)**(-6))\r\n \r\n return func",
"def cubic(xx: np.ndarray) -> np.ndarray:\n return xx**3",
"def __mul__(self, pVal):\n return _almathswig.Velocity3D___mul__(self, pVal)",
"def polynomiale(a:int, b:int, c:int, d:int, x: float) -> float:\n return a * pow(x, 3) + b * pow(x, 2) + c * x + d",
"def __mul__(self, pVal):\n return _almathswig.Position3D___mul__(self, pVal)",
"def lorentzian(params, x):\n return params[0] + params[1] / ((x - params[2]) ** 2 + (0.5 * params[3]) ** 2)",
"def computePointsC(self):\n\n self.P = self.J.dot(self.C)\n self.P0 = self.J0.dot(self.C)",
"def my_cube (x):\n return (x**3)",
"def cube(x):\r\n return x*x*x",
"def cube(arg):\n return round(arg**3, 3)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find the first root of f=f(x) for data sets. Given two lists x and f, it returns the value xstar (and its index istar) for which f(xstar) = fstar. Returns (None, None) if no root is found.
|
def feqc(x, f, fstar):
s = f[0] - fstar
for i in range(min(len(x), len(f))):
if (f[i] - fstar) * s < 0.0:
# Linear interpolation
dxf = (f[i] - f[i-1]) / (x[i] - x[i-1])
xstar = x[i-1] + (fstar - f[i-1]) / dxf
istar = i
return xstar, istar
# We get to the end and cannot find the root
return None, None
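A tiny worked call: the tabulated values cross fstar = 1.5 between x = 1 and x = 2, so linear interpolation gives xstar = 1.5 with istar = 2 (the index of the first sample past the crossing); with no crossing the function returns (None, None).

# Worked call on a simple monotonic table.
x = [0.0, 1.0, 2.0, 3.0]
f = [0.0, 1.0, 2.0, 3.0]
print(feqc(x, f, 1.5))    # (1.5, 2)
print(feqc(x, f, 10.0))   # (None, None)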
|
[
"def rootof(f, x, index=None, radicals=True, expand=True):\n return CRootOf(f, x, index=index, radicals=radicals, expand=expand)",
"def calculate_root(f: Polynomial, a, b, eps):\n assert f(a)*f(b) < 0\n\n df = f.deriv()\n\n def newtons_lambda(x):\n return -1 / df(x)\n\n return sim.calculate_root(f, newtons_lambda, a, b, eps)",
"def root_find(s, t, t0, t1, func, tol=1e-08, eps=1e-16):\n # a,b,c: abscissae fa,fb,fc: corresponding function values\n a = t0\n b = t1\n c = a\n\n fa = func(a)\n fb = func(b)\n fc = fa\n\n # Main iteration loop\n n_iter = 0\n while True:\n n_iter += 1\n prev_step = b-a # Distance from the last but one to the last approx.\n tol_act = 0.0 # Actual tolerance\n new_step = 0.0 # Step at this iteration\n\n # Interpolation step is calculated in the form p/q\n # division operations is delayed until the last moment\n p = 0.0\n q = 0.0\n\n if numpy.abs(fc) < numpy.abs(fb):\n # Swap data for b to be the best approximation\n a = b\n b = c\n c = a\n fa = fb\n fb = fc\n fc = fa\n\n tol_act = 2.0*eps*numpy.abs(b) + tol/2.0\n new_step = (c - b)/2.0\n\n # Acceptable approximation found ?\n if numpy.abs(new_step) <= tol_act or fb == 0.0:\n root = b\n value = fb\n print 'finished after {} iterations.'.format(n_iter)\n return (root, value)\n\n # Interpolation may be tried if prev_step was large enough and in true direction\n if numpy.abs(prev_step) >= tol_act and numpy.abs(fa) > numpy.abs(fb):\n cb = c-b\n\n if a == c:\n # If we have only two distinct points, linear interpolation can only be applied\n t1 = fb / fa\n p = cb * t1\n q = 1.0 - t1\n else:\n # Inverse quadratic interpolation\n q = fa/fc\n t1 = fb/fc\n t2 = fb/fa\n p = t2 * (cb*q*(q - t1) - (b - a)*(t1 - 1.0))\n q = (q - 1.0) * (t1 - 1.0) * (t2 - 1.0)\n\n # p was calculated with the opposite sign make p positive and assign possible minus to q\n if p > 0.0:\n q = -q\n else:\n p = -p\n\n # If b+p/q falls in [b,c] and isn't too large, it is accepted\n # If p/q is too large then the bisection procedure can reduce [b,c] range to a larger extent\n if (p < 0.75*cb*q - numpy.abs(tol_act*q)/2.0\n and p < numpy.abs(prev_step*q/2.0)):\n new_step = p/q\n\n # Adjust the step to be not less than tolerance\n if numpy.abs(new_step) < tol_act:\n if new_step > 0.0:\n new_step = tol_act\n else:\n new_step = -tol_act\n\n # Save the previous approximate\n a = b\n fa = fb\n\n # Do step to a new approximation\n b += new_step\n fb = func(b)\n\n # Adjust c for it to have a sign opposite to that of b\n if (fb > 0 and fc > 0) or (fb < 0 and fc < 0):\n c = a\n fc = fa",
"def solve(f, x0=-BOUNDARY, x1=BOUNDARY, epsilon=EPSILON):\n\n if f(x0)*f(x1) <= 0:\n x = (x0 + x1) / 2\n while abs(f(x)) > epsilon:\n\n if not f(x0):\n return x0\n elif not f(x1):\n return x1\n\n x = (x0 + x1) / 2\n if f(x)*f(x0) < 0:\n x1 = x\n elif f(x)*f(x1) < 0:\n x0 = x\n return x\n\n else:\n return None",
"def find_roots(E, v0):\n e = E[E <= v0]\n r0 = np.unique(e.round())\n r = root(f, r0, args=(v0))\n return r[\"x\"]",
"def newtonraphson_method(f, x0, epsilon=10**-4, nMax=100):\n n = 1\n f_ = derive(f)\n while n <= nMax:\n if (f_(x0)==0):\n print(\"Error!, division by zero.\")\n return\n x1 = x0 - (f(x0) / f_(x0))\n print(\"x0: {}, x1: {}\".format(x0, x1))\n if (x1-x0<epsilon):\n print(\"\\nThe root is: {}\".format(x1))\n return x1\n else:\n x0=x1\n return False",
"def test_exact_root(self):\n f = lambda x: x**2 - 1.\n brackets = bracket_root(f,0.,init_step=1.)\n assert_inrange(1.,brackets)\n assert brackets == (0.,2.)",
"def solve_newton(f, df, x0, epsilon=1E-8, max_iter=100):\n xn = x0\n for n in range(0, max_iter):\n fxn = f(xn)\n if abs(fxn) < epsilon:\n return xn\n dfxn = df(xn)\n if dfxn == 0: # avoid zero derivatives\n xn = xn + 1E-3\n continue\n xn = xn - fxn / dfxn\n return None",
"def solve(f, x0=-10000, x1=10000, epsilon=EPSILON):\n\n def binary_solve(g, starting_x, ending_x):\n \"\"\"\n binary search to find solution in log time.\n checks for a solution between starting_x to ending_x\n :return: an x such a |f(x)|< epsilon (in float)\n \"\"\"\n while starting_x <= ending_x:\n mid_point = (starting_x + ending_x) / 2\n if math.isnan(mid_point):\n return mid_point\n if g(mid_point) <= -epsilon:\n # if we are lower then -epsilon we need to search right side\n starting_x = mid_point\n elif g(mid_point) >= epsilon:\n # if we are higher then epsilon we need to search left side\n ending_x = mid_point\n else:\n # if both are false we are in range, and therefore it's the\n # solution\n return mid_point\n return ending_x if (f(ending_x) < f(starting_x)) else starting_x\n\n if not f(x0) * f(x1) < 0:\n return None\n # If we have a monotonic up function we can use it for binary_solve.\n # if it is monotonic down, we take (-f) which is monotonic up with the same\n # solution\n if f(x1) > f(x0):\n return binary_solve(f, x0, x1)\n else:\n minus_f = reverse_function(f)\n return binary_solve(minus_f, x0, x1)",
"def get_current_node(self):\n # TODO: Return the node in the open set with the lowest value of f(node).\n dic={}\n for node in self.openSet:\n dic[node]=self.calculate_fscore(node)\n return min(dic, key=dic.get)",
"def findRoot(x, power, epsilon):\n if x < 0 and power%2 == 0: #Negative number has no even-powered \n #roots\n return None\n low = min(-1.0, x)\n high = max(1.0, x)\n ans = (high + low)/2.0\n while abs(ans**power - x) >= epsilon:\n if ans**power < x:\n low = ans\n else:\n high = ans\n ans = (high + low)/2.0\n return ans",
"def findroot(x, power, epsilon):\n if x < 0 and power%2 == 0:\n return None\n low = min(-1, x)\n high = max (1.0, x)\n ans = (low+high)/2.0\n while abs(ans**power - x) >= epsilon:\n if ans**power < x:\n low = ans\n else:\n high = ans\n ans = (low+high)/2.0\n return ans",
"def findMinimum(f, left, right, minInterval=3e-8):\n# replace this at some point by some better method (Num. Recip. in C, 394f)\n# -- this is easy to fool and massively suboptimal.\n\tmid = (right+left)/2.\n\toffset = (right-left)/4.\n\tif offset<minInterval:\n\t\treturn mid\n\tif f(left+offset)<=f(mid+offset):\n\t\treturn findMinimum(f, left, mid, minInterval)\n\telse:\n\t\treturn findMinimum(f, mid, right, minInterval)",
"def newton_sqrt(self,f,x0, prec):\n z = x0\n try:\n x = f.parent().variable_name()\n if x!='a' : #this is to distinguish between extensions of Qp that are finite vs. not\n S = f.base_ring()[[x]]\n x = S.gen()\n except ValueError:\n pass\n z = x0\n loop_prec = (log(RR(prec))/log(RR(2))).ceil()\n for i in range(loop_prec):\n z = (z+f/z)/2\n try:\n return z + O(x**prec)\n except (NameError,ArithmeticError,TypeError):\n return z",
"def compute_root(poly, x_0, epsilon):\n evalution = evaluate_poly(poly, x_0)\n iterations_n = 1\n while(abs(evalution) > epsilon):\n x_1 = x_0 - evaluate_poly(poly, x_0)/evaluate_poly(compute_deriv(poly), x_0)\n evalution = evaluate_poly(poly, x_1)\n x_0 = x_1\n iterations_n += 1\n return (x_0, iterations_n)",
"def solve(fvals, x0, debug=False):\n\tprint \"Initial guess: x = %22.15e\" % x0\n\tx = x0\n\tkmax = 20 \n\ttol = 1.e-14\n\tfor k in range(kmax):\n\t\tx0 = x\n\t\tf, fp = fvals(x)\n\t\tx = x0 - f/fp\n\t\tif debug:\n\t\t\tprint \"After %s iterations, x = %22.15e\" % (k+1,x)\n\t\tdelta_x = x - x0\n\t\tif abs(delta_x / x) < tol:\n\t\t\tbreak\n\t# if debug:\n\t# \tprint \"solve returns x = %22.15e after %s iterations \\\n\t# \t\t the value of f(x) is %22.15e\" % (x, k+1, f) \n\treturn x, k",
"def newton1d(f, df, ddf, x, niter=10):\n#raise NotImplementedError(\"Problem 3 Incomplete\")\n if np.isclose(df(x),0) or niter == 0:\n return x\n elif np.isclose(ddf(x),0) :\n raise ValueError(\"Division by zero occured.\")\n else :\n return newton1d(f,df,ddf,x-df(x)/float(ddf(x)),niter-1)",
"def root_jsd(x, y):\n try:\n assert isinstance(x, list)\n assert isinstance(y, list)\n except:\n # For individual inputs\n m = (x + y) / 2\n try:\n assert m > 0\n except AssertionError:\n return 0\n else:\n kld_x = x * log(x / m)\n kld_y = y * log(y / m)\n rJSD = sqrt(0.5 * kld_x + 0.5 * kld_y)\n else:\n # For list inputs\n m = [(a + b) / 2 for a, b in zip(x, y)]\n try:\n assert m > 0\n except AssertionError:\n return 0\n else:\n kld_x = entropy(x, m)\n kld_y = entropy(y, m)\n rJSD = sqrt(0.5 * kld_x + 0.5 * kld_y)\n return rJSD",
"def zero_fsolve(f, x0, *args):\n return fsolve(lambda x: f(x), x0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Callback to filter particles by species. The input species can be an integer (particle id), a string (particle name), or None. In this latter case, all particles are returned.
|
def filter_species(system, species):
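    # Work on a shallow copy so the input system is left untouched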
s = copy.copy(system)
if species is not None:
s.particle = [p for p in system.particle if p.species == species]
return s
|
[
"def particles(self, selection_func=None):\n if selection_func is None:\n return self.particles_\n else:\n return filter(selection_func, self.particles_)",
"def GetParticles(self, time, species,location=None):\n\n # checking parameters\n\n # check the species\n\n species_elec = ['electrons','electron']\n species_ions = ['protons','proton','ions','ion']\n\n if species.lower() not in species_elec + species_ions:\n print 'Error, unknow species \"%s\"' %(species)\n return None\n\n\n # check the location\n if location != None:\n if isinstance(location, collections.Iterable) == False:\n print 'Error, location should be [(x0,y0,x1,y1),]'\n return None\n else:\n x0 = []\n x1 = []\n z0 = []\n z1 = []\n for loc in location:\n if loc[2] <= loc[0]:\n print 'Error x1 <= x0 (%5.3f <= %5.3f)' % (loc[2],loc[0])\n return None\n if loc[3] <= loc[1]:\n print 'Error z1 <= z0 (%5.3f <= %5.3f)' % (loc[3],loc[1])\n return None\n\n x0.append(loc[0])\n x1.append(loc[2])\n z0.append(loc[1])\n z1.append(loc[3])\n\n\n #get the time in wpe^-1 units\n timewpe = time*self.masses['ions']*self.wpewce\n\n # build the filename of the first proc file\n filename = os.path.join(self.path,'parts-%05d-p000.dat' %round(timewpe))\n\n if os.path.isfile(filename) == False:\n print 'Error - no particle file at that time'\n return None\n\n\n # grab the number of particles\n f = open(filename,'rb')\n f.seek(-5*4,2) # 2 means 'from the end', -2*3 means 6 bytes backward\n # the last byte is the size of file (fortran)\n data = np.fromfile(f, dtype=np.int32, count=5) # number of particles\n nsp = data[0:2] # size of the particle arrays\n nsact = data[2:4] # actual number of particles\n f.close()\n\n # we need to find how many processors there are\n # we proceed with the following method : \n # first locate all parts-* files\n # then take the first in the list and find its time\n # then look at how many of file at this time there are in the list\n # that's the number of processors...\n\n\n # list all particle files\n allfiles = glob.glob(os.path.join(self.path,'parts-*'))\n if len(allfiles) == 0:\n print 'Error, no particle file found'\n return None\n\n # take the first\n firstfile = os.path.basename(allfiles[0])\n twpe = re.findall(r'[0-9]{5}',firstfile)[0]\n allproc = glob.glob(os.path.join(self.path,'parts-%s-*' % (twpe)))\n nbproc = len(allproc)\n\n # now we want to check whether some times miss some procs\n # so lets take the first proc of all times\n alltimes = glob.glob(os.path.join(self.path,'parts-*-p000.dat'))\n\n #then for each time grab the time and count the procs\n # they should be equal to nbproc\n for f in alltimes:\n name = os.path.basename(f)\n twpe = re.findall(r'[0-9]{5}',name)[0]\n allproc = glob.glob(os.path.join(self.path,'parts-%s-*' % (twpe)))\n nb = len(allproc)\n if nb != nbproc:\n print 'time %d does not have all proc files' %(int(twpe))\n return None\n\n\n # all right from now on we know how many procs there are\n # and also that all times have all proc files\n\n # fortran file format :\n # write(30)it,dt,teti,xmax,zmax,nx,nz,x,z,vx,vy,vz,nsp,nsact\n\n allproc = glob.glob(os.path.join(self.path,'parts-%05d-*' % (round(timewpe))))\n firstproc = True\n\n offset0 = 4+ 4 + 4 + 4 +2*4+2*4\n\n # these lists will contains the positions of the particles\n # for each location\n xploc = []\n zploc = []\n vxploc = []\n vyploc = []\n vzploc = []\n\n\n for f in allproc:\n fn = os.path.basename(f)\n fp = open(f,'rb')\n\n print '%s is now open' % (fn)\n\n if species.lower() in species_ions:\n print 'reading ions...'\n # pass the stuff before the particle arrays\n offset = offset0\n fp.seek(offset,os.SEEK_SET)\n xp = np.fromfile(fp, dtype=np.float32,count=nsp[0])\n # pass 
electrons\n offset = offset0 + 4*nsp[0]*2\n fp.seek(offset,os.SEEK_SET)\n zp = np.fromfile(fp, dtype=np.float32,count=nsp[0])\n # pass electrons\n offset = offset0 + 4*nsp[0]*4\n fp.seek(offset,os.SEEK_SET)\n vxp = np.fromfile(fp, dtype=np.float32,count=nsp[0])\n # pass electrons\n offset = offset0 + 4*nsp[0]*6\n fp.seek(offset,os.SEEK_SET)\n vyp = np.fromfile(fp, dtype=np.float32,count=nsp[0])\n # pass electrons\n offset = offset0 + 4*nsp[0]*8\n fp.seek(offset,os.SEEK_SET)\n vzp = np.fromfile(fp, dtype=np.float32,count=nsp[0])\n\n\n elif species.lower() in species_elec:\n print \"readling electrons...\"\n # pass the stuff before the particle arrays\n offset = offset0 + 4*nsp[0]\n fp.seek(offset,os.SEEK_SET)\n xp = np.fromfile(fp, dtype=np.float32,count=nsp[0])\n # pass ions\n offset = offset0 + 4*nsp[0]*3\n fp.seek(offset,os.SEEK_SET)\n zp = np.fromfile(fp, dtype=np.float32,count=nsp[0])\n # pass ions\n offset = offset0 + 4*nsp[0]*5\n fp.seek(offset,os.SEEK_SET)\n vxp = np.fromfile(fp, dtype=np.float32,count=nsp[0])\n # pass ions\n offset = offset0 + 4*nsp[0]*7\n fp.seek(offset,os.SEEK_SET)\n vyp = np.fromfile(fp, dtype=np.float32,count=nsp[0])\n # pass ions\n offset = offset0 + 4*nsp[0]*9\n fp.seek(offset,os.SEEK_SET)\n vzp = np.fromfile(fp, dtype=np.float32,count=nsp[0])\n\n # then we have to put the coordinates in ion units\n xp /= np.sqrt(self.masses['ions'])\n zp /= np.sqrt(self.masses['ions'])\n\n # caution : particle position is defined in a box\n # with z=0 at the center, so let's put zp back in [0,zmax]\n zp += self.domsize[1]\n\n #reset to the first location\n iloc = 0\n\n # loop on all locations where particles are selected\n for x0i,z0i,x1i,z1i in zip(x0,z0,x1,z1):\n\n print \"looking for particles in (%5.3f,%5.3f,%5.3f,%5.3f) in file %s \"% (x0i,z0i,x1i,z1i,fn)\n\n # ok now we have to take those electrons which are satisfy\n # our criteria, location etc.\n if location != None:\n idp = np.where((xp > x0i) & (xp < x1i) \\\n & (zp > z0i) & (zp < z1i))[0]\n\n print \"ok found %d particles here\" %(idp.size)\n\n # if we read the first processor\n # the arrays are not yet defined\n if firstproc == True:\n xploc.append(xp[idp])\n zploc.append(zp[idp])\n vxploc.append(vxp[idp] * self.wpewce * np.sqrt(self.masses['ions']))\n vyploc.append(vyp[idp] * self.wpewce * np.sqrt(self.masses['ions']))\n vzploc.append(vzp[idp] * self.wpewce * np.sqrt(self.masses['ions']))\n\n\n # but if it is not the first processor, \n # the arrays are know already so we want to concatenate\n # new data to them\n else:\n xploc[iloc] = np.concatenate((xploc[iloc],xp[idp]))\n zploc[iloc] = np.concatenate((zploc[iloc],zp[idp]))\n vxploc[iloc] = np.concatenate((vxploc[iloc],vxp[idp] * self.wpewce * np.sqrt(self.masses['ions'])))\n vyploc[iloc] = np.concatenate((vyploc[iloc],vyp[idp] * self.wpewce * np.sqrt(self.masses['ions'])))\n vzploc[iloc] = np.concatenate((vzploc[iloc],vzp[idp] * self.wpewce * np.sqrt(self.masses['ions'])))\n\n # next location\n iloc += 1\n\n # that's it, we've got all particles from all locations for this\n # processor, let's close the file and loop to the next\n firstproc = False\n fp.close()\n\n\n # We've got all particles from all location and all processors\n # ok so now let's loop over the locations and group the data into\n # Particles objects\n\n list_particles = []\n\n for i in range(len(location)):\n\n # make a r[2,nbpart] and v[3,nbpart] to fit in Particle constructor\n r_p = np.zeros((2,xploc[i].size), dtype=np.float32)\n v_p = np.zeros((3,vxploc[i].size),dtype=np.float32)\n\n 
r_p[0,:] = xploc[i]\n r_p[1,:] = zploc[i]\n v_p[0,:] = vxploc[i]\n v_p[1,:] = vyploc[i]\n v_p[2,:] = vzploc[i]\n\n list_particles.append(particles.Particles(r_p,v_p,species))\n\n\n # ok we got the list now return it\n return list_particles",
"def get_species(self, species):\n\n atoms = [site for site in self.sites\n if site.species_string == species]\n\n return atoms",
"def _iter_particles(self, names=None):\n if names is None:\n for node in self._root.particle._f_iter_nodes():\n yield self._get_particles(node._v_name)\n else:\n for name in names:\n if name in self._get_child_names(self._root.particle):\n yield self._get_particles(name)",
"def get_all_species(self):\n return self.species_list",
"def _filter_species(parsed):\n coreactants, catalysts, other_species, _ = parsed\n combined = [d['Species'] for d in coreactants] + [d['Species'] for d in catalysts]\n # if not coreactants or catalysts found, return unchanged\n if not combined:\n return other_species\n\n else:\n unaccounted = []\n combined = ' '.join(combined)\n for species in other_species:\n found = re.search(re.escape(species), combined) # include individual tokens for multi-token names\n if not found and species != 'M':\n unaccounted.append(species)\n return list(set(unaccounted))",
"def listSpecies(self):\n rows = yield self._db.runQuery(self._listSpeciesSQL)\n returnValue([name for (name,) in rows])",
"def particles_with_pdgID(self, pdgId):\n particle_filter = functools.partial(_filter_by_pdgId, pdgId=pdgId)\n return self.particles(particle_filter)",
"def get_all_species_by_genus_id(id_genus):\n listOfSpecies = []\n sqlObj = _Species_sql_new()\n results = sqlObj.select_all_species_of_genus_id(id_genus)\n for element in results:\n listOfSpecies.append(Specie(element[0], element[1], element[2]))\n return listOfSpecies",
"def cull_species(self):\n for s in self.species:\n organism_cnt = len(s.organisms)\n\n s.organisms.sort(cmp=lambda x, y: cmp(x.fitness, y.fitness),\n reverse=True)\n\n # Since we take the floor we add one so there's atleast one \n # survivor.\n survivors = int(math.floor(organism_cnt*self.conf.survival_rate))+1\n\n map(lambda x: x.marked_death(), s.organisms[survivors:])\n\n self.log.info('gen %d culled species %d from %d to %d',\n self.generation,\n s.species_id,\n organism_cnt,\n len(s.organisms))",
"def filter_instances_by_family(instances, family_name=None):\n for instance in instances:\n familyName = None\n for p in instance.customParameters:\n param, value = p.name, p.value\n if param == 'familyName':\n familyName = value\n if familyName == family_name:\n yield instance",
"def particleExists():\n pass",
"def _filter_particles(self, elem_den, pos, velocity, den):\n #Filter particles that are non-dense, as they will not be in halos\n ind2 = np.where(np.logical_and(den > 3e-4, elem_den > 0))\n halo_cofm = self.sub_cofm\n sub_cofm = self.sub_sub_cofm\n ind3 = []\n frachigh = 1.5\n fraclow = 0.7\n non_rot = 0\n for ii in ind2[0]:\n #Is this within the virial radius of any halo?\n ppos = pos[ii,:]\n dd = ne.evaluate(\"sum((halo_cofm - ppos)**2,axis=1)\")\n ind = np.where(dd < self.sub_radii**2)\n #Check subhalos\n if np.size(ind) < 1:\n dd = ne.evaluate(\"sum((sub_cofm - ppos)**2,axis=1)\")\n ind = np.where(dd < self.sub_sub_radii**2)\n if np.size(ind) < 1:\n continue\n ind = ind[0][0]\n hvel = self.sub_sub_vel[ind,:]\n hcofm = self.sub_sub_cofm[ind,:]\n hrad = self.sub_sub_radii[ind]\n vvir = self.virial_vel([ind,], subhalo=True)[0]\n else:\n ind = ind[0][0]\n hvel = self.sub_vel[ind,:]\n hcofm = self.sub_cofm[ind,:]\n hrad = self.sub_radii[ind]\n vvir = self.virial_vel([ind,])[0]\n #It is! What is the perpendicular velocity wrt this halo?\n lvel = velocity[ii, :] - hvel\n #Radial vector from halo\n lpos = ppos - hcofm\n ldist = np.sqrt(np.sum(lpos**2))\n #Find parallel by dotting with unit vector\n vpar = np.dot(lvel, lpos/ldist)\n vperp = np.sqrt(np.sum(lvel**2) - vpar)\n #Rotational velocity assuming NFW concentration 10 (like MW).\n vhalo = vvir * np.sqrt(5*ldist) / (1+ 10 * ldist / hrad)\n #Are we rotation supported?\n #Also, the angular vector should dominate over the radial\n if np.abs(vperp / (vpar+0.1)) < 2 or vperp / vhalo > frachigh or vperp / vhalo < fraclow:\n non_rot += 1\n continue\n #If we are, add to the list\n ind3+= [ii,]\n print(\"Filtered \",np.size(ind2[0]),\" particles to \",np.size(ind3))\n print(\"Non-rotating \",non_rot)\n return ind3",
"def iter_particles(self, ids=None):\n if ids is None:\n for row in self._group.particles:\n yield Particle(\n id=row['id'], coordinates=tuple(row['coordinates']))\n else:\n # FIXME: we might want to use an indexed query for these cases.\n for particle_id in ids:\n yield self.get_particle(particle_id)",
"def filter(\n self, name: Literal[\"quantile\", \"value\", \"radiusoutlier\"], *args, **kwargs\n ) -> PointCloud:\n name = name.upper()\n if name in ALL_FILTERS:\n return ALL_FILTERS[name](self, *args, **kwargs)\n else:\n raise ValueError(\"Unsupported filter. Check docstring\")",
"def refresh(self):\n self.logger.debug(\"Refreshing species filter options\")\n \n inputSpecieList = self.pipelinePage.inputState.specieList\n refSpecieList = self.pipelinePage.refState.specieList\n \n # set of added species\n currentSpecies = set()\n \n # remove species that don't exist\n num = self.specieList.count()\n for i in range(num - 1, -1, -1):\n item = self.specieList.item(i)\n \n # remove if doesn't exist in both ref and input\n if item.symbol not in inputSpecieList and item.symbol not in refSpecieList:\n self.logger.debug(\" Removing species option: %s\", item.symbol)\n self.specieList.takeItem(i) # does this delete it?\n \n else:\n currentSpecies.add(item.symbol)\n \n # unique species from ref/input\n combinedSpecieList = list(inputSpecieList) + list(refSpecieList)\n uniqueCurrentSpecies = set(combinedSpecieList)\n \n # add species that aren't already added\n for sym in uniqueCurrentSpecies:\n if sym in currentSpecies:\n self.logger.debug(\" Keeping species option: %s\", sym)\n \n else:\n self.logger.debug(\" Adding species option: %s\", sym)\n name = elements.atomName(sym)\n item = SpeciesListItem(sym, name=name)\n self.specieList.addItem(item)\n \n # update visible species list\n self.speciesListChanged()",
"def compact_slices(self, species):\n if self.buffered_slices[species] != []:\n particle_array = np.concatenate(\n self.buffered_slices[species], axis=1)\n else:\n particle_array = np.empty((9,0))\n\n return particle_array",
"def filter_on_pet(self, predicate: Callable[[Pet], bool]) -> Zoo:\n return {\n pet_name: pair for (pet_name, pair) in self.zoo.items() if predicate(pair.pet)\n }",
"def _iter_particles(self, ids=None):\n if ids is None:\n return iter(self._particles)\n else:\n return self._particles.itersequence(ids)",
"def has_particle(self, id):\n for row in self._group.particles.where(\n 'id == value', condvars={'value': id}):\n return True\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Copy the particle property `field` from `trajectory` at the current frame of the system. Requires atooms >= 1.10.0.
|
def copy_field(system, field, trajectory):
# Only available in atooms > 1.10.0
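    # Source frame in the trajectory corresponding to the current frame of the system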
so = trajectory[system.frame]
for p, po in zip(system.particle, so.particle):
x = getattr(po, field)
setattr(p, field, x)
return system
|
[
"def TrackVelocity3D(particle, fieldset, time):\n print(\"TIME : %g\" % time)\n (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] #\n particle.u = u1 * 1852. * 60. * math.cos(particle.lat * math.pi/180.) \n particle.v = v1 * 1852. * 60.",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoMFVec3d_copyFrom(self, field)",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoSFVec3d_copyFrom(self, field)",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoSFBox3d_copyFrom(self, field)",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoMFPlane_copyFrom(self, field)",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoMFVec3f_copyFrom(self, field)",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoMFVec2d_copyFrom(self, field)",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoSFVec3f_copyFrom(self, field)",
"def copy_traj_attributes(target, origin, start):\n\n # The list of copied attributes can be extended here with time\n # Or perhaps ask the mdtraj guys to implement something similar?\n\n target._xyz[start:start+origin.n_frames] = origin._xyz\n target._unitcell_lengths[start:start+origin.n_frames] = origin._unitcell_lengths\n target._unitcell_angles[start:start+origin.n_frames] = origin._unitcell_angles\n target._time[start:start+origin.n_frames] = origin._time\n\n return target",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoSFVec2d_copyFrom(self, field)",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoMFVec3b_copyFrom(self, field)",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoSFTime_copyFrom(self, field)",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoMFEngine_copyFrom(self, field)",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoSFVec3b_copyFrom(self, field)",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoSFPlane_copyFrom(self, field)",
"def read_trajectory(self):\n with open(self._file_path,'r') as f:\n for line in f.readlines()[1:]:\n # convert string to float\n time_slice = map(float,line.split())\n\n # index 0 is time\n self._time_list.append(time_slice[0])\n\n # index 1, 2, 3 are x, y, theta\n base_pose = Pose2D()\n base_pose.x = time_slice[1]\n base_pose.y = time_slice[2]\n base_pose.theta = time_slice[3]\n self._base_pose_list.append(base_pose)\n self._joints_pos_dict['base_x'].append(time_slice[1])\n self._joints_pos_dict['x_y'].append(time_slice[2])\n self._joints_pos_dict['y_car'].append(time_slice[3])\n\n # index from 4 to 10 are joint_a1 to joint_a7\n for idx in range(self._joint_num):\n self._joints_pos_dict['joint_a'+str(idx+1)].append(time_slice[idx+4])",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoMFVec3s_copyFrom(self, field)",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoSFMatrix_copyFrom(self, field)",
"def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoSFEngine_copyFrom(self, field)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Filter a list of entries so as to best match an input template. Lazy, slow version O(NM).
|
def _templated(entry, template, keep_multiple=False):
match = []
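    # For every template value, keep the entry that minimizes the absolute distance to it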
for t in template:
def compare(x):
return abs(x - t)
match.append(min(entry, key=compare))
if not keep_multiple:
match = list(set(match))
return sorted(match)
|
[
"def filter_template_list(template_list, output_filter):\n output_filter = [re.compile(flt) for flt in output_filter]\n template_list = [\n templ\n for templ in template_list\n for rex in output_filter if rex.match(templ)\n ]\n LOG.debug('Filtered template files list: %s', template_list)\n return template_list",
"def filter(file_list):\n class file_group:\n \"\"\"\n This class is just a collection to keep all the values associated together\n some things are calculated here but no internal values should be changed after the fact.\n \"\"\"\n def __init__(self, files):\n self.files = files\n offset = timedelta(hours = 20)\n offset_start = files[0].start_time - offset\n self.day_str = offset_start.strftime(CONFIG[\"Date Format\"])\n self.duration = files[0].elapsed_time(files[-1])\n\n if \"ALL\" in CONFIG[\"Dates\"]:\n in_date_range = lambda x: True\n else:\n valid_dates = dates_for_match(CONFIG[\"Dates\"])\n in_date_range = lambda x: x in valid_dates\n\n files_by_day = {}\n for group in file_list:\n fg = file_group(group)\n day = fg.day_str\n if in_date_range(day):\n if day in files_by_day:\n if fg.duration > files[day].duration:\n files_by_day[day] = fg\n else:\n files_by_day.update({day: fg})\n\n valid_groups = [v.files for k,v in files_by_day.items()]\n return valid_groups",
"def test_filter_rows_list_input():\n ls = [\n {'s': 'a', 'i': 1, 'f': 1.0},\n {'s': 'b', 'i': 2, 'f': 2.0},\n {'s': 'c', 'i': 3, 'f': 3.0},\n ]\n filtered = query_csv.filter_rows(ls, {'s': 'a'})\n assert list(filtered) == [\n {'s': 'a', 'i': 1, 'f': 1.0},\n ]",
"def _apply_filter(self, feed, patterns):\n\n entries = [entry for entry in self.entries if entry[u\"feed_id\"] == feed[u\"feed_id\"]]\n if not entries:\n # no unread entries\n return None\n\n print u\"Searching \\\"{}\\\" for matching items...\".format(feed[u\"title\"]),\n sys.stdout.flush()\n\n count = len(self.to_be_filtered)\n for pattern in patterns:\n regex = re.compile(pattern)\n for entry in entries:\n if not entry[u\"title\"]:\n # Untitled entries are both valid and extant\n continue\n if regex.search(entry[u\"title\"]):\n # TODO: remove entry from entries\n self.to_be_filtered.append(entry[u\"id\"])\n\n return len(self.to_be_filtered) - count",
"def search_any_predicate(self, templates):\n predicates = [self.pattern(template).search for template in templates]\n return lambda text: any(predicate(text) for predicate in predicates)",
"def filter(items: List[Product], spec: Specification):",
"def _filter_entries(self, entries: List[FeedEntry]) -> List[FeedEntry]:\n filtered_entries = entries\n if self._apply_filters:\n # Always remove entries without coordinates.\n filtered_entries = list(\n filter(\n lambda entry: (entry.coordinates is not None)\n and (entry.coordinates != (None, None)),\n filtered_entries,\n )\n )\n # Always remove entries on the ground (altitude: 0).\n filtered_entries = list(\n filter(lambda entry: entry.altitude > 0, filtered_entries)\n )\n # Filter by distance.\n if self._filter_radius:\n filtered_entries = list(\n filter(\n lambda entry: entry.distance_to_home <= self._filter_radius,\n filtered_entries,\n )\n )\n return filtered_entries",
"def filter(f, xs):\r\n return type(xs)(lazy_filter(f, xs))",
"def _filter_entries(self,\n entries: List[VICEmergencyIncidentsFeedEntry]) \\\n -> List[VICEmergencyIncidentsFeedEntry]:\n filtered_entries = super()._filter_entries(entries)\n if self._filter_inc_categories:\n filtered_entries = list(filter(lambda entry:\n entry.category1 in self._filter_inc_categories,\n filtered_entries))\n if self._filter_exc_categories:\n filtered_entries = list(filter(lambda entry:\n entry.category1 not in self._filter_exc_categories,\n filtered_entries))\n if not self._filter_statewide:\n filtered_entries = list(filter(lambda entry:\n entry.statewide not in ['Y'],\n filtered_entries))\n\n return filtered_entries",
"def filter(f,data):\n for item in data:\n if f(item):\n yield item",
"def filter_addresses(addresses: list, pattern):\n rec = re.compile(pattern)\n for address in addresses:\n if rec.search(address) is not None:\n yield address",
"def _filterOutput(self, pipelines, filter_dict, bIn):\n filtered = []\n for line in pipelines[:]:\n check = False # \"check\" means \"match\"\n # This inner for loop is deceiving: the filter_dict usually has a\n # single key:value and then the break/else is pure confusion.\n for key, value in filter_dict.items():\n if 'any' in value or value == ['']:\n check = True if key in line.keys() else False\n else:\n # Use full match for numerical values, and use substring\n # match for string values\n if str(line[key]).isdigit():\n check = str(line[key]) in value\n else:\n check = any([em in str(line[key]) for em in value])\n if check is bIn:\n break\n else:\n # No 'break': include this pipeline\n filtered.append(line)\n return filtered",
"def test_filter_list(a_list, b_list): # pylint: disable=redefined-outer-name\n # Test text\n expected = [\n [\"John 'Da Man'\", \"Repici\", \"120 Jefferson St.\", \"Riverside\", \"NJ\", \"08075\"],\n ]\n assert expected == tjcim.filter_list('Repici', a_list)\n\n # Test integer\n expected = [\n [\"FEB\", \"318\", \"342\", \"391\"],\n ]\n assert expected == tjcim.filter_list(\"342\", b_list)\n\n # Test multiple\n expected = [\n [\"MAY\", \"363\", \"420\", \"472\"],\n [\"JUN\", \"435\", \"472\", \"535\"],\n ]\n assert expected == tjcim.filter_list(\"472\", b_list)\n\n # invalid cases\n assert tjcim.filter_list(\"jjjj\", a_list) is None",
"def get_matching_entries(self, entry):\n return [e for e in self.entries if e.matches(entry)]",
"def add_template_matches(self, template_names, entries):\n entries = tuple(entries)\n\n # Get index (path,hash) -> [ match, match, ... ]\n index = {}\n for path, sha256, template_match in entries:\n index.setdefault((path, sha256), []).append(template_match)\n\n for chunk in chunks(index.keys(), size=1000):\n with self.database.session_scope() as session:\n\n templates = self._ensure_templates_exist(session, template_names)\n\n query = session.query(Files).options(joinedload(Files.template_matches))\n existing_files = query.filter(self._by_path_and_hash(chunk)).all()\n self._delete_file_template_matches(session, existing_files, templates.values())\n\n # Update existing files\n for file in existing_files:\n self._create_template_matches(file, index[(file.file_path, file.sha256)], templates)\n\n # Create missing files\n existing = {(file.file_path, file.sha256) for file in existing_files}\n for path, sha256 in set(chunk) - existing:\n file = Files(file_path=path, sha256=sha256)\n self._create_template_matches(file, index[(path, sha256)], templates)\n session.add(file)",
"def filter_pair_list_by_items(font, pair_list, filter_item_left=None, filter_item_right=None):\n # sanitizing input (?)\n if len(filter_item_left.strip()) == 0:\n filter_item_left = None\n if len(filter_item_right.strip()) == 0:\n filter_item_right = None\n\n # no filtering needed here\n if not filter_item_left and not filter_item_right:\n return pair_list\n\n # XXXX maybe it is expensive to have that calculate each time the pair list\n # is updated? Should this be cached somewhere?\n grouped_dict_1, grouped_dict_2 = _make_grouped_dicts(font.groups)\n\n # add groups to the filter items if relevant\n pertinent_items_1 = [filter_item_left]\n group_item_1 = grouped_dict_1.get(filter_item_left, None)\n if group_item_1:\n pertinent_items_1.append(group_item_1)\n pertinent_items_2 = [filter_item_right]\n group_item_2 = grouped_dict_2.get(filter_item_right, None)\n if group_item_2:\n pertinent_items_2.append(group_item_2)\n\n filtered_kerning = []\n for pair in pair_list:\n pair_item_1, pair_item_2 = pair\n if(\n (filter_item_left is None or pair_item_1 in pertinent_items_1) and\n (filter_item_right is None or pair_item_2 in pertinent_items_2)\n ):\n filtered_kerning.append(pair)\n\n return filtered_kerning",
"def _FilterTestUsers(self, query, user_list):\n filter_key, filter_value = query.split(':')\n key_translations = {'email': 'primaryEmail'}\n if filter_key in key_translations:\n filter_key = key_translations[filter_key]\n filter_value = filter_value.rstrip('*')\n return [u for u in user_list if u.get(filter_key).startswith(filter_value)]",
"def _filter_list(self, data, name_or_id, filters):\n if name_or_id:\n identifier_matches = []\n for e in data:\n e_id = str(e.get('id', None))\n e_name = e.get('name', None)\n # cinder likes to be different and use display_name\n e_display_name = e.get('display_name', None)\n if str(name_or_id) in (e_id, e_name, e_display_name):\n identifier_matches.append(e)\n data = identifier_matches\n\n if not filters:\n return data\n\n def _dict_filter(f, d):\n if not d:\n return False\n for key in f.keys():\n if isinstance(f[key], dict):\n if not _dict_filter(f[key], d.get(key, None)):\n return False\n elif d.get(key, None) != f[key]:\n return False\n return True\n\n filtered = []\n for e in data:\n filtered.append(e)\n for key in filters.keys():\n if isinstance(filters[key], dict):\n if not _dict_filter(filters[key], e.get(key, None)):\n filtered.pop()\n break\n elif e.get(key, None) != filters[key]:\n filtered.pop()\n break\n return filtered",
"def filter_by_value(list_, filter_values):\n filtered_list = []\n\n for value in list_:\n for filter_value in filter_values:\n if filter_value in str(value):\n filtered_list.append(value)\n\n return filtered_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given an HTML table with traffic data, return a dictionary mapping each row label to its byte, packet, and error counts.
|
def get_traffic_stats(traffic_table):
log = logging.getLogger('get_traffic_stats')
traffic_rows = traffic_table.find_all('tr')
#log.debug(traffic_rows)
traffic = {}
    # Only table rows 1 and 2 carry the counters we want
    for i, row in enumerate(traffic_rows):
        if i in [1, 2]:
            cols = row.find_all('td')
            traffic[cols[0].string.lower()] = {
                'bytes': cols[1].string,
                'packets': cols[2].string,
                'errors': cols[3].string}
return traffic
|
[
"def data_pretty_print(self, data):\n data_str = tabulate(data, headers=\"keys\", tablefmt=\"psql\")\n return data_str",
"def pretty_print_table(hashtable):\n for key,val in hashtable.items():\n values = [\",\".join(map(str, v)) for v in val]\n print(key + \"\\t\" + \"\\t\".join(values))",
"def gettabledict(self, tablename):\n urlpath = '/' + tablename\n return self.getdict(urlpath)",
"def clitable_to_dict(cli_table):\n\n objs = []\n for row in cli_table:\n temp_dict = {}\n for index, element in enumerate(row):\n temp_dict[cli_table.header[index].lower()] = element\n objs.append(temp_dict)\n\n return objs",
"def to_dict(self) -> Dict[str, Any]:\n return {\"name\": self.table_name, \"kind\": self.table_kind, \"data\": [r.to_dict() for r in self]}",
"def table(data):\n return pd.DataFrame(json_normalize(data))",
"def tabulate_info(packet, max_width):\n table = BeautifulTable(max_width=max_width)\n table.column_headers = packet.get_info().keys()\n row = [\n tabulate_info(attr, max_width=max_width/2)\n if hasattr(attr, 'get_info')\n else format_hex(attr)\n for attr in packet.get_info().values()\n ]\n table.append_row(row)\n return table",
"def create_table(small_dict):\r\n keys, values = tuple(zip(*small_dict.items()))\r\n table = tabulate(\r\n [values],\r\n headers=keys,\r\n tablefmt=\"pipe\",\r\n floatfmt=\".3f\",\r\n stralign=\"center\",\r\n numalign=\"center\",\r\n )\r\n return table",
"def print_virt_table(data):\r\n\r\n table = prettytable.PrettyTable()\r\n table.add_column('Keys', data.keys())\r\n table.add_column('Values', data.values())\r\n for tbl in table.align.keys():\r\n table.align[tbl] = 'l'\r\n return table",
"def format_table(row):\n shelter_name = row[\"FacilityName\"]\n last_report = row[\"timestamp_local\"]\n district = integrify(row[\"CouncilDistrict\"])\n occupied_beds = integrify(row[\"occupied_beds_computed\"])\n aval_beds = integrify(row[\"open_beds_computed\"])\n male_tot = integrify(row[\"Total Men Currently at Site\"])\n female_total = integrify(row[\"Total Women Currently at Site\"])\n pets = integrify(row[\"Number of Pets Currently at Site\"])\n ems_calls = integrify(row[\"Number of EMS Calls\"])\n ems_transport = integrify(row[\"Number of EMS Transports\"])\n num_quar = integrify(row[\"Clients currently quarantined\"])\n trail_open = integrify(row[\"Number of Open Trailers\"])\n trail_occupied_women = integrify(row[\"Total Women Currently in Trailer\"])\n trail_occupied_men = integrify(row[\"Total Men Currently in Trailer\"])\n trail_occupied_pets = integrify(row[\"Total Pets Currently in Trailer\"])\n\n shelter = f\"\"\"<b>{shelter_name}</b><br>\n <i>Council District {district}</i><br>\n <i>Report Time: {last_report}</i><br>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Occupied Beds: {occupied_beds}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Available Beds: {aval_beds}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Male: {male_tot}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Female: {female_total}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Pets: {pets}</p><br>\n <i>Trailer Details: </i>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Trailer Open Beds: {trail_open}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Trailer Occupied - Men: {trail_occupied_men}\n </p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Trailer Occupied - Women: {trail_occupied_women}\n </p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Trailer Occupied - Pets: {trail_occupied_pets}\n </p><br>\n <i>Health Details: </i>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Number of EMS Calls: {ems_calls}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Number of EMS Transports: {ems_transport}\n </p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Number of currently quarantined clients: {num_quar}\n </p>\n\n\n \"\"\"\n return shelter.strip()",
"def map(self, data: list) -> Dict[Any, Any]:\n entry_list = []\n for resource in data:\n entry_list.append(\n [\n resource.get(\"LogicalResourceId\", \"-\"),\n resource.get(\"PhysicalResourceId\", \"-\"),\n ]\n )\n table_data = {\n \"format_string\": \"{Logical ID:<{0}} {Physical ID:<{1}}\",\n \"format_args\": OrderedDict(\n {\n \"Logical ID\": \"Logical ID\",\n \"Physical ID\": \"Physical ID\",\n }\n ),\n \"table_name\": \"Resources\",\n \"data\": entry_list,\n }\n return table_data",
"def _make_dict_from_table(self, table_name, pkey_name):\n table = {}\n c = self.conn.cursor()\n c.execute('select * from %s;' % (table_name))\n # extract just the field name\n description = [ f[0] for f in c.description]\n for row in c:\n row_dict = dict(zip(description, row))\n table[row_dict[pkey_name]] = row_dict\n c.close()\n return table",
"def _table_elem_to_json(table_elem):\n return {\n key: value\n for (key, value) in [\n (\n node.xpath(\"*\")[0].text_content(),\n node.xpath(\"*\")[1].attrib[\"href\"]\n if node.xpath(\"*\")[1].tag == \"a\"\n else node.xpath(\"*\")[1].text_content(),\n )\n for node in table_elem\n ]\n }",
"def dump_inline_table(self, section):\n retval = ''\n if isinstance(section, dict):\n val_list = []\n for k, v in section.items():\n val = self.dump_inline_table(v)\n val_list.append(k + ' = ' + val)\n\n retval += '{ ' + ', '.join(val_list) + ' }\\n'\n return retval\n return unicode(self.dump_value(section))",
"def html_table_to_dict(html):\n soup = BeautifulSoup(html, 'html.parser')\n tables = soup.find_all('table')\n results = []\n for table in tables:\n table_headers = [header.text for header in table.find('thead').find_all('th')]\n table_body = []\n for row in table.find('tbody').find_all('tr'):\n row_dict = {}\n for i, cell in enumerate(row.find_all('td')):\n row_dict[table_headers[i]] = cell.text\n table_body.append(row_dict)\n results.append(table_body)\n return results",
"def json_print(table_proxy):\n print(json.dumps([(dict(row.items())) for row in table_proxy]))",
"def print_table(table):\n rows = execute(\"SELECT * FROM {}\".format(table), fetch_all=True)\n \n if rows == []:\n return\n\n table = PrettyTable(rows[0].keys())\n\n for row in rows:\n table.add_row(list(row))\n\n print(table)",
"def convert_array_to_json_string(table):\n json_table = \"[\"\n for ii, row in enumerate(table):\n if ii > 0:\n json_table += \",\\n\"\n row_str = \"[\" + \", \".join([\"%.4g\" % e for e in row]) + \"]\"\n json_table += row_str\n json_table += \" ]\"\n return json_table",
"def get_tee_info(soup):\n rows = soup.find_all('tr')\n tees = {}\n try:\n headings = [head.text for head in rows[0].find_all('th')]\n all_tees = [value.text.strip().split('\\n') for value in rows[1:]]\n for tee in all_tees:\n tees[tee[0].replace('.', '').replace('$', '')] = dict(\n zip(headings[1:], tee[1:])\n )\n return tees\n except IndexError:\n return {}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Push a new worker into the queue at a random position (placement should eventually depend on the type and urgency of the worker).
|
def pushRandom(t):
Worker.push(t)
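    # Shuffling the queue leaves the new worker at a random position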
shuffle(Worker.workers)
|
[
"def push_to_queue(self):\n redis = self.redis_pool.get_connection()\n redis.publish(self.collection_name, self.worker_id)",
"def queue_fixture():\n new_queue = our_queue()\n return new_queue",
"async def add_worker_to_pool(self, worker_id):\n if not worker_id in self.worker_instances:\n self.worker_instances.add(worker_id)",
"def add_to_queue(self, person, time):\n self.queue.append(person)\n if len(self.queue) == 1:\n person.start_serving(self, time)",
"def _additem(self):\n\n self.queue.put(self._genitem())",
"def _new_worker(self):\n\n # create a worker instance\n w = Worker(self)\n\n # append new worker to list\n self.all_workers.append(w)\n\n # return new worker\n return w",
"def get_free_worker(self) -> T:\n\n # Try to accuire one of the workers\n for worker in self._workers:\n worker: T = worker\n\n if not worker.lock.locked():\n return worker\n\n # If none is available, try to add new one\n if len(self._workers) < self._max_workers:\n self._workers.append(self._worker_creation_func())\n return self._workers[-1]\n\n # If none of the above options work,\n # return a random worker.\n return random.choice(self._workers)",
"def create_queue(self, queue):",
"def create_worker_block(WorkerId=None, Reason=None):\n pass",
"def _worker_loop(self) -> None:\n msgq = self.msg_q\n while self._do_main_loop:\n if self._isactive:\n msg = self.generate_msg()\n if msg is not None:\n # print(\"enqueueing {}\".format(msg))\n msgq.put(msg)\n # --\n gevent.sleep(self._sec_sleep)",
"def __call__(self, event, payload):\n # as we defined a threadpool we can enqueue our item\n # and move to the next.\n self.threadpool.enqueue(event, payload)\n print(\"Thread with payload \" + str(payload) + \" is enqueued\")",
"def push(queue, item):\n queue.append(item)",
"def allocate_worker(self):\n raise NotImplementedError",
"def spawn_worker_bees_in_zone(self, zone, count):\n self.worker_bees = self.worker_bees + zone.spawn_workers(count, self.function)\n return zone.best_bee",
"def add_worker(self, worker, count):\n\n # We record the multiple counts as different workers\n for idx in xrange(count):\n self._workers.put(worker)\n self._cpus[worker] = count\n self._active[worker] = 0",
"def create_worker(self, worker: domain.Worker):\n with self.transaction() as t:\n return t.create_worker(worker)",
"def test1():\r\n q = make_priority_queue()\r\n count = 0\r\n while True:\r\n if count == 10:\r\n break\r\n i = rand.randint(1,10)\r\n task = \"Task\" + str(count + 1)\r\n enqueue(q, Task(task, i))\r\n count += 1\r\n print(\"Created Queue: \", q)\r\n t = front(q)\r\n print(\"Highest priority task is\", t.name, \"with priority\", t.priority)\r\n t = back(q)\r\n print(\"Lowest priority task is\", t.name, \"with priority\", t.priority)\r\n while not is_empty(q):\r\n t = front(q)\r\n dequeue(q)\r\n if is_empty(q) is True:\r\n print(\"Dequeue Success? - True\")\r\n else:\r\n print(\"Dequeue Success? - False\")",
"def test_minimalWorker(self):\n pool = None\n\n def recordingFactory(*a, **kw):\n nonlocal pool\n pool = LocalWorkerPool(*a, autostop=True, **kw)\n return pool\n\n maxWorkers = 7\n numTests = 3\n\n runner = self.getRunner(\n maxWorkers=maxWorkers, workerPoolFactory=recordingFactory\n )\n suite = TrialSuite([TestCase() for n in range(numTests)])\n self.successResultOf(runner.runAsync(suite))\n assert_that(pool._started[0].workers, has_length(numTests))",
"def testPriority(self):\n element = WorkQueueElement(RequestName='backend_test',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=1)\n highprielement = WorkQueueElement(RequestName='backend_test_high',\n WMSpec=self.processingSpec,\n Status='Available', Jobs=10,\n SiteWhitelist=[\"place\"],\n Priority=100)\n element2 = WorkQueueElement(RequestName='backend_test_2',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=1)\n element3 = WorkQueueElement(RequestName='backend_test_3',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=1)\n lowprielement = WorkQueueElement(RequestName='backend_test_low',\n WMSpec=self.processingSpec,\n Status='Available',\n SiteWhitelist=[\"place\"],\n Jobs=10, Priority=0.1)\n self.backend.insertElements([element])\n self.backend.availableWork({'place': 1000}, {})\n # timestamp in elements have second coarseness, 2nd element must\n # have a higher timestamp to force it after the 1st\n time.sleep(1)\n self.backend.insertElements([lowprielement, element2, highprielement])\n self.backend.availableWork({'place': 1000}, {})\n time.sleep(1)\n self.backend.insertElements([element3])\n work = self.backend.availableWork({'place': 1000}, {})\n # order should be high to low, with the standard elements in the order\n # they were queueud\n self.assertEqual([x['RequestName'] for x in work[0]],\n ['backend_test_high', 'backend_test', 'backend_test_2',\n 'backend_test_3', 'backend_test_low'])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Launch the worker and increase the shared time counter.
|
def launch(self):
    Worker.time += 1
|
[
"def run_job():",
"def worker_function(time_left):\r\n timer = TimerApp(time_left)",
"def worker():\r\n\r\n while True:\r\n t = threading.Timer(10.0, hello)\r\n t.start()\r\n t.join()",
"def do_main(self):\n self.pool.spawn_n(self._periodic_runner)\n super(Manager, self).do_main()",
"def run(self):\n time = 0\n while time <= self.max_time:\n self.step()\n time += self.time_step",
"def run_job(self):\n\n try:\n job_item = self.job_queue.get(block=False, timeout=1)\n except Exception:\n self.log.debug(\n \"Directord server found nothing to do, cooling down\"\n \" the poller.\"\n )\n return 512, time.time()\n else:\n restrict_sha256 = job_item.get(\"restrict\")\n if restrict_sha256:\n if job_item[\"task_sha256sum\"] not in restrict_sha256:\n self.log.debug(\n \"Job restriction %s is unknown.\", restrict_sha256\n )\n return 512, time.time()\n\n job_targets = job_item.pop(\"targets\", list())\n # NOTE(cloudnull): We run on all targets if query is used.\n run_query = job_item[\"verb\"] == \"QUERY\"\n\n if job_targets and not run_query:\n targets = list()\n for job_target in job_targets:\n job_target = job_target.encode()\n if job_target in self.workers:\n targets.append(job_target)\n else:\n self.log.critical(\n \"Target %s is in an unknown state.\", job_target\n )\n return 512, time.time()\n else:\n targets = self.workers.keys()\n\n if job_item.get(\"run_once\", False) and not run_query:\n self.log.debug(\"Run once enabled.\")\n targets = [targets[0]]\n\n if run_query:\n job_item[\"targets\"] = [i.decode() for i in targets]\n\n task = job_item.get(\"task\", utils.get_uuid())\n job_info = self.create_return_jobs(\n task=task, job_item=job_item, targets=targets\n )\n self.log.debug(\"Sending job:%s\", job_item)\n for identity in targets:\n if job_item[\"verb\"] in [\"ADD\", \"COPY\"]:\n for file_path in job_item[\"from\"]:\n job_item[\"file_sha256sum\"] = utils.file_sha256(\n file_path=file_path\n )\n if job_item[\"to\"].endswith(os.sep):\n job_item[\"file_to\"] = os.path.join(\n job_item[\"to\"],\n os.path.basename(file_path),\n )\n else:\n job_item[\"file_to\"] = job_item[\"to\"]\n\n if job_item[\"file_to\"] not in job_info[\"TRANSFERS\"]:\n job_info[\"TRANSFERS\"].append(job_item[\"file_to\"])\n\n self.log.debug(\n \"Sending file transfer message for\"\n \" file_path:%s to identity:%s\",\n file_path,\n identity.decode(),\n )\n self.driver.socket_send(\n socket=self.bind_job,\n identity=identity,\n command=job_item[\"verb\"].encode(),\n data=json.dumps(job_item).encode(),\n info=file_path.encode(),\n )\n else:\n self.log.debug(\n \"Sending job message for job:%s to identity:%s\",\n job_item[\"verb\"].encode(),\n identity.decode(),\n )\n self.driver.socket_send(\n socket=self.bind_job,\n identity=identity,\n command=job_item[\"verb\"].encode(),\n data=json.dumps(job_item).encode(),\n )\n\n self.log.debug(\"Sent job %s to %s\", task, identity)\n else:\n self.return_jobs[task] = job_info\n\n return 128, time.time()",
"def run(self):\n # 首先创建一堆worker,存放起来\n workers = []\n for worker_id in range(len(self.worker_args)):\n workers.append(self.create_worker(worker_id))\n smac = self.create_server()\n scheduler = self.create_scheduler()\n # 首先将每个worker全部打开,然后运行smac\n for worker in workers:\n worker.start()\n smac.optimize()\n # 最后等待worker结束(实际上并不会结束)\n for worker in workers:\n worker.join()",
"def start_worker(self):\n self._thread_worker = _start_thread(self._start)",
"def on_worker_starts(self):\n pass",
"def runworker():\n app.run(debug=False)",
"def start(self):\n self.threadpool.callInThread(self.run)",
"def run_worker():\n from asu.utils.garbagecollector import GarbageCollector\n from asu.utils.boss import Boss\n from asu.utils.updater import Updater\n\n logging.basicConfig(level=logging.DEBUG)\n log = logging.getLogger(__name__)\n\n log.info(\"start garbage collector\")\n gaco = GarbageCollector()\n gaco.start()\n\n log.info(\"start boss\")\n boss = Boss()\n boss.start()\n\n log.info(\"start updater\")\n uper = Updater()\n uper.start()",
"def thread_task(self):\n for _ in range(100000): \n self.increment()",
"def start_timer():\n TIMERS[\"procedure worker\"] = Timer(1, worker)\n TIMERS[\"procedure worker\"].start()",
"def start_worker(self):\n self._process_worker = Process(target=worker_loop, args=(self.task_obj, \n self._qin, self._qout, self._qout_sync, self.impatient))\n self._process_worker.start()",
"def __start_worker(self, worker):\n self.logfile.debug(\"manager - STARTING %s\" % worker.name)\n assert isinstance(worker, QueuedWorker)\n self.logfile.debug(\"manager - STARTING %s\" % worker.name)\n self.working_stations_pool.allocate_first_free_station(worker)\n data = worker.get_needed_data(self.completed_jobs)\n work_process = Process(target=worker._run, args=(data,))\n work_process.start()\n self.pid_by_worker_name.setdefault(worker.name, work_process.pid)",
"def run(self):\n jouerMPD(self.radio)\n time.sleep(self.duree)\n stopMPD()",
"def start_process(self):\n self.start_time = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n self.starttime_num = time.time()\n self.end_time = \"running...\"",
"def timer_fcn(self):\n\n # get copy of job list\n with self._job_lock:\n # empty the class wide job que\n cur_jobs = self.pump_jobs[:]\n self.pump_jobs = list()\n\n # execute the local job que\n for job in cur_jobs:\n job.execute()\n # wait at least 1 second before executing the next job\n time.sleep(1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The Activate worker generates a new Compute worker for every linked node.
|
def launch(self):
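    # Add activation to the target node, then schedule a Compute worker for every outgoing link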
self.target_node.activation += self.activation_to_add
for n in self.target_node.linksOut.keys():
Worker.pushRandom(Compute(n))
super().launch()
|
[
"def activate(self):\n to_state = self.node_states[:] # a copy\n \n # activate the internal nodes\n for i in range(self.size):\n if self.node_connections.has_key(i):\n total = 0.0\n for key, weight in self.node_connections[i]:\n # nodes outside of the size of the internal are the forewardnodes\n if key >= self.size:\n total += self.forward_states[key - self.size] * weight\n else:\n total += self.node_states[key] * weight\n \n total = self.neural_fn(total)\n to_state[i] = (total > self.neural_thresh)\n \n self.node_states = to_state[:]\n \n \n # activate the rear nodes\n to_state = self.rear_states[:]\n for i in range(self.rsize):\n total = 0.0\n for key, weight in self.internal_rear_connections[i]:\n total += self.node_states[key] * weight\n total = self.neural_fn(total)\n to_state[i] = (total > self.neural_thresh)\n \n self.rear_states = to_state[:]",
"def __init__(self, addresses: List[str], graph_maker: Callable[[Device, tf.Session], T]) -> None:\n self.cluster = tf.train.ClusterSpec({\"worker\": addresses})\n self.population = []\n for task_index in range(len(addresses)):\n device = '/job:worker/task:' + str(task_index)\n server = tf.train.Server(self.cluster, job_name=\"worker\", task_index=task_index)\n sess = tf.Session(server.target)\n self.population.append(graph_maker(device, sess))",
"def _create_workers(self):\n for worker_config in self.__config.worker_configs:\n worker = CopyingManagerWorker(self.__config, worker_config)\n self.__workers[worker_config[\"id\"]] = worker",
"def __call__(self):\n # Initialize all workers\n self.initialize_workers()\n \n # Make tasks and assign each task to a worker\n tasks = self.make_tasks()\n assert len(tasks) <= self.num_worker, 'The number of tasks cannot exceed the number of workers.'\n self.assign_tasks(tasks)\n \n # Stop all workers and terminate all processes\n self.stop_workers()",
"def _add_compute_nodes(scheduler_commands, slots_per_node, number_of_nodes=1):\n initial_compute_nodes = scheduler_commands.get_compute_nodes()\n\n number_of_nodes = len(initial_compute_nodes) + number_of_nodes\n # submit a job to perform a scaling up action and have new instances\n result = scheduler_commands.submit_command(\"sleep 1\", nodes=number_of_nodes, slots=slots_per_node)\n job_id = scheduler_commands.assert_job_submitted(result.stdout)\n scheduler_commands.wait_job_completed(job_id)\n scheduler_commands.assert_job_succeeded(job_id)\n\n return [node for node in scheduler_commands.get_compute_nodes() if node not in initial_compute_nodes]",
"def launch ():\n get_network_info()\n core.registerNew(job_aware_switch)",
"def start_workers(self):\n for worker in self.workers:\n worker.start()",
"def _wait_workers(self):\n self.client = get_client(self.master_address)\n logging.debug(\"client scheduler info: {}\".format(self.client.scheduler_info()))\n if int(self.world_size) <= 1:\n self.worker_portion = 1\n worker_count_min = int(self.world_size * self.worker_portion)\n\n for _ in range(100):\n time.sleep(1)\n n_workers = len(self.client.scheduler_info()[\"workers\"])\n logging.info(\"Accessed Workers: {}\".format(n_workers))\n if n_workers >= worker_count_min:\n workers = self.client.scheduler_info()[\"workers\"]\n workers_list = []\n workers_port = {}\n for k, _ in workers.items():\n workers_list.append(k)\n (ip, port) = k.replace(\"//\", \"\").split(\":\")[1:]\n if ip in workers_port:\n workers_port[ip].append(port)\n else:\n workers_port[ip] = [port]\n os.environ[\"vega_workers_list\"] = json.dumps(workers_port)\n logging.info(\"worker list: {}\".format(workers_list))\n slave_ips = list(set([item[6:].split(\":\")[0] for item in workers_list]))\n slave_ips.remove(General.cluster.master_ip)\n General.cluster.salves = slave_ips\n return 1\n return 0",
"def generate_workers(num_workers):\n \n workers_list = []\n # init workers\n for i in range(num_workers):\n worker = sy.VirtualWorker(hook, id=str(i))\n workers_list.append(worker)\n \n return workers_list",
"def initialize_workers(self):\n # Create pipes as communicators between master and workers\n self.master_conns, self.worker_conns = zip(*[Pipe() for _ in range(self.num_worker)])\n \n # Create a Process for each worker\n self.list_process = [Process(target=self.worker_class(), # individual instantiation for each Process\n args=[master_conn, worker_conn], \n daemon=self.daemonic_worker) \n for master_conn, worker_conn in zip(self.master_conns, self.worker_conns)]\n \n # Start (fork) all processes, so all workers are stand by waiting for master's command to work\n # Note that Linux OS will fork all connection terminals, so it's good to close unused ones here.\n [process.start() for process in self.list_process]\n \n # Close all worker connections here as they are not used in master process\n # Note that this should come after all the processes started\n [worker_conn.close() for worker_conn in self.worker_conns]",
"def _populate(self):\n for k in range(self._nworkers - len(self._workers)):\n w = _Worker(\n self._function,\n self._args,\n self._tasks,\n self._results,\n self._max_tasks,\n self._errors,\n self._error,\n )\n self._workers.append(w)\n w.start()",
"def execute(self, nodenet, nodes, netapi):\n for uid, node in nodes.items():\n node.reset_slots()\n\n # propagate activation\n for uid, node in nodes.items():\n for gate_type in node.get_gate_types():\n gate = node.get_gate(gate_type)\n for link in gate.get_links():\n link.target_slot.add_activation(float(gate.activation) * float(link.weight)) # TODO: where's the string coming from?",
"def launch_optimizer_workers(self, n_itr):\n if self.world_size == 1:\n return\n offset = self.affinity.optimizer[0].get(\"master_cpus\", [0])[0]\n port = find_port(offset=offset)\n affinities = self.affinity.optimizer\n runners = [AsyncOptWorker(\n rank=rank,\n world_size=self.world_size,\n algo=self.algo,\n agent=self.agent,\n n_itr=n_itr,\n affinity=affinities[rank],\n seed=self.seed + 100,\n ctrl=self.ctrl,\n port=port,\n ) for rank in range(1, len(affinities))]\n procs = [mp.Process(target=r.optimize, args=()) for r in runners]\n for p in procs:\n p.start()\n torch.distributed.init_process_group(\n backend=\"nccl\",\n rank=0,\n world_size=self.world_size,\n init_method=f\"tcp://127.0.0.1:{port}\",\n )\n self.optimizer_procs = procs",
"def initialize_workers(self):\n self.workers = []\n for j in range(self.n):\n # generate p according to spammer-hammer model\n p_j = np.random.choice([1., 0.5], p=[self.q, 1 - self.q])\n worker = Worker(j, p_j)\n self.workers.append(worker)\n return",
"def create_workers(self):\n for _ in range(DataCollectors_Configuration.NO_OF_THEARDS):\n thread = Thread(target=self.work)\n # make the thread daemon to stop the thread when main program exits\n thread.daemon = True\n thread.start()",
"def train(self):\n for ens_mem in self.ensemble_members:\n ens_mem.train()",
"def activate(self):\r\n for consitit_name in self: # for each neuron group:\r\n self[consitit_name].activate() # find how future net values are affected from propagations this timestep.\r\n # set net values for the next timestep.\r\n self.clock += 1 # advance net's clock.\r",
"def bootstrap_cluster(used):\n global stash, nodes\n #drecrement used to accommodate for the seednode\n used -= 1\n nodes = stash[:used]\n stash = stash[used:]\n inject_hosts_files()\n log.info(\"Running bootstrap scripts\")\n #bootstrap the seed node\n seeds[0].bootstrap()\n #bootstrap the rest of the nodes\n for n in nodes:\n n.bootstrap()\n save_cluster()\n log.info(\"READY!!\")",
"def training_pool(self):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load all of the tensors required to begin federated learning.
|
def _load_initial_tensors(self):
tensor_dict, round_number = utils.deconstruct_model_proto(
self.model, compression_pipeline=self.compression_pipeline)
if round_number > self.round_number:
self.logger.info(
f'Starting training from round {round_number} of previously saved model'
)
self.round_number = round_number
tensor_key_dict = {
TensorKey(k, self.uuid, self.round_number, False, ('model',)):
v for k, v in tensor_dict.items()
}
# all initial model tensors are loaded here
self.tensor_db.cache_tensor(tensor_key_dict)
self.logger.debug(f'This is the initial tensor_db: {self.tensor_db}')
|
[
"def _load_initial_tensors_from_dict(self, tensor_dict):\n tensor_key_dict = {\n TensorKey(k, self.uuid, self.round_number, False, ('model',)):\n v for k, v in tensor_dict.items()\n }\n # all initial model tensors are loaded here\n self.tensor_db.cache_tensor(tensor_key_dict)\n self.logger.debug(f'This is the initial tensor_db: {self.tensor_db}')",
"def load_models(self):\n load_path, self.iteration = self.get_latest_checkpoint(\n return_iteration=True\n )\n self.q0_net.load(os.path.join(\n load_path, \"Q0_net_state_dict\"\n ))\n self.q1_net.load(os.path.join(\n load_path, \"Q1_net_state_dict\"\n ))\n self.q0_target_net.load(os.path.join(\n load_path, \"Q0_target_net_state_dict\"\n ))\n self.q1_target_net.load(os.path.join(\n load_path, \"Q1_target_net_state_dict\"\n ))\n self.pi_net.load(os.path.join(\n load_path, \"Policy_net_state_dict\"\n ))\n self.q0_optim.load_state_dict(torch.load(os.path.join(\n load_path, \"Q0_optimizer_state_dict\"\n )))\n self.q1_optim.load_state_dict(torch.load(os.path.join(\n load_path, \"Q1_optimizer_state_dict\"\n )))\n self.pi_optim.load_state_dict(torch.load(os.path.join(\n load_path, \"Policy_optimizer_state_dict\"\n )))\n self.alpha_optim.load_state_dict(torch.load(os.path.join(\n load_path, \"Alpha_optimizer_state_dict\"\n )))\n self.log_alpha = torch.load(os.path.join(\n load_path,\n \"Ln_Entropy_Coefficient\"\n ))\n self.replay_buffer.load(os.path.join(\n load_path,\n \"Replay_Buffer_data\"\n ))",
"def load_tensors(self, graph, update_cost=False):\n # Input tensors\n self.x_tf = graph.get_tensor_by_name(\"x_tf:0\")\n self.y_tf = graph.get_tensor_by_name(\"y_tf:0\")\n self.w_tf = graph.get_tensor_by_name(\"w_tf:0\")\n\n # Tensors for training and prediction.\n self.learn_rate_tf = graph.get_tensor_by_name(\"learn_rate_tf:0\")\n self.keep_prob_tf = graph.get_tensor_by_name(\"keep_prob_tf:0\")\n self.train_step_tf = graph.get_operation_by_name('train_step_tf')\n self.logits_tf = graph.get_tensor_by_name(\"logits_tf:0\")\n if not update_cost:\n self.loss_tf = graph.get_tensor_by_name('loss_tf:0')\n else:\n self.loss_tf = self.loss_tensor()\n self.training_tf = graph.get_tensor_by_name(\"training_tf:0\")\n self.extra_update_ops_tf = tf.get_collection(tf.GraphKeys.UPDATE_OPS)",
"def load_all_images(self):\n self._load_train_images()\n self.load_test_images()",
"def _load_tensors(self, graph): \n if len(graph.edges) == 0:\n return True\n\n # Dictionary to store source nodes and destination nodes of edges\n # Also ensures only traversable edges are pushed to database\n to_nodes = dict()\n from_nodes = dict()\n\n # Populating to_nodes and from_nodes using BFS\n adj_list = graph.adj_list\n start_node_indices = graph.start_node_indices\n\n # Queue and visit status of nodes for BFS\n queue = Queue()\n vis = [False] * len(graph.nodes)\n\n for start_node_index in start_node_indices:\n if not vis[start_node_index]:\n vis[start_node_index] = True\n queue.put(start_node_index)\n\n # BFS\n while not queue.empty():\n src_node_index = queue.get()\n\n if src_node_index not in adj_list:\n continue\n\n for [edge_index, dest_node_index] in adj_list[src_node_index]:\n\n if edge_index not in from_nodes:\n from_nodes.update({edge_index : set()})\n \n from_nodes[edge_index].add(src_node_index + 1)\n\n if edge_index not in to_nodes:\n to_nodes.update({edge_index : set()})\n\n to_nodes[edge_index].add(dest_node_index + 1)\n\n if not vis[dest_node_index]:\n vis[dest_node_index] = True\n queue.put(dest_node_index)\n\n # Edge attributes that are not pushed to the database\n NOT_STORED_ATTR = ['label', 'value'] \n\n try:\n # Surrogate Id for tensors\n tensor_id = 0\n\n # Number of mutations per row is the number of attributes being \n # pushed to database.\n # 4 additional attributes, 'model_name', 'tensor_id', \n # 'from_operator_ids' and 'to_operator_ids', present in db other \n # than the class attributes.\n num_attributes = len(vars(graph.edges[0])) + 4\n\n # Number of nodes to be processed per batch i.e.\n # floor(max mutations per batch / number of mutations per row)\n num_edges_per_batch = self._MAX_MUTATIONS // num_attributes\n \n edge_indices = list(to_nodes.keys())\n num_edges = len(edge_indices)\n\n # TO-DO : Add retry logic if a batch fails\n while tensor_id < num_edges:\n with self.database.batch() as batch:\n for _ in range(num_edges_per_batch):\n if tensor_id == num_edges:\n break\n\n edge_index = edge_indices[tensor_id]\n edge = graph.edges[edge_index]\n to_operator_ids = list(to_nodes[edge_index])\n from_operator_ids = list(from_nodes[edge_index])\n\n # To store the database column names and their \n # values to be inserted\n column_names = [\n 'model_name', 'tensor_id', \n 'from_operator_ids', 'to_operator_ids'\n ]\n values = [\n graph.model_name, tensor_id + 1, \n from_operator_ids, to_operator_ids\n ]\n\n attrs = vars(edge)\n for item in attrs.items():\n if item[0] not in NOT_STORED_ATTR:\n column_names.append(item[0])\n values.append(item[1])\n\n column_names = tuple(column_names)\n values = tuple(values)\n\n batch.insert(\n table = 'Tensors',\n columns = column_names,\n values = [values]\n )\n tensor_id += 1\n return True\n except Exception as e:\n print(e)\n query = \"DELETE FROM Models WHERE model_name = \\'\" + graph.model_name + \"\\'\"\n deleted_rows = self.database.execute_partitioned_dml(\n query\n )\n return False",
"def _load_base(self):\n\n # Check if pre-computed \"tables\" exist for faster loading\n fn_prestored = os.path.join(self.path, '__prestored')\n if os.path.isdir(fn_prestored):\n try:\n self.entity2idx = common.json_load(\n os.path.join(fn_prestored, 'entity2idx.json'))\n self.rel2idx = common.json_load(\n os.path.join(fn_prestored, 'rel2idx.json'))\n self.train_set = [tuple(l) for l in common.json_load(\n os.path.join(fn_prestored, 'train_set.json'))]\n self.test_set = [tuple(l) for l in common.json_load(\n os.path.join(fn_prestored, 'test_set.json'))]\n self.valid_set = [tuple(l) for l in common.json_load(\n os.path.join(fn_prestored, 'valid_set.json'))]\n except FileExistsError as e:\n print(e)\n else:\n # load each data_type in order\n\n data = {\n \"train\": list(self._load_data_file(\"train\")),\n \"valid\": list(self._load_data_file(\"valid\")),\n \"test\": list(self._load_data_file(\"test\")),\n }\n\n # Needs to be done over all datasets, as there are some defective\n # datasets like WN18RR or Yago3-10\n self._generate_unique_ids(\n data[\"train\"][0] + data[\"valid\"][0] + data[\"test\"][0],\n data[\"train\"][1] + data[\"valid\"][1] + data[\"test\"][1],\n data[\"train\"][2] + data[\"valid\"][2] + data[\"test\"][2])\n\n for data_type in [\"train\", \"test\", \"valid\"]:\n heads, rels, tails = data[data_type]\n\n if data_type == \"train\":\n self.train_set, self.train_oog = self._convert_names_to_ids(\n heads, rels,\n tails)\n if self.train_oog:\n print(self.train_oog)\n elif data_type == \"test\":\n self.test_set, self.test_oog = self._convert_names_to_ids(\n heads, rels,\n tails)\n if self.test_oog:\n print(self.test_oog)\n elif data_type == \"valid\":\n self.valid_set, self.valid_oog = self._convert_names_to_ids(\n heads, rels,\n tails)\n if self.valid_oog:\n print(self.valid_oog)\n\n # print(\"If the list are not empty, something is wrong with the data:\", train_oog, valid_oog, test_oog)\n\n # Create folder and dump generated files to preloading\n common.mkdir_p(fn_prestored)\n common.json_dump(os.path.join(fn_prestored, 'entity2idx.json'),\n self.entity2idx)\n common.json_dump(os.path.join(fn_prestored, 'rel2idx.json'),\n self.rel2idx)\n common.json_dump(os.path.join(fn_prestored, 'train_set.json'),\n self.train_set)\n common.json_dump(os.path.join(fn_prestored, 'test_set.json'),\n self.test_set)\n common.json_dump(os.path.join(fn_prestored, 'valid_set.json'),\n self.valid_set)\n\n # For easier access and checking if other data types are added\n self.data_type2array = {\"train\": self.train_set,\n \"test\": self.test_set,\n \"valid\": self.valid_set}\n\n # Set some useful variables\n self.n_entities = len(self.entity2idx)\n self.n_relations = len(self.rel2idx)\n self.number_of_entries = {\"train\": len(self.train_set),\n \"test\": len(self.test_set),\n \"valid\": len(self.valid_set)}",
"def __init_tensors(self, im_shape):\n self.__init_tensor_register()\n self.__init_input(im_shape)",
"def loadStateAndData(self):\n\n with ThreadPoolExecutor(max_workers=2) as e:\n e.submit(self.loadState, 'net6.pickle')\n e.submit(self.loadData, reshape=True)",
"def __init_tensor_register(self):\n self.tensors = dict()",
"def load_train(self):\n images, labels = self.load(os.path.join('mnist', 'train', 'images'),\n os.path.join('mnist', 'train', 'labels'))\n self.train_data = list(zip(images, labels))",
"def load_train(self):\n images, labels = self.load(os.path.join('mnist', 'train', 'images'),\n os.path.join('mnist', 'train', 'labels'))\n self.train_data = zip(images, labels)",
"async def train_federated_model(\n *,\n initialize: tff.Computation,\n train: tff.Computation,\n train_data_source: tff.program.FederatedDataSource,\n evaluation: tff.Computation,\n evaluation_data_source: tff.program.FederatedDataSource,\n total_rounds: int,\n num_clients: int,\n train_metrics_manager: Optional[\n tff.program.ReleaseManager[tff.program.ReleasableStructure, int]\n ] = None,\n evaluation_metrics_manager: Optional[\n tff.program.ReleaseManager[tff.program.ReleasableStructure, int]\n ] = None,\n model_output_manager: Optional[\n tff.program.ReleaseManager[\n tff.program.ReleasableStructure, Optional[object]\n ]\n ] = None,\n program_state_manager: Optional[\n tff.program.ProgramStateManager[tff.program.ProgramStateStructure]\n ] = None,\n) -> None:\n tff.program.check_in_federated_context()\n _check_expected_type_signatures(\n initialize=initialize,\n train=train,\n train_data_source=train_data_source,\n evaluation=evaluation,\n evaluation_data_source=evaluation_data_source,\n )\n\n # Cast the `program_state_manager` to a more specific type: a manager that\n # loads and saves `_ProgramState`s instead of a manager that loads and saves\n # `tff.program.ProgramStateStructure`s. This allows the program logic to:\n # * Keep `_ProgramState` private.\n # * Have static typing within the program logic.\n # * Require callers to provide a `program_state_manager` capable of handling\n # any `tff.program.ProgramStateStructure`.\n program_state_manager = typing.cast(\n Optional[tff.program.ProgramStateManager[_ProgramState]],\n program_state_manager,\n )\n\n initial_state = initialize()\n\n # Try to load the latest program state. If the program logic failed on a\n # previous run, this program state can be used to restore the execution of\n # this program logic and skip unnecessary steps.\n if program_state_manager is not None:\n initial_state = await tff.program.materialize_value(initial_state)\n structure = _ProgramState(initial_state, round_num=0)\n program_state, version = await program_state_manager.load_latest(structure)\n\n # TODO: b/271445312 - Cast `program_state` to `_ProgramState`. `TypeVar`s\n # are lost from async function signatures.\n program_state = typing.cast(_ProgramState, program_state)\n else:\n program_state = None\n version = 0\n\n # Assign the inputs to the program logic using the loaded program state if\n # available or the initialized state.\n if program_state is not None:\n state = program_state.state\n start_round = program_state.round_num + 1\n else:\n state = initial_state\n start_round = 1\n\n # Construct a async context manager to group and run tasks concurrently.\n # Program logic will release values and save program state, these functions\n # are asynchronous and can be run concurrently. However, it is possible to\n # schedule these functions differently using\n # [asyncio](https://docs.python.org/3/library/asyncio.html).\n async with _TaskGroup() as task_group:\n # Construct an iterator from the `train_data_source` which returns client\n # data used during training.\n train_data_iterator = train_data_source.iterator()\n\n # Train `state` for some number of rounds. 
Both `state` and `start_round`\n # are inputs to this loop and are saved using the `program_state_manager`.\n # This means that if there is a failure during training, previously trained\n # rounds will be skipped.\n for round_num in range(start_round, total_rounds + 1):\n\n # Run one round of training.\n train_data = train_data_iterator.select(num_clients)\n state, metrics = train(state, train_data)\n\n # Release the training metrics.\n if train_metrics_manager is not None:\n _, metrics_type = train.type_signature.result # pytype: disable=attribute-error\n metrics_type = metrics_type.member\n task_group.create_task(\n train_metrics_manager.release(metrics, metrics_type, round_num)\n )\n\n # Save the current program state.\n if program_state_manager is not None:\n program_state = _ProgramState(state, round_num)\n version = version + 1\n task_group.create_task(\n program_state_manager.save(program_state, version)\n )\n\n # Run one round of evaluation. This is similar to running one round of\n # training above, except using the `evaluation` computation and the\n # `evaluation_data_source`.\n evaluation_data_iterator = evaluation_data_source.iterator()\n evaluation_data = evaluation_data_iterator.select(num_clients)\n evaluation_metrics = evaluation(state, evaluation_data)\n\n # Release the evaluation metrics.\n if evaluation_metrics_manager is not None:\n evaluation_metrics_type = evaluation.type_signature.result.member # pytype: disable=attribute-error\n task_group.create_task(\n evaluation_metrics_manager.release(\n evaluation_metrics, evaluation_metrics_type, total_rounds + 1\n )\n )\n\n # Release the model output.\n if model_output_manager is not None:\n _, state_type = train.type_signature.result # pytype: disable=attribute-error\n state_type = state_type.member\n task_group.create_task(\n model_output_manager.release(state, state_type, None)\n )",
"def load_model(self, import_fn, map_location='cuda'):\n loaded_state_dict = torch.load(import_fn, map_location=map_location)\n self.ae.load_state_dict(loaded_state_dict)",
"def __infer_existing_tensors(self, F) -> None:\n for attr_name, types_with_attr in F.get_feature_list().items():\n for vt in types_with_attr:\n attr_dtype = F.get_data(np.array([0]), vt, attr_name).dtype\n self.create_named_tensor(\n attr_name=attr_name,\n properties=None,\n vertex_type=vt,\n dtype=attr_dtype,\n )",
"def load_weights(self):\n #TODO: Move the weights file selection to main.py\n self.agent1.actor_local.load_state_dict(torch.load('results/results_7/agent1_actor.pth'))\n self.agent1.critic_local.load_state_dict(torch.load('results/results_7/agent1_critic.pth'))\n\n self.agent2.actor_local.load_state_dict(torch.load('results/results_7/agent2_actor.pth'))\n self.agent2.critic_local.load_state_dict(torch.load('results/results_7/agent2_critic.pth'))",
"def main():\n\n # Load model\n num_classes = 365\n model = models.resnet18(num_classes=num_classes)\n model.load_state_dict(torch.load(model_path)['state_dict'])\n model.eval()\n\n device = torch.device(\n \"cuda\" if torch.cuda.is_available() \n else \"cpu\"\n )\n\n model.to(device)\n\n # Create dataloaders with paths\n original_classes_dataloader = load_data_with_paths(original_classes_datadir)\n new_classes_dataloader = load_data_with_paths(new_classes_datadir)\n\n # Extract feature activations\n original_classnames, original_labels, original_features, original_paths = get_features_with_paths(device, model, original_classes_dataloader)\n\n # Save\n new_classnames, new_classes_labels, new_classes_features, new_classes_paths = get_features_with_paths(device, model, new_classes_dataloader)\n\n np.savez(\n 'test_features',\n #'places_features',\n original_classnames=original_classnames,\n original_labels=original_labels,\n original_feature=original_features,\n original_paths=original_paths,\n new_classnames=new_classnames,\n new_classes_labels=new_classes_labels,\n new_classes_features=new_classes_features,\n new_classes_paths=new_classes_paths\n )\n print('Done')",
"def load_dataset(self):",
"def load_embeddings(self):\n\n path = os.path.join(self.train_path, 'char-CNN-RNN-embeddings.pickle')\n file = open(path, 'rb')\n embeddings = pickle.load(file, encoding = 'iso-8859-1')\n embeddings = np.array(embeddings)\n #embeddings = torch.from_numpy(embeddings)\n #embeddings = embeddings.to(device)\n self.embeddings = embeddings\n print('Embeddings load for {} files'.format(embeddings.shape[0]))\n print('Each file consists of {} embeddings of size {}'.format(embeddings.shape[1], embeddings.shape[2]))\n file.close()",
"def load_data():\n\n # Get the data.\n train_data_filename = maybe_download('train-images-idx3-ubyte.gz')\n train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')\n test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')\n test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')\n\n # Extract it into numpy arrays.\n train_data = extract_data(train_data_filename, FLAGS.train_size + FLAGS.validation_size)\n train_labels = extract_labels(train_labels_filename, FLAGS.train_size + FLAGS.validation_size)\n test_data = extract_data(test_data_filename, FLAGS.test_size)\n test_labels = extract_labels(test_labels_filename, FLAGS.test_size)\n\n validation_data = train_data[:FLAGS.validation_size, ...]\n validation_labels = train_labels[:FLAGS.validation_size]\n train_data = train_data[FLAGS.validation_size:, ...]\n train_labels = train_labels[FLAGS.validation_size:]\n\n return train_data, train_labels, validation_data, validation_labels, test_data, test_labels"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load all of the tensors required to begin federated learning.
|
def _load_initial_tensors_from_dict(self, tensor_dict):
tensor_key_dict = {
TensorKey(k, self.uuid, self.round_number, False, ('model',)):
v for k, v in tensor_dict.items()
}
# all initial model tensors are loaded here
self.tensor_db.cache_tensor(tensor_key_dict)
self.logger.debug(f'This is the initial tensor_db: {self.tensor_db}')
|
[
"def _load_initial_tensors(self):\n tensor_dict, round_number = utils.deconstruct_model_proto(\n self.model, compression_pipeline=self.compression_pipeline)\n\n if round_number > self.round_number:\n self.logger.info(\n f'Starting training from round {round_number} of previously saved model'\n )\n self.round_number = round_number\n tensor_key_dict = {\n TensorKey(k, self.uuid, self.round_number, False, ('model',)):\n v for k, v in tensor_dict.items()\n }\n # all initial model tensors are loaded here\n self.tensor_db.cache_tensor(tensor_key_dict)\n self.logger.debug(f'This is the initial tensor_db: {self.tensor_db}')",
"def load_models(self):\n load_path, self.iteration = self.get_latest_checkpoint(\n return_iteration=True\n )\n self.q0_net.load(os.path.join(\n load_path, \"Q0_net_state_dict\"\n ))\n self.q1_net.load(os.path.join(\n load_path, \"Q1_net_state_dict\"\n ))\n self.q0_target_net.load(os.path.join(\n load_path, \"Q0_target_net_state_dict\"\n ))\n self.q1_target_net.load(os.path.join(\n load_path, \"Q1_target_net_state_dict\"\n ))\n self.pi_net.load(os.path.join(\n load_path, \"Policy_net_state_dict\"\n ))\n self.q0_optim.load_state_dict(torch.load(os.path.join(\n load_path, \"Q0_optimizer_state_dict\"\n )))\n self.q1_optim.load_state_dict(torch.load(os.path.join(\n load_path, \"Q1_optimizer_state_dict\"\n )))\n self.pi_optim.load_state_dict(torch.load(os.path.join(\n load_path, \"Policy_optimizer_state_dict\"\n )))\n self.alpha_optim.load_state_dict(torch.load(os.path.join(\n load_path, \"Alpha_optimizer_state_dict\"\n )))\n self.log_alpha = torch.load(os.path.join(\n load_path,\n \"Ln_Entropy_Coefficient\"\n ))\n self.replay_buffer.load(os.path.join(\n load_path,\n \"Replay_Buffer_data\"\n ))",
"def load_tensors(self, graph, update_cost=False):\n # Input tensors\n self.x_tf = graph.get_tensor_by_name(\"x_tf:0\")\n self.y_tf = graph.get_tensor_by_name(\"y_tf:0\")\n self.w_tf = graph.get_tensor_by_name(\"w_tf:0\")\n\n # Tensors for training and prediction.\n self.learn_rate_tf = graph.get_tensor_by_name(\"learn_rate_tf:0\")\n self.keep_prob_tf = graph.get_tensor_by_name(\"keep_prob_tf:0\")\n self.train_step_tf = graph.get_operation_by_name('train_step_tf')\n self.logits_tf = graph.get_tensor_by_name(\"logits_tf:0\")\n if not update_cost:\n self.loss_tf = graph.get_tensor_by_name('loss_tf:0')\n else:\n self.loss_tf = self.loss_tensor()\n self.training_tf = graph.get_tensor_by_name(\"training_tf:0\")\n self.extra_update_ops_tf = tf.get_collection(tf.GraphKeys.UPDATE_OPS)",
"def load_all_images(self):\n self._load_train_images()\n self.load_test_images()",
"def _load_tensors(self, graph): \n if len(graph.edges) == 0:\n return True\n\n # Dictionary to store source nodes and destination nodes of edges\n # Also ensures only traversable edges are pushed to database\n to_nodes = dict()\n from_nodes = dict()\n\n # Populating to_nodes and from_nodes using BFS\n adj_list = graph.adj_list\n start_node_indices = graph.start_node_indices\n\n # Queue and visit status of nodes for BFS\n queue = Queue()\n vis = [False] * len(graph.nodes)\n\n for start_node_index in start_node_indices:\n if not vis[start_node_index]:\n vis[start_node_index] = True\n queue.put(start_node_index)\n\n # BFS\n while not queue.empty():\n src_node_index = queue.get()\n\n if src_node_index not in adj_list:\n continue\n\n for [edge_index, dest_node_index] in adj_list[src_node_index]:\n\n if edge_index not in from_nodes:\n from_nodes.update({edge_index : set()})\n \n from_nodes[edge_index].add(src_node_index + 1)\n\n if edge_index not in to_nodes:\n to_nodes.update({edge_index : set()})\n\n to_nodes[edge_index].add(dest_node_index + 1)\n\n if not vis[dest_node_index]:\n vis[dest_node_index] = True\n queue.put(dest_node_index)\n\n # Edge attributes that are not pushed to the database\n NOT_STORED_ATTR = ['label', 'value'] \n\n try:\n # Surrogate Id for tensors\n tensor_id = 0\n\n # Number of mutations per row is the number of attributes being \n # pushed to database.\n # 4 additional attributes, 'model_name', 'tensor_id', \n # 'from_operator_ids' and 'to_operator_ids', present in db other \n # than the class attributes.\n num_attributes = len(vars(graph.edges[0])) + 4\n\n # Number of nodes to be processed per batch i.e.\n # floor(max mutations per batch / number of mutations per row)\n num_edges_per_batch = self._MAX_MUTATIONS // num_attributes\n \n edge_indices = list(to_nodes.keys())\n num_edges = len(edge_indices)\n\n # TO-DO : Add retry logic if a batch fails\n while tensor_id < num_edges:\n with self.database.batch() as batch:\n for _ in range(num_edges_per_batch):\n if tensor_id == num_edges:\n break\n\n edge_index = edge_indices[tensor_id]\n edge = graph.edges[edge_index]\n to_operator_ids = list(to_nodes[edge_index])\n from_operator_ids = list(from_nodes[edge_index])\n\n # To store the database column names and their \n # values to be inserted\n column_names = [\n 'model_name', 'tensor_id', \n 'from_operator_ids', 'to_operator_ids'\n ]\n values = [\n graph.model_name, tensor_id + 1, \n from_operator_ids, to_operator_ids\n ]\n\n attrs = vars(edge)\n for item in attrs.items():\n if item[0] not in NOT_STORED_ATTR:\n column_names.append(item[0])\n values.append(item[1])\n\n column_names = tuple(column_names)\n values = tuple(values)\n\n batch.insert(\n table = 'Tensors',\n columns = column_names,\n values = [values]\n )\n tensor_id += 1\n return True\n except Exception as e:\n print(e)\n query = \"DELETE FROM Models WHERE model_name = \\'\" + graph.model_name + \"\\'\"\n deleted_rows = self.database.execute_partitioned_dml(\n query\n )\n return False",
"def _load_base(self):\n\n # Check if pre-computed \"tables\" exist for faster loading\n fn_prestored = os.path.join(self.path, '__prestored')\n if os.path.isdir(fn_prestored):\n try:\n self.entity2idx = common.json_load(\n os.path.join(fn_prestored, 'entity2idx.json'))\n self.rel2idx = common.json_load(\n os.path.join(fn_prestored, 'rel2idx.json'))\n self.train_set = [tuple(l) for l in common.json_load(\n os.path.join(fn_prestored, 'train_set.json'))]\n self.test_set = [tuple(l) for l in common.json_load(\n os.path.join(fn_prestored, 'test_set.json'))]\n self.valid_set = [tuple(l) for l in common.json_load(\n os.path.join(fn_prestored, 'valid_set.json'))]\n except FileExistsError as e:\n print(e)\n else:\n # load each data_type in order\n\n data = {\n \"train\": list(self._load_data_file(\"train\")),\n \"valid\": list(self._load_data_file(\"valid\")),\n \"test\": list(self._load_data_file(\"test\")),\n }\n\n # Needs to be done over all datasets, as there are some defective\n # datasets like WN18RR or Yago3-10\n self._generate_unique_ids(\n data[\"train\"][0] + data[\"valid\"][0] + data[\"test\"][0],\n data[\"train\"][1] + data[\"valid\"][1] + data[\"test\"][1],\n data[\"train\"][2] + data[\"valid\"][2] + data[\"test\"][2])\n\n for data_type in [\"train\", \"test\", \"valid\"]:\n heads, rels, tails = data[data_type]\n\n if data_type == \"train\":\n self.train_set, self.train_oog = self._convert_names_to_ids(\n heads, rels,\n tails)\n if self.train_oog:\n print(self.train_oog)\n elif data_type == \"test\":\n self.test_set, self.test_oog = self._convert_names_to_ids(\n heads, rels,\n tails)\n if self.test_oog:\n print(self.test_oog)\n elif data_type == \"valid\":\n self.valid_set, self.valid_oog = self._convert_names_to_ids(\n heads, rels,\n tails)\n if self.valid_oog:\n print(self.valid_oog)\n\n # print(\"If the list are not empty, something is wrong with the data:\", train_oog, valid_oog, test_oog)\n\n # Create folder and dump generated files to preloading\n common.mkdir_p(fn_prestored)\n common.json_dump(os.path.join(fn_prestored, 'entity2idx.json'),\n self.entity2idx)\n common.json_dump(os.path.join(fn_prestored, 'rel2idx.json'),\n self.rel2idx)\n common.json_dump(os.path.join(fn_prestored, 'train_set.json'),\n self.train_set)\n common.json_dump(os.path.join(fn_prestored, 'test_set.json'),\n self.test_set)\n common.json_dump(os.path.join(fn_prestored, 'valid_set.json'),\n self.valid_set)\n\n # For easier access and checking if other data types are added\n self.data_type2array = {\"train\": self.train_set,\n \"test\": self.test_set,\n \"valid\": self.valid_set}\n\n # Set some useful variables\n self.n_entities = len(self.entity2idx)\n self.n_relations = len(self.rel2idx)\n self.number_of_entries = {\"train\": len(self.train_set),\n \"test\": len(self.test_set),\n \"valid\": len(self.valid_set)}",
"def __init_tensors(self, im_shape):\n self.__init_tensor_register()\n self.__init_input(im_shape)",
"def loadStateAndData(self):\n\n with ThreadPoolExecutor(max_workers=2) as e:\n e.submit(self.loadState, 'net6.pickle')\n e.submit(self.loadData, reshape=True)",
"def __init_tensor_register(self):\n self.tensors = dict()",
"def load_train(self):\n images, labels = self.load(os.path.join('mnist', 'train', 'images'),\n os.path.join('mnist', 'train', 'labels'))\n self.train_data = list(zip(images, labels))",
"def load_train(self):\n images, labels = self.load(os.path.join('mnist', 'train', 'images'),\n os.path.join('mnist', 'train', 'labels'))\n self.train_data = zip(images, labels)",
"async def train_federated_model(\n *,\n initialize: tff.Computation,\n train: tff.Computation,\n train_data_source: tff.program.FederatedDataSource,\n evaluation: tff.Computation,\n evaluation_data_source: tff.program.FederatedDataSource,\n total_rounds: int,\n num_clients: int,\n train_metrics_manager: Optional[\n tff.program.ReleaseManager[tff.program.ReleasableStructure, int]\n ] = None,\n evaluation_metrics_manager: Optional[\n tff.program.ReleaseManager[tff.program.ReleasableStructure, int]\n ] = None,\n model_output_manager: Optional[\n tff.program.ReleaseManager[\n tff.program.ReleasableStructure, Optional[object]\n ]\n ] = None,\n program_state_manager: Optional[\n tff.program.ProgramStateManager[tff.program.ProgramStateStructure]\n ] = None,\n) -> None:\n tff.program.check_in_federated_context()\n _check_expected_type_signatures(\n initialize=initialize,\n train=train,\n train_data_source=train_data_source,\n evaluation=evaluation,\n evaluation_data_source=evaluation_data_source,\n )\n\n # Cast the `program_state_manager` to a more specific type: a manager that\n # loads and saves `_ProgramState`s instead of a manager that loads and saves\n # `tff.program.ProgramStateStructure`s. This allows the program logic to:\n # * Keep `_ProgramState` private.\n # * Have static typing within the program logic.\n # * Require callers to provide a `program_state_manager` capable of handling\n # any `tff.program.ProgramStateStructure`.\n program_state_manager = typing.cast(\n Optional[tff.program.ProgramStateManager[_ProgramState]],\n program_state_manager,\n )\n\n initial_state = initialize()\n\n # Try to load the latest program state. If the program logic failed on a\n # previous run, this program state can be used to restore the execution of\n # this program logic and skip unnecessary steps.\n if program_state_manager is not None:\n initial_state = await tff.program.materialize_value(initial_state)\n structure = _ProgramState(initial_state, round_num=0)\n program_state, version = await program_state_manager.load_latest(structure)\n\n # TODO: b/271445312 - Cast `program_state` to `_ProgramState`. `TypeVar`s\n # are lost from async function signatures.\n program_state = typing.cast(_ProgramState, program_state)\n else:\n program_state = None\n version = 0\n\n # Assign the inputs to the program logic using the loaded program state if\n # available or the initialized state.\n if program_state is not None:\n state = program_state.state\n start_round = program_state.round_num + 1\n else:\n state = initial_state\n start_round = 1\n\n # Construct a async context manager to group and run tasks concurrently.\n # Program logic will release values and save program state, these functions\n # are asynchronous and can be run concurrently. However, it is possible to\n # schedule these functions differently using\n # [asyncio](https://docs.python.org/3/library/asyncio.html).\n async with _TaskGroup() as task_group:\n # Construct an iterator from the `train_data_source` which returns client\n # data used during training.\n train_data_iterator = train_data_source.iterator()\n\n # Train `state` for some number of rounds. 
Both `state` and `start_round`\n # are inputs to this loop and are saved using the `program_state_manager`.\n # This means that if there is a failure during training, previously trained\n # rounds will be skipped.\n for round_num in range(start_round, total_rounds + 1):\n\n # Run one round of training.\n train_data = train_data_iterator.select(num_clients)\n state, metrics = train(state, train_data)\n\n # Release the training metrics.\n if train_metrics_manager is not None:\n _, metrics_type = train.type_signature.result # pytype: disable=attribute-error\n metrics_type = metrics_type.member\n task_group.create_task(\n train_metrics_manager.release(metrics, metrics_type, round_num)\n )\n\n # Save the current program state.\n if program_state_manager is not None:\n program_state = _ProgramState(state, round_num)\n version = version + 1\n task_group.create_task(\n program_state_manager.save(program_state, version)\n )\n\n # Run one round of evaluation. This is similar to running one round of\n # training above, except using the `evaluation` computation and the\n # `evaluation_data_source`.\n evaluation_data_iterator = evaluation_data_source.iterator()\n evaluation_data = evaluation_data_iterator.select(num_clients)\n evaluation_metrics = evaluation(state, evaluation_data)\n\n # Release the evaluation metrics.\n if evaluation_metrics_manager is not None:\n evaluation_metrics_type = evaluation.type_signature.result.member # pytype: disable=attribute-error\n task_group.create_task(\n evaluation_metrics_manager.release(\n evaluation_metrics, evaluation_metrics_type, total_rounds + 1\n )\n )\n\n # Release the model output.\n if model_output_manager is not None:\n _, state_type = train.type_signature.result # pytype: disable=attribute-error\n state_type = state_type.member\n task_group.create_task(\n model_output_manager.release(state, state_type, None)\n )",
"def load_model(self, import_fn, map_location='cuda'):\n loaded_state_dict = torch.load(import_fn, map_location=map_location)\n self.ae.load_state_dict(loaded_state_dict)",
"def __infer_existing_tensors(self, F) -> None:\n for attr_name, types_with_attr in F.get_feature_list().items():\n for vt in types_with_attr:\n attr_dtype = F.get_data(np.array([0]), vt, attr_name).dtype\n self.create_named_tensor(\n attr_name=attr_name,\n properties=None,\n vertex_type=vt,\n dtype=attr_dtype,\n )",
"def load_weights(self):\n #TODO: Move the weights file selection to main.py\n self.agent1.actor_local.load_state_dict(torch.load('results/results_7/agent1_actor.pth'))\n self.agent1.critic_local.load_state_dict(torch.load('results/results_7/agent1_critic.pth'))\n\n self.agent2.actor_local.load_state_dict(torch.load('results/results_7/agent2_actor.pth'))\n self.agent2.critic_local.load_state_dict(torch.load('results/results_7/agent2_critic.pth'))",
"def main():\n\n # Load model\n num_classes = 365\n model = models.resnet18(num_classes=num_classes)\n model.load_state_dict(torch.load(model_path)['state_dict'])\n model.eval()\n\n device = torch.device(\n \"cuda\" if torch.cuda.is_available() \n else \"cpu\"\n )\n\n model.to(device)\n\n # Create dataloaders with paths\n original_classes_dataloader = load_data_with_paths(original_classes_datadir)\n new_classes_dataloader = load_data_with_paths(new_classes_datadir)\n\n # Extract feature activations\n original_classnames, original_labels, original_features, original_paths = get_features_with_paths(device, model, original_classes_dataloader)\n\n # Save\n new_classnames, new_classes_labels, new_classes_features, new_classes_paths = get_features_with_paths(device, model, new_classes_dataloader)\n\n np.savez(\n 'test_features',\n #'places_features',\n original_classnames=original_classnames,\n original_labels=original_labels,\n original_feature=original_features,\n original_paths=original_paths,\n new_classnames=new_classnames,\n new_classes_labels=new_classes_labels,\n new_classes_features=new_classes_features,\n new_classes_paths=new_classes_paths\n )\n print('Done')",
"def load_dataset(self):",
"def load_embeddings(self):\n\n path = os.path.join(self.train_path, 'char-CNN-RNN-embeddings.pickle')\n file = open(path, 'rb')\n embeddings = pickle.load(file, encoding = 'iso-8859-1')\n embeddings = np.array(embeddings)\n #embeddings = torch.from_numpy(embeddings)\n #embeddings = embeddings.to(device)\n self.embeddings = embeddings\n print('Embeddings load for {} files'.format(embeddings.shape[0]))\n print('Each file consists of {} embeddings of size {}'.format(embeddings.shape[1], embeddings.shape[2]))\n file.close()",
"def load_data():\n\n # Get the data.\n train_data_filename = maybe_download('train-images-idx3-ubyte.gz')\n train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')\n test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')\n test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')\n\n # Extract it into numpy arrays.\n train_data = extract_data(train_data_filename, FLAGS.train_size + FLAGS.validation_size)\n train_labels = extract_labels(train_labels_filename, FLAGS.train_size + FLAGS.validation_size)\n test_data = extract_data(test_data_filename, FLAGS.test_size)\n test_labels = extract_labels(test_labels_filename, FLAGS.test_size)\n\n validation_data = train_data[:FLAGS.validation_size, ...]\n validation_labels = train_labels[:FLAGS.validation_size]\n train_data = train_data[FLAGS.validation_size:, ...]\n train_labels = train_labels[FLAGS.validation_size:]\n\n return train_data, train_labels, validation_data, validation_labels, test_data, test_labels"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Determine if the collaborator certificate and ID are valid for this federation.
|
def valid_collaborator_cn_and_id(self, cert_common_name,
collaborator_common_name):
# if self.test_mode_whitelist is None, then the common_name must
# match collaborator_common_name and be in authorized_cols
# FIXME: '' instead of None is just for protobuf compatibility.
# Cleaner solution?
if self.single_col_cert_common_name == '':
return (cert_common_name == collaborator_common_name
and collaborator_common_name in self.authorized_cols)
# otherwise, common_name must be in whitelist and
# collaborator_common_name must be in authorized_cols
else:
return (cert_common_name == self.single_col_cert_common_name
and collaborator_common_name in self.authorized_cols)
|
[
"def check_id(self):\n\n is_file = os.path.isfile(self.id_path)\n is_valid = self.validate_id_file()\n return bool(is_file and is_valid)",
"def check_cid(self) -> bool:\n return True",
"def is_valid(self):\n return self.identifier is not None and self.identifier != \"\"",
"def is_valid(self) -> bool:\n if not (0 < self.id64 < 2**64):\n return False # this shouldn't ever happen unless someone messes around with id64 but w/e\n if not (Universe.Invalid < self.universe <= Universe.Dev):\n return False\n if not (Type.Invalid < self.type <= Type.AnonUser):\n return False\n\n match self.type:\n case Type.Individual:\n return self.id != 0 and Instance.All <= self.instance <= Instance.Web\n case Type.Clan:\n return self.id != 0 and self.instance == Instance.All\n case Type.GameServer:\n return self.id != 0\n return True",
"def _kc_ident_in_resource(self):\n kc_sys_ids = [\n ident for ident in self.resource.get('identifier', []) if\n ident['system'] == self.user.kc_identifier_system]\n if not kc_sys_ids:\n return False\n if len(kc_sys_ids) != 1:\n raise ValueError(\n \"unexpected multiple KC identifiers on Patient \"\n f\"{self.resource['id']}\")\n result = kc_sys_ids[0]['value'] == self.user.kc_identifier_value\n # Cache internals in self.user if this happens to be the owners\n if result:\n self.user.extract_internals()\n return result",
"def _is_data_valid(self, request):\n course_id = request.data.get('course_id')\n\n if not course_id:\n return False, None, 'Field course_id is missing.'\n\n try:\n course_key = CourseKey.from_string(course_id)\n courses.get_course(course_key)\n except (InvalidKeyError, ValueError) as ex:\n log.exception('Unable to locate course matching %s.', course_id)\n return False, None, str(ex)\n\n return True, course_key, None",
"def _intermediary_account_exists(self):\n party_details = get_counterpartys_intermediary_details(self.acm_obj)\n if party_details.get('NAME'):\n return True\n return False",
"def _isMemberIdValid(self, verifiedMemberId: int) -> bool:\n return verifiedMemberId is not None and verifiedMemberId == self._memberId",
"def verified(presentation: Presentation, rule: Rule) -> bool:\n principals = []\n for c in presentation.credentials:\n try:\n ok, principal = validate_chain(c)\n assert ok\n principals.append(principal)\n except:\n return False\n return satisfies(principals, rule)",
"def is_proposal_tac_member(self, proposal_code):\n\n return (\n len(\n set(self._tac_member_partners).intersection(\n self._proposal_partners(proposal_code)\n )\n )\n > 0\n )",
"def validate_cert_chain(self):\r\n\r\n\t\tchain = self.trusting_chain\r\n\t\tif len(self.trusting_chain) <= 1:\r\n\t\t\treturn False \r\n\t\tfor i in range(0, len(chain) - 1):\r\n\r\n\t\t\tif not self.validate_certificate(chain[i]):\r\n\t\t\t\treturn False\r\n\r\n\t\t\t#verifies if the signatures are valid \r\n\t\t\tif not self.validate_signature(chain[i+1], chain[i]):\r\n\t\t\t\treturn False\r\n\t\t\t\r\n\t\t\t# verifies if the certificate is not on a CRL \r\n\t\t\tif not self.crl_validation(chain[i]):\r\n\t\t\t\treturn False\r\n\t\t\t\r\n\t\treturn True",
"def validate_id_file(self):\n\n try:\n f_id = open(self.id_path, \"r\")\n except IOError:\n return False\n\n is_valid = bool(re.search(get_id_re(), f_id.read()))\n\n f_id.close()\n\n return is_valid",
"def verify_identity(self):\n global static_model\n if self.identity == None:\n return False\n if isinstance(self.identity, str):\n if len(self.identity) > 0:\n for pen in static_model.available_pens:\n if self.identity.upper() == pen.identity.upper():\n return True\n return False",
"def has_identity(self):\n return self.identity_group_ref is None or self.identity is not None",
"def certificateExists(self, id):\n# _log.debug(\"certificateExist\")\n# return digest(\"{}cert\".format(id)) in self.storage\n return cert_key_from_id(id) in self.storage",
"def is_current_collaborator(self):\n latest_position = self.get_latest_position()\n if latest_position is not None:\n # print('Checkpoint 1: ' + str(latest_position.is_current_collaborator()))\n return latest_position.is_current_collaborator()\n else:\n return False",
"def _is_ccx_course(course_key):\n return hasattr(course_key, 'ccx')",
"def is_tac_member(self, partner_code=None):\n\n if not partner_code:\n return len(self._tac_member_partners)\n\n return partner_code in self._tac_member_partners",
"def is_valid(self) -> bool:\n polygon = self.polygon\n return self.name is not None and polygon.is_valid and not polygon.interiors"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
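Each row above pairs a natural-language query with a positive code document and a list of hard negative code snippets, and the per-row metadata records the training objective as a (query, document, negatives) triplet. The sketch below shows one hedged way such rows could be consumed for contrastive training in Python; the field names mirror the columns shown above, while the example row contents, the model checkpoint, the batch size, and the choice of sentence-transformers TripletLoss are illustrative assumptions rather than anything prescribed by this dataset.

# Hedged usage sketch -- the checkpoint name, batch size, and truncated row
# contents below are illustrative assumptions, not part of this dataset.
from sentence_transformers import SentenceTransformer, InputExample, losses
from torch.utils.data import DataLoader

rows = [
    {
        "query": "Load all of the tensors required to begin federated learning.",
        "document": "def _load_initial_tensors(self): ...",
        "negatives": ["def load_models(self): ...", "def load_train(self): ..."],
    },
]

# Expand each (query, document, negatives) row into (anchor, positive, negative)
# triplets, matching the "triplet" objective recorded in the metadata column.
train_examples = [
    InputExample(texts=[row["query"], row["document"], negative])
    for row in rows
    for negative in row["negatives"]
]

model = SentenceTransformer("all-MiniLM-L6-v2")  # example checkpoint only
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
train_loss = losses.TripletLoss(model=model)
model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=1)

TripletLoss is only one fit for these columns; a multiple-negatives ranking loss over (query, document) pairs, with each row's negatives supplied as extra candidates, would use the same three fields.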