Columns:
query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: sequence (lengths 19 to 20)
metadata: dict
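A minimal sketch of loading and inspecting rows with this schema, assuming the data is serialized as JSON Lines; the file name below is a placeholder, not an actual path from this dataset.

import json

DATA_FILE = "train.jsonl"  # placeholder path, substitute the real data file

with open(DATA_FILE, encoding="utf-8") as handle:
    for line in handle:
        row = json.loads(line)
        # Each row pairs a natural-language query with one positive code
        # document, a list of hard-negative code snippets, and metadata
        # describing the training objective.
        print(row["query"])
        print(row["document"].splitlines()[0])
        print(len(row["negatives"]), "negatives")
        break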
Get the image size the highest-magnification image would have to be resized to in order to get an equivalent magnification.
def get_size_for_mag(slide, mag):
    max_size = slide.dimensions
    max_mag = highest_mag(slide)
    downsample = max_mag/mag
    return [np.int(np.round(dim/downsample)) for dim in max_size]
[ "def _get_scaled_image_size(self):\n unscaled_size = self._get_original_image_size()\n return (unscaled_size[0] * self.scale, unscaled_size[1] * self.scale)", "def getZoomFactor(imageSize, maxW, maxH):\n\timageW, imageH = imageSize\n\tzoomW = float(imageW) / float(maxW)\n\tzoomH = float(imageH) / float(maxH)\n\treturn max(zoomW, zoomH)", "def get_resized_size(self):\r\n f = self.fmt\r\n iw, ih = self.image.size\r\n\r\n if not f.stretch and iw <= self.fw and ih <= self.fh:\r\n return\r\n\r\n if self.image_ratio == self.format_ratio:\r\n # same ratio, just resize\r\n return (self.fw, self.fh)\r\n\r\n elif self.image_ratio < self.format_ratio:\r\n # image taller than format\r\n return (self.fh * iw / ih, self.fh)\r\n\r\n else: # self.image_ratio > self.format_ratio\r\n # image wider than format\r\n return (self.fw, self.fw * ih / iw)", "def get_image_size(self):", "def get_thumbnail_magnification(slide):\n ratio = np.asarray(slide.dimensions) / np.asarray(slide.associated_images[\"thumbnail\"].size)\n # np.sqrt(np.prod(ratio))\n return ratio", "def _image_resolution(image_filename):\n img = mpimg.imread(image_filename)\n return img.shape", "def _get_max_scale(self) -> int:", "def calculate_thumb_size(max_size, image_size):\n if image_size[0] < max_size[0] and image_size[1] < max_size[1]:\n return image_size\n ratio_width = image_size[0]/max_size[0]\n ratio_heigh = image_size[1]/max_size[1]\n if ratio_width > ratio_heigh:\n w = max_size[0]\n h = image_size[1] / ratio_width\n else:\n h = max_size[1]\n w = image_size[0] / ratio_heigh\n return int(w), int(h)", "def get_max_size(self) -> float:\n return max(self.gt[1] * self.nx, self.gt[5] * self.ny)", "def get_target_scale(im_size_min, im_size_max, target_size, max_size):\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than max_size\n if np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) / float(im_size_max)\n return im_scale", "def calc_image_size(spr):\n return int(max(spr.label_safe_width(), 1)), \\\n int(max(spr.label_safe_height(), 1))", "def get_image_size(self):\n return self.image_size if hasattr(self, 'image_size') else None", "def get_pixel_size(self):\n current_mag = self.get_mag()\n current_frame_size_selector = self.get_frame_size_selector()\n return self.MAG_PX_SIZE_FACTOR / (current_mag\n * self.STORE_RES[current_frame_size_selector][0])", "def _get_target_scale(self, im_size_min, im_size_max, target_size, max_size):\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than max_size\n if np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) / float(im_size_max)\n return im_scale", "def get_pix_size(self):\n aperture_dict = self.get_aperture_pars()\n pix_size = aperture_dict['pix']\n return pix_size", "def getSizePix(self):\r\n if 'sizePix' in self.currentCalib:\r\n return self.currentCalib['sizePix']\r\n else:\r\n return None", "def _get_extended_image_size(height, width, patch_size, stride):\n\n ext_height, ext_width = 0, 0\n\n def sliding_distance(n_windows, window_size, stride):\n return window_size * n_windows - (window_size - stride) * (n_windows - 1)\n\n if height < patch_size:\n ext_height = patch_size\n else:\n for n in range(height):\n distance = sliding_distance(n, patch_size, stride)\n if distance > height:\n ext_height = distance\n break\n\n if width < patch_size:\n ext_width = patch_size\n else:\n for n in range(width):\n distance = sliding_distance(n, patch_size, stride)\n if 
distance > width:\n ext_width = distance\n break\n\n return ext_height, ext_width", "def DoGetBestSize(self):\n return wx.Size(self._bitmap.GetWidth(), self._bitmap.GetHeight())", "def GetBestSize(self):\n bmp = self._bitmap\n return wx.Size(bmp.GetWidth(), bmp.GetHeight())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds 5 latest blog posts as `latest_articles`, 5 latest comments as `latest_comments`, and all tags (annotated with `num_articles` field) as `tags` to the context, regardless of `request`.
def latest_content(request):
    latest_articles = Article.published_articles()[:5]
    latest_comments = Comment.objects.all().order_by('-pub_date')[:5]
    tags = Tag.objects.annotate(num_articles=Count('article')).order_by(
        '-num_articles')
    contributors = Contributor.objects.annotate(
        num_articles=Count('article')).order_by('-num_articles')
    return {'latest_articles': latest_articles,
            'latest_comments': latest_comments,
            'tags': tags,
            'contributors': contributors,
            }
[ "def recent_blogs_view(self, request):\n\n return self.render(\n request,\n context_overrides={\n \"title\": \"Latest News\",\n \"blogs\": self.recent_blogs(),\n },\n )", "def last_five(request):\n flag_five = True\n topics = (\n request.db[\"topic\"].find().sort([(\"$natural\", -1), (\"topic_date\", -1)]).limit(5)\n )\n\n return render_to_response(\n \"templates/home.html\",\n {\"topics\": topics, \"flag_five\": flag_five, \"count\": count(request)},\n request=request,\n )", "def get_recent_posts(self, request, count):\n if request.has_permission('edit'):\n return DBSession.query(Post).filter_by(blog=self).order_by('created desc').slice(0, count).all()\n else:\n return DBSession.query(Post).filter_by(blog=self, published=True).order_by('created desc').slice(0, count).all()", "def forum_latest_posts(parser, token):\n bits = token.contents.split()\n if len(bits) not in (1, 2, 4):\n raise TemplateSyntaxError('%s tag requires none, one or three arguments' % bits[0])\n if bits[2] != 'as':\n raise TemplateSyntaxError(\"Second argument to %s tag must be 'as'\" % bits[0])\n if not bits[1]:\n bits[1] = 5 # Default number of items\n if not bits[3]:\n bits[3] = 'latest_posts'\n return ForumLatestPostsNode(bits[1], bits[3])", "def do_get_latest_blog_posts(parser, token):\n bits = token.contents.split()\n if len(bits) != 4:\n raise template.TemplateSyntaxError, \"'%s' tag takes three arguments\" % bits[0]\n if bits[2] != 'as':\n raise template.TemplateSyntaxError, \"Second argument to '%s' tag must be 'as'\" % bits[0]\n return LatestBlogPostsNode(bits[1], bits[3])", "def latest(request):\n post_list = Post.objects.exclude(hidden = True).order_by('-created')\n paginator = Paginator(post_list, 10)\n try:\n page = int(request.GET.get('page', '1'))\n except ValueError:\n page = 1\n try:\n posts = paginator.page(page)\n except EmptyPage, InvalidPage:\n posts = paginator.page(paginator.num_pages)\n return render(request, 'blog/latest.html', {'posts': posts},\n context_instance = RequestContext(request))", "def get_context_data(self, **kwargs):\n import math\n context = super(ThreadView, self).get_context_data(**kwargs)\n page = self.request.GET.get('page')\n comments = Paginator(self.object.comments.all().order_by(\n 'date_created'), 5)\n comments_count = self.object.comments.count()\n calc = (comments_count + 1) / 5\n next_page = math.ceil(calc)\n context['all_threads_on_forum'] = Thread.objects.all()\n context['all_comments_on_forum'] = Comment.objects.all()\n context['comments'] = comments.get_page(page)\n context['comments_count'] = comments_count\n context['next_page'] = next_page\n return context", "def index(request, archive=False):\n context = {'archive':archive}\n posts = Post.objects.all()\n if not archive:\n posts = posts[:10]\n context['posts'] = posts\n if request.user.is_authenticated():\n #These are the new news items the logged in user has\n context['new_posts'] = NewBlog.objects.filter(user=request.user)\n return render(request, 'blog/index.html', context)", "def recent_comments(context):\n latest = context[\"settings\"].COMMENTS_NUM_LATEST\n comments = ThreadedComment.objects.all().select_related(\"user\")\n context[\"comments\"] = comments.order_by(\"-id\")[:latest]\n return context", "def recent_reviews(context):\n comments = []\n latest = context[\"settings\"].COMMENTS_NUM_LATEST\n comments_queryset = ThreadedComment.objects.all()\n for comment in comments_queryset.select_related(\"user\"):\n if isinstance(comment.content_object, BlogPost):\n comments.append(comment) \n 
context[\"comments\"] = comments\n return context", "def get_recently_articles(cls, num):\n return cls.objects.values('title', 'view_times', 'update_time', 'author')\\\n .filter(status=0).order_by('-update_time')[:num]", "def api_get_threads(request, count):\n\n if PARAMETER_TAG in request.GET:\n tag_name = request.GET[PARAMETER_TAG]\n if tag_name is not None:\n tag = get_object_or_404(Tag, name=tag_name)\n threads = tag.threads.filter(archived=False)\n else: \n threads = Thread.objects.filter(archived=False)\n\n if PARAMETER_OFFSET in request.GET:\n offset = request.GET[PARAMETER_OFFSET]\n offset = int(offset) if offset is not None else 0\n else:\n offset = 0\n\n threads = threads.order_by('-bump_time')\n threads = threads[offset:offset + int(count)]\n\n opening_posts = []\n for thread in threads:\n opening_post = thread.get_opening_post()\n\n # TODO Add tags, replies and images count\n opening_posts.append(_get_post_data(opening_post.id,\n include_last_update=True))\n\n return HttpResponse(content=json.dumps(opening_posts))", "def forum_latest_user_posts(parser, token):\n bits = token.contents.split()\n if len(bits) not in (2, 3, 5):\n raise TemplateSyntaxError('%s tag requires one, two or four arguments' % bits[0])\n if bits[3] != 'as':\n raise TemplateSyntaxError(\"Second argument to %s tag must be 'as'\" % bits[0])\n if not bits[2]:\n bits[2] = 5 # Default number of items\n if not bits[3]:\n bits[4] = 'latest_user_posts'\n return ForumLatestUserPostsNode(bits[1], bits[2], bits[4])", "def aboutPage(request):\n\n\t#collect 3 latest blogs and display them on the page\n\tlast_blog_0 = Blogs.objects.all().reverse()[0]\n\tlast_blog_1 = Blogs.objects.all().reverse()[1]\n\tlast_blog_2 = Blogs.objects.all().reverse()[2]\n\n\t#context dictionary to render the latest blog on the page\n\tcontext = {\n\t'last_blog_0':last_blog_0,\n\t'last_blog_1':last_blog_1,\n\t'last_blog_2':last_blog_2\n\t}\n\n\treturn render(request, 'about.html', context)", "def home(request):\n articles = Article.objects.all()\n return render(request, 'blog/home.html', {'last_articles': articles})", "def index(request):\n # compute last week\n last_week = timezone.now() - timezone.timedelta(7)\n # get post with most votes\n posts = Post.objects.filter(created__gte=last_week).annotate(count=Count('users_votes')).order_by('-count')\n paginator = Paginator(posts, 5)\n page = request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n return render(request, 'blog/index.html', {'posts': posts})", "def get_recent_posts(self, numposts=10, blogid=1):\n return self.execute('metaWeblog.getRecentPosts', blogid, self.username, self.password, numposts)", "def blog(request):\n posts = Post.objects.all().order_by('date_added')\n\n posts_paginator = Paginator(posts, 5)\n page_num = request.GET.get('page')\n page = posts_paginator.get_page(page_num)\n\n template = 'blog/blog.html'\n context = {\n 'page': page,\n 'posts': posts,\n }\n\n return render(request, template, context)", "def index(request):\n\n ## Obtemos solo los objetos que cumplen dicha condicion\n ## Que estan activos\n query_blog = Blog.objects.all()\n enable_list = query_blog.filter(enable = True)\n post_list = []\n for blog_config in enable_list:\n post_config = feedparser.parse(blog_config.feed)\n for post in post_config['entries']:\n date = calendar.timegm(post.published_parsed)\n post_list.append({'date':date, 'title':post.title,\n 
'date_format':post.published, 'content':post.content[0].value,\n 'link':post.link, 'author':blog_config.author})\n list_aux = [(dict_[\"date\"], dict_) for dict_ in post_list]\n list_aux.sort(reverse=True)\n order_post = [dist_ for (key, dist_) in list_aux]\n\n paginator = Paginator(order_post, 6)\n page = request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n posts = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n posts = paginator.page(paginator.num_pages)\n return render_to_response('index.html', {'enable_list' : enable_list,\n 'post_list': posts, 'page_number': range(1, paginator.num_pages+1)})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format a Roku Channel name.
def format_channel_name(channel_number: str, channel_name: str | None = None) -> str:
    if channel_name is not None and channel_name != "":
        return f"{channel_name} ({channel_number})"
    return channel_number
[ "def channel_name(radio_id: int, channel_id: int) -> str:\n return f\"COMM{radio_id} Ch {channel_id}\"", "def gen_channel_name_v2(shotnr: int, channel_rg: str):\n return f\"{shotnr:05d}_ch{channel_rg:s}\"", "def format_channel(discord_id: int) -> str:\n return format_discord_str(discord_id, '#')", "def channel_name(self) -> str:\n return self._channel_name", "def channel_name(self):\n\n return self.channel.name if self.channel is not None else None", "def channel_name(self):\n return self._channel_name_dict", "def set_channel_name(self, cnum=1, CNAME=\"\"):\n scpi_command = scpi_preprocess(\":CONF:CHAN{:}:NAME '{:}'\", cnum, CNAME)\n self.write(scpi_command)", "def repr_nickname(self, nickname, channel_obj):\n assert isinstance(nickname, bytes)\n if not channel_obj:\n return nickname\n # TODO: halfop and any other modes\n elif channel_obj.is_oper(nickname):\n return b'@' + nickname\n elif channel_obj.is_voiced(nickname):\n return b'+' + nickname\n return b' ' + nickname", "def channel_name(pv):\n return 'ca://' + pv", "def _get_group_channel_name(self, group):\n return f\"{self.prefix}__group__{group}\"", "def get_mangled_channel_name(self, var_name, kernel_id):\n if var_name in self.channel_mangle:\n return self.channel_mangle[var_name][kernel_id]\n else:\n return var_name", "def fmt(competitor_name: str) -> str:\n name = competitor_name.replace(\"_a\", r\" $\\alpha$ \")\n name = name.replace(\"_b\", r\" $\\beta$ \")\n return name", "def channel_string(self, pre=\"\", full=False):\n\n return \" \".join(pre+c.get_chanstr(full=full) for c in self.channels)", "def get_channel_name(self, channelid, isdm=False):\n\n if isdm:\n return channelid\n\n request = SimpleRequest(self.headers).request\n channel = request.grab_page('https://discordapp.com/api/%s/channels/%s' % (self.api, channelid))\n\n if channel is not None and len(channel) > 0:\n return '%s_%s' % (channelid, self.safe_name(channel['name']))\n\n else:\n error('Unable to fetch channel name from id, generating one instead.')\n return '%s_%s' % (channelid, random_str(12))", "def delivery_channel_name(self) -> str:\n return pulumi.get(self, \"delivery_channel_name\")", "def make_channel_id(channel, scale):\n return \"{0}-{1}-{2}\".format(channel.device_gid, channel.channel_num, scale)", "def button_string(channel, red, blue):\n return 'CH{:s}_{:s}_{:s}'.format(channel, red, blue)", "def fmt_nick(nick):\n\n green = w.color(\"green\")\n reset = w.color(\"reset\")\n nick_col = w.color(w.info_get(\"irc_nick_color_name\", nick))\n\n return \"{}[{}{}{}]{}\".format(green, nick_col, nick, green, reset)", "def channel_name(self, das_name, channel):\n\tdas = self.das_config[self._fix_key(das_name)]\n\tfor key in das.keys():\n\t if key not in [ \"datapath\", 'xlabel', 'ylabel', 'name', 'status']: # list of ignored names\n\t\tif das[key] != str(channel):\n\t\t continue # skip of number of\n\t\telse:\n\t\t return key\n\treturn None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_projects() should return a list of projects.
def test_get_projects_returns_projects(fc: fetcher.Fetcher):
    projects = fc.get_projects()
    assert isinstance(projects, list)
    assert isinstance(projects[0], models.Project)
[ "def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects", "def get_projects(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/projects\").json()", "def get_projects(self):\n result = self.client.send_get('get_projects')\n return result", "def getProjects(self):\n getParams = {'active':1,'account__active':1}\n return self._getJSONResponse('project', getParams)", "def get_list(self):\n result = issue_request(method='GET', endpoint='account/projects', token=self.token)\n return result", "def test_list_projects(self):\n\n # list projects\n arguments, connection = self.connect_to_server()\n list_projects.main(arguments, connection)", "def get_projects():\n result = []\n projs = projects.get_projects()\n project_ids = [p.id for p in projs]\n repos_fut = projects.get_repos_async(project_ids)\n metadata_fut = projects.get_metadata_async(project_ids)\n ndb.Future.wait_all([repos_fut, metadata_fut])\n repos, metadata = repos_fut.get_result(), metadata_fut.get_result()\n for p in projs:\n repo_type, repo_url = repos.get(p.id, (None, None))\n if repo_type is None:\n # Not yet consistent.\n continue\n name = None\n if metadata.get(p.id) and metadata[p.id].name:\n name = metadata[p.id].name\n result.append(Project(\n id=p.id,\n name=name,\n repo_type=repo_type,\n repo_url=repo_url,\n ))\n return result", "def gitlab_projects(): # noqa\n try:\n if current_user.is_authenticated:\n user = _get_user_from_invenio_user(current_user.email)\n else:\n user = get_user_from_token(request.args.get('access_token'))\n secrets_store = REANAUserSecretsStore(str(user.id_))\n gitlab_token = secrets_store.get_secret_value('gitlab_access_token')\n gitlab_user = secrets_store.get_secret_value('gitlab_user')\n gitlab_url = REANA_GITLAB_URL + \\\n \"/api/v4/users/{0}/projects?access_token={1}\"\n response = requests.get(gitlab_url.format(gitlab_user, gitlab_token))\n if response.status_code == 200:\n return response.content, 200\n else:\n return jsonify({\"message\": \"Project list could not be retrieved\"}),\n response.status_code\n except ValueError:\n return jsonify({\"message\": \"Token is not valid.\"}), 403\n except Exception as e:\n logging.error(traceback.format_exc())\n return jsonify({\"message\": str(e)}), 500", "def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', description='2'),\n ]\n\n result = get_project_list()\n result_projects = result['projects'].object_list\n\n # Make sure two test projects are retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def get_projects(self) -> List['Project']:\n return self._bind(self.meta_repo.get_projects())", "def project_list(self):\n try:\n ids = self.request[api.DATA][api.DATA][\"ids\"]\n return self._get_keystone_projects(ids)\n except Exception as e:\n LOG.exception(\"Error occurred: %s\" % e)", "def get_projects(self):\n self.cur.execute('SELECT * FROM projects;')\n projects = [Projects(row) for row in self.cur.fetchall()]\n return projects", "def test_return_project_list():\n result = False\n project_obj = prism.Projects(api_client=_api())\n\n projects = project_obj.get()\n if projects:\n result = True\n\n 
assert not result", "def projects():\n response = jsonify(projects_service.get_top_level_projects_ids())\n return response", "def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):\n projects = fc.get_projects(test_project_name)\n assert isinstance(projects, list)\n assert len(projects) == 1\n assert projects[0].name == test_project_name", "def test_get_projects(self):\n pass", "def test_api_projects_get(self):\n pass", "def retrieve_projects_raw(self):\n link = \"https://app.getsentry.com/api/0/projects/\"\n return self.retrieve_from_link(link)", "def fetch_projects(self, **kwargs):\n\n from github.objects import Project\n\n def map_func(data):\n return Project.from_data(data, self.http)\n\n return CollectionIterator(self.http.fetch_projectowner_projects,\n self.id, map_func=map_func, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_projects() should be able to filter on project.
def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):
    projects = fc.get_projects(test_project_name)
    assert isinstance(projects, list)
    assert len(projects) == 1
    assert projects[0].name == test_project_name
[ "def test_get_projects(self):\n pass", "def test_list_project_request(self):\n pass", "def test_list_projects(self):\n pass", "def test_get_projects_returns_projects(fc: fetcher.Fetcher):\n projects = fc.get_projects()\n assert isinstance(projects, list)\n assert isinstance(projects[0], models.Project)", "def test_api_projects_get(self):\n pass", "def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', description='2'),\n ]\n\n result = get_project_list()\n result_projects = result['projects'].object_list\n\n # Make sure two test projects are retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def _get_filtered_projects(filters):\n projects_itr = (projects_lib.get_filtered(f) for f in filters)\n return itertools.chain.from_iterable(projects_itr)", "def test_list_project(self):\n pass", "def test_projects_list(self):\n pass", "def filter_projects():\n with open('../results/01.crawling/01.project_ci_services.json', 'r') as infile:\n projects = json.load(infile)\n tr_projects = []\n for project, value in projects.items():\n if \"GitHub\" in value or \"Travis\" in value:\n tr_projects.append(project)\n return tr_projects", "def _getsubprojects(self):\r\n result = self._session.execute(\"query -t project \\\"recursive_is_member_of('%s', none)\\\" -u -f \\\"%%objectname\\\"\" % self.objectname, ObjectListResult(self._session))\r\n return result.output", "def test_get_project_list_with_tag_filter(self):\n # Add test projects.\n tag = 'tag1'\n projects_with_tag = [\n add_project(title='1', description='1', tags=[tag]),\n add_project(title='2', description='2', tags=[tag]),\n ]\n project_without_tag = add_project(title='3', description='3', tags=[])\n\n result = get_project_list(tag=tag)\n result_projects = result['projects'].object_list\n\n # Make sure only projects with tag are retrieved.\n for project_with_tag in projects_with_tag:\n self.assertTrue(project_with_tag in result_projects)\n self.assertFalse(project_without_tag in result_projects)\n self.assertEqual(len(result_projects), len(projects_with_tag))\n self.assertTrue(result['filtered'])\n self.assertEqual(result['tag'], tag)", "def get_queryset(self):\n return get_projects_for_user(self.request.user)", "def selectable_projects():\n\n db = current.db\n s3db = current.s3db\n\n # Lookup projects with provider self-registration\n ptable = s3db.project_project\n ttable = s3db.project_project_tag\n join = ttable.on((ttable.project_id == ptable.id) & \\\n (ttable.tag == \"APPLY\") & \\\n (ttable.value == \"Y\") & \\\n (ttable.deleted == False))\n query = (ptable.deleted == False)\n rows = db(query).select(ptable.id,\n ptable.name,\n join = join,\n )\n projects = {row.id: row.name for row in rows}\n return projects", "def test_get_project_list_with_no_projects(self):\n result = get_project_list()\n self.assertQuerysetEqual(result['projects'].object_list, [])\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def test_timeline_filters_project(self):\n with self.app.app_context():\n u = user(save=True)\n p = project(save=True)\n status(user=u, project=p, save=True)\n p2 = project(name='Test Project 2', slug='test-project-2',\n save=True)\n status(user=u, project=p2, save=True)\n\n response = self.client.get(self._url())\n data = json.loads(response.data)\n 
eq_(len(data), 1)\n eq_(data[0]['project'], p.dictify())", "def test_list_projects(self):\n\n # list projects\n arguments, connection = self.connect_to_server()\n list_projects.main(arguments, connection)", "def findProject(self):\n\n # check that we actually have json\n if hasattr(cherrypy.request, 'json'):\n data = cherrypy.request.json\n else:\n data = dict()\n\n # TODO validate projectNumbers; verify projectNumbers is list of ints\n\n validNum = []\n result = []\n if 'projectNumbers' in data:\n # if not admin, find only authorized projects\n if cherrypy.session['role'] == 'admin':\n validNum = data['projectNumbers']\n else:\n for pNum in data['projectNumbers']:\n if pNum in cherrypy.session['projectNumbers']:\n validNum.append(pNum)\n\n for project in validNum:\n for res in self.colProjects.find({'projectNumber': project, 'status': 'active'}):\n res['_id'] = str(res['_id'])\n result.append(res)\n #~ return result\n else:\n if cherrypy.session['role'] != 'admin':\n validNum = cherrypy.session['projectNumbers']\n for project in validNum:\n for res in self.colProjects.find({'projectNumber': project, 'status': 'active'}):\n res['_id'] = str(res['_id'])\n result.append(res)\n #~ return result\n else: # is admin\n for res in self.colProjects.find({'status': 'active'}):\n res['_id'] = str(res['_id'])\n result.append(res)\n #~ return result\n\n for res in result:\n res = self.calculateBudget(res[\"projectNumber\"])\n return result", "def getProjects(self , teamindex = 0):\r\n if self.userdata == {}:\r\n self.reloadUserdata()\r\n projects = self.userdata['user']['teams'][teamindex]['projects']\r\n return projects" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_models() should return a list of models.
def test_get_models_returns_models(fc: fetcher.Fetcher):
    ml = fc.get_models()
    assert isinstance(ml, list)
    assert isinstance(ml[0], models.LookmlModel)
[ "def get_models(self):\n self.load()\n return self._models", "def _get_models():\n from . import models\n return models", "def get_models(self):\n\n base = self.get_base()\n return getattr(base, self.resource).json[\"api_declaration\"][\"models\"]", "def get_models(make):\n api_url = 'https://api.edmunds.com/api/vehicle/v2/{}/models?fmt=json&api_key={}'\\\n .format(make, API_KEY)\n r = requests.get(api_url).json()\n all_models = [model['name'] for model in r['models']]\n return all_models", "def get_model_list():\n with open(os.path.join(MODELS_FOLDER, \"models.json\"), \"r\") as model_file:\n model_list = json.load(model_file)\n return model_list", "def opt_get_all_models_rest_api():\n return retrieve_all_models()", "def load_models(args, model_types=['generator', 'discriminator']):\n models = []\n for model_type in model_types:\n model = load_model(args, model_type)\n models.append(model)\n return models", "def get_models(self, model_names, callback):\n models = self.get_collection('_model')\n models.find(spec={'_id': {'$in': tuple(model_names)}}).to_list(\n callback=callback)", "def get_models(self):\n return self.ensemble.get_models()", "def list_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='all', fields=fields)\n\t\treturn models", "def get_models_for_make(self, make):\n return self.get('vehicles/GetModelsForMake/{}'.format(make))", "def test_list_models(default_domino_client):\n models_list = default_domino_client.models_list()\n assert (\n models_list[\"objectType\"] == \"list\"\n ), f\"models_list returned unexpected result:\\n{pformat(models_list)}\"\n assert isinstance(\n models_list[\"data\"], list\n ), f\"Unable to retrieve models:\\n{pformat(models_list)}\"", "def get_all_models():\n app_config = apps.get_app_config(settings.WORLD_DATA_APP)\n return app_config.get_models()", "def import_data(self):\n self.models = []\n for o in self.loader.load():\n klass = self.type_for(o)\n if hasattr(klass, \"from_api\"):\n self.models.append(klass.from_api(o))\n else:\n self.models.append(klass(o))\n return self.models", "def get_models(self, task: TaskVar, project: ProjectVar = None) -> List['Model']:\n return self._bind(self.meta_repo.get_models(task, project))", "def available_models() -> List[str]:\n return list(_MODEL_INFO.keys())", "def list_models(self):\n request = self.client.projects().models()\\\n .list(parent=self.parent)\\\n .execute()\n return request", "def get_s3_models():\n result = requests.get('/'.join((S3_BUCKET_URL, 'Models',\n 's3_models.json')))\n try:\n output = result.json()\n assert isinstance(output, dict)\n except json.JSONDecodeError or AssertionError:\n output = {}\n logger.warning('Online deft models are currently unavailable')\n return output", "def all_models():\n return list(_MODEL_CLASSES)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_models() should be able to filter on project or model.
def test_get_models_filters(fc: fetcher.Fetcher, test_project_name, test_model):
    ml = fc.get_models(project=test_project_name)
    assert all(m.project_name == test_project_name for m in ml)
    ml = fc.get_models(model=test_model["name"])
    assert all(m.name == test_model["name"] for m in ml)
    ml = fc.get_models(project=test_project_name, model=test_model["name"])
    assert all(
        m.project_name == test_project_name and m.name == test_model["name"]
        for m in ml
    )
[ "def query_models(\n cls,\n project_name=None, # type: Optional[str]\n model_name=None, # type: Optional[str]\n tags=None, # type: Optional[Sequence[str]]\n only_published=False, # type: bool\n include_archived=False, # type: bool\n max_results=None, # type: Optional[int]\n metadata=None, # type: Optional[Dict[str, str]]\n ):\n # type: (...) -> List[Model]\n if project_name:\n # noinspection PyProtectedMember\n res = _Model._get_default_session().send(\n projects.GetAllRequest(\n name=exact_match_regex(project_name),\n only_fields=[\"id\", \"name\", \"last_update\"],\n )\n )\n project = get_single_result(\n entity=\"project\", query=project_name, results=res.response.projects\n )\n else:\n project = None\n\n only_fields = [\"id\", \"created\", \"system_tags\"]\n\n extra_fields = {\n \"metadata.{}.value\".format(k): v for k, v in (metadata or {}).items()\n }\n\n models_fetched = []\n\n page = 0\n page_size = 500\n results_left = max_results if max_results is not None else float(\"inf\")\n while True:\n # noinspection PyProtectedMember\n res = _Model._get_default_session().send(\n models.GetAllRequest(\n project=[project.id] if project else None,\n name=exact_match_regex(model_name)\n if model_name is not None\n else None,\n only_fields=only_fields,\n tags=tags or None,\n system_tags=[\"-\" + cls._archived_tag]\n if not include_archived\n else None,\n ready=True if only_published else None,\n order_by=[\"-created\"],\n page=page,\n page_size=page_size if results_left > page_size else results_left,\n _allow_extra_fields_=True,\n **extra_fields\n )\n )\n if not res.response.models:\n break\n models_fetched.extend(res.response.models)\n results_left -= len(res.response.models)\n if results_left <= 0 or len(res.response.models) < page_size:\n break\n\n page += 1\n\n return [Model(model_id=m.id) for m in models_fetched]", "def test_get_models_returns_models(fc: fetcher.Fetcher):\n ml = fc.get_models()\n assert isinstance(ml, list)\n assert isinstance(ml[0], models.LookmlModel)", "def _get_models():\n from . 
import models\n return models", "def opt_get_all_models_rest_api():\n return retrieve_all_models()", "def get_models(self, task: TaskVar, project: ProjectVar = None) -> List['Model']:\n return self._bind(self.meta_repo.get_models(task, project))", "def list_models(self):\n request = self.client.projects().models()\\\n .list(parent=self.parent)\\\n .execute()\n return request", "def get_models(self):\n\n base = self.get_base()\n return getattr(base, self.resource).json[\"api_declaration\"][\"models\"]", "def test_coupledmodels_get(self):\n pass", "def findModels(make):\n\n pass", "def get_model_objects(\n self, model: django.db.models.Model, filter: dict = None, get: dict = None\n ) -> list:\n\n ret = []\n prefetch = []\n select = []\n\n # iterate over all model fields and append many to one fields to a prefetch list\n for field in model._meta.get_fields():\n if field.many_to_many:\n prefetch.append(field.name)\n\n if field.many_to_one:\n select.append(field.name)\n\n # if a filter was provided\n if filter != None:\n objects = (\n model.objects.prefetch_related(*prefetch)\n .select_related(*select)\n .filter(**filter)\n )\n for obj in objects:\n ret.append(obj)\n\n elif get != None:\n object = (\n model.objects.prefetch_related(*prefetch)\n .select_related(*select)\n .get(**get)\n )\n ret = object\n\n else:\n objects = (\n model.objects.prefetch_related(*prefetch).select_related(*select).all()\n )\n for obj in objects:\n ret.append(obj)\n\n return ret", "def get_models(self, model_names, callback):\n models = self.get_collection('_model')\n models.find(spec={'_id': {'$in': tuple(model_names)}}).to_list(\n callback=callback)", "def filter_cityengine(models):\n \n filtered = []\n \n for m in models:\n if 'tahirazim/apiupload' not in m['base_path']:\n filtered.append(m)\n \n return filtered", "def get_models_for_make(self, make):\n return self.get('vehicles/GetModelsForMake/{}'.format(make))", "def list_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='all', fields=fields)\n\t\treturn models", "def test_get_models(self):\n models = get_models()\n self.assertTrue(len(models) > 6)\n self.assertIn((\"csvimport.Item\", \"csvimport.Item\"), models)", "def test_get_index_models(self):\n models = get_index_models(\"baz\")\n assert models == [apps.get_model(\"tests\", \"ExampleModel\")]", "def retrieve_models_data_from_api(self, year=None,\n make_id=None, annotated=False):\n\n try:\n data = sema_client.retrieve_models(\n dataset_ids=[self.dataset_id],\n year=year,\n make_id=make_id\n )\n if annotated:\n data = {\n 'brand_id_': self.brand.brand_id,\n 'dataset_id_': self.dataset_id,\n 'models_': data\n }\n if year:\n data['year_'] = year\n if make_id:\n data['make_id_'] = make_id\n data = [data]\n return data\n except Exception:\n raise", "def test_get_models_throws_if_model_does_not_exist(fc: fetcher.Fetcher, project, model):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_models(project=project, model=model)\n assert \"An error occured while getting models.\" in str(exc.value)", "def get_models(automaker, year):\n\n return set([car['model'] for car in data if car['automaker'] == automaker and car['year'] == year])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_models() should throw if a model is not found.
def test_get_models_throws_if_model_does_not_exist(fc: fetcher.Fetcher, project, model):
    with pytest.raises(exceptions.NotFoundError) as exc:
        fc.get_models(project=project, model=model)
    assert "An error occured while getting models." in str(exc.value)
[ "def test_get_models_returns_models(fc: fetcher.Fetcher):\n ml = fc.get_models()\n assert isinstance(ml, list)\n assert isinstance(ml[0], models.LookmlModel)", "def _get_models():\n from . import models\n return models", "def test_get_model_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n exception_message = \"\"\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"asdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")", "def load_models(model_name=\"\", path=\"\", read_grains=False, **kwargs):\n models = pc.load_models('{0}{1}'.format(path, model_name), read_grains=read_grains, **kwargs)\n models_return = []\n for m in models:\n if m.n_zones > 1:\n models_return.append(m)\n else:\n # log failed model\n pass\n return models_return", "def test_model_serializer_not_found(self):\n\n # GIVEN app and model name\n\n # WHEN fetching data for the model via generic ReST API\n response = self.api.GET(self.app_label, self.model_name4)\n\n # THEN it should fail\n self.assertTrue(response.error)\n self.assertTrue('Invalid data serializer' in response.data['errors'][0])", "def get_models_for_make(self, make):\n return self.get('vehicles/GetModelsForMake/{}'.format(make))", "def load_models(args, model_types=['generator', 'discriminator']):\n models = []\n for model_type in model_types:\n model = load_model(args, model_type)\n models.append(model)\n return models", "def get_s3_models():\n result = requests.get('/'.join((S3_BUCKET_URL, 'Models',\n 's3_models.json')))\n try:\n output = result.json()\n assert isinstance(output, dict)\n except json.JSONDecodeError or AssertionError:\n output = {}\n logger.warning('Online deft models are currently unavailable')\n return output", "def get_models(self):\n self.load()\n return self._models", "def get_models(self, model_names, callback):\n models = self.get_collection('_model')\n models.find(spec={'_id': {'$in': tuple(model_names)}}).to_list(\n callback=callback)", "def get_models(self):\n\n base = self.get_base()\n return getattr(base, self.resource).json[\"api_declaration\"][\"models\"]", "def load_models(\n cls,\n data_sources: Set[str],\n scope: Scope,\n specific_model_name: Optional[str] = None,\n ) -> List[HuskyModel]:\n # get all models\n models = cls._load_augmented_models()\n\n # create iterator of models belonging to the scope\n scoped_model_iterator = (model for model in models if does_model_belong_to_scope(model, scope))\n\n if specific_model_name:\n # now, if we want a specific model (by name), try to find it\n selected_models = [model for model in scoped_model_iterator if model.name == specific_model_name]\n else:\n # otherwise, filter models based on scope\n selected_models = [\n model for model in scoped_model_iterator if data_sources.issubset(set(model.data_sources))\n ]\n\n if len(selected_models) == 0:\n raise ModelNotFoundException(\n {\n 'data_sources': data_sources,\n 'specific_model_name': specific_model_name,\n }\n )\n\n return selected_models", "def load_and_check_all_models(self):\n completed_models = []\n failed_models = []\n for trg_lang, modelpaths in self.models.items():\n model = self.download_model(trg_lang)\n if model is not None:\n completed_models.append(trg_lang)\n else:\n failed_models.append(trg_lang)\n 
print('Model loading succeeded for {}/{} models: {}'.format(\n len(completed_models), len(self.models), completed_models))\n print('Model loading failed for {}/{} models: {}'.format(\n len(failed_models), len(self.models), failed_models))", "def test_coupledmodels_get(self):\n pass", "def findModels(make):\n\n pass", "def get_loaded_models():\n global loaded_models\n if loaded_models is None:\n loaded_models = get_modules(NETWORKS_DIR)\n\n return loaded_models", "def get_models(make):\n api_url = 'https://api.edmunds.com/api/vehicle/v2/{}/models?fmt=json&api_key={}'\\\n .format(make, API_KEY)\n r = requests.get(api_url).json()\n all_models = [model['name'] for model in r['models']]\n return all_models", "def get_available_models():\n\n return utils.get_files_in_subdirectory(PathManager.MODELS_DIRECTORY)", "def opt_get_all_models_rest_api():\n return retrieve_all_models()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_used_models() should return models that have queries against them.
def test_get_used_models(fc: fetcher.Fetcher, test_model):
    used_models = fc.get_used_models()
    assert isinstance(used_models, dict)
    assert len(used_models) > 0
    assert all(type(model_name) == str for model_name in used_models.keys())
    assert all(type(query_count) == int for query_count in used_models.values())
    assert test_model["name"] in used_models.keys()
[ "def get_loaded_models():\n global loaded_models\n if loaded_models is None:\n loaded_models = get_modules(NETWORKS_DIR)\n\n return loaded_models", "def get_available_models():\n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n return models", "def get_available_models():\n\n return utils.get_files_in_subdirectory(PathManager.MODELS_DIRECTORY)", "def available_models() -> List[str]:\n return list(_MODEL_INFO.keys())", "def get_models(self):\n self.load()\n return self._models", "def get_installed_models():\n global _installed_models_cache\n if _installed_models_cache is not None:\n return _installed_models_cache\n _installed_models_cache = []\n for a in settings.INSTALLED_APPS:\n try:\n _installed_models_cache.append(__import__(a + '.models', '', '', ['']))\n except ImportError:\n pass\n return _installed_models_cache", "def _get_models():\n from . import models\n return models", "def get_all_models():\n app_config = apps.get_app_config(settings.WORLD_DATA_APP)\n return app_config.get_models()", "def available_models() -> List[str]:\n return list(_MODELS.keys())", "def get_available_entities_models():\n return ['concat', 'bahdanau', 'luong']", "def get_models(self):\n return self.ensemble.get_models()", "def get_models_query():\n query = db.session.query(Products.model).distinct()\n return query", "def test_coupledmodels_get(self):\n pass", "def all_models():\n return list(_MODEL_CLASSES)", "def get_registered_models(self):\n return list(self._registered_models.keys())", "def test_get_models_returns_models(fc: fetcher.Fetcher):\n ml = fc.get_models()\n assert isinstance(ml, list)\n assert isinstance(ml[0], models.LookmlModel)", "def get_models(self):\n\n base = self.get_base()\n return getattr(base, self.resource).json[\"api_declaration\"][\"models\"]", "def get_models():\n return list(TFGraphBuilderFactory.__model_builders.keys())", "def _load_all_models(cls) -> List[HuskyModel]:\n # get virtual state\n state = get_state()\n # map it to internal state\n internal_state = VirtualStateMapper.to_husky(state)\n\n return internal_state.models" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_explores() should return a list of explores.
def test_get_explores(fc: fetcher.Fetcher):
    explores = fc.get_explores()
    assert isinstance(explores, list)
    assert len(explores) > 0
    assert isinstance(explores[0], models.LookmlModelExplore)
[ "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def get_explores_from_model(client, model_name, log=LOGGER):\n try:\n return [explore.name for explore in client.lookml_model(model_name, fields='explores').explores]\n except SDKError:\n cprint(f\"ERROR:\\tModel {model_name} not found.\", \"FAIL\", log)\n exit()", "def enum_exploits(self, source):\n\n # init local list\n response = []\n\n self.cur.execute(\n \"SELECT DISTINCT id,title,file,link FROM exploits_db WHERE source = '{0}' and cve_id=? order by id\".format(source),\n self.query)\n\n for data in self.cur.fetchall():\n id = data[0]\n title = data[1]\n file = data[2]\n url = data[3]\n\n # format the response\n exploits = {\"id\": id, \"parameters\": {\"title\": title, \"file\": file, \"url\": url}}\n response.append(exploits)\n\n return response", "def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)", "def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def explores(self, explores):\n\n self._explores = explores", "def get_list(self, resp):\n expenses = resp['expenses']\n expense_list = ExpenseList()\n for value in expenses:\n expense = Expense()\n expense.set_expense_id(value['expense_id'])\n expense.set_date(value['date'])\n expense.set_account_name(value['account_name'])\n expense.set_paid_through_account_name(value[\\\n 'paid_through_account_name'])\n expense.set_description(value['description'])\n expense.set_currency_id(value['currency_id'])\n expense.set_currency_code(value['currency_code'])\n expense.set_bcy_total(value['bcy_total'])\n expense.set_total(value['total'])\n expense.set_is_billable(value['is_billable'])\n expense.set_reference_number(value['reference_number'])\n expense.set_customer_id(value['customer_id'])\n expense.set_customer_name(value['customer_name'])\n expense.set_vendor_id(value['vendor_id'])\n expense.set_vendor_name(value['vendor_name'])\n expense.set_status(value['status'])\n expense.set_created_time(value['created_time'])\n expense.set_expense_receipt_name(value['expense_receipt_name'])\n expense_list.set_expenses(expense)\n page_context_obj = PageContext()\n page_context = resp['page_context']\n page_context_obj.set_page(page_context['page'])\n page_context_obj.set_per_page(page_context['per_page'])\n page_context_obj.set_has_more_page(page_context['has_more_page'])\n page_context_obj.set_report_name(page_context['report_name'])\n page_context_obj.set_applied_filter(page_context['applied_filter'])\n page_context_obj.set_sort_column(page_context['sort_column'])\n page_context_obj.set_sort_order(page_context['sort_order'])\n expense_list.set_page_context(page_context)\n\n return expense_list", "def get_offers():\n\n _logger.info(\"Start getting offers from 
tracker...\")\n\n response = requests_manager.get(\n requests.Session(),\n settings.TRACKER_URL,\n params={\"page\": \"Offers\", \"api_key\": settings.BINOM_API_KEY, \"group\": \"all\", \"status\": \"all\"},\n )\n\n if not isinstance(response, requests.Response):\n _logger.error(f\"Network error occurred while trying to get offers from tracker: {response}\")\n return []\n\n try:\n response_json = response.json()\n except json.JSONDecodeError as decode_error:\n _logger.error(\n f\"Can't decode response from tracker (offers getting): {decode_error.doc}\"\n )\n return []\n\n try:\n offers_list = [\n Offer(\n id=int(offer[\"id\"]),\n geo=offer[\"geo\"],\n name=offer[\"name\"],\n group=offer[\"group_name\"],\n network=offer[\"network_name\"],\n )\n for offer in response_json\n ]\n _logger.info(\"Offers were successfully get.\")\n\n return offers_list\n except KeyError:\n _logger.error(f\"Can't parse response from tracker (offers getting): {response_json}\")\n return []", "def get_expenses(self, state: List[str], export_non_reimbursable: bool, updated_at: List[str]):\n expenses = self.connection.Expenses.get_all(state=state, updated_at=updated_at)\n\n if not export_non_reimbursable:\n expenses = list(filter(lambda expense: expense['reimbursable'], expenses))\n\n return expenses", "def test_list_expenses(client):\n \n # Deletes any existing data that may cause tests to fail\n delete_existing_data()\n \n # Creates sample data that will be used in test\n create_sample_data()\n\n # Tests GET success\n response = client.get('/')\n assert response.status_code == 200\n \n # Tests if HTML is being returned \n assert response.content_type == 'text/html; charset=utf-8'\n \n # Tests if required elements are present \n titles_headings = ['<title>List Expenses - Expenses</title>', '<h1 class=\"m-4\">Your Expenses</h1>']\n table_headers = ['<th scope=\"col\">Name</th>', '<th scope=\"col\">Category</th>', '<th scope=\"col\">Amount</th>', '<th scope=\"col\">Date</th>']\n expense_elements = ['Expense 1', 'Expense 2', 'Expense 3', 'Expense 4', 'Expense 5', 'Expense 6', 'Expense 7', 'Expense 8', 'Expense 9', 'Expense 10', 'Expense 11', 'Test Category', '$10.00', '$1.00', '2021-01-01']\n other_elements = ['navbar', 'footer']\n elements = titles_headings + table_headers + expense_elements + other_elements\n for element in elements:\n assert element.encode() in response.data\n \n # Deletes sample data that was used in test\n delete_sample_data()", "def continuing_less_expenses(self):\n log_str = \"continuing_less_expenses() Method.\\n\"\n non_expenses = []\n for i in self.available_cand:\n if not i.return_expenses:\n non_expenses.append(i)\n log_str += \"{} is below expenses quota.\\n\".format(i.name)\n\n self.write_log(log_str)\n return non_expenses", "def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields", "def expenses(self):\n return self.__get_distinct_accounts_by_type(Account.EXPENSE)", "def _get_exchanges(token: str) -> List[mtypes.Exchange]:\n 
_LOG.info(\"Getting exchanges from API ...\")\n response = get_client().service.ExchangeList(Token=token)\n\n exchanges = [\n mtypes.Exchange.from_dict(d=obj)\n for obj in zeep.helpers.serialize_object(response.EXCHANGES[\"EXCHANGE\"])\n ]\n _LOG.info(\"Got %s exchanges\", len(exchanges))\n return exchanges", "def get_option_expirations(symbol: str, source: str = \"Nasdaq\") -> list:\n source = re.sub(r\"\\s+\", \"\", source.lower())\n output = []\n if source == \"tradier\":\n output = tradier_model.option_expirations(symbol)\n if source == \"yahoofinance\":\n output = yfinance_model.option_expirations(symbol)\n if source == \"nasdaq\":\n output = nasdaq_model.option_expirations(symbol)\n if source == \"intrinio\":\n output = intrinio_model.get_expiration_dates(symbol)\n\n if not output:\n logger.info(\"Invalid Source or Symbol\")\n console.print(\"Invalid Source or Symbol\")\n return []\n\n return output", "def get_expired_requests(imaging_server_id):\n now = datetime.datetime.utcnow()\n res = sql.get_conn().execute(select(\n [model.requests.c.id],\n (model.requests.c.expires < now)\n & (model.requests.c.state != 'expired')\n & (model.requests.c.state != 'closed')\n & (model.requests.c.imaging_server_id == imaging_server_id)))\n expired = [r[0] for r in res.fetchall()]\n return expired", "def expenses_for_period(self, start=None, end=None):\n return list(self._expenses_for_period(start, end))", "def expenses(self):\n\n return Expenses.objects.filter(\n house=self.house,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_explores() should be able to filter on model and/or explore.
def test_get_explores_filters(fc: fetcher.Fetcher):
    explores = fc.get_explores(model="henry_dusty")
    assert all(e.model_name == "henry_dusty" for e in explores)

    explores = fc.get_explores(model="henry_qa", explore="explore_2_joins_all_used")
    assert all(
        e.model_name == "henry_qa" and e.name == "explore_2_joins_all_used"
        for e in explores
    )
[ "def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)", "def get_explores_from_model(client, model_name, log=LOGGER):\n try:\n return [explore.name for explore in client.lookml_model(model_name, fields='explores').explores]\n except SDKError:\n cprint(f\"ERROR:\\tModel {model_name} not found.\", \"FAIL\", log)\n exit()", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields", "def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]", "def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)", "def test_get_explores_throws_if_model_or_explore_does_not_exist(\n fc: fetcher.Fetcher, model: Optional[str], explore: Optional[str], msg: str\n):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_explores(model=model, explore=explore)\n assert msg in str(exc.value)", "def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])", "def enum_exploits(self, source):\n\n # init local list\n response = []\n\n self.cur.execute(\n \"SELECT DISTINCT id,title,file,link FROM exploits_db WHERE source = '{0}' and cve_id=? 
order by id\".format(source),\n self.query)\n\n for data in self.cur.fetchall():\n id = data[0]\n title = data[1]\n file = data[2]\n url = data[3]\n\n # format the response\n exploits = {\"id\": id, \"parameters\": {\"title\": title, \"file\": file, \"url\": url}}\n response.append(exploits)\n\n return response", "def find_expsets_processedfiles_requiring_higlass_items(connection, check_name, action_name, search_queries, minutes_leeway=1):\n # Create the check\n check = CheckResult(connection, check_name)\n check.action = action_name\n check.full_output = {}\n\n # Generate the terms each Experiment Set will return.\n fields_to_include = \"&field=\" + \"&field=\".join([\n \"accession\",\n \"award.uuid\",\n \"contributing_labs.uuid\",\n \"description\",\n \"experiments_in_set.processed_files.accession\",\n \"experiments_in_set.processed_files.genome_assembly\",\n \"experiments_in_set.processed_files.higlass_uid\",\n \"experiments_in_set.processed_files.status\",\n \"lab.uuid\",\n \"processed_files.accession\",\n \"processed_files.genome_assembly\",\n \"processed_files.higlass_uid\",\n \"processed_files.status\",\n \"static_content\",\n ])\n\n # If no search query was provided, fail\n if not search_queries:\n check.summary = check.description = \"No search query provided, nothing to update.\"\n check.status = 'PASS'\n check.allow_action = False\n return check\n\n expsets_by_accession = {}\n # Use all of the search queries to make a list of the ExpSets we will work on.\n for query in search_queries:\n # Interpolate the timestamps, if needed\n query = interpolate_query_check_timestamps(connection, query, action_name, check, minutes_leeway)\n\n # Add to base search\n processed_expsets_query = \"/search/?type=ExperimentSetReplicate\" + query + fields_to_include\n\n # Query the Experiment Sets\n search_res = ff_utils.search_metadata(processed_expsets_query, key=connection.ff_keys)\n\n # Collate the results into a dict of ExpSets, ordered by accession\n for expset in search_res:\n expsets_by_accession[ expset[\"accession\"] ] = expset\n\n # Get the reference files\n reference_files_by_ga = get_reference_files(connection)\n check.full_output['reference_files'] = reference_files_by_ga\n\n # Collate all of the Higlass Items that need to be updated. Store them by genome assembly, then accession.\n target_files_by_ga = {}\n for expset_accession, expset in expsets_by_accession.items():\n # Get all of the processed files. 
Stop if there is an error.\n file_info = gather_processedfiles_for_expset(expset)\n\n if file_info[\"error\"]:\n continue\n\n # If there is a manually created higlass item, don't clobber it with a automatically generated one.\n if file_info[\"manual_higlass_view_config\"]:\n continue\n\n processed_file_genome_assembly = file_info[\"genome_assembly\"]\n contributing_labs = [ cl[\"uuid\"] for cl in expset.get(\"contributing_labs\", []) ]\n\n if processed_file_genome_assembly not in target_files_by_ga:\n target_files_by_ga[ processed_file_genome_assembly ] = {}\n target_files_by_ga[ processed_file_genome_assembly ][expset_accession] = {\n \"accession\" : expset_accession,\n \"award\" : expset[\"award\"][\"uuid\"],\n \"contributing_labs\" : contributing_labs,\n \"description\": expset[\"description\"],\n \"files\" : file_info[\"files\"],\n \"lab\" : expset[\"lab\"][\"uuid\"],\n \"static_content\" : expset.get(\"static_content\", []),\n }\n\n # Check for missing reference files\n for ga in target_files_by_ga:\n if ga in reference_files_by_ga and len(reference_files_by_ga[ga]) >= 2:\n full_output_key = \"ready_expsets\"\n else:\n full_output_key = \"missing_reference_files\"\n if full_output_key not in check.full_output:\n check.full_output[full_output_key] = {}\n check.full_output[full_output_key][ga] = target_files_by_ga[ga]\n\n ready_to_generate_count = 0\n if \"ready_expsets\" in check.full_output:\n ready_to_generate_count = sum([len(accessions) for x, accessions in check.full_output[\"ready_expsets\"].items()])\n\n check.summary = \"\"\n # If there are no files to act upon, we're done.\n if not target_files_by_ga:\n check.summary = check.description = \"No new view configs to generate\"\n check.status = 'PASS'\n check.allow_action = False\n return check\n\n check.summary += \"Ready to generate Higlass Items for {higlass_count} Experiment Sets. \".format(higlass_count=ready_to_generate_count)\n if \"missing_reference_files\" in check.full_output:\n check.summary += \"Missing reference files for {gas}, skipping. 
\".format(\n gas=\", \".join(check.full_output[\"missing_reference_files\"].keys())\n )\n\n check.status = 'WARN'\n check.description = check.summary + \"See full_output for details.\"\n\n if ready_to_generate_count <= 0:\n check.allow_action = False\n else:\n check.allow_action = True\n return check", "def check_fields_in_model(client, models=None, explores=None, continue_on_errors=False, excludes=None, log=LOGGER):\n if models is None:\n models = get_models(client)\n elif isinstance(models, str):\n models = [models]\n for model in models:\n if explores is None:\n explores = get_explores_from_model(client, model)\n elif isinstance(explores, str):\n explores = [explores]\n for explore in explores:\n cprint(f\"\\nChecking fields for explore: \", \"BOLD\", log, end='')\n cprint(f\"{explore}\", \"OKBLUE\", log, end='')\n cprint(\" in model: \", \"BOLD\", log, end='')\n cprint(f\"{model}\", \"OKBLUE\", log)\n print(\"_\" * 79)\n if excludes is None:\n excludes = []\n _ = check_fields(client, model, explore, continue_on_errors, excludes)", "def get_all_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_all()]", "def get_viewable_explorations(user_id):\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_viewable_explorations(user_id)]", "def explore(self) -> None:\n self._is_exploring = True", "def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)", "def test_get_recommendations(self):\n data = self._assertGetRecommendations({'context': self.context})\n \n self.assert_(set(self.expected_terms) & set(data['terms'].split()))\n self.assert_([e for e in data['results'] if e['url']==self.doc.origin])", "def get_list(self, resp):\n expenses = resp['expenses']\n expense_list = ExpenseList()\n for value in expenses:\n expense = Expense()\n expense.set_expense_id(value['expense_id'])\n expense.set_date(value['date'])\n expense.set_account_name(value['account_name'])\n expense.set_paid_through_account_name(value[\\\n 'paid_through_account_name'])\n expense.set_description(value['description'])\n expense.set_currency_id(value['currency_id'])\n expense.set_currency_code(value['currency_code'])\n expense.set_bcy_total(value['bcy_total'])\n expense.set_total(value['total'])\n expense.set_is_billable(value['is_billable'])\n expense.set_reference_number(value['reference_number'])\n expense.set_customer_id(value['customer_id'])\n expense.set_customer_name(value['customer_name'])\n expense.set_vendor_id(value['vendor_id'])\n expense.set_vendor_name(value['vendor_name'])\n expense.set_status(value['status'])\n expense.set_created_time(value['created_time'])\n expense.set_expense_receipt_name(value['expense_receipt_name'])\n expense_list.set_expenses(expense)\n page_context_obj = PageContext()\n page_context = resp['page_context']\n page_context_obj.set_page(page_context['page'])\n page_context_obj.set_per_page(page_context['per_page'])\n page_context_obj.set_has_more_page(page_context['has_more_page'])\n page_context_obj.set_report_name(page_context['report_name'])\n page_context_obj.set_applied_filter(page_context['applied_filter'])\n page_context_obj.set_sort_column(page_context['sort_column'])\n page_context_obj.set_sort_order(page_context['sort_order'])\n expense_list.set_page_context(page_context)\n\n return expense_list", "def fetch_entries(request):\n # Access restrictions\n if not request.user.is_authenticated:\n return 
HttpResponse(\"Permission denied\")\n if not request.user.is_superuser:\n return HttpResponse(\"Permission denied\")\n \n # Get data from request\n searchterm = request.GET.get('search')\n page_number = request.GET.get('page')\n selected_tab = request.GET.get('tab')\n\n # Get indvidiual search terms\n searchterms = None\n if searchterm:\n searchterms = searchterm.split(',')\n for term in searchterms:\n term = term.strip()\n \n if not searchterm:\n searchterms = [\"\"]\n \n # Get data for selected tab and search terms\n data = None\n if selected_tab == \"Mitglied\":\n data = Mitglied.history.none()\n for term in searchterms:\n data = data | Mitglied.history.filter(Q(id__icontains=term) | Q(vorname__icontains=term) | Q(name__icontains=term))\n if selected_tab == \"MitgliedMail\":\n data = MitgliedMail.history.none()\n for term in searchterms:\n data = data | MitgliedMail.history.filter(Q(mitglied__id__icontains=term) | Q(mitglied__vorname__icontains=term) | Q(mitglied__name__icontains=term) | Q(email__icontains=term))\n if selected_tab == \"MitgliedAmt\":\n data = MitgliedAmt.history.none()\n for term in searchterms:\n data = data | MitgliedAmt.history.filter(Q(mitglied__id__icontains=term) | Q(mitglied__vorname__icontains=term) | Q(mitglied__name__icontains=term) \n | Q(funktion__id__icontains=term) | Q(funktion__bezeichnung__icontains=term) \n | Q(funktion__organisationseinheit__bezeichnung__icontains=term)\n | Q(funktion__unterbereich__bezeichnung__icontains=term))\n\n if selected_tab == \"Organisationseinheit\":\n data = Organisationseinheit.history.none()\n for term in searchterms:\n data = data | Organisationseinheit.history.filter(Q(id__icontains=term) | Q(bezeichnung__icontains=term))\n if selected_tab == \"Unterbereich\":\n data = Unterbereich.history.none()\n for term in searchterms:\n data = data | Unterbereich.history.filter(Q(id__icontains=term) | Q(bezeichnung__icontains=term) | Q(organisationseinheit__id__icontains=term) | Q(organisationseinheit__bezeichnung__icontains=term))\n if selected_tab == \"Amt\":\n data = Funktion.history.none()\n for term in searchterms:\n data = data | Funktion.history.filter(Q(id__icontains=term) | Q(bezeichnung__icontains=term)\n | Q(organisationseinheit__id__icontains=term) | Q(organisationseinheit__bezeichnung__icontains=term)\n | Q(unterbereich__id__icontains=term) | Q(unterbereich__bezeichnung__icontains=term))\n if selected_tab == \"Recht\":\n data = Recht.history.none()\n for term in searchterms:\n data = data | Recht.history.filter(Q(id__icontains=term) | Q(bezeichnung__icontains=term))\n if selected_tab == \"AmtRecht\":\n data = FunktionRecht.history.none()\n for term in searchterms:\n data = data | FunktionRecht.history.filter(Q(funktion__id__icontains=term) | Q(funktion__bezeichnung__icontains=term)\n | Q(funktion__organisationseinheit__bezeichnung__icontains=term)\n | Q(funktion__unterbereich__bezeichnung__icontains=term)\n | Q(recht__id__icontains=term) | Q(recht__bezeichnung__icontains=term))\n \n if selected_tab == \"Checkliste\":\n data = Checkliste.history.none()\n for term in searchterms:\n data = data | Checkliste.history.filter(Q(id__icontains=term) \n | Q(mitglied__id__icontains=term) | Q(mitglied__vorname__icontains=term) | Q(mitglied__name__icontains=term) \n | Q(amt__funktion__id__icontains=term) | Q(amt__funktion__bezeichnung__icontains=term) \n | Q(amt__funktion__organisationseinheit__bezeichnung__icontains=term)\n | Q(amt__funktion__unterbereich__bezeichnung__icontains=term))\n if selected_tab == \"ChecklisteRecht\":\n 
data = ChecklisteRecht.history.none()\n for term in searchterms:\n data = data | ChecklisteRecht.history.filter(Q(checkliste__id__icontains=term) \n | Q(recht__id__icontains=term) | Q(recht__bezeichnung__icontains=term)\n | Q(checkliste__mitglied__id__icontains=term) | Q(checkliste__mitglied__vorname__icontains=term) | Q(checkliste__mitglied__name__icontains=term) \n | Q(checkliste__amt__funktion__id__icontains=term) | Q(checkliste__amt__funktion__bezeichnung__icontains=term) \n | Q(checkliste__amt__funktion__organisationseinheit__bezeichnung__icontains=term)\n | Q(checkliste__amt__funktion__unterbereich__bezeichnung__icontains=term))\n if selected_tab == \"ChecklisteAufgabe\":\n data = ChecklisteAufgabe.history.none()\n for term in searchterms:\n data = data | ChecklisteAufgabe.history.filter(Q(checkliste__id__icontains=term) \n | Q(aufgabe__id__icontains=term) | Q(aufgabe__bezeichnung__icontains=term)\n | Q(checkliste__mitglied__id__icontains=term) | Q(checkliste__mitglied__vorname__icontains=term) | Q(checkliste__mitglied__name__icontains=term) \n | Q(checkliste__amt__funktion__id__icontains=term) | Q(checkliste__amt__funktion__bezeichnung__icontains=term) \n | Q(checkliste__amt__funktion__organisationseinheit__bezeichnung__icontains=term)\n | Q(checkliste__amt__funktion__unterbereich__bezeichnung__icontains=term))\n\n if selected_tab == \"User\":\n data = User.history.none()\n for term in searchterms:\n data = data | User.history.filter(Q(username__icontains=term) | Q(first_name__icontains=term) | Q(last_name__icontains=term) | Q(email__icontains=term))\n\n # Paginate results\n paginator = Paginator(data, 15)\n data_page = paginator.get_page(page_number)\n\n return render(request=request,\n template_name=\"historie/row.html\",\n context={\"data\": data_page})", "def get_images_by_vulnerability(self, **kwargs):\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_explores() should throw if an explore/model is not found.
def test_get_explores_throws_if_model_or_explore_does_not_exist(
    fc: fetcher.Fetcher, model: Optional[str], explore: Optional[str], msg: str
):
    with pytest.raises(exceptions.NotFoundError) as exc:
        fc.get_explores(model=model, explore=explore)
    assert msg in str(exc.value)
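The test above receives model, explore, and msg as arguments, so it is presumably driven by a pytest parametrization defined elsewhere in the suite. A minimal sketch of what that parametrization could look like follows; the model/explore names and expected message fragments are illustrative assumptions, not values taken from the source.

import pytest

# Hypothetical parameter sets for the NotFoundError test above, applied as
# @not_found_cases on that test; the names and message fragments are
# assumptions chosen only to illustrate the shape of the inputs.
not_found_cases = pytest.mark.parametrize(
    "model, explore, msg",
    [
        ("no_such_model", None, "no_such_model"),
        ("henry_qa", "no_such_explore", "no_such_explore"),
    ],
)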
[ "def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)", "def get_explores_from_model(client, model_name, log=LOGGER):\n try:\n return [explore.name for explore in client.lookml_model(model_name, fields='explores').explores]\n except SDKError:\n cprint(f\"ERROR:\\tModel {model_name} not found.\", \"FAIL\", log)\n exit()", "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)", "def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields", "def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]", "def enum_exploits(self, source):\n\n # init local list\n response = []\n\n self.cur.execute(\n \"SELECT DISTINCT id,title,file,link FROM exploits_db WHERE source = '{0}' and cve_id=? 
order by id\".format(source),\n self.query)\n\n for data in self.cur.fetchall():\n id = data[0]\n title = data[1]\n file = data[2]\n url = data[3]\n\n # format the response\n exploits = {\"id\": id, \"parameters\": {\"title\": title, \"file\": file, \"url\": url}}\n response.append(exploits)\n\n return response", "def get_explanations_lihgtgbm(model, x_exp, dataset, perc, load=False, save=False):\n\n if dataset not in constants.possible_datasets:\n raise NotImplementedError('Dataset {} not supported'.format(dataset))\n\n fname = 'shap_{}_lightgbm_{}'.format(\n dataset,\n perc\n )\n fpath = os.path.join(\n constants.SAVE_FILES_DIR,\n fname\n )\n\n if load:\n if os.path.isfile(fpath):\n print('Explanations file found')\n return pd.read_csv(fpath)\n\n print('Explanations file not found or load = False')\n contribs = model.predict(x_exp, pred_contrib=True)\n np_contribs = np.array(contribs)\n shap_values_df = pd.DataFrame(np_contribs[:, 0:-1])\n\n if save:\n print('Saving explanations for future use')\n shap_values_df.to_csv(fpath)\n\n return shap_values_df", "def get_all_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_all()]", "def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])", "def entity_lookup(token,url,resp='json-ld'):\n parameters = {'access-token': token,\n 'format' : resp}\n r = ''\n Ntries = 0\n while r == '' and Ntries < 3:\n Ntries += 1\n try:\n with requests.get(url, params = parameters) as request:\n r = request.json()\n except (requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError) as err:\n print('Exception occurred. 
Suspending and trying again...')\n sleep(30)\n continue\n return r", "def explore(obj: Any) -> Any:\n try:\n e = Explorer(obj)\n return e.explore()\n except Exception as e:\n console.print_exception(show_locals=True)\n print()\n rprint(f\"[red]{random_error_quote()}\")\n formatted_link = f\"https://github.com/kylepollina/objexplore/issues/new?assignees=&labels=&template=bug_report.md&title={e}\".replace(\n \" \", \"+\"\n )\n print(\"Please report the issue here:\")\n rprint(f\" [link={formatted_link}][u]{formatted_link}[/u][/link]\")\n rprint(\n \"[yellow italic]Make sure to copy/paste the above traceback to the issue to make the issue quicker to solve!\"\n )", "def find_expsets_processedfiles_requiring_higlass_items(connection, check_name, action_name, search_queries, minutes_leeway=1):\n # Create the check\n check = CheckResult(connection, check_name)\n check.action = action_name\n check.full_output = {}\n\n # Generate the terms each Experiment Set will return.\n fields_to_include = \"&field=\" + \"&field=\".join([\n \"accession\",\n \"award.uuid\",\n \"contributing_labs.uuid\",\n \"description\",\n \"experiments_in_set.processed_files.accession\",\n \"experiments_in_set.processed_files.genome_assembly\",\n \"experiments_in_set.processed_files.higlass_uid\",\n \"experiments_in_set.processed_files.status\",\n \"lab.uuid\",\n \"processed_files.accession\",\n \"processed_files.genome_assembly\",\n \"processed_files.higlass_uid\",\n \"processed_files.status\",\n \"static_content\",\n ])\n\n # If no search query was provided, fail\n if not search_queries:\n check.summary = check.description = \"No search query provided, nothing to update.\"\n check.status = 'PASS'\n check.allow_action = False\n return check\n\n expsets_by_accession = {}\n # Use all of the search queries to make a list of the ExpSets we will work on.\n for query in search_queries:\n # Interpolate the timestamps, if needed\n query = interpolate_query_check_timestamps(connection, query, action_name, check, minutes_leeway)\n\n # Add to base search\n processed_expsets_query = \"/search/?type=ExperimentSetReplicate\" + query + fields_to_include\n\n # Query the Experiment Sets\n search_res = ff_utils.search_metadata(processed_expsets_query, key=connection.ff_keys)\n\n # Collate the results into a dict of ExpSets, ordered by accession\n for expset in search_res:\n expsets_by_accession[ expset[\"accession\"] ] = expset\n\n # Get the reference files\n reference_files_by_ga = get_reference_files(connection)\n check.full_output['reference_files'] = reference_files_by_ga\n\n # Collate all of the Higlass Items that need to be updated. Store them by genome assembly, then accession.\n target_files_by_ga = {}\n for expset_accession, expset in expsets_by_accession.items():\n # Get all of the processed files. 
Stop if there is an error.\n file_info = gather_processedfiles_for_expset(expset)\n\n if file_info[\"error\"]:\n continue\n\n # If there is a manually created higlass item, don't clobber it with a automatically generated one.\n if file_info[\"manual_higlass_view_config\"]:\n continue\n\n processed_file_genome_assembly = file_info[\"genome_assembly\"]\n contributing_labs = [ cl[\"uuid\"] for cl in expset.get(\"contributing_labs\", []) ]\n\n if processed_file_genome_assembly not in target_files_by_ga:\n target_files_by_ga[ processed_file_genome_assembly ] = {}\n target_files_by_ga[ processed_file_genome_assembly ][expset_accession] = {\n \"accession\" : expset_accession,\n \"award\" : expset[\"award\"][\"uuid\"],\n \"contributing_labs\" : contributing_labs,\n \"description\": expset[\"description\"],\n \"files\" : file_info[\"files\"],\n \"lab\" : expset[\"lab\"][\"uuid\"],\n \"static_content\" : expset.get(\"static_content\", []),\n }\n\n # Check for missing reference files\n for ga in target_files_by_ga:\n if ga in reference_files_by_ga and len(reference_files_by_ga[ga]) >= 2:\n full_output_key = \"ready_expsets\"\n else:\n full_output_key = \"missing_reference_files\"\n if full_output_key not in check.full_output:\n check.full_output[full_output_key] = {}\n check.full_output[full_output_key][ga] = target_files_by_ga[ga]\n\n ready_to_generate_count = 0\n if \"ready_expsets\" in check.full_output:\n ready_to_generate_count = sum([len(accessions) for x, accessions in check.full_output[\"ready_expsets\"].items()])\n\n check.summary = \"\"\n # If there are no files to act upon, we're done.\n if not target_files_by_ga:\n check.summary = check.description = \"No new view configs to generate\"\n check.status = 'PASS'\n check.allow_action = False\n return check\n\n check.summary += \"Ready to generate Higlass Items for {higlass_count} Experiment Sets. \".format(higlass_count=ready_to_generate_count)\n if \"missing_reference_files\" in check.full_output:\n check.summary += \"Missing reference files for {gas}, skipping. 
\".format(\n gas=\", \".join(check.full_output[\"missing_reference_files\"].keys())\n )\n\n check.status = 'WARN'\n check.description = check.summary + \"See full_output for details.\"\n\n if ready_to_generate_count <= 0:\n check.allow_action = False\n else:\n check.allow_action = True\n return check", "def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)", "def _get_exp(self, experiment_id=None, experiment_name=None):\n assert (\n experiment_id is not None or experiment_name is not None\n ), \"Please input at least one of experiment/recorder id or name before retrieving experiment/recorder.\"\n if experiment_id is not None:\n try:\n # NOTE: the mlflow's experiment_id must be str type...\n # https://www.mlflow.org/docs/latest/python_api/mlflow.tracking.html#mlflow.tracking.MlflowClient.get_experiment\n exp = self.client.get_experiment(experiment_id)\n if exp.lifecycle_stage.upper() == \"DELETED\":\n raise MlflowException(\"No valid experiment has been found.\")\n experiment = MLflowExperiment(exp.experiment_id, exp.name, self.uri)\n return experiment\n except MlflowException as e:\n raise ValueError(\n \"No valid experiment has been found, please make sure the input experiment id is correct.\"\n ) from e\n elif experiment_name is not None:\n try:\n exp = self.client.get_experiment_by_name(experiment_name)\n if exp is None or exp.lifecycle_stage.upper() == \"DELETED\":\n raise MlflowException(\"No valid experiment has been found.\")\n experiment = MLflowExperiment(exp.experiment_id, experiment_name, self.uri)\n return experiment\n except MlflowException as e:\n raise ValueError(\n \"No valid experiment has been found, please make sure the input experiment name is correct.\"\n ) from e", "def get_multiple_explorations_by_id(\n exp_ids: List[str], strict: bool = True\n) -> Dict[str, exp_domain.Exploration]:\n result = {}\n uncached = []\n cache_result = caching_services.get_multi(\n caching_services.CACHE_NAMESPACE_EXPLORATION, None, exp_ids)\n\n for exp_obj in cache_result.values():\n result[exp_obj.id] = exp_obj\n\n for _id in exp_ids:\n if _id not in result:\n uncached.append(_id)\n\n db_exp_models = exp_models.ExplorationModel.get_multi(uncached)\n db_results_dict = {}\n not_found = []\n for i, eid in enumerate(uncached):\n model = db_exp_models[i]\n if model:\n exploration = get_exploration_from_model(model)\n db_results_dict[eid] = exploration\n else:\n logging.info(\n 'Tried to fetch exploration with id %s, but no such '\n 'exploration exists in the datastore' % eid)\n not_found.append(eid)\n\n if strict and not_found:\n raise ValueError(\n 'Couldn\\'t find explorations with the following ids:\\n%s'\n % '\\n'.join(not_found))\n\n cache_update = {\n eid: results for eid, results in db_results_dict.items()\n if results is not None\n }\n\n if cache_update:\n caching_services.set_multi(\n caching_services.CACHE_NAMESPACE_EXPLORATION, None, cache_update)\n\n result.update(db_results_dict)\n return result", "def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)", "def test_fetch_issues_handler(self) -> None:\n response = self.get_json(\n '/issuesdatahandler/%s' % self.EXP_ID,\n params={'exp_version': 1})\n self.assertEqual(len(response['unresolved_issues']), 2)\n self.assertEqual(\n response['unresolved_issues'][0]['issue_type'], 'EarlyQuit')\n self.assertEqual(\n 
response['unresolved_issues'][1]['issue_type'],\n 'MultipleIncorrectSubmissions')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_used_explores() should return all used explores.
def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):
    used_explores = fc.get_used_explores(model=test_model["name"])
    assert isinstance(used_explores, dict)
    assert all(e in test_used_explore_names for e in used_explores)
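The membership assertion above iterates the returned dict directly, which implies get_used_explores yields a mapping keyed by explore name, plausibly with usage counts as values (by analogy with the field-stats test further down). A small hypothetical usage sketch under that assumption:

# Assumes the returned dict maps explore name -> usage count; the model name
# "henry_qa" is borrowed from the fixtures referenced elsewhere in these tests.
used = fc.get_used_explores(model="henry_qa")
for explore_name, query_count in sorted(used.items(), key=lambda kv: -kv[1]):
    print(f"{explore_name}: used by {query_count} queries")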
[ "def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)", "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)", "def get_explores_from_model(client, model_name, log=LOGGER):\n try:\n return [explore.name for explore in client.lookml_model(model_name, fields='explores').explores]\n except SDKError:\n cprint(f\"ERROR:\\tModel {model_name} not found.\", \"FAIL\", log)\n exit()", "def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])", "def cachetotstoreablemisses(self) :\n\t\ttry :\n\t\t\treturn self._cachetotstoreablemisses\n\t\texcept Exception as e:\n\t\t\traise e", "def get_used_sys_no_list():\n\n print(\"loading 'used numbers'...\")\n\n if force_all or is_test:\n print(\"force or test run.\")\n used_nos = grab_used_nos()\n __write_used_nos(used_nos)\n else:\n if os.path.isfile(\"./data/tmp/existing_numbers.txt\"):\n print(\"File 'existing_numbers.txt' already exists. 
Loading list from cache instead.\")\n used_nos = __read_used_nos()\n else:\n used_nos = grab_used_nos()\n __write_used_nos(used_nos)\n\n used_nos = crop_list(used_nos)\n print(\"Got Already Used System Numbers now.\\n\")\n# print(used_nos)\n\n return used_nos", "def cachetotnonstoreablemisses(self) :\n\t\ttry :\n\t\t\treturn self._cachetotnonstoreablemisses\n\t\texcept Exception as e:\n\t\t\traise e", "def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)", "def summarize_unused_offers(app_queue: Optional[MarathonQueueItem]) -> Dict[str, int]:\n unused_offers = get_app_queue_last_unused_offers(app_queue)\n reasons: Dict[str, int] = defaultdict(lambda: 0)\n for offer in unused_offers:\n for reason in offer[\"reason\"]:\n reasons[reason] += 1\n return reasons", "def get_remaining_hits(self):\n session = self.connect_to_db()\n remaining_hits = session.query(HIT).filter(HIT.completed == False).all()\n session.close()\n\n hits = []\n\n for hit in remaining_hits:\n\n temp_hit = self.db_to_dict(hit)\n hits.append(temp_hit)\n\n return hits", "def popular_items(self):\n if self._popular_items is None:\n self._popular_items = self._get_popular_items(100)\n return self._popular_items", "def test_get_list_most_expensive(self):\n\n expensive_goods_test = self.info_list.get_list_most_expensive()\n most_expensive_test = self.form_expensive_list_goods()\n\n self.assertEqual(expensive_goods_test, most_expensive_test)", "def _find_all_free_sites(self):\n sites_with_gold = []\n # for site in self.sites_container.list_of_all_sites: # tu ma byc all free sites\n for site in super(MineFinder, self)._find_all_free_sites():\n if site.good_for_mine:\n sites_with_gold.append(site)\n return sites_with_gold", "def enum_exploits(self, source):\n\n # init local list\n response = []\n\n self.cur.execute(\n \"SELECT DISTINCT id,title,file,link FROM exploits_db WHERE source = '{0}' and cve_id=? order by id\".format(source),\n self.query)\n\n for data in self.cur.fetchall():\n id = data[0]\n title = data[1]\n file = data[2]\n url = data[3]\n\n # format the response\n exploits = {\"id\": id, \"parameters\": {\"title\": title, \"file\": file, \"url\": url}}\n response.append(exploits)\n\n return response", "def continuing_less_expenses(self):\n log_str = \"continuing_less_expenses() Method.\\n\"\n non_expenses = []\n for i in self.available_cand:\n if not i.return_expenses:\n non_expenses.append(i)\n log_str += \"{} is below expenses quota.\\n\".format(i.name)\n\n self.write_log(log_str)\n return non_expenses", "def cachetotnon304hits(self) :\n\t\ttry :\n\t\t\treturn self._cachetotnon304hits\n\t\texcept Exception as e:\n\t\t\traise e", "def cachetotflashcachemisses(self) :\n\t\ttry :\n\t\t\treturn self._cachetotflashcachemisses\n\t\texcept Exception as e:\n\t\t\traise e", "def _count_explores(self) -> int:\n explore_count = 0\n for model in self.project.models:\n explore_count += len(model.explores)\n return explore_count" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_unused_explores() should return all unused explores.
def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):
    unused_explores = fc.get_unused_explores(model=test_model["name"])
    assert all(e in test_unused_explores for e in unused_explores)
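Conceptually, the unused explores checked here are the difference between all explores in a model and the used ones. A hedged sketch of that idea, built only from the two fetcher calls exercised in this and the previous test (not necessarily Henry's actual implementation):

def unused_explores_sketch(fc, model_name):
    # Illustrative only: "unused" = all explore names minus the used ones.
    all_names = {e.name for e in fc.get_explores(model=model_name)}
    used_names = set(fc.get_used_explores(model=model_name))  # dict keyed by explore name
    return sorted(all_names - used_names)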
[ "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def unused_exits(self):\n return self._unused_exits", "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def get_remaining_hits(self):\n session = self.connect_to_db()\n remaining_hits = session.query(HIT).filter(HIT.completed == False).all()\n session.close()\n\n hits = []\n\n for hit in remaining_hits:\n\n temp_hit = self.db_to_dict(hit)\n hits.append(temp_hit)\n\n return hits", "def _get_nonbruteforce_exploit_modules(self, client):\n print(\"Selection of potential exploit modules, this could take a while..\")\n\n exploit_modules = client.modules.exploits\n final_modules = {}\n for module in exploit_modules:\n try:\n module_info = self._get_module_description(module, client)\n if (\"bruteforc\" not in module_info and \"brute forc\" not in module_info): #Filter out some bruteforce modules\n final_modules[module] = module_info\n except Exception as identifier:\n print(identifier)\n \n print(\"The initial list of all availabe non-bruteforce exploit modules compiled.\")\n return final_modules", "def get_explores_from_model(client, model_name, log=LOGGER):\n try:\n return [explore.name for explore in client.lookml_model(model_name, fields='explores').explores]\n except SDKError:\n cprint(f\"ERROR:\\tModel {model_name} not found.\", \"FAIL\", log)\n exit()", "def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)", "def cachetotnonstoreablemisses(self) :\n\t\ttry :\n\t\t\treturn self._cachetotnonstoreablemisses\n\t\texcept Exception as e:\n\t\t\traise e", "def summarize_unused_offers(app_queue: Optional[MarathonQueueItem]) -> Dict[str, int]:\n unused_offers = get_app_queue_last_unused_offers(app_queue)\n reasons: Dict[str, int] = defaultdict(lambda: 0)\n for offer in unused_offers:\n for reason in offer[\"reason\"]:\n reasons[reason] += 1\n return reasons", "def get_bad_requests(self):\r\n\t\treturn [i.request for i in self.logs if str(i.status).startswith('4')]", "def unused_evals(self):\n\t\treturn self.Evals - self.nFES", "def continuing_less_expenses(self):\n log_str = \"continuing_less_expenses() Method.\\n\"\n non_expenses = []\n for i in self.available_cand:\n if not i.return_expenses:\n non_expenses.append(i)\n log_str += \"{} is below expenses quota.\\n\".format(i.name)\n\n self.write_log(log_str)\n return non_expenses", "def getExpired(self, idle=365):\n cutOff = datetime.datetime.now() - datetime.timedelta(days=idle)\n return [x for x in self.xeps if x.status == \"Experimental\" and x.date < cutOff]", "def find_unused_entities(model, entity_type):\n\n # collect all metabolites actually used by reactions\n entities_used = []\n\n for reaction in model.reactions:\n entities = reaction.genes if entity_type == \"genes\" else reaction.metabolites\n # Loop through each entity and add if not already there\n for entity in entities:\n if entity not in entities_used:\n 
entities_used.append(entity)\n\n # go through entities in the model and collect unused ones\n unused_entities = []\n all_entities = model.genes if entity_type == \"genes\" else model.metabolites\n for entity in all_entities:\n if entity not in entities_used:\n unused_entities.append(entity)\n\n return unused_entities", "def get_unused_floating_ip_list(cls, cloudname):\n try:\n cloud_provider = CloudProvider(cloudname).provider\n floating_ips = cloud_provider.list_floating_ips()\n unused_floating_ips = list()\n\n for floating_ip in list(floating_ips.values()):\n if floating_ip[\"fixed_ip\"] is None and \\\n floating_ip[\"instance_id\"] is None:\n # add to unused list\n floating_ip[\"cloud\"] = cloudname\n unused_floating_ips.append(floating_ip)\n\n return unused_floating_ips\n except Exception as ex:\n Console.error(ex.message)", "def find_unassigned_items():\n assets = Asset.query.filter_by(assigned=False).all()\n return assets", "def unused(self):\n defined = set(self._data.keys())\n assigned = set(self._usage.keys())\n return defined.difference(assigned)", "def get_unassigned_tags(**kwargs):\n return Tags.get_unassigned_tags(**kwargs)", "def get_expired_requests(imaging_server_id):\n now = datetime.datetime.utcnow()\n res = sql.get_conn().execute(select(\n [model.requests.c.id],\n (model.requests.c.expires < now)\n & (model.requests.c.state != 'expired')\n & (model.requests.c.state != 'closed')\n & (model.requests.c.imaging_server_id == imaging_server_id)))\n expired = [r[0] for r in res.fetchall()]\n return expired" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_explore_fields() should return an explore's fields.
def test_get_explore_fields_gets_fields(
    fc: fetcher.Fetcher, test_model, test_explores_stats
):
    test_explore = test_explores_stats[0]
    explore = fc.get_explores(model=test_model["name"], explore=test_explore["name"])
    assert isinstance(explore, list)
    explore = explore[0]
    assert isinstance(explore, models.LookmlModelExplore)
    assert explore.model_name == test_model["name"]
    assert explore.name == test_explore["name"]
    fields = fc.get_explore_fields(explore)
    assert isinstance(fields, list)
    assert fields == test_explore["all_fields"]
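Judging by the attributes exercised in this and the next test, get_explore_fields presumably flattens an explore's dimension and measure names into one list. A minimal sketch under that assumption (the sorting and deduplication details are guesses, not the library's documented behaviour):

def explore_fields_sketch(explore):
    # Illustrative: gather dimension and measure names from a LookmlModelExplore.
    dimensions = [d.name for d in (explore.fields.dimensions or [])]
    measures = [m.name for m in (explore.fields.measures or [])]
    return sorted(set(dimensions + measures))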
[ "def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields", "def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])", "def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)", "def listAvailableFields():", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def extract_fields(q):\n return q[\"simple_query_string\"][\"fields\"]", "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def check_fields(client, model_name, explore_name, continue_on_errors=False, excludes=[], ix=None, log=LOGGER):\n query_id, field_set, query_url = create_query_from_fields(client, model_name, explore_name, excludes)\n num_fields = len(field_set)\n result = client.run_query(query_id, 'csv', limit=1)\n # sql = client.run_query(query_id, 'sql') # What is this for?\n if has_error(result):\n if not continue_on_errors:\n cprint(f\"{result}:\\t\",'FAIL', log)\n cprint(f\"{query_url}\\n\", 'UNDERLINE', log)\n return result, num_fields, query_url\n else:\n for field in field_set:\n if '.'.join(field.split('.')[1:]) in result:\n if excludes == []:\n cprint(f\"Errors found, continuing...\", \"WARNING\", log)\n excludes.append(field)\n return check_fields(client, model_name, explore_name, continue_on_errors, excludes, ix, log)\n return check_fields(client, model_name, explore_name, False, excludes, ix, log)\n else:\n cprint(f\"Success for {num_fields} fields:\\t\", 'OKGREEN', log, end='')\n cprint(f\"{query_url}\\n\", 'UNDERLINE', log)\n if excludes != []:\n excludes = sorted(list(set(excludes)))\n cprint(f\"Failures:\", \"FAIL\", log)\n for e in excludes:\n cprint(f\"\\t{e}\", \"FAIL\", log)\n return result, num_fields, query_url", "def demo_fields(self):\n return self._demo_fields", "def fields(self, n):\r\n \r\n return 
self.searcher.stored_fields(self.top_n[n])", "def fieldsUrl(self):\n return self.lweBaseUrl + \\\n \"/collections/\" + self.collectionName + \\\n \"/fields\"", "def get_query_fields(cls):\n ...", "def _get_fields(self):\n return self._fields", "def create_query_from_fields(client, model_name, explore_name, excludes):\n dims, mes = get_fields_from_model_explore(client, model_name, explore_name)\n field_set = dims + mes\n field_set = [field for field in field_set if field not in excludes]\n body = looker_sdk.models.WriteQuery(model=model_name, view=explore_name, fields=field_set)\n query = client.create_query(body)\n del query.client_id\n return query.id, field_set, query.share_url", "def fields(self):\n return super(SearchCursor, self).fields", "def fields(self):\n\n if self._fields is None:\n self._fields = self.searcher.stored_fields(self.docnum)\n return self._fields", "def arcpy_get_field_objects(self):\r\n\t\tif __thou_shalt__.do_a_dry_run:\r\n\t\t\treturn []\r\n\t\treturn thou_shalt(\"Fetch field information from {}\".format(self.shortened_name_with_context()),\r\n\t\t\tlambda:arcpy.ListFields(str(self))\r\n\t\t)", "def test_get_explore_join_stats(fc: fetcher.Fetcher, test_model):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=\"explore_2_joins_1_used\"\n )[0]\n field_stats = {\n \"explore_2_joins_1_used.d1\": 10,\n \"explore_2_joins_1_used.d2\": 5,\n \"explore_2_joins_1_used.d3\": 0,\n \"explore_2_joins_1_used.m1\": 0,\n \"join1.d1\": 10,\n \"join1.d2\": 10,\n \"join1.d3\": 10,\n \"join1.m1\": 0,\n \"join2.d1\": 0,\n \"join2.d2\": 0,\n \"join2.d3\": 0,\n \"join2.m1\": 0,\n }\n join_stats = fc.get_explore_join_stats(explore=explore, field_stats=field_stats)\n assert isinstance(join_stats, dict)\n assert len(join_stats) == 2\n assert join_stats == {\"join1\": 30, \"join2\": 0}", "def pull_fields(self, org):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_explore_fields() should return fields when an explore has only dimensions or only measures.
def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(
    fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores
):
    expected = test_dimensions_or_measures_only_explores[0]
    explore = fc.get_explores(model=test_model["name"], explore=expected["name"])
    assert isinstance(explore, list)
    actual = explore[0]
    assert actual.name == expected["name"]
    assert not (actual.fields.dimensions and actual.fields.measures)
    expected_fields = [f["name"] for f in expected["fields"]]
    actual_fields = fc.get_explore_fields(actual)
    assert actual_fields == expected_fields
[ "def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]", "def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)", "def extract_fields(q):\n return q[\"simple_query_string\"][\"fields\"]", "def demo_fields(self):\n return self._demo_fields", "def check_fields(client, model_name, explore_name, continue_on_errors=False, excludes=[], ix=None, log=LOGGER):\n query_id, field_set, query_url = create_query_from_fields(client, model_name, explore_name, excludes)\n num_fields = len(field_set)\n result = client.run_query(query_id, 'csv', limit=1)\n # sql = client.run_query(query_id, 'sql') # What is this for?\n if has_error(result):\n if not continue_on_errors:\n cprint(f\"{result}:\\t\",'FAIL', log)\n cprint(f\"{query_url}\\n\", 'UNDERLINE', log)\n return result, num_fields, query_url\n else:\n for field in field_set:\n if '.'.join(field.split('.')[1:]) in result:\n if excludes == []:\n cprint(f\"Errors found, continuing...\", \"WARNING\", log)\n excludes.append(field)\n return check_fields(client, model_name, explore_name, continue_on_errors, excludes, ix, log)\n return check_fields(client, model_name, explore_name, False, excludes, ix, log)\n else:\n cprint(f\"Success for {num_fields} fields:\\t\", 'OKGREEN', log, end='')\n cprint(f\"{query_url}\\n\", 'UNDERLINE', log)\n if excludes != []:\n excludes = sorted(list(set(excludes)))\n cprint(f\"Failures:\", \"FAIL\", log)\n for e in excludes:\n cprint(f\"\\t{e}\", \"FAIL\", log)\n return result, num_fields, query_url", "def create_query_from_fields(client, model_name, explore_name, excludes):\n dims, mes = get_fields_from_model_explore(client, model_name, explore_name)\n field_set = dims + mes\n field_set = [field for field in field_set if field not in excludes]\n body = looker_sdk.models.WriteQuery(model=model_name, view=explore_name, fields=field_set)\n query = client.create_query(body)\n del query.client_id\n return query.id, field_set, query.share_url", "def test_get_explore_join_stats(fc: fetcher.Fetcher, test_model):\n explore = 
fc.get_explores(\n model=test_model[\"name\"], explore=\"explore_2_joins_1_used\"\n )[0]\n field_stats = {\n \"explore_2_joins_1_used.d1\": 10,\n \"explore_2_joins_1_used.d2\": 5,\n \"explore_2_joins_1_used.d3\": 0,\n \"explore_2_joins_1_used.m1\": 0,\n \"join1.d1\": 10,\n \"join1.d2\": 10,\n \"join1.d3\": 10,\n \"join1.m1\": 0,\n \"join2.d1\": 0,\n \"join2.d2\": 0,\n \"join2.d3\": 0,\n \"join2.m1\": 0,\n }\n join_stats = fc.get_explore_join_stats(explore=explore, field_stats=field_stats)\n assert isinstance(join_stats, dict)\n assert len(join_stats) == 2\n assert join_stats == {\"join1\": 30, \"join2\": 0}", "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def listAvailableFields():", "def get_query_fields(cls):\n ...", "def _get_fields(self):\n return self._fields", "def test_mineral_model_display_fields_not_all_fields(self):\n gothamite = create_mineral('Gothamite', all_fields=False)\n fields = gothamite.display_fields\n self.assertEqual(len(fields), 2)\n self.assertEqual(fields[0].name, 'category')\n self.assertEqual(fields[0].value, 'category test limited fields')\n self.assertEqual(fields[1].name, 'formula')\n self.assertEqual(fields[1].value, 'formula test limited fields')", "def _all_fields_all_data():\n # Takes all name fields\n all_fields = PhotoTech.objects.all().values()[0].keys()\n # For all fileds takes all fields data \n all_data = [PhotoView._all_data_fields(x) for x in all_fields]\n allowed_search_fields = ['zoom',\n 'matrix_resol',\n 'color',\n 'matrix_size',\n 'country']\n # Return dict {keys: fields}\n return {x: y for x, y in zip(all_fields, all_data)\n if x in allowed_search_fields}", "def get_geodata_field(level):\n\t#\n\textra_geo_fields = {}\n\n\tif level == 'country_level':\n\t\tpass\n\tif level == 'multilevel':\n\t\textra_geo_fields['geo_accuracy'] = translate_cfg_property('float')\n\t\textra_geo_fields['geo_admin_code2'] = translate_cfg_property('keyword')\n\t\textra_geo_fields['geo_admin_code3'] = translate_cfg_property('keyword')\n\t\textra_geo_fields['geo_admin_name1'] = translate_cfg_property('keyword')\n\t\textra_geo_fields['geo_admin_name2'] = translate_cfg_property('keyword')\n\t\textra_geo_fields['geo_admin_name3'] = translate_cfg_property('keyword')\n\t\textra_geo_fields['geo_place_name'] = translate_cfg_property('keyword')\n\t\textra_geo_fields['geo_zip_code'] = translate_cfg_property('keyword')\n\tif level == 'ip':\n\t\textra_geo_fields['geo_place_name'] = translate_cfg_property('keyword')\n\t\textra_geo_fields['geo_region_name'] = translate_cfg_property('keyword')\n\t\textra_geo_fields['geo_zip_code'] = translate_cfg_property('keyword')\n\n\textra_geo_fields['geo_country_code'] = translate_cfg_property('keyword')\n\textra_geo_fields['geo_country_name'] = translate_cfg_property('keyword')\n\textra_geo_fields['geo_location'] = translate_cfg_property('geopoint')\n\textra_geo_fields['geo_representative_point'] = translate_cfg_property('geopoint')\n\n\treturn extra_geo_fields", "def _create_dimension_queries(\n self, explore: Explore, model_name: str\n ) -> List[Query]:\n queries = []\n for dimension in explore.dimensions:\n query = self.client.create_query(model_name, explore.name, [dimension.name])\n query = Query(\n query[\"id\"], 
lookml_ref=dimension, explore_url=query[\"share_url\"]\n )\n queries.append(query)\n return queries", "def get_json_required_fields():\n\n return ['features', 'threshold']", "def fields(self, n):\r\n \r\n return self.searcher.stored_fields(self.top_n[n])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_explore_field_stats() should get the stats of all fields in an explore.
def test_get_explore_field_stats(
    fc: fetcher.Fetcher,
    looker_sdk: methods.Looker40SDK,
    test_model,
    test_used_explore_names,
    test_explores_stats,
):
    explore = fc.get_explores(
        model=test_model["name"], explore=test_used_explore_names[0]
    )[0]
    actual_stats = fc.get_explore_field_stats(explore)
    assert isinstance(actual_stats, dict)

    for e in test_explores_stats:
        if e["name"] == test_used_explore_names[0]:
            expected_stats = e

    assert all(actual_stats[k] == 0 for k in expected_stats["unused_fields"])
    assert all(actual_stats[k] > 0 for k in expected_stats["used_fields"])
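The two assertions above pin down the shape of the result: every field of the explore appears as a key, unused fields map to 0 and used fields to a positive count. A minimal sketch of that contract, assuming the usage counts come from some query-history lookup that is out of scope here:

def explore_field_stats_sketch(all_fields, used_field_counts):
    # Illustrative: start every field at zero, then overwrite with the counts
    # of the fields that actually appear in query history.
    stats = {field: 0 for field in all_fields}
    stats.update(used_field_counts)
    return stats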
[ "def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]", "def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields", "def stats(self, fields, fq='', stats=None):\n json_resp = self.query(fq=fq, params={'stats':'true',\n 'stats.field':fields})\n stats_data = json_resp['stats']['stats_fields']\n df = pd.DataFrame.from_dict(stats_data)\n if stats is not None:\n df = df.reindex(stats)\n df.index.name = 'stats'\n return df", "def _get_field_stats_dates(self, field='@timestamp'):\n self.loggit.debug('Cannot query closed indices. Omitting any closed indices.')\n self.filter_closed()\n self.loggit.debug('Cannot use field_stats with empty indices. Omitting any empty indices.')\n self.filter_empty()\n self.loggit.debug(\n 'Getting index date by querying indices for min & max value of %s field', field)\n self.empty_list_check()\n index_lists = chunk_index_list(self.indices)\n for lst in index_lists:\n for index in lst:\n aggs = {\n 'min' : { 'min' : { 'field' : field } },\n 'max' : { 'max' : { 'field' : field } }\n }\n response = self.client.search(index=index, size=0, aggs=aggs)\n self.loggit.debug('RESPONSE: %s', response)\n if response:\n try:\n res = response['aggregations']\n self.loggit.debug('res: %s', res)\n data = self.index_info[index]['age']\n data['min_value'] = fix_epoch(res['min']['value'])\n data['max_value'] = fix_epoch(res['max']['value'])\n self.loggit.debug('data: %s', data)\n except KeyError as exc:\n raise ActionError(f'Field \"{field}\" not found in index \"{index}\"') from exc", "def fields(self, n):\r\n \r\n return self.searcher.stored_fields(self.top_n[n])", "def _field_extract(url):\n logging.info('extracting player stats from url: {}'.format(url))\n player_summary = requests.get(url)\n parser = BeautifulSoup(player_summary.content, 'html.parser')\n player_profile = parser.select('tr')\n list_of_fields = ['Innings', 'Not Outs', 'Aggregate', 'Highest Score', '50s', '100s', 'Ducks', '4s',\n '6s', 'Scoring Rate', 'Overs', 'Runs Conceded', 'Wickets', 'Average', '4 Wickets in Innings',\n '5 Wickets in Innings', 'Best', 'Economy Rate', 'Strike Rate', 'Catches',\n 'Most Catches in Innings', 'Stumpings', 'Most Catches in Innings',\n 'Most Dismissals in Innings',\n 'Won/Lost', 'Matches/Won/Lost', 'Tosses Won', 'Runs Scored', 'Batting Average']\n mapped_fields = {} # holds series level stats\n stats_header = '' # holds series stats metric header\n for each_field in range(0, len(player_profile)):\n # get stats header\n try:\n 
stats = player_profile[each_field].select_one('.ProfileSection').text.strip()\n if stats in ['Batting', 'Fielding', 'Bowling', 'Wicket Keeping', 'Captaincy']:\n stats_header = stats\n except Exception as e:\n str(e) # just ignore the exception\n # update stats data\n try:\n field = player_profile[each_field].select_one('.FieldName').text.split(':')[0]\n value = player_profile[each_field].select_one('.FieldValue').text.strip()\n if field in list_of_fields:\n mapped_fields['{}_{}'.format(stats_header.lower(), field.replace(' ', '_').lower())] = value\n except AttributeError as ae:\n logging.info('skip: May be html tree doesn\\'t find search - {}'.format(ae))\n logging.info('extract completed for url: {} ..... /200'.format(url))\n return mapped_fields", "def listAvailableFields():", "def fields(self):\n\n if self._fields is None:\n self._fields = self.searcher.stored_fields(self.docnum)\n return self._fields", "def fields(self, n):\n\n return self.searcher.stored_fields(self.top_n[n][1])", "def test_get_explore_join_stats(fc: fetcher.Fetcher, test_model):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=\"explore_2_joins_1_used\"\n )[0]\n field_stats = {\n \"explore_2_joins_1_used.d1\": 10,\n \"explore_2_joins_1_used.d2\": 5,\n \"explore_2_joins_1_used.d3\": 0,\n \"explore_2_joins_1_used.m1\": 0,\n \"join1.d1\": 10,\n \"join1.d2\": 10,\n \"join1.d3\": 10,\n \"join1.m1\": 0,\n \"join2.d1\": 0,\n \"join2.d2\": 0,\n \"join2.d3\": 0,\n \"join2.m1\": 0,\n }\n join_stats = fc.get_explore_join_stats(explore=explore, field_stats=field_stats)\n assert isinstance(join_stats, dict)\n assert len(join_stats) == 2\n assert join_stats == {\"join1\": 30, \"join2\": 0}", "def arcpy_get_field_objects(self):\r\n\t\tif __thou_shalt__.do_a_dry_run:\r\n\t\t\treturn []\r\n\t\treturn thou_shalt(\"Fetch field information from {}\".format(self.shortened_name_with_context()),\r\n\t\t\tlambda:arcpy.ListFields(str(self))\r\n\t\t)", "def _get_fields(self):\n return self._fields", "def demo_fields(self):\n return self._demo_fields", "def get_field_info(self):\n return self.__field_info_func()", "def _field_stat(self, field):\r\n if not field in self.stats:\r\n stat = dq.FieldStatistics(field, distinct_threshold = self.distinct_threshold)\r\n self.stats[field] = stat\r\n else:\r\n stat = self.stats[field]\r\n return stat", "def field_names(self):\n ...", "def _get_all_field_functions(self):\n get_url = 'v1/fieldFunctions'\n self.field_functions = {f['mdmId']: f for f in self.carol.call_api(get_url, params=dict(pageSize=-1))['hits']}\n self.field_functions_id = {f['mdmName']: f['mdmId'] for f in self.field_functions.values()}", "def get_query_fields(cls):\n ...", "def check_fields(client, model_name, explore_name, continue_on_errors=False, excludes=[], ix=None, log=LOGGER):\n query_id, field_set, query_url = create_query_from_fields(client, model_name, explore_name, excludes)\n num_fields = len(field_set)\n result = client.run_query(query_id, 'csv', limit=1)\n # sql = client.run_query(query_id, 'sql') # What is this for?\n if has_error(result):\n if not continue_on_errors:\n cprint(f\"{result}:\\t\",'FAIL', log)\n cprint(f\"{query_url}\\n\", 'UNDERLINE', log)\n return result, num_fields, query_url\n else:\n for field in field_set:\n if '.'.join(field.split('.')[1:]) in result:\n if excludes == []:\n cprint(f\"Errors found, continuing...\", \"WARNING\", log)\n excludes.append(field)\n return check_fields(client, model_name, explore_name, continue_on_errors, excludes, ix, log)\n return check_fields(client, 
model_name, explore_name, False, excludes, ix, log)\n else:\n cprint(f\"Success for {num_fields} fields:\\t\", 'OKGREEN', log, end='')\n cprint(f\"{query_url}\\n\", 'UNDERLINE', log)\n if excludes != []:\n excludes = sorted(list(set(excludes)))\n cprint(f\"Failures:\", \"FAIL\", log)\n for e in excludes:\n cprint(f\"\\t{e}\", \"FAIL\", log)\n return result, num_fields, query_url" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetcher.get_explore_join_stats() should return the stats of all joins in an explore.
def test_get_explore_join_stats(fc: fetcher.Fetcher, test_model):
    explore = fc.get_explores(
        model=test_model["name"], explore="explore_2_joins_1_used"
    )[0]
    field_stats = {
        "explore_2_joins_1_used.d1": 10,
        "explore_2_joins_1_used.d2": 5,
        "explore_2_joins_1_used.d3": 0,
        "explore_2_joins_1_used.m1": 0,
        "join1.d1": 10,
        "join1.d2": 10,
        "join1.d3": 10,
        "join1.m1": 0,
        "join2.d1": 0,
        "join2.d2": 0,
        "join2.d3": 0,
        "join2.m1": 0,
    }
    join_stats = fc.get_explore_join_stats(explore=explore, field_stats=field_stats)
    assert isinstance(join_stats, dict)
    assert len(join_stats) == 2
    assert join_stats == {"join1": 30, "join2": 0}
[ "async def getRecentJoins(self, ctx):\n if not await self.has_perms(ctx.author):\n return\n recent_joins = \"__Recent Member Joins:__\"\n tracked_joins = 0\n for name, join_data in self.recently_joined_members[ctx.guild].items():\n num_joins = len(join_data['members'])\n recent_joins += \"\\n - {} ({})\".format(name, num_joins)\n tracked_joins += num_joins\n\n if tracked_joins:\n return await ctx.send(recent_joins)\n minutes = NEW_MEMBER_JOIN_TIME // 60\n return await ctx.send(\":x: No members have joined in the past {} minutes.\".format(minutes))", "def extract_join_info(node):\n operator_info = node['operatorInfo']\n analyze_info = node['AnalyzeInfo']\n\n if 'Join' in node['id']:\n # Join Node\n join_type = extract_join_type(operator_info)\n conditions = extract_join_conditions(operator_info)\n current_node = JoinPlan(join_type, conditions)\n assert 'children' in node and len(node['children']) == 2\n childrens = node['children']\n current_node.left_node = extract_join_info(childrens[0])\n current_node.right_node = extract_join_info(childrens[1])\n current_node.execute_time = analyze_info[\"time\"]\n current_node.est_rows = node[\"estRows\"]\n else:\n # Table Reader\n # assert 'TableReader' in node['id']\n # extract selection if need\n current_node = extract_table_reader(node)\n current_node.est_rows = node['estRows']\n return current_node", "async def recentjoins(self, ctx):\r\n our_list = []\r\n for member in ctx.guild.members:\r\n join = \"Unknown\"\r\n if member.joined_at:\r\n ts = int(member.joined_at.timestamp())\r\n join = \"<t:{}> (<t:{}:R>)\".format(ts,ts)\r\n name = str(member)\r\n if member.nick:\r\n name = \"{} ({})\".format(member.nick,name)\r\n our_list.append(\r\n {\r\n \"name\":name,\r\n \"value\":join,\r\n \"date\":member.joined_at\r\n }\r\n )\r\n our_list = sorted(our_list, key=lambda x:x[\"date\"].timestamp() if x[\"date\"] != None else -1, reverse=True)\r\n return await PickList.PagePicker(title=\"Most Recent Members to Join {} ({:,} total)\".format(ctx.guild.name,len(ctx.guild.members)),ctx=ctx,list=[{\"name\":\"{}. 
{}\".format(y+1,x[\"name\"]),\"value\":x[\"value\"]} for y,x in enumerate(our_list)]).pick()", "def netapi32_NetGetJoinInformation(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"lpServer\", \"lpNameBuffer\", \"BufferType\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def num_joins(self):\n return self._num_joins", "def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])", "def joins(self):\n return self._joins", "def gather_stats(self):\n stats_collection = rospy.get_param('~stats_collection', 'nav_stats')\n msg_store = MessageStoreProxy(collection=stats_collection)\n to_add=[]\n\n self.sucesses = rospy.get_param('topological_prediction/success_values', ['success','failed'])\n self.fails = rospy.get_param('topological_prediction/fail_values', ['fatal'])\n \n rospy.set_param('topological_prediction/success_values',self.sucesses)\n rospy.set_param('topological_prediction/fail_values',self.fails)\n print(\"++++++++++++++++++++++++++++++++++\")\n print(\"++++++++++++++++++++++++++++++++++\" ) \n print(\"successes:\")\n print(self.sucesses)\n print(\"fails:\")\n print(self.fails)\n print(\"++++++++++++++++++++++++++++++++++\") \n print(\"++++++++++++++++++++++++++++++++++\")\n\n\n for i in self.eids:\n if self.ignore_map_name:\n query = {\"edge_id\":i[\"edge_id\"]}\n else:\n query = {\"topological_map\": self.lnodes.name, \"edge_id\":i[\"edge_id\"]} \n query_meta={} \n \n if len(self.range) == 2: \n if self.range[1] < 1:\n upperlim = rospy.Time.now().secs\n else:\n upperlim = self.range[1]\n query_meta[\"epoch\"] = {\"$gte\": self.range[0], \"$lt\" : upperlim} \n\n #print query\n #print query_meta\n \n available = msg_store.query(NavStatistics._type, query, query_meta)\n # print len(available)\n edge_mod={}\n edge_mod[\"model_id\"]= i[\"model_id\"]#self.lnodes.name+'__'+i[\"edge_id\"]\n edge_mod[\"time_model_id\"]=i[\"time_model_id\"]\n edge_mod[\"dist\"]= i[\"dist\"]#self.lnodes.name+'__'+i[\"edge_id\"]\n edge_mod[\"models\"]=[]\n edge_mod[\"order\"]=-1\n edge_mod[\"t_order\"]=-1\n edge_mod[\"edge_id\"]=i[\"edge_id\"]\n \n for j in available: \n val = {}\n if j[0].status in self.sucesses:\n val[\"st\"] = 1\n val[\"speed\"] = i[\"dist\"]/j[0].operation_time\n if val[\"speed\"]>1:\n val[\"speed\"]=1.0\n val[\"epoch\"] = int(datetime.strptime(j[0].date_started, \"%A, %B %d %Y, at %H:%M:%S hours\").strftime('%s'))\n val[\"optime\"] = j[0].operation_time\n edge_mod[\"models\"].append(val)\n elif j[0].status in self.fails:\n val[\"st\"] = 0\n val[\"speed\"] = 0.0\n val[\"epoch\"] = int(datetime.strptime(j[0].date_started, \"%A, %B %d %Y, at %H:%M:%S hours\").strftime('%s'))\n val[\"optime\"] = j[0].operation_time\n edge_mod[\"models\"].append(val)\n\n edge_mod[\"samples\"]=len(edge_mod[\"models\"])\n if len(available) > 0 :\n to_add.append(edge_mod)\n else :\n self.unknowns.append(edge_mod)\n return to_add", "def _build_join_tables(self):\n # Always need the list of reportable visits\n 
self._build_visit_join_table()\n\n if self.criteria.include_vitals:\n self._build_vitals_join_table()", "def netapi32_NetGetJoinableOUs(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"lpServer\", \"lpDomain\", \"lpAccount\", \"lpPassword\", \"OUCount\", \"OUs\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)", "def _populate_max_join_field_coverage(self, join_fields, grain):\n for join, covered_fields in join_fields.items():\n for field in grain:\n if field in covered_fields:\n continue\n all_covered_fields = join.get_covered_fields()\n if field in all_covered_fields:\n covered_fields.add(field)", "def on_join(data):\n\n room = data['room']\n join_room(room)\n\n if (room == \"community\"):\n # And then we emit the most recent 5 users' utterances per file\n emit_utterances()\n\n # and also emit the top builders when first joining\n emit_top_builders()\n\n # We iterate through all the shared structs and emit them one by one\n emit_structs()", "async def on_member_join(self, member: Member) -> None:\n if member.guild.id != Guild.id:\n return\n\n self.bot.stats.gauge(\"guild.total_members\", len(member.guild.members))", "def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]", "def num_joins(self, num_joins):\n self._num_joins = num_joins", "def test_multi_join(self):\n self.con.simulate_recv('JOIN #mtest1,#mtest2')\n channel_joins = [\n {'user': 'nick', 'channel': '#mtest1', 'key': None },\n {'user': 'nick', 'channel': '#mtest2', 'key': None },\n ]\n self.assert_all_in(channel_joins, self.server.channel_joins)", "def _join_summaries(self, *args):\n return dict(ChainMap(*args))", "async def on_guild_join(self, guild):\n l.info(f\"Joined {guild.name} with {guild.member_count} users!\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Normalize an audio file to the range [-1, 1]
def normalize(audio):
    norm = audio/max(audio)
    return norm
[ "def NormalizeAudio(data: np.ndarray) -> np.ndarray:\n return data / np.max(np.abs(data))", "def normalize_audio(file_name: str) -> None:\n os.system(f'ffmpeg-normalize \"{file_name}\" -of output -c:a libmp3lame -ext mp3 -q')\n os.remove(file_name)", "def normalize_audio(audio_path: str, output_path: str, name: str):\n sound = AudioSegment.from_file(audio_path + os.sep + name + '.wav',\n \"wav\")\n change_in_d_bfs = (-20.0) - sound.dBFS\n sound = sound.apply_gain(change_in_d_bfs)\n sound.export(output_path + os.sep + name + '.wav', format=\"wav\")", "def normalize(volume):\n\n MIN_BOUND = 0\n MAX_BOUND = 256.0\n volume = (volume - MIN_BOUND) /(MAX_BOUND - MIN_BOUND)\n volume[volume > 1] = 1 #Clip everything larger than 1 and 0\n volume[volume < 0] = 0\n volume = (volume*255).astype('uint8')\n\n return volume", "def normalize_audio(song_filepath):\n subprocess.run([\"ffmpeg-normalize\", \"-u\", \"-p \" + Util.NORMALIZED_SONG_PREFIX, \"-l \" + str(Util.NORMALIZED_RMS_DECIBELS), song_filepath])\n os.remove(song_filepath)", "def normalize_audio(self, normalize_audio):\n\n self._normalize_audio = normalize_audio", "def normalisation(wav, flux):\n \n return flux / flux.max() # flux maximal = 1\n\n # flux_norm = flux[wav>wav_norm][0]\n # return flux / flux_norm", "def normalize(volume):\n max = np.amax(volume)\n if max == 0:#Fixes dividing by 0 error if nothing in the volume\n return volume.astype(np.uint8)\n\n normalized = volume * (255.0 / max)\n normalized = np.round(normalized).astype(np.uint8)\n return normalized", "def normalize(image, min_bound=-1000.0, max_bound=400.0):\n image = (image - min_bound) / (max_bound - min_bound)\n image[image > 1] = 1.\n image[image < 0] = 0.\n return image", "def normalize_frames(frames):\n new_frames = frames.astype(np.float32)\n new_frames /= (255 / 2)\n new_frames -= 1\n\n return new_frames", "def normalise(image):", "def normalize(arr):\n\tarr = arr.astype('float32')\n\tif arr.max() > 1.0:\n\t\tarr /= 255.0\n\treturn arr", "def pre_processing(x):\n \n import librosa\n\n # make a copy\n x = x.copy()\n\n # dither\n x = add_dither(x)\n\n # normalize input signal with infinity norm\n x = librosa.util.normalize(x)\n\n return x", "def normalize_volumes_mixmode(directory, amplitude=0.08, ext='.wav'):\n subdirectories = [x[0] for x in os.walk(directory)]\n for subdirectory in subdirectories:\n os.system(f\"normalize-audio -w 16 -a {amplitude} -b '{subdirectory}/'*{ext}\")", "def normalize(img):\n img = img.astype(np.float32)\n img -= img.min()\n img /= img.max()\n img *= 255\n img = img.astype(np.uint8)\n\n return img", "def itensity_normalize_one_volume(volume):\n \n pixels = volume[volume > 0]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n out_random = np.zeros(volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out", "def normalize(self, ref=1):\n maximum = max(abs(self.intensities))\n return Spectrum(self.wavelengths, ref * self.intensities/maximum)", "def normalize(img):\n img_min = img.min()\n img_max = img.max()\n return (img - img_min) / (img_max - img_min)", "def normalize(waveform, top):\n\n hi = waveform.max()\n lo = waveform.min()\n return (waveform - lo) * top / (hi - lo)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load an audio file and segment it into 10s increments. Save each segment to the target directory. Append the gender of the speaker and the segment index to the filename.
def segment_audio(filename, y_value, split='train', clf='gender'):
    filepath = 'recordings/recordings/' + filename + '.mp3'
    audio, sr = librosa.load(filepath, sr=16000)
    audio = normalize(audio)
    # Add gender label to filename for later processing
    sex = y_value
    if sex == 'female':
        filename = '{}.F'.format(filename)
    else:
        filename = '{}.M'.format(filename)
    # Segment audio file
    seg_files = segment_10s(audio, sr)
    for key, val in seg_files.items():
        new_name = '{}.{}'.format(filename, key)
        sf.write('data/{}/{}/{}o.wav'.format(clf, split, new_name), val, sr)
[ "def file_generator(files: list,\n segment_duration: float,\n sampleRate: int,\n db_thr: float or None = None,\n frame_length: int = 512,\n hop_length: int = 128,\n ) -> None:\n\n I = 0\n J = 0\n\n segment = np.zeros((int(segment_duration*sampleRate),))\n\n k = 0\n file_no = 0\n\n while True:\n if I >= len(segment):\n yield segment\n segment = np.zeros((int(segment_duration*sampleRate),))\n I = 0\n\n if k == 0 or J >= len(y):\n J = 0\n y, sr = librosa.core.load(files[file_no], mono=True, sr=sampleRate)\n file_no += 1\n\n if file_no == len(files):\n break\n\n # Normalize\n y = y/y.max()\n\n # Remix non-silent segments\n if db_thr is not None:\n # Figure out intervals of non-silence (NOTE: Is the threshold right? -- 60db quiet)\n intervals = librosa.effects.split(y, frame_length=frame_length, hop_length=hop_length, top_db=db_thr)\n\n # Remix according to those intervals\n y = librosa.effects.remix(y, intervals)\n\n if len(segment[I:]) >= len(y[J:]):\n segment[I:I+len(y[J:])] = y[J:]\n I = I + len(y[J:])\n J = J + len(y[J:])\n else:\n segment[I:] = y[J:J+len(segment[I:])]\n J = J + len(segment[I:])\n I = I + len(segment[I:])\n k += 1", "def store_audio_clips(self, audio_filename, out_folder):\n os.makedirs(out_folder, exist_ok=True)\n audio = AudioSegment.from_file(audio_filename)\n for idx, interval in enumerate(self):\n out_path = os.path.join(out_folder, '%s.wav' % idx)\n audio[interval[0]: interval[1]].export(out_path)", "def make_segments(transcript, ep_id, n_segs, file_name):\n if not os.path.exists('../timesteps_{}.csv'.format(file_name)):\n create_timesteps_csv(n_segs, file_name)\n\n times, time_idxs = [0.], [0]\n ep_audio = AudioSegment.from_file('{}.mp3'.format(ep_id),\n format='mp3', channels=1, sample_width=2)\n csv_line = [ep_id]\n fs = float(ep_audio.frame_rate)\n ep_length = (ep_audio.frame_count() / fs) / 60. # in minutes\n timesteps = [np.round(float(i), 3) for i in transcript[:, 0]]\n\n # if we aren't segmenting the file\n if n_segs <= 1:\n start = min(timesteps)\n end = max(timesteps)\n seg = '{0:.3f} '.format(start) + ':: {0:.3f}'.format(end)\n csv_line.append(seg)\n append_timestamps_csv(csv_line, file_name)\n return [0, len(timesteps)-1]\n\n # add 0.5s silence to the start + end of an audio segment\n silence = AudioSegment.silent(duration=500)\n division = ep_length / float(n_segs)\n\n # calc the most appropriate timesteps to segment at given n_segs, and\n # add them to timesteps.csv\n for ii in np.arange(1, n_segs + 1):\n seg_ts = min(timesteps, key=lambda x:abs(x - (ii * division)))\n times.append(seg_ts)\n\n idx = np.argwhere(timesteps == np.round(times[-1], 3)).ravel()[0]\n time_idxs.append(idx)\n\n start_in_ms = times[ii - 1] * 60. * 1000.\n end_in_ms = times[ii] * 60. 
* 1000.\n\n aud_slice = silence + ep_audio[start_in_ms:end_in_ms] + silence\n fn = '../seg_audio/{}_seg{}.wav'.format(ep_id, ii)\n aud_slice.export(fn, 'wav')\n\n seg = '{0:.3f} '.format(times[-2]) + ':: {0:.3f}'.format(times[-1])\n csv_line.append(seg)\n\n append_timestamps_csv(csv_line, file_name)\n time_idxs.append(len(timesteps) - 1)\n return time_idxs", "def split_audio(path_to_file, output_dir, chunk_size_in_bytes=10 * 1024 * 1024):\n try:\n os.makedirs(output_dir, exist_ok=True)\n title = Path(path_to_file).stem\n audio = AudioSegment.from_file(path_to_file, \"mp3\")\n except Exception as e:\n logger.error(f\"An error occurred while reading the audio file: {e}\")\n return\n\n # Estimate the average bitrate of the audio file\n file_size_in_bytes = os.path.getsize(path_to_file)\n duration_in_milliseconds = len(audio)\n estimated_bitrate = 8 * file_size_in_bytes / duration_in_milliseconds\n\n # Calculate the approximate duration of each chunk\n chunk_duration_in_milliseconds = 8 * chunk_size_in_bytes / estimated_bitrate\n\n num_chunks = math.ceil(duration_in_milliseconds / chunk_duration_in_milliseconds)\n logger.info(f\"Splitting {title} into {num_chunks} segments.\")\n\n for i in tqdm(range(num_chunks)):\n start = int(i * chunk_duration_in_milliseconds)\n end = int((i + 1) * chunk_duration_in_milliseconds)\n segment = audio[start:end]\n segment.export(\n Path(output_dir) / title / f\"{title}_prepared_{i}.mp3\", format=\"mp3\"\n )\n\n # return the path to the first chunk\n return Path(output_dir) / title / f\"{title}_prepared_0.mp3\"", "def chunk_audio(self, file_name):\n # TODO: clean this up a bit -- offsets and whatnot\n sound = AudioSegment.from_wav(file_name) # need this JUST FOR THE duration_seconds UGGGG\n video_file = ffmpeg.input(file_name, format='wav')\n chunk_count = int(sound.duration_seconds+1)//(self.CHUNK_SIZE//1000)\n for i in range(chunk_count):\n chunk_name = self.file_chunk_name(file_name, i)\n video_file.output(chunk_name, acodec='pcm_s16le', ar=16000, ac=1, ss=self.convert_to_time(i), to=self.convert_to_time(i+1)).run()\n with open(chunk_name, 'rb') as f:\n self.upload_to_s3(self.AUDIO_BUCKET_NAME, chunk_name, content=f.read())\n self.upload_to_s3(self.AUDIO_BUCKET_NAME, self.file_manifest(file_name), content=str(chunk_count))", "def save_sample(sample, sr, target_dir, file_name, index):\n # Get the file name\n file_name = file_name.split('.wav')[0]\n # Format the destination path\n destination_path = os.path.join(\n target_dir, f'{file_name}_{index}.wav')\n # Check if the file doesen't exist already\n if os.path.exists(destination_path):\n return\n # Write the file at the destination path\n wavfile.write(destination_path, sr, sample)", "def __save(self,audio):\n self.__openSampleFile()\n self.__sampleFile.writeframes(audio)", "def dl_audio_and_segment(transcript, ep_id, as_client, n_segs, file_name):\n episode = as_client.get_episode(ep_id)\n audio_url = episode[\"digital_location\"]\n\n os.chdir('./alignment_data/full_audio/')\n if os.path.lexists('./{}.mp3'.format(ep_id)):\n print(\"Episode file {}.mp3 already exists! Skipping\".format(ep_id))\n os.chdir('../../')\n return\n\n # first try downloading the audio from soundcloud\n # (suppress stderr to avoid cluttering the console if link is rotted)\n with open(os.devnull, 'w') as devnull:\n try:\n res = subprocess.call([\"soundscrape\", audio_url], stderr=devnull)\n except:\n print('Error decoding {}.mp3. 
Skipping'.format(ep_id))\n os.chdir('../../')\n return\n\n if res == 0:\n rename_file(audio_url, ep_id)\n\n # if soundcloud fails, try looking for the audio link elsewhere\n else:\n try:\n links = episode['audio_files'][0]['url']\n audio_url = [ii for ii in links if '.mp3' in ii]\n resp = requests.get(audio_url[0])\n\n with open('{}.mp3'.format(ep_id), 'w') as ff:\n ff.write(resp.content)\n ff.close()\n\n except:\n # if we're still unable to download the audio, open the debugger\n import ipdb; ipdb.set_trace()\n return\n\n # identify segment audio & transcript slice points\n seg_row_ids = make_segments(transcript, ep_id, n_segs, file_name)\n\n # write the segmented transcript to individual json files\n write_transcript_segments(transcript, seg_row_ids, ep_id)\n\n os.chdir('../../')", "def segment(sound_file, spec_file, ms_step, pix_per_s, sound_output_dir, spec_output_dir):\n pix_per_ms = pix_per_s/1000\n sound = AudioSegment.from_wav(sound_file)\n start, stop = 0, ms_step\n start_pixel, stop_pixel = start*pix_per_ms, stop*pix_per_ms\n spec = Image.open(spec_file)\n chopping = True\n while stop <= len(sound):\n \n # Split sound\n chunk = sound[start:stop]\n chunk.export(sound_output_dir + sound_file.split(\"/\")[-1].split(\".\")[0] + \"_\" + str(start) + \"-\" + str(stop) + \".wav\", format=\"wav\")\n\n # Split spectrogram\n w, h = spec.size\n cropped_spec = spec.crop((start_pixel, 0, stop_pixel, h))\n cropped_spec.save(spec_output_dir + sound_file.split(\"/\")[-1].split(\".\")[0] + \"_\" + str(start) + \"-\" + str(stop) + \".png\")\n\n start += ms_step\n stop += ms_step\n start_pixel, stop_pixel = start*pix_per_ms, stop*pix_per_ms", "def set_fname_encoder(self):\n\n fp = open(self.meta_path, 'r')\n wav_names = []\n next(fp)\n for i, line in tqdm(enumerate(fp)):\n audio_name, _, _, _ = line.split()\n wav_name = os.path.basename(audio_name)\n wav_names.append(wav_name)\n self.fname_encoder.fit(wav_names)", "def diarization(self):\n self._status = 1\n if self._single:\n try:\n os.mkdir(self.get_file_basename())\n except OSError, err:\n if err.errno != 17:\n raise err\n fm._silence_segmentation(self._basename)\n fm._gender_detection(self._basename)\n segname = self._basename + '.seg'\n f_seg = open(segname, 'r')\n headers = []\n values = []\n differ = False\n basic = None\n gen = {'M': 0, 'F': 0, 'U': 0}\n for line in f_seg.readlines():\n if line.startswith(';;'):\n headers.append(line[line.index('['):])\n else:\n a_line = line.split(' ')\n if basic == None:\n basic = a_line[4]\n if a_line[4] != basic:\n differ = True\n gen[a_line[4]] += int(a_line[3])\n values.append(a_line)\n header = \";; cluster:S0 %s\" % headers[0]\n from operator import itemgetter\n index = 0\n while index < len(values):\n values[index][2] = int(values[index][2])\n index += 1\n values = sorted(values, key=itemgetter(2))\n index = 0\n while index < len(values):\n values[index][2] = str(values[index][2])\n index += 1\n newfile = open(segname + '.tmp', 'w')\n newfile.write(header)\n if differ: #in case the gender of the single segments differ \n# then set the prevailing\n# print 'transgender :-D'\n if gen[ 'M' ] > gen[ 'F' ]:\n basic = 'M'\n elif gen[ 'M' ] < gen[ 'F' ] :\n basic = 'F'\n else:\n basic = 'U'\n\n for line in values:\n line[4] = basic #same gender for all segs\n newfile.write(' '.join(line[:-1]) + ' S0\\n')\n f_seg.close()\n newfile.close()\n shutil.copy(self.get_file_basename() + '.wav',\n os.path.join(self.get_file_basename(), 'S0' + '.wav'))\n shutil.move(segname + '.tmp', segname)\n 
shutil.copy(self.get_file_basename() + '.seg',\n os.path.join(self.get_file_basename(), 'S0' + '.seg'))\n utils.ensure_file_exists(segname)\n else:\n# print str(self._diar_conf[0])\n# print str(self._diar_conf[1])\n fm.diarization(self._basename, str(self._diar_conf[0]),\n str(self._diar_conf[1]))\n self._status = 2", "def save_all_chunks_with_labels(audio_dir, json_dir, csv_dir):\n for file in os.listdir(json_dir):\n file_path = os.path.join(json_dir, file)\n audio_file_path = os.path.join(audio_dir, file)[:-4] + \"wav\"\n with open(file_path) as f:\n data = json.load(f)\n save_arrays_with_labels(audio_file_path, data, csv_dir)", "def split_multiple_recordings_file(file_path, min_silence_duration=0.25, noise_threshold=150):\n print(file_path)\n rate, audio = scipy.io.wavfile.read(file_path)\n split_recordings = split_multiple_recordings(audio, min_silence_duration=min_silence_duration,\n noise_threshold=noise_threshold, sample_rate_hz=rate)\n\n if file_path.count('.') != 1:\n raise Exception('File_path must contain exactly one period, usually in extension. IE: /home/test.wav')\n\n for idx, recording in enumerate(split_recordings):\n print(\"spliting \" + file_path)\n new_file_path = file_path.split('.')[0] + '_' + str(idx) + \".wav\"\n scipy.io.wavfile.write(new_file_path, rate, recording)", "def segment_mp3(filename, segment_length=300, output_frequency=44100):\n # type: (str, int, int) -> Iterator[Tuple[str, int]]\n offset = 0\n step_size = int(segment_length) #in seconds\n outfile = \"temp.wav\"\n info = mutagen.mp3.MP3(filename).info\n if output_frequency is not None and info.sample_rate != output_frequency:\n resample = True\n else:\n resample = False\n\n while offset < info.length:\n next_offset = offset + step_size\n end = min(int(info.length), next_offset)\n length = end - offset\n try:\n if resample:\n subprocess.check_output([\"ffmpeg\", \"-loglevel\", \"0\", \"-channel_layout\", \"stereo\",\n \"-i\", filename, \"-ac\", \"1\",\n \"-ar\", str(output_frequency),\n \"-ss\", str(offset), \"-t\", str(length), outfile])\n else:\n subprocess.check_output([\"ffmpeg\", \"-loglevel\", \"0\", \"-channel_layout\", \"stereo\",\n \"-i\", filename, \"-ss\", str(offset), \"-t\", str(length), outfile])\n \n yield outfile, offset\n finally:\n os.remove(outfile)\n offset = next_offset", "def slice_recording(path_recording, path_metadata_filepath_duration):\n\n metadata_filepath_duration = open(path_metadata_filepath_duration, 'r')\n\n start = 0.0\n\n for line in metadata_filepath_duration:\n filepath, duration = line.split(\" | \")\n target_filepath = re.sub('/Mixtures/', '/mic_recordings/Mixtures/', filepath)\n target_parentpath = re.sub('/mixture.wav', '', target_filepath)\n\n # creating folder if the folder doesnot exist\n try:\n os.makedirs(target_parentpath)\n except OSERROR as exception:\n if exception.errno == errno.EEXIST and os.path.isdir(target_parentpath):\n pass\n\n delta_t = float(duration)\n\n # calling ffmpeg to slice the wav file into its respective sizes\n subprocess.call([\"ffmpeg\", \"-i\", path_recording, \"-ss\", str(start), \"-t\", str(delta_t), \"-acodec\", \"copy\", target_filepath])\n\n # resetting the start for next file in line\n start += delta_t\n\n metadata_filepath_duration.close()", "def load_librosa (self):\r\n for index, data in tqdm(enumerate (self.file_list)):\r\n sound, sampling_rate = librosa.load(data, sr = self.target_sampling)\r\n librosa.output.write_wav(self.save_file_path + \"/\" + \"{:07d}\".format(index+1) + \".wav\", sound, self.target_sampling)", "def 
get_large_audio_transcription(path):\n # open the audio file using pydub\n r = sr.Recognizer()\n sound = AudioSegment.from_mp3(path)\n sound.export(\"tmp.wav\", format=\"wav\")\n sound = AudioSegment.from_wav('tmp.wav')\n # split audio sound where silence is 700 miliseconds or more and get chunks\n chunks = split_on_silence(sound,\n # experiment with this value for your target audio file\n min_silence_len = 500,\n # adjust this per requirement\n silence_thresh = sound.dBFS-14,\n # keep the silence for 1 second, adjustable as well\n keep_silence=500,\n )\n folder_name = \"audio-chunks\"\n # create a directory to store the audio chunks\n if not os.path.isdir(folder_name):\n os.mkdir(folder_name)\n whole_text = \"\"\n\n chapter=(str(path.split('/')[-1])).split('_')[3]\n # if chapter == '01':\n # target=2\n # else:\n # target=1\n target=2\n # process each chunk\n for i, audio_chunk in enumerate(chunks, start=1):\n # export audio chunk and save it in\n # the `folder_name` directory.\n if i==1:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened,language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. \"\n #print(chunk_filename, \":\", text)\n whole_text += text\n # return the text for all chunks detected\n else:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened, language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. \"\n # print(chunk_filename, \":\", text)\n if chapter == '01':\n whole_text += ' ' +text\n if str(text).isalnum():\n if str(text).split(' ')[0]==' ':\n whole_text += text\n else: whole_text += ' '+text\n # return the text for all chunks detected\n\n if i==target:\n break\n if os.path.isfile('tmp.wav') :os.remove('tmp.wav')\n subprocess.run([\"rm\", \"-rf\", folder_name])\n return whole_text", "def save_separated_audio(self, audios, filename):\n\n # Create folder with mixture name\n folder_path = os.path.join(self.config[\"separated_audio_folder\"], os.path.splitext(filename)[0])\n os.makedirs(folder_path)\n # Save each separated source\n for class_idx, audio in enumerate(audios):\n librosa.output.write_wav(os.path.join(folder_path, self.data_set.classes[class_idx]) + '.wav',\n audio.T,\n sr=self.data_set.config[\"sampling_rate\"])\n # Also copy the mixture in the folder\n copyfile(self.data_set.audio_full_filename(filename), os.path.join(folder_path, \"original_mix.wav\"))", "def slice_dataset(data_dir, genres):\n for index, genre in enumerate(genres):\n current_genre_path = os.path.join(data_dir, genre)\n\n for root, dirs, files in os.walk(current_genre_path):\n for file in files:\n song_genre = file.split('.')[0]\n audio_file = os.path.join(current_genre_path, file)\n audio_slices = slice_audio(audio_file)\n audio_path = os.path.join(current_genre_path, song_genre)\n\n for audio in audio_slices:\n audio.export(f\"{audio_path}.{index}.wav\", format=\"wav\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load an audio file (or segment). Add random noise to the file and save it with a new filename.
def noisy_data(filename, split='train', clf='gender'):
    filepath = 'data/{}/{}/{}o.wav'.format(clf, split, filename)
    audio, sr = librosa.load(filepath, sr=16000)
    # Add noise
    noisy = add_noise(audio)
    # Write noise to file
    sf.write('data/{}/{}/{}n.wav'.format(clf, split, filename), noisy, sr)
    #print("Noise added to {}".format(filename))
[ "def create_random_wav(file_name):\n sample_rate = 44100.0\n sound_length = 50\n duration = 3000 #MS\n sounds_arr = create_sounds_arr(sample_rate, duration, sound_length)\n wav_file = create_wav_file(file_name)\n save_wav(sounds_arr, wav_file, sample_rate, sound_length)", "def add_silence(self):\n\n self._generate_meta_data(audio_segment=True)\n # Adds three seconds to the end of the audio segment\n self.audio_segment += AudioSegment.silent(duration=3000)\n self.audio_segment.export(self.path, format=self.extension)\n self._generate_meta_data()", "def repair_audio_file(self):\n\n logger.debug(f'Repairing audio file: {self.audio}')\n clip = AudioFileClip(self.audio)\n filename = f'{uuid.uuid4().hex[:10]}.{Constants.DESIRED_AUDIO_FILE_EXTENSION}'\n path = os.path.join(self.tmpdir, filename)\n logger.debug(f'Repairing audio file. New path: {path}')\n clip.write_audiofile(path)\n try:\n os.remove(self.audio)\n logger.debug(f'Removed file {self.audio}')\n except:\n logger.warning(f'Unable to remove file {self.audio}.')\n self.audio = path", "def __save(self,audio):\n self.__openSampleFile()\n self.__sampleFile.writeframes(audio)", "def trim_silence_file(file_path, noise_threshold=150):\n rate, audio = scipy.io.wavfile.read(file_path)\n trimmed_audio = trim_silence(audio, noise_threshold=noise_threshold)\n print()\n scipy.io.wavfile.write(file_path, rate, trimmed_audio)", "def _generate_audio_file(self):\n\n polly = boto3.client('polly', \n aws_access_key_id=self.aws_access_key_id, \n aws_secret_access_key=self.aws_secret_access_key,\n region_name= self.region_name)\n \n spoken_text = polly.synthesize_speech(Text=self.words,\n OutputFormat='mp3',\n VoiceId=self.voice)\n #TextType='ssml')\n\n FileManager.write_in_file(self.file_path, spoken_text['AudioStream'].read())", "def noise_augmentation(wave, noise_files):\n nb_noise_segments = 3\n aug_noise_files = []\n for i in range(nb_noise_segments):\n aug_noise_files.append(random.choice(noise_files))\n\n # aug_noise_files = np.random.choice(noise_files, 3, replace=False)\n dampening_factor = 0.4\n for aug_noise_path in aug_noise_files:\n (fs, aug_noise) = utils.read_wave_file(aug_noise_path)\n wave = wave + aug_noise*dampening_factor\n return wave", "def audio_file_save(folder_path, current_time, data, name_by_date):\r\n\r\n name_by_time = current_time + '.wav' #timestamp for the audio file name\r\n usage = disk_usage(folder_path)\r\n if usage.used / usage.total < args.storage_threshold:\r\n file_path = os.path.join(folder_path, name_by_time)\r\n\r\n if args.resampling:\r\n sampling_rate = args.resampling_rate\r\n audio = audio_resampling(data)\r\n else:\r\n sampling_rate = args.recording_samplerate\r\n audio = data\r\n\r\n sf.write(file_path , audio, sampling_rate)\r\n\r\n else:\r\n name = os.path.join(folder_path, name_by_date + '.txt')\r\n f = open(name, 'a')\r\n f.write(current_time + '\\t Activity Detected \\n')\r\n f.close()", "def save_audio(audio, filename):\n\n\twith open(filename, \"wb\") as f:\n\t\tf.write(audio.get_wav_data())\n\tf.close()\n\treturn", "def save_separated_audio(self, audios, filename):\n\n # Create folder with mixture name\n folder_path = os.path.join(self.config[\"separated_audio_folder\"], os.path.splitext(filename)[0])\n os.makedirs(folder_path)\n # Save each separated source\n for class_idx, audio in enumerate(audios):\n librosa.output.write_wav(os.path.join(folder_path, self.data_set.classes[class_idx]) + '.wav',\n audio.T,\n sr=self.data_set.config[\"sampling_rate\"])\n # Also copy the mixture in the folder\n 
copyfile(self.data_set.audio_full_filename(filename), os.path.join(folder_path, \"original_mix.wav\"))", "def nextfile(self):\n file_index = random.randrange(0, len(self._files))\n\n self._filename=os.path.join(self.noise_dir, self._files[file_index])\n\n log.debug(\"Next file for reading noise: %s\" % self._filename)\n\n return self._filename", "def save_audio_file(self):\n\n # has not recorded audio\n if not self.is_audio_record:\n print(\"***you did not set the record flag!\")\n return\n\n import soundfile\n\n # save audio\n soundfile.write('{}out_audio.wav'.format(self.mic_params['plot_path']), self.collector.x_all, self.feature_params['fs'], subtype=None, endian=None, format=None, closefd=True)", "def save_sample(sample, sr, target_dir, file_name, index):\n # Get the file name\n file_name = file_name.split('.wav')[0]\n # Format the destination path\n destination_path = os.path.join(\n target_dir, f'{file_name}_{index}.wav')\n # Check if the file doesen't exist already\n if os.path.exists(destination_path):\n return\n # Write the file at the destination path\n wavfile.write(destination_path, sr, sample)", "def save(self):\n\n if self.__filename == \"\":\n raise ValueError(\"No filename set for this sound.\")\n\n scipy.io.wavfile.write(self.__filename, self.__sample_rate, self.__samples)", "def _generate_audio_file(self):\n\n with open(join(dirname(__file__), self.file_path),\n 'wb') as audio_file:\n audio_file.write(\n text_to_speech.synthesize(\n self.words, \n accept='audio/wav',\n voice=\"fr-FR_ReneeVoice\"\n )\n )", "def random_sample(input_name):\n\t#Count number of lines in original file\n\twith open(input_name) as f:\n\t\told_size = len(f.readlines())\n\t#Determine number of lines for new file\n\tnew_size=int(round(sum(1 for row in open(input_name))* args.rnd_sample))\n\t#Create name for sub-sampled file\n\tSampledFileName, SampledExten = os.path.splitext(input_name)\n\tSampledName = '%s_smpld%s' % (SampledFileName,SampledExten)\n\t#Randomly select the desired number of lines and print to new file\n\twith open(SampledName,\"wb\") as sink:\n\t\tfor i in random.sample(range(0, old_size), new_size):\n\t\t\tsink.write(linecache.getline(input_name, i))\n\tlinecache.clearcache()", "def silence_generator(path):\n nsamples = 270\n bn_path = path + '/_background_noise_'\n noise_list = [f for f in listdir(bn_path) if isfile(join(bn_path, f))]\n noise_list.remove('README.md')\n \n for i in range(nsamples):\n if i < 27:\n new_sample = np.zeros(16000, dtype='int16')\n else:\n selected = noise_list[randint(0, len(noise_list)-1)]\n _, sample = read(bn_path+'/'+selected)\n start_index = randint(0, len(sample)-16000)\n new_sample = sample[start_index:start_index+16000] * np.random.uniform(0, 1)\n new_sample = np.rint(new_sample).astype('int16')\n write(path + '/silence/silent'+str(i)+'valtest.wav', 16000, new_sample)", "def save_as(snd, filename):\n\n snd.save_as(filename)", "def load_waves(self,filename):\r\n with open(filename, 'rb') as f:\r\n self.__waves=f['waves']\r\n self.__waves_ID=f['IDs']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Release a lock on the bus
def bus_release(self):
    self._bus_lock.release()
[ "def release_lock(self):\n self._multistore._unlock()", "def unlock(lock):\n lock.release()", "def release_lock(cls, obj):\n try:\n obj.__lock.release()\n except AttributeError:\n pass\n except ValueError as ex: # lock release failed (maybe not acquired).\n LOG.warning(ex)", "def release_lock(self, name):\n self._run_with_lib(\"release_lock\", name, self._my_id)", "def release(self, o):\n if not self.available(o):\n raise ValueError('you do not own this lock')\n self._owner = None", "def release_api_lock(self, lock_id):\n self.perform_update(ShipyardDbAccess.UPDATE_LOCK_RELEASE,\n id=lock_id)", "def release(self, bay_uuid):\n # Only the conductor that owns the lock will be releasing it.\n result = objects.BayLock.release(bay_uuid, self.conductor_id)\n if result is True:\n LOG.warn(_LW(\"Lock was already released on bay %s!\"), bay_uuid)\n else:\n LOG.debug(\"Conductor %(conductor)s released lock on bay \"\n \"%(bay)s\" % {'conductor': self.conductor_id,\n 'bay': bay_uuid})", "def release(self):\n assert self._lockingthread == threading.current_thread()\n self._lockingthread = None\n self._lock.release()", "def release_mutex(self, *args, **kwargs) -> None:\n pass", "def release_lock(self, **kwargs) -> Decimal:\n raise NotImplementedError", "def ReleaseLock(self, lock_data=None):\n if self._acquired_lock is not None:\n if lock_data is not None:\n lock_data = LockData(lock_data)\n\n self._acquired_lock.expire_time = datetime.datetime.min # Force expire.\n self._acquired_lock.lock_data = lock_data\n self._acquired_lock.put()\n else:\n self._acquired_lock.delete()", "def release_lock(self, correlation_id: Optional[str], key: str):", "def ReleaseLock(cls, client, viewpoint_id, lock, callback):\r\n yield gen.Task(lock.Release, client)\r\n ViewpointLockTracker.RemoveViewpointId(viewpoint_id)\r\n callback()", "def unlock(self):\n\n\t\t# Release the file lock first\n\t\tfcntl.lockf(self.lockfile, fcntl.LOCK_UN)\n\t\t# Release the thread lock\n\t\tself.s.release()", "def release(self):\n\n if self.state != 1:\n raise RuntimeError(\"Lock not acquired\")\n if self.waiting.peek():\n # Task(s) waiting on lock, schedule next Task\n self.state = self.waiting.pop_head()\n core._task_queue.push_head(self.state)\n else:\n # No Task waiting so unlock\n self.state = 0", "def write_release(self):\n self.is_locked = False\n self.rwlock = RWLock().write_release()", "def release_node(self, node):\n # use the lua script to release the lock in a safe way\n try:\n node._release_script(keys=[self.resource], args=[self.lock_key])\n except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError):\n pass", "def _release_imp(self):\n\n self._logger.debug(\n 'Release Lock', lock_name=self._lock_name, caler=self._holder)\n\n try:\n self._dynamodb_wrapper.put_item(\n self._table_name,\n {\n mutex_consts.MutexDynamoConfig.lock.value: self._lock_name,\n mutex_consts.MutexDynamoConfig.holder.value: mutex_consts.NO_HOLDER_DATA,\n mutex_consts.MutexDynamoConfig.ttl.value: 0,\n },\n dynamodb_condition.Condition.is_equal(mutex_consts.MutexDynamoConfig.holder.value, mutex_consts.NO_HOLDER_DATA) |\n dynamodb_condition.Condition.is_equal(mutex_consts.MutexDynamoConfig.holder.value, self._holder) |\n dynamodb_condition.Condition.not_exists(mutex_consts.MutexDynamoConfig.lock.value))\n\n except (dynamodb_exceptions.PutItemConditionException, dynamodb_exceptions.PutItemException):\n self._logger.log_and_raise(\n mutex_exceptions.MutexReleaseFailedException, self._lock_name, self._holder, str(self._ttl))", "def 
release_lock(self, peer_id: int, semaphore: str):\r\n semaphore = parse.quote(semaphore, safe='')\r\n\r\n def send_request():\r\n response = requests.delete(f\"{self.base_url}/peers/{peer_id}/{semaphore}\")\r\n return response\r\n\r\n self._try_request(send_request)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator to be used in API methods to serve the Swagger documentation for this API.
def api_documentation(api: str, summary: str, in_model: BaseModel,
                      out_model: BaseModel, out_description: str) -> Callable:
    for model, name in ((in_model, 'Input'), (out_model, 'Output')):
        doc.Object(
            make_dataclass(
                f'Api{api[1:].title()}{name}',
                [(key, val.type_, val.type_)
                 for key, val in model.__dict__['__fields__'].items()]))
    im_returns = doc.JsonBody({
        key: val.type_
        for key, val in in_model.__dict__['__fields__'].items()
    })

    om_returns = {
        key: val.type_
        for key, val in out_model.__dict__['__fields__'].items()
    }

    def decorator(func):
        @doc.summary(summary)
        @doc.response(412,
                      'Error: Precondition Failed',
                      description='The passed request-parameters are invalid')
        @doc.response(500,
                      'Error: Server-Error occured',
                      description='An internal error occured')
        @doc.consumes(im_returns,
                      content_type='application/json',
                      location='body')
        @doc.produces(om_returns,
                      content_type='application/json',
                      description=out_description)
        @wraps(func)
        async def function_wrapper(request, *args, **kwargs):
            return await func(request=request, *args, **kwargs)

        return function_wrapper

    return decorator
[ "def index():\n definition = {\n \"swagger\": \"2.0\",\n \"info\": {\n \"title\": flask.current_app.config.get(\"APPNAME\", \"Not specified\"),\n \"version\": flask.current_app.config.get(\"VERSION\", \"Not specified\"),\n },\n \"host\": request.host,\n \"schemes\": [\"http\"],\n \"consumes\": [\"application/json\"],\n \"produces\": [\"application/json\"],\n \"definitions\": registry._definitions,\n \"paths\": {}\n }\n\n rules = list(flask.current_app.url_map.iter_rules())\n for r in sorted(rules, key=operator.attrgetter('rule')):\n if r.rule.startswith('/static'):\n continue\n if r.endpoint in registry._skipped:\n continue\n\n rule = re.sub(r\"<(?:[_a-zA-Z0-9\\(\\)]+:)?([a-zA-Z0-9_]+)>\", r\"{\\1}\", r.rule)\n if rule not in definition['paths']:\n definition['paths'][rule] = {}\n\n methods_handled = r.methods & REST_METHODS\n handler = flask.current_app.view_functions.get(r.endpoint)\n doc = handler.func_doc\n\n if len(methods_handled) == 1:\n method = methods_handled.pop().lower()\n try:\n validated = yaml.safe_load(doc)\n if not isinstance(validated, dict):\n raise Exception(\"Not a descriptor\")\n definition['paths'][rule][method] = validated\n except Exception:\n pass\n\n else:\n # We need to handle multi-method docstrings differently\n # because the documentation needs to define both, and\n # it's a higher level of the swagger hierarchy\n try:\n validated = yaml.safe_load(doc)\n if not isinstance(validated, dict):\n raise Exception(\"Not a descriptor\")\n definition['paths'][rule].update(validated)\n except Exception:\n definition['paths'][rule] = {}\n\n resp = flask.make_response(\n json.dumps(definition, for_json=True))\n resp.headers.set(\"Content-type\", 'application/json')\n resp.headers.set(\"Access-Control-Allow-Origin\", \"*\")\n return resp", "def api_docs():\n return render_template('api_docs.html')", "def create_api_doc(name, urls, app, swagger_api_spec, interface_name):\n spec = generate_swagger_spec(urls, name, swagger_api_spec)\n save_swagger_file(spec, name, interface_name)\n swagger_ui.tornado_api_doc(\n app,\n config_path=get_swagger_file_path(name, interface_name),\n url_prefix=\"/swagger/spec.html\",\n title=swagger_api_spec.get(\"title\", f\"{name} API\"),\n )", "def serve_swagger_file():\n return swagger.YAML_DEFINITION, 200, {'Content-Type': 'text'}", "def apiDocs():\n\treturn render_template('apiDocs.html')", "def swagger_redirect(request: HttpRequest) -> HttpResponse:\n return HttpResponse('Use /api/v2/docs/ instead', status=410)", "def serve_api_docs():\n return redirect('https://baderlab.github.io/saber-api-docs/')", "def swagger():\n return jsonify(current_app.spec.to_dict())", "def get_swagger_view(title=None, url=None, patterns=None, urlconf=None):\n from rest_framework import exceptions\n from rest_framework.permissions import AllowAny\n from rest_framework.renderers import CoreJSONRenderer\n from rest_framework.response import Response\n from rest_framework.views import APIView\n from rest_framework_swagger import renderers\n from django.utils.module_loading import import_string\n from django.conf import settings\n from .doc import OpenAPIRenderer, SchemaGenerator\n apiview = getattr(settings, 'PARAER_APIVIEW', None)\n if apiview:\n apiview = import_string(apiview)\n else:\n apiview = APIView\n\n class SwaggerSchemaView(apiview):\n _ignore_model_permissions = True\n exclude_from_schema = True\n permission_classes = [AllowAny]\n\n renderer_classes = [\n CoreJSONRenderer, OpenAPIRenderer, renderers.SwaggerUIRenderer\n ]\n\n def get(self, request, 
a_url=None):\n url_temp = url\n urlconf_temp = urlconf\n if a_url:\n urlconf_temp = '.'.join([a_url, 'urls'])\n url_temp = '/' + a_url\n\n generator = SchemaGenerator(\n title=title,\n url=url_temp,\n patterns=patterns,\n urlconf=urlconf_temp)\n schema = generator.get_schema(request=request)\n\n if not schema:\n raise exceptions.ValidationError(\n 'The schema generator did not return a schema Document')\n\n return Response(schema)\n\n return SwaggerSchemaView.as_view()", "def endpoint(self):\n return \"v1/openapi\" + (\".yaml\" if self.mode == \"yaml\" else \"\")", "def index():\n return redirect(\"/apidocs\")", "def RenderApiExplorer(path):\n del path # unused\n return flask.render_template('docs.html')", "def DeveloperAPI(*args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return DeveloperAPI()(args[0])\n\n def wrap(obj):\n _append_doc(obj, message='DeveloperAPI: This API may change across minor Ludwig releases.')\n _mark_annotated(obj)\n return obj\n return wrap", "def init_api_doc(app: Flask, api: Api):\n sw = Swagger(api)\n with app.app_context():\n for ns in sw.api.namespaces:\n for resource, urls, kwargs in ns.resources:\n for url in urls:\n _ = sw.serialize_resource(ns, resource, url, kwargs)", "async def handle_doc(self, request: web.Request) -> web.Response:\n spec = request.app[\"spec\"]\n spec_url = request.app.router[\"openapi_spec\"].url_for()\n title = spec.info.title\n html = f\"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <title>{title}</title>\n <!-- needed for adaptive design -->\n <meta charset=\"utf-8\"/>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n \"\"\"\n if self.font:\n html += f\"\"\"\n <link href=\"https://fonts.googleapis.com/css?{self.font}\" rel=\"stylesheet\">\n \"\"\"\n html += f\"\"\"\n <link rel=\"shortcut icon\" href=\"{self.favicon_url}\">\n <!--\n ReDoc doesn't change outer page styles\n -->\n <style>\n body {{\n margin: 0;\n padding: 0;\n }}\n </style>\n </head>\n <body>\n <redoc spec-url=\"{spec_url}\"></redoc>\n <script src=\"{self.redoc_js_url}\"> </script>\n </body>\n </html>\n \"\"\"\n return web.Response(text=html, content_type=\"text/html\")", "def api_view(http_method_names=None):\n http_method_names = ['GET'] if (http_method_names is None) else http_method_names\n\n def decorator(func):\n\n # noinspection PyPep8Naming\n WrappedAPIView = type(\n six.PY3 and 'WrappedAPIView' or b'WrappedAPIView',\n (View,),\n {'__doc__': func.__doc__}\n )\n\n # Note, the above allows us to set the docstring.\n # It is the equivalent of:\n #\n # class WrappedAPIView(APIView):\n # pass\n # WrappedAPIView.__doc__ = func.doc <--- Not possible to do this\n\n # api_view applied without (method_names)\n assert not(isinstance(http_method_names, types.FunctionType)), \\\n '@api_view missing list of allowed HTTP methods'\n\n # api_view applied with eg. 
string instead of list of strings\n assert isinstance(http_method_names, (list, tuple)), \\\n '@api_view expected a list of strings, received %s' % type(http_method_names).__name__\n\n allowed_methods = set(http_method_names) | {'options'}\n WrappedAPIView.http_method_names = [method.lower() for method in allowed_methods]\n\n # noinspection PyUnusedLocal\n def handler(self, *args, **kwargs):\n return func(*args, **kwargs)\n\n for method in http_method_names:\n setattr(WrappedAPIView, method.lower(), handler)\n\n WrappedAPIView.__name__ = func.__name__\n\n WrappedAPIView.renderer_classes = getattr(func, 'renderer_classes',\n View.renderer_classes)\n\n WrappedAPIView.parser_classes = getattr(func, 'parser_classes',\n View.parser_classes)\n\n WrappedAPIView.authentication_classes = getattr(func, 'authentication_classes',\n View.authentication_classes)\n\n WrappedAPIView.throttle_classes = getattr(func, 'throttle_classes',\n View.throttle_classes)\n\n WrappedAPIView.permission_classes = getattr(func, 'permission_classes',\n View.permission_classes)\n\n # noinspection PyUnresolvedReferences\n return WrappedAPIView.as_view()\n return decorator", "def test_discovery_swagger_apis_get(self):\n pass", "def swagger_ui(\n schema_url: str = \"/openapi.json\",\n swagger_ui_title: str = \"Piccolo Swagger UI\",\n csrf_cookie_name: t.Optional[str] = DEFAULT_COOKIE_NAME,\n csrf_header_name: t.Optional[str] = DEFAULT_HEADER_NAME,\n swagger_ui_version: str = \"5\",\n):\n\n # We return a router, because it's effectively a mini ASGI\n # app, which can be mounted in any ASGI app which supports mounting.\n router = Router()\n\n class DocsEndpoint(HTTPEndpoint):\n def get(self, request: Request):\n template = ENVIRONMENT.get_template(\"swagger_ui.html.jinja\")\n html = template.render(\n schema_url=schema_url,\n swagger_ui_title=swagger_ui_title,\n csrf_cookie_name=csrf_cookie_name,\n csrf_header_name=csrf_header_name,\n swagger_ui_version=swagger_ui_version,\n )\n return HTMLResponse(content=html)\n\n class OAuthRedirectEndpoint(HTTPEndpoint):\n def get(self, request: Request):\n return get_swagger_ui_oauth2_redirect_html()\n\n router.add_route(\"/\", endpoint=DocsEndpoint)\n router.add_route(\"/oauth2-redirect/\", endpoint=OAuthRedirectEndpoint)\n\n return router", "def get_swagger_view(title=None, url=None, patterns=None, urlconf=None):\n class SwaggerSchemaView(APIView):\n _ignore_model_permissions = True\n exclude_from_schema = True\n permission_classes = [AllowAny]\n renderer_classes = [\n CoreJSONRenderer,\n renderers.OpenAPIRenderer,\n renderers.SwaggerUIRenderer\n ]\n\n def get(self, request):\n generator = CustomSchemaGenerator(\n title=title,\n url=url,\n patterns=patterns,\n urlconf=urlconf\n )\n schema = generator.get_schema(request=request, public=True)\n\n if not schema:\n raise exceptions.ValidationError(\n 'The schema generator did not return a schema Document'\n )\n\n return Response(schema)\n\n return SwaggerSchemaView.as_view()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator to be used in API methods to convert the request data to an instance of the passed `model`. This instance is passed to the decorated API endpoint as the parameter `service_params`.
def api_inputmodel(api: str, model: BaseModel, servicename: str,
                   service_logger: logger) -> Callable:
    def decorator(func):
        @wraps(func)
        async def function_wrapper(request, *args, **kwargs):
            try:
                service_params = model.parse_raw(request.body)
            except ValidationError as err:
                msg = (f'API: {api} - invalid params ({request.json}) passed '
                       f'to {servicename}: {err}')
                service_logger.warning(msg)
                raise PreconditionFailed(msg, status_code=412)
            result = await func(request=request,
                                service_params=service_params,
                                service_logger=service_logger,
                                *args, **kwargs)
            return result

        return function_wrapper

    return decorator
[ "def api_outputmodel(api: str, model: BaseModel, servicename: str,\n service_logger: logger) -> Callable:\n\n def decorator(func):\n @wraps(func)\n async def function_wrapper(request, *args, **kwargs):\n service_result = await func(request, *args, **kwargs)\n try:\n if isinstance(service_result, model):\n result = service_result\n else:\n result = model(**service_result)\n output = response.json(result.dict())\n except Exception as err:\n msg = ('an internal error occured (service: '\n f'{servicename}, api: {api}): {err}')\n raise ServerError(msg)\n service_logger.info(f'processed result {result} => '\n f'{output.content_type} [{output.status}] '\n f'{output.body}')\n return output\n\n return function_wrapper\n\n return decorator", "def model_to_instance(model):\n pass", "def convert_to_model(self, *args):\n services_data, *_ = args\n return [Service(**service) for service in services_data]", "def to_object(self, model: Any) -> ValueObject:", "def to_domain_model(self, model: Any) -> DomainModel:", "def to_payload(self, model):\n return model", "def decorator(cls):\n\n instance = cls(*args, **kwargs)\n serializer_services.register_serializer(instance, **kwargs)\n\n return cls", "def instantiate_model(model_params):\n \n if model_params['model_class'] == 'SeqToSeq':\n m = SeqToSeq(model_params)\n elif model_params['model_class'] == 'SeqToPoint':\n m = SeqToPoint(model_params)\n elif model_params['model_class'] == 'PointToPoint':\n m = PointToPoint(model_params)\n\n return m", "def __input_model_to_entity(self, model: dict, id=None) -> \"Entity\":\n cleaned_model = dict()\n for field in fields(self.__MODEL):\n if field.name == \"id\":\n continue\n if field.name in model:\n value = model[field.name]\n if value is None:\n if field.default is not MISSING:\n value = field.default\n elif field.default_factory is not MISSING:\n value = field.default_factory()\n elif not isinstance(value, field.type):\n value = field.type(value)\n cleaned_model[field.name] = value\n if id is not None:\n cleaned_model[\"override_id\"] = id\n if \"id\" in cleaned_model:\n del cleaned_model[\"id\"]\n try:\n return self.__MODEL(**cleaned_model)\n except Exception as err:\n raise BadModelError(self.__MODEL, list(model.keys()))", "def from_domain_model(self, domain_model: DomainModel) -> Any:", "def api_method(func):\n @wraps(func)\n def decorator(self, return_request_args=False, *args, **kwargs):\n request_args = func(self, *args, **kwargs)\n request_args.update({\n 'method': '{module}.{method}'.format(\n module=self.__class__.__name__,\n method=func.__name__)})\n request_args = self._preprocess(request_args)\n if return_request_args:\n return request_args\n else:\n return self.pa.request(**request_args)\n return decorator", "def dispatch(model, args):\n args_dict = args.__dict__\n operation = args.operation\n del args_dict[\"func\"]\n del args_dict[\"operation\"]\n body, kwargs = convert_args(model, operation, args_dict)\n\n dispatch_func = partial(pcluster.cli.model.call, model[operation][\"func\"])\n\n # middleware provides an opportunity to customize the calling of the\n # underlying API function on a per-operation basis\n middleware = middleware_hooks()\n if operation in middleware:\n return middleware[operation](dispatch_func, body, kwargs)\n else:\n return dispatch_func(**kwargs)", "def deserialize_model_instance(self, model_class):\n return model_class.from_dictionary(self.request_body)", "def api_documentation(api: str, summary: str, in_model: BaseModel,\n out_model: BaseModel, out_description: str) -> 
Callable:\n for model, name in ((in_model, 'Input'), (out_model, 'Output')):\n doc.Object(\n make_dataclass(\n f'Api{api[1:].title()}{name}',\n [(key, val.type_, val.type_)\n for key, val in model.__dict__['__fields__'].items()]))\n im_returns = doc.JsonBody({\n key: val.type_\n for key, val in in_model.__dict__['__fields__'].items()\n })\n\n om_returns = {\n key: val.type_\n for key, val in out_model.__dict__['__fields__'].items()\n }\n\n def decorator(func):\n @doc.summary(summary)\n @doc.response(412,\n 'Error: Precondition Failed',\n description='The passed request-parameters are invalid')\n @doc.response(500,\n 'Error: Server-Error occured',\n description='An internal error occured')\n @doc.consumes(im_returns,\n content_type='application/json',\n location='body')\n @doc.produces(om_returns,\n content_type='application/json',\n description=out_description)\n @wraps(func)\n async def function_wrapper(request, *args, **kwargs):\n return await func(request=request, *args, **kwargs)\n\n return function_wrapper\n\n return decorator", "def build_serializer(cls, model):\n serializer_meta_class = type(f'{model.__name__}Meta', (), {\n 'model': model,\n 'fields': '__all__'\n })\n\n serializer_class = type(f'{model.__name__}Serializer', (serializers.ModelSerializer,), {\n 'Meta': serializer_meta_class\n })\n\n return serializer_class", "def api_field_from_django_field(cls, f, default=CharField):\n if isinstance(f, JSONField):\n return JSONApiField\n \n return super(PandaModelResource, cls).api_field_from_django_field(f, default)", "def to_model(self, payload):\n if self.skip:\n raise SkipField\n\n model = self.get_or_initialize_model(payload)\n model = self.update_model_fields(model, payload)\n return model", "def serializer_method(self, model):\r\n get_serializer = 'serializers.' + model + 'ListSerializer'\r\n post_serializer = 'serializers.' + model + 'Serializer'\r\n if self.request.method == 'GET':\r\n return eval(get_serializer)\r\n return eval(post_serializer)", "def json_to_model(cls, data):\n m = cls.to_model(data)\n m.raw = data\n cls._unlock_unmarshalling(m)\n cls.set_additional_fields(m, data)\n return m" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator to be used on API methods to convert the response data of the decorated API method to JSON based on the passed `model`.
def api_outputmodel(api: str, model: BaseModel, servicename: str, service_logger: logger) -> Callable: def decorator(func): @wraps(func) async def function_wrapper(request, *args, **kwargs): service_result = await func(request, *args, **kwargs) try: if isinstance(service_result, model): result = service_result else: result = model(**service_result) output = response.json(result.dict()) except Exception as err: msg = ('an internal error occured (service: ' f'{servicename}, api: {api}): {err}') raise ServerError(msg) service_logger.info(f'processed result {result} => ' f'{output.content_type} [{output.status}] ' f'{output.body}') return output return function_wrapper return decorator
[ "def json_view(func):\n @wraps(func)\n def _inner(*args, **kwargs):\n ret = func(*args, **kwargs)\n\n if isinstance(ret, HttpResponse):\n return ret\n\n status_code = 200\n\n if isinstance(ret, tuple) and len(ret) == 2:\n ret, status_code = ret\n\n return HttpResponse(json.dumps(ret, indent=4), mimetype='text/plain', status=status_code)\n\n return _inner", "def render_json(view_func):\n def wrap(request, *args, **kwargs):\n retval = view_func(request, *args, **kwargs)\n if isinstance(retval, HttpResponse):\n retval.mimetype = 'application/json; charset=utf-8'\n return retval\n else:\n js = json.dumps(retval)\n return HttpResponse(js, content_type='application/json; charset=utf-8')\n return wrap", "def jsonify(func):\n\n @functools.wraps(func)\n def convert(*args, **kwargs):\n\n success = True\n code = 200 # default status code - success!\n\n try:\n result = func(*args, **kwargs)\n\n if isinstance(result, BaseResponse):\n return result\n\n except exc.HTTPException as ex:\n # i'd like to be able to just re-raise e here, but the body of the\n # response is e.get_body() instead of e.description - so we have to\n # just set up the response ourselves\n result = { 'message' : ex.description }\n code = ex.code\n\n except Exception as ex:\n result = { 'message' : 'Internal Server Error', 'system_message' : ex.message }\n code = 500\n\n # build a response object, and change the content type header to json\n response = make_response(json.dumps(result))\n response.headers['Content-Type'] = 'application/json'\n response.status_code = code\n\n return response\n\n # return the function that is taking the place of (or masquerading as) our decorated function\n return convert", "def create_json_from_model(self):\n json = {\n \"enableAutoReply\": self.enable_auto_reply,\n \"responseSubject\": self.response_subject,\n \"responseBodyPlainText\": self.response_body_plain_text,\n \"responseBodyHtml\": self.response_body_html,\n \"restrictToContacts\": self.restrict_to_contacts,\n \"restrictToDomain\": self.restrict_to_domain,\n \"startTime\": self.start_time,\n \"endTime\": self.end_time\n }\n return json", "def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n\n data = json.dumps(objects)\n if 'callback' in request:\n # a jsonp response!\n data = '%s(%s);' % (request['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n\n return HttpResponse(data, \"application/json\")\n return decorator", "def serializer_method(self, model):\r\n get_serializer = 'serializers.' + model + 'ListSerializer'\r\n post_serializer = 'serializers.' 
+ model + 'Serializer'\r\n if self.request.method == 'GET':\r\n return eval(get_serializer)\r\n return eval(post_serializer)", "def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = json.dumps(objects)\n if 'callback' in request.REQUEST:\n # a jsonp response!\n data = '%s(%s);' % (request.REQUEST['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n except:\n data = json.dumps(str(objects))\n return HttpResponse(data, \"application/json\")\n return decorator", "def jsonify(func):\n @wraps(func)\n def new_func(*args, **kwargs):\n return json.dumps(func(*args, **kwargs))\n return new_func", "def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = json.dumps(objects)\n if 'callback' in request.REQUEST:\n # a jsonp response!\n data = '%s(%s);' % (request.REQUEST['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n except Exception as e:\n print e\n data = json.dumps(str(objects))\n return HttpResponse(data, \"application/json\")\n return decorator", "def returns_json(f):\n @wraps(f)\n def decorated_handler(*args, **kwargs):\n r, status = f(*args, **kwargs)\n return fl.Response(json.dumps(r, sort_keys=True), status,\n content_type='application/json; charset=utf-8')\n return decorated_handler", "def to_json(func):\n\n @functools.wraps(func)\n def inner(*args, **kwargs):\n fields = kwargs.pop('fields', None)\n response = func(*args, **kwargs)\n if isinstance(response, Iterable):\n for resp in response:\n yield filter_fields(proto_message_to_dict(resp), fields=fields)\n elif response is None:\n yield None\n else:\n yield filter_fields(proto_message_to_dict(response), fields=fields)\n\n return inner", "def handle_models_request():\n # TODO: add sort and filter by creation/modification date\n return (\n json.dumps({\"models\": {\n k: d[\"data\"] for k, d in app.models.items()\n }}), 200,\n {'ContentType': 'application/json'}\n )", "def _json_default_encoder(func):\n @wraps(func)\n def inner(self, o):\n try:\n return o._redpipe_future_result # noqa\n except AttributeError:\n pass\n return func(self, o)\n\n return inner", "def create_json_from_model(self):\n json = {\n \"enabled\": self.enabled,\n \"emailAddress\": self.email_address,\n \"disposition\": self.disposition,\n }\n return json", "def api_inputmodel(api: str, model: BaseModel, servicename: str,\n service_logger: logger) -> Callable:\n\n def decorator(func):\n @wraps(func)\n async def function_wrapper(request, *args, **kwargs):\n try:\n service_params = model.parse_raw(request.body)\n except ValidationError as err:\n msg = (f'API: {api} - invalid params ({request.json}) passed '\n f'to {servicename}: {err}')\n service_logger.warning(msg)\n raise PreconditionFailed(msg, status_code=412)\n result = await func(request=request,\n service_params=service_params,\n service_logger=service_logger,\n *args,\n **kwargs)\n return result\n\n return function_wrapper\n\n return decorator", "def _json_default_encoder(func):\n\n @wraps(func)\n def inner(self, o):\n try:\n return o._redpipe_struct_as_dict # noqa\n except AttributeError:\n pass\n return func(self, o)\n\n return inner", "def to_object(self, model: Any) -> ValueObject:", "def response(schema):\n def _response(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n if issubclass(schema, BaseModel):\n 
has_root = True if '__root__' in schema.__fields__ else False\n function_res = function(*args, **kwargs)\n\n if not function_res:\n if has_root is True:\n return jsonify([])\n return jsonify({})\n\n if type(function_res) == list:\n res = schema.parse_obj(function_res)\n else:\n res = schema.from_orm(function_res)\n\n res = res.dict()\n\n if has_root is True:\n return jsonify(res['__root__'])\n\n return jsonify(res)\n elif isinstance(schema, dict):\n return jsonify(schema)\n else:\n raise CustomException('invalid response type', code=400)\n\n return wrapper\n return _response", "def _create_response_model(self, data):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempt to update the branch to the given SHA.
def update_branch(self, name, sha): branch_info = { 'sha': sha, } resp = self.patch('git/refs/heads/{}'.format(name), json=branch_info) try: resp.raise_for_status() except Exception: logger.error(resp.json()) raise return resp.json()
[ "def update_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n vprint ('Fast-forwarding', name, 'to', main_branch)\n ex (\"cd $DOC_ROOT/\" + product + \" && git fetch . \" + main_branch + \":\" + name)", "def update(branch='master'):\n\n with cd('/srv/git/mapstory/geonode'):\n sudo('git pull mapstory/geonode {0}'.format(branch), user='mapstory')\n with cd('/srv/git/mapstory/mapstory'):\n sudo('git reset --hard', user='mapstory')\n sudo('git pull', user='mapstory')", "def update_ref(ref, sha):\n pass", "def update(self, rev = 'HEAD'):\r\n self._authsvn('up', ['-r', rev])", "def reset_branch(ctx, name, sha, hard):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo(\"Updating {} branch...\".format(name), break_line=False)\n gh.reset_branch(name=name, sha=sha, hard=hard)\n log.echo('Branch {} is now at {} '.format(name, sha), break_line=False)\n log.checkmark()\n except BaseException as _:\n log.xmark()\n raise", "def verify_up_to_date(path, branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch up to date:\")\n run_in_component(path, ['git', 'remote', 'update'])\n\n result = run_in_component(path, ['git', 'rev-list', 'HEAD...origin/%s' % branch, '--count'])\n count = int(result.strip())\n\n if count == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You branch is not up-to-date with remote branch: %d different commits\" % count)", "def update_stable(path, sha_list, origin):\n\n conn = sqlite3.connect(rebasedb)\n c = conn.cursor()\n\n cmd = ['git', '-C', path, 'log', '--no-merges', '--abbrev=12', '--oneline',\n '--reverse', sha_list]\n commits = subprocess.check_output(cmd, encoding='utf-8', errors='ignore')\n\n for commit in commits.splitlines():\n if commit != '':\n elem = commit.split(' ')[:1]\n sha = elem[0]\n c.execute(\"select sha from stable where sha is '%s'\" % sha)\n found = c.fetchall()\n if found == []:\n c.execute('INSERT INTO stable(sha, origin) VALUES (?, ?)', (\n sha,\n origin,\n ))\n\n conn.commit()\n conn.close()", "def update(cwd, rev, force=False, user=None):\n cmd = [\"hg\", \"update\", \"{}\".format(rev)]\n if force:\n cmd.append(\"-C\")\n\n ret = __salt__[\"cmd.run_all\"](cmd, cwd=cwd, runas=user, python_shell=False)\n if ret[\"retcode\"] != 0:\n raise CommandExecutionError(\n \"Hg command failed: {}\".format(ret.get(\"stderr\", ret[\"stdout\"]))\n )\n\n return ret[\"stdout\"]", "def update_from_repo():\n\treturn", "def update_base_branch(self):\n # Make sure base branch is up to date\n print(\"Checking out base branch '{}'...\".format(self.base_branch))\n self.git.checkout(self.base_branch)\n print('Updating base branch...')\n self.git.pull('--rebase')", "def sha(self, sha):\n\n self._sha = sha", "def update_version():\n version = os.environ.get('TRAVIS_COMMIT', None) or \\\n subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])\n version_file = path.join('slingsby', 'VERSION')\n with open(version_file, 'w') as fh:\n fh.write(version)", "def set_git_sha(context, sha):\n context.sha = sha", "def updateCheckoutVersion(self, checkoutId: long, checkoutVersion: int, user: unicode) -> None:\n ...", "def BranchUpdate(self, branch):\n r = self.scheduler_db_meta.tables['branches']\n if not branch.name:\n return False\n q = r.update(r.c.name.like(branch.name), branch)\n connection = self.engine.connect()\n connection.execute(q)\n return True", "def _update(self):\n if not self._system.git_update(self.get_install_path()):\n raise Exception(\"Cannot update, repository has changes\")\n 
self._install() # Now reinstall (compile)", "def update_sha1(self, path, sha1):\n check_param_not_none(path, \"path\")\n check_param_not_none(sha1, \"sha1\")\n item = self.get_item_by_path(path)\n item.sha1 = sha1\n self.items.update({path : item})", "async def pull(self, ctx):\r\n await ctx.send(\"Pulling changes...\")\r\n output = check_output(['git', 'pull'])\r\n if b'Already up to date.' in output:\r\n await ctx.send(f\"Already up to date!\")\r\n else:\r\n head = check_output([\"git\", \"log\", \"--pretty=format:'%h'\", \"-n 1\"]).decode(\"UTF-8\")\r\n await ctx.send(f\"Updated to version {head}!\\nRestarting...\")\r\n await self.bot.close()", "def pull_nightly_version(spdir):\n nightly_version = _nightly_version(spdir)\n cmd = [\"git\", \"merge\", nightly_version]\n p = subprocess.run(cmd, check=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that the release is actually ready to be released. If the release is new (corresponds to a release branch), then we check that the release is merged into master. If we cannot find the release branch, we assume that it is a hotfix and we verify that the major version number matches the latest release.
def check_release_status(self, release_name, release_branch): logger.debug('GitHubAPI.check_release_status args: {}; {}'.format( release_name, release_branch) ) release_version = extract_release_branch_version(release_name) release_branch_base = build_release_base_name(get_config()) # Assume that this is a new release # Check if the release branch is merged into master try: merge_status = self.compare( 'master', release_branch ).get('status') except requests.exceptions.HTTPError as e: logger.debug('HTTPError: {}'.format(e.message)) if not e.response.status_code == 404: raise e else: # can be one of diverged, ahead, behind, identical according to # http://stackoverflow.com/a/23969867 if merge_status in ['diverged', 'ahead']: raise Exception( 'Release must be merged into master before release') return # if the release branch does not exist, then we end up here, # Assume that it is a hotfix raw_version = self.latest_release().get('name', '') if raw_version.startswith(release_branch_base): raw_version = raw_version[len(release_branch_base):] version = extract_year_week_version(raw_version) logger.debug(version) if extract_year_week_version(release_version) != version: raise Exception( 'New release version does not match the current release, ' 'we expected a hotfix.' ) return
[ "def check(self) -> None:\n latest_release_version = self.github_release.get_version()\n local_version = self.executable.get_version()\n\n if latest_release_version and local_version:\n\n if latest_release_version > local_version:\n self.updatable = True\n\n if not local_version:\n self.updatable = True", "def is_release_branch():\n diff_string_config_yml = run_command(\"git diff origin/master .circleci/config.yml\")\n if re.search(r'[+-][ ]+CONTENT_VERSION: \".*', diff_string_config_yml):\n return True\n\n return False", "def test_releasable():\n pytest.dbgfunc()\n staged, changed, untracked = tbx.git_status()\n assert untracked == [], \"You have untracked files\"\n assert changed == [], \"You have unstaged updates\"\n assert staged == [], \"You have updates staged but not committed\"\n\n if tbx.git_current_branch() != \"master\":\n return True\n\n last_tag = tbx.git_last_tag()\n\n msg = \"Version ({}) does not match tag ({})\"\n result = tbx.run(\"version\").rstrip()\n msg = msg.format(result, last_tag)\n assert result == last_tag, msg\n\n assert tbx.git_hash() == tbx.git_hash(last_tag), \"Tag != HEAD\"", "def is_release(semver):\n return not (semver.build or semver.prerelease)", "def assert_release_branch(release_branch='master'):\n branch = get_current_branch()\n if branch != release_branch:\n warn(ASSERT_RELEASE_BRANCH_WARNING.format(release_branch, branch))\n continue_check()", "def verify_tags(git_ref_target):\n latest_release = github_util.get_latest_release().get('name')\n latest_commit = run('git rev-list -n 1 {}'.format(latest_release)).stdout.rstrip(\"\\r\\n\")\n if not branch_check(latest_release, git_ref_target):\n print('Your branch does not contain the latest production code. \\n\\\n Please recreate it by branching off of release {}.'.format(latest_release))\n exit(1)\n else:\n print(\"Branch contains the latest production tag\")\n fork_point = run('git merge-base remotes/origin/master remotes/origin/{}'.format(git_ref_target))\n commits_since_fork = run('git rev-list --branches={} {}^..HEAD'.format(git_ref_target,\n fork_point.stdout.rstrip(\"\\r\\n\")))\n if latest_commit not in commits_since_fork.stdout:\n print('Your branch did not fork directly from the last production tag. 
\\n\\\n Please recreate it by branching off of release {}.'.format(latest_release))\n exit(1)\n else:\n print('Latest production tag is between the fork point and HEAD')", "def is_0_release(release: str) -> bool:\n if release == \"current_branch\":\n return False\n version = packaging.version.parse(release)\n return version < packaging.version.Version(\"1.0\")", "def version_check():\n try:\n with open('git.json', 'r') as fp:\n git_md = json.loads(fp.read())\n except IOError:\n # In the event that there is no git metadata, just print null values\n # twice.\n print \"null\"\n print \"null\"\n return\n\n if git_md['GitHub']:\n if git_md['GitHubUser'] is not None and git_md[\n 'GitHubRepo'] is not None:\n latest_release = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/releases/latest\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n latest_tag = latest_release['tag_name']\n\n # Go through all of the tags to see if this commit matches a tag.\n tags = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/git/refs/tags\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n\n current_tag = \"Unreleased\"\n for tag in tags:\n if tag['object']['sha'] == git_md['GitSHA']:\n current_tag = tag['ref'].split('/')[-1]\n\n print current_tag\n print latest_tag\n else:\n print \"MissingGitHubDetails\"\n print \"MissingGitHubDetails\"\n else:\n # In the event that there is a git file, but it doesn't indicate GitHub\n # then just print some stuff indicating that.\n print \"NonGitHub\"\n print \"NonGitHub\"", "def __check_release(self, release):\n\n split_release = release.split('.')\n\n for number in split_release:\n if not number.isnumeric():\n raise UnknownReleaseError('Error: Invalid release format')", "def check():\n\n log(\"Checking the current revision id for your code.\", PROGRESS)\n chdir(ENVIRON)\n revision_id = do(\n 'git', 'show', '--pretty=oneline', '--summary', redirect_stdout=True\n ).split()[0]\n\n log(\"Checking the latest commits on GitHub.\", PROGRESS)\n commit_info = urlopen(get_conf('repo-check-url')).json\n\n latest_revision_id = commit_info['commit']['sha']\n\n if revision_id != latest_revision_id:\n exit(\"A new version is available. Please run `git pull`.\")\n\n log(\"Your checkout is up-to-date.\", SUCCESS)", "def test_release_tag(self) -> None:\n self.assertEqual(\"v3.14.15\", release_tag())", "def check_version(old_version, current_version):\n print('Checking current version number: {} vs latest release version number: {}...'\n .format(old_version, current_version))\n if version_parser.parse(old_version) >= version_parser.parse(current_version):\n print('Invalid number version! Latest release version available: {}! 
Current build version: {}'\n .format(old_version, current_version))\n print('Build version must be higher than the latest release version!')\n return False\n print('Version number ok!')\n return True", "def test_release_version():\n assert (\n RELEASE_TAG == f\"v{PROJECT_VERSION}\"\n ), \"RELEASE_TAG does not match the project version\"", "def test_master_versions(self):\n m = self.d.master(4242)\n r = self.d.release(79)\n v = m.versions\n\n self.assertEqual(len(v), 2)\n self.assertTrue(r in v)\n self.assertEqual(r.master, m)\n\n r2 = self.d.release(3329867)\n self.assertTrue(r2.master is None)", "def validate_commit(commit, branch_version):\n\n # this returns headers, followed by a empty line, then the message, so\n # we strip the headers\n message = subprocess.check_output([\n 'git', 'cat-file', 'commit', commit]).split('\\n\\n', 1)[1]\n\n match = RESOLVES_RE.search(message)\n if not match:\n print('Commit %s does not include the required \"Resolves:\" '\n 'footer. Modify your commit to add this.' % commit[:8])\n sys.exit(1)\n\n bug_id = int(match.group(1))\n\n req = requests.get(BUG_URL % bug_id)\n\n if req.status_code == 401:\n print('Bug %d, referenced by commit %s, is private. You will '\n 'have to match the bug to the release manually.' % (\n bug_id, commit))\n return\n\n if req.status_code == 404:\n print('Bug %d, referenced by commit %s, does not exist. Typo?' % (\n bug_id, commit))\n sys.exit(1)\n\n if req.status_code != 200 or not req.json().get('bugs'):\n print('Got unexpected response (%d).\\n\\n%s' % (\n req.status_code, json.dumps(req.json(), indent=4)))\n sys.exit(1)\n\n bug_versions = []\n\n target_releases = req.json()['bugs'][0]['target_release']\n if not target_releases or target_releases[0] == '---':\n print('Bug %d, referenced by commit %s, does not have a target '\n 'release set. This must be set first.' % (\n bug_id, commit))\n sys.exit(1)\n\n for target_release in target_releases:\n bug_version = VERSION_RE.search(target_release).group(0)\n bug_versions.append(bug_version)\n if bug_version == branch_version:\n break\n else:\n print('This patch is for OSP %s yet bug %d is for version(s) %s. '\n 'Update the bug target release then try again.' 
% (\n branch_version, bug_id, ', '.join(bug_versions)))\n sys.exit(1)", "def assert_ahead_of_current_release(current_release='origin/master'):\n upstream_ahead, local_ahead = diff_branch(current_release)\n if upstream_ahead > 0:\n warn(ASSERT_AHEAD_OF_CURRENT_RELEASE_WARNING.format(current_release))\n continue_check()", "def is_release():\n return VERSION[-1]", "def check_bump_range(self, current_version: str, new_version: str) -> bool:\n cur_v_split = current_version.split('.')\n new_v_split = new_version.split('.')\n\n # major update like bumping from 1.8.3 to 2.0.0\n if new_v_split[0] != cur_v_split[0]:\n log.debug('Identified major version bump')\n return new_v_split[1] == '0' and new_v_split[2] == '0' and (int(new_v_split[0]) - int(cur_v_split[0]) == 1)\n\n # minor update like bumping from 1.8.5 to 1.9.0\n elif new_v_split[1] != cur_v_split[1]:\n log.debug('Identified minor version bump')\n return new_v_split[0] == cur_v_split[0] and new_v_split[2] == '0' and (int(new_v_split[1]) - int(cur_v_split[1]) == 1)\n\n # patch update like bumping from 1.8.5 to 1.8.6\n elif new_v_split[2] != cur_v_split[2]:\n log.debug('Identified patch version bump')\n return new_v_split[0] == cur_v_split[0] and new_v_split[1] == cur_v_split[1] and (int(new_v_split[2]) - int(cur_v_split[2]) == 1)\n\n # case when we bumping like 3.0.0-SNAPSHOT to 3.0.0\n log.debug('Identified SNAPSHOT version bump')\n return True", "def is_release(self):\n # version string: N.N.N.N is for release.\n return bool(re.match(r'^[\\d.]+$', self.version))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads in an audio file and processes it.
def process_audio_file(self, file_name): sig, sr = librosa.load(file_name, mono=True) return self._extract_function(sig, sr)
[ "def audioRead(path):\n data, samplerate = sf.read(path)\n frames = data.shape[0]\n channels = len(data.shape)\n duration = 1/samplerate*frames\n return data, samplerate, path, duration, frames, channels", "def read_sound(self, inFile):\n\n # Python can natively only read \"wav\" files. To be flexible, use \"ffmpeg\" for conversion for other formats\n if not os.path.exists(inFile):\n print('{0} does not exist!'.format(inFile))\n raise FileNotFoundError\n \n (root, ext) = os.path.splitext(inFile)\n if ext[1:].lower() != 'wav':\n if self.ffmpeg_info.ffmpeg == None:\n print('Sorry, need FFMPEG for non-WAV files!')\n self.rate = None\n self.data = None\n raise NoFFMPEG_Error\n \n outFile = root + '.wav'\n cmd = [self.ffmpeg_info.ffmpeg, '-i', inFile, outFile, '-y']\n subprocess.run(cmd)\n print('Infile converted from ' + ext + ' to \".wav\"')\n \n inFile = outFile\n self.source = outFile\n\n self.rate, self.data = read(inFile)\n \n # Set the filename\n self.source = inFile\n \n # Make sure that the data are in some integer format\n # Otherwise, e.g. Windows has difficulty playing the sound\n # Note that \"self.source\" is set to \"None\", in order to\n # play the correct, converted file with \"play\"\n if not np.issubdtype(self.data.dtype, np.integer):\n self.generate_sound(self.data, self.rate)\n \n self._setInfo()\n print('data read in!')", "def audio_reader(path):\n\n sample_rate, data = wav.read(path)\n data = data/32768\n return sample_rate, data", "def audio_file_load():\n raise NotImplementedError()", "def process_sound_file(file_path):\n\n return to_mfcc(get_wav(file_path))", "def get_audio():\n\tbuf = None\n\tnum_new_bytes = BUFFER_SIZE // REFRESH_BUFFER_FACTOR\n\twith open(INFILE) as fifo:\n\t\twhile True:\n\t\t\tif buf is None:\n\t\t\t\tbuf = fifo.read(BUFFER_SIZE)\n\t\t\telse:\n\t\t\t\tbuf = buf[num_new_bytes:] + fifo.read(num_new_bytes)\n\t\t\tyield buf", "def audio(self):\n self.log_string += 'Audio file'\n self._media_processing()", "def chunk_audio(self, file_name):\n # TODO: clean this up a bit -- offsets and whatnot\n sound = AudioSegment.from_wav(file_name) # need this JUST FOR THE duration_seconds UGGGG\n video_file = ffmpeg.input(file_name, format='wav')\n chunk_count = int(sound.duration_seconds+1)//(self.CHUNK_SIZE//1000)\n for i in range(chunk_count):\n chunk_name = self.file_chunk_name(file_name, i)\n video_file.output(chunk_name, acodec='pcm_s16le', ar=16000, ac=1, ss=self.convert_to_time(i), to=self.convert_to_time(i+1)).run()\n with open(chunk_name, 'rb') as f:\n self.upload_to_s3(self.AUDIO_BUCKET_NAME, chunk_name, content=f.read())\n self.upload_to_s3(self.AUDIO_BUCKET_NAME, self.file_manifest(file_name), content=str(chunk_count))", "def wavread(filename):\n\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\traise ValueError(\"Input file is wrong\")\n\n\tfs, x = read(filename)\n\n\tif (len(x.shape) !=1): # raise error if more than one channel\n x = np.mean(x,axis = 1)\n\t\tprint \"Audio file is stereo, converting to mono\"\n\n\t#scale down and convert audio into floating point number in range of -1 to 1\n\tx = np.float32(x)/norm_fact[x.dtype.name]\n\treturn fs, x", "def wavread(filename):\n return audiolab.wavread(filename)", "async def process_audio_chunks(self, chunk: bytes):\n if self.processor is not None:\n await self.processor.process(chunk)", "def receive_audio(self):\n print(\"got to receive audio\")\n self.receive_audio_socket = self.start_socket(IP, RECEIVE_AUDIO_PORT)\n self.send_chunk(self.my_name.encode(), 
self.receive_audio_socket)\n print(self.receive_mes(self.receive_audio_socket))\n\n print(\"receive stream made\")\n i = 0\n done = False\n while not done:\n try:\n i += 1\n data = self.receive_audio_socket.recv(CHUNK) # gets audio chunk\n #print(\"got audio chunk number {} of length {}\".format(i, len(data)))\n self.lock.acquire()\n self.voice_stream.write(data) # plays\n self.lock.release()\n # if len(data) == 0:\n # done = True\n #print(\"wrote chunk #{}\".format(i))\n except socket.error as msg:\n print(\"socket failure receive audio: {}\".format(msg))\n done = True\n except KeyboardInterrupt:\n print(\"exception receive audio\")\n done = True\n self.receive_audio_socket.close()\n # stream_receive.close()\n # p_receive.terminate()", "def get_audio_file(file_name):\n pass", "def play(self, context=None):\n\n self.nowPlaying = True\n\n # Open file for reading\n wf = wave.open(self.path + '/' + self.name, 'rb')\n p = pyaudio.PyAudio()\n\n # Open stream for playback\n stream = p.open( format = p.get_format_from_width( wf.getsampwidth() ),\n channels = wf.getnchannels(),\n rate = wf.getframerate(), output = True)\n\n # Read file in chunks of 1024 bytes\n data = wf.readframes(1024)\n\n # Read while there is data left to read\n # If nowPlaying is False, user has clicked Stop\n while data != '' and self.nowPlaying:\n stream.write(data)\n data = wf.readframes(1024)\n\n stream.stop_stream()\n stream.close()\n\n p.terminate()\n\n self.nowPlaying = False\n\n # Callback to UI to signal that audio has finished playing\n if context is not None:\n context.stopAudio()", "def read_sound_file(self, file_name, sound_name=None):\n if sound_name == None:\n sound_name = os.path.splitext(os.path.split(file_name)[-1])[0]\n\n rate, data = wavfile.read(file_name)\n # create stream data assuming 2 channels, i.e. stereo data, and use np.float32 data format\n stream_data = data.astype(np.int16)\n\n # check data formats - is this stereo sound? 
If so, we need to fix it.\n wf = wave.open(file_name, 'rb')\n # print sound_name\n # print wf.getframerate(), wf.getnframes(), wf.getsampwidth(), wf.getnchannels()\n if wf.getnchannels() == 2:\n stream_data = stream_data[::2]\n\n self.sounds.update({sound_name: stream_data})", "def read_audio(file_path, resample_rate=None, to_mono=False):\n return librosa.load(file_path, sr=resample_rate, mono=to_mono)", "def _on_audio(self, audio):\n\n # Put Microphone Audio at the end of the Audio Buffer\n self._audio_buffer.extend(audio.tobytes())\n\n # Process Each Frame-Length of Audio in Buffer\n # 2 * self._frame_size, because we're counting bytes, not np.int16's\n while len(self._audio_buffer) > 2 * self._frame_size:\n frame = np.frombuffer(self._audio_buffer[:2*self._frame_size], np.int16)\n self._process_frame(frame)\n self._process_voice(frame)\n del self._audio_buffer[:2*self._frame_size]", "def read_audio_metadata_codec(filepath): # pylint: disable=too-complex\n import re\n\n def _read_codec_error_output(filepath):\n command = [CODEC_EXEC, \"-i\", filepath]\n\n popen_params = {\n \"bufsize\": 10**5,\n \"stdout\": sp.PIPE,\n \"stderr\": sp.PIPE,\n \"stdin\": DEVNULL\n }\n\n if os.name == 'nt':\n popen_params[\"creationflags\"] = 0x08000000\n\n proc = sp.Popen(command, **popen_params)\n proc.stdout.readline()\n proc.terminate()\n\n # Ref: http://stackoverflow.com/questions/19699367\n infos = proc.stderr.read().decode('ISO-8859-1')\n del proc\n\n return infos\n\n def _read_samplerate(line):\n try:\n match = re.search(\" [0-9]* Hz\", line)\n matched = line[match.start():match.end()]\n samplerate = int(matched[1:-3])\n return samplerate\n except:\n raise RuntimeError(\n \"Failed to load sample rate of file %s from %s\\n the infos from %s are \\n%s\"\n % (filepath, CODEC_EXEC, CODEC_EXEC, infos)\n )\n\n def _read_n_channels(line):\n try:\n match1 = re.search(\" [0-9]* channels\", line)\n\n if match1 is None:\n match2 = re.search(\" stereo\", line)\n match3 = re.search(\" mono\", line)\n if match2 is None and match3 is not None:\n channels = 1\n elif match2 is not None and match3 is None:\n channels = 2\n else:\n raise RuntimeError()\n else:\n channels = int(line[match1.start() + 1:match1.end() - 9])\n\n return channels\n except:\n raise RuntimeError(\n \"Failed to load n channels of file %s from %s\\n the infos from %s are \\n%s\"\n % (filepath, CODEC_EXEC, CODEC_EXEC, infos)\n )\n\n def _read_duration(line):\n try:\n keyword = 'Duration: '\n line = [l for l in lines if keyword in l][0]\n match = re.findall(\"([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])\", line)[0]\n duration_seconds = cvsecs(match)\n return duration_seconds\n except:\n raise RuntimeError(\n \"Failed to load duration of file %s from %s\\n the infos from %s are \\n%s\"\n % (filepath, CODEC_EXEC, CODEC_EXEC, infos)\n )\n\n # to throw error for FileNotFound\n # TODO: [A] test error when FileNotFound\n with open(filepath):\n pass\n\n if not get_codec():\n raise RuntimeError(\"No codec available\")\n\n infos = _read_codec_error_output(filepath)\n lines = infos.splitlines()\n lines_audio = [l for l in lines if ' Audio: ' in l]\n if lines_audio == []:\n raise RuntimeError(\n \"%s did not find audio in the file %s and produced infos\\n%s\" %\n (CODEC_EXEC, filepath, infos)\n )\n\n samplerate = _read_samplerate(lines_audio[0])\n channels = _read_n_channels(lines_audio[0])\n duration_seconds = _read_duration(lines)\n\n n_samples = int(duration_seconds * samplerate) + 1\n\n warnings.warn(\n \"Metadata was read from %s, duration and number of 
samples may not be accurate\" %\n CODEC_EXEC, RuntimeWarning\n )\n\n return AudioMetadata(\n filepath=filepath,\n format=os.path.splitext(filepath)[1][1:], # extension after the dot\n samplerate=samplerate,\n nchannels=channels,\n seconds=duration_seconds,\n nsamples=n_samples\n )", "def wavread(filename):\n\n if (os.path.isfile(filename) == False): # raise error if wrong input file\n print(\"Input file does not exist. Make sure you computed the analysis/synthesis\")\n\n fs, x = read(filename)\n\n if (len(x.shape) !=1): # raise error if more than one channel\n raise ValueError(\"Audio file should be mono\")\n\n if (fs !=44100): # raise error if more than one channel\n raise ValueError(\"Sampling rate of input sound should be 44100\")\n\n #scale down and convert audio into floating point number in range of -1 to 1\n x = np.float32(x)/norm_fact[x.dtype.name]\n return fs, x" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Return ``n`` independent symbolic matrices in dimension ``d``.
def symbolic_max_plus_matrices(d, n, ch=None, typ='sym'): d = int(d) n = int(n) if d <= 0: raise ValueError("d (= {}) must be postive".format(d)) nvar = n * d * d V = FreeModule(ZZ, nvar) B = ((b,) for b in V.basis()) matrices = [] if d == 1: typ = 'full' if typ == 'sym' or typ == 'quick': z = [0]*nvar for i in range(n): z[i*d*d] = 1 diag = (V(z),) z[i*d*d] = 0 z[i*d*d+1] = 1 nondiag = (V(z),) z[i*d*d+1] = 0 if typ == 'sym': matrices.append(SymbolicSymmetricMaxPlusMatrix(d, n, diag, nondiag, ch)) else: matrices.append(QuickSymbolicSymmetricMaxPlusMatrix(d, n, diag, nondiag, ch)) elif typ == 'full': for i in range(n): mat = [] for j in range(d): mat.append([next(B) for k in range(d)]) matrices.append(SymbolicMaxPlusMatrix(d, nvar, mat, ch)) else: raise ValueError return matrices
[ "def basis(d, symbolic=True):\n X = sym.symbols('X')\n if d == 0:\n phi_sym = [1]\n else:\n if symbolic:\n h = sym.Rational(1, d) # node spacing\n nodes = [2*i*h - 1 for i in range(d+1)]\n else:\n nodes = np.linspace(-1, 1, d+1)\n \n phi_sym = [Lagrange_polynomials(X, r, nodes) for r in range(d+1)]\n \n # Transform to Python functions\n phi_num = [sym.lambdify([X], phi_sym[r], modules='numpy') for r in range(d+1)]\n return phi_sym if symbolic else phi_num", "def define_matrix(n):\n\n return [[0 for x in range(n)] for x in range(n)]", "def matrix_chain_dynamic(dimensions, n):\n\n m = [[-1 for _ in range(n)] for _ in range(n)]\n s = [[0 for _ in range(n)] for _ in range(n)]\n\n # multiplying matrix by itself\n for i in range(1, n):\n m[i][i] = 0\n\n for length in range(2, n):\n for i in range(1, n - length + 1):\n j = i + length - 1\n for k in range(i, j):\n cost = m[i][k] + m[k + 1][j] + dimensions[i - 1] * dimensions[k] * dimensions[j]\n if cost > m[i][j]:\n m[i][j] = cost\n # index if splitting\n s[i][j] = k\n return m, s", "def find_linear_recurrence(self,n,d=None,gfvar=None):\n from sympy.simplify import simplify\n x = [simplify(expand(t)) for t in self[:n]]\n lx = len(x)\n if d is None:\n r = lx//2\n else:\n r = min(d,lx//2)\n coeffs = []\n for l in range(1, r+1):\n l2 = 2*l\n mlist = []\n for k in range(l):\n mlist.append(x[k:k+l])\n m = Matrix(mlist)\n if m.det() != 0:\n y = simplify(m.LUsolve(Matrix(x[l:l2])))\n if lx == l2:\n coeffs = flatten(y[::-1])\n break\n mlist = []\n for k in range(l,lx-l):\n mlist.append(x[k:k+l])\n m = Matrix(mlist)\n if m*y == Matrix(x[l2:]):\n coeffs = flatten(y[::-1])\n break\n if gfvar is None:\n return coeffs\n else:\n l = len(coeffs)\n if l == 0:\n return [], None\n else:\n n, d = x[l-1]*gfvar**(l-1), 1 - coeffs[l-1]*gfvar**l\n for i in range(l-1):\n n += x[i]*gfvar**i\n for j in range(l-i-1):\n n -= coeffs[i]*x[j]*gfvar**(i+j+1)\n d -= coeffs[i]*gfvar**(i+1)\n return coeffs, simplify(factor(n)/factor(d))", "def simplex_monomial_basis(dims, n):\n mode_ids, basis = simplex_monomial_basis_with_mode_ids(dims, n)\n return basis", "def generate_random_regular(n: int, d: int):\n return nx.to_numpy_matrix(\n nx.generators.random_graphs.random_regular_graph(n, d),\n )", "def _identity_dense(d, dtype=complex):\n return np.eye(d, dtype=dtype)", "def _matId(n):\n Id = []\n n2 = n.quo_rem(2)[0]\n for j in range(n2):\n MSn = MatrixSpace(_F, n2-j, n2-j)\n Id.append(MSn.identity_matrix())\n return Id", "def diag_indices(n, ndim=2):\n\n if not use_origin_backend():\n return dpnp_diag_indices(n, ndim)\n\n return call_origin(numpy.diag_indices, n, ndim)", "def random_planted_matrix(d, n, replace='True'):\n all_idx = np.asarray(list(zip(*np.tril_indices(d,-1))))\n chosen_idx_positions = np.random.choice(len(all_idx), size=n, replace=replace)\n subspaces = all_idx[chosen_idx_positions]\n angles = 2*np.pi * (np.random.rand(len(subspaces)) - 0.5)\n U = np.eye(d)\n for s, alpha in zip(subspaces, angles):\n U = right_givens(math.cos(alpha), math.sin(alpha), U, s[0], s[1])\n return U", "def all_basis_vectors(n: int) -> list:\n assert n >= 0, \"n must be > 0\"\n basis_1dim = ['0', '1']\n\n if n == 0:\n return []\n if n == 1:\n return basis_1dim\n else:\n current_basis = basis_1dim\n for i in range(1, n):\n # can be made more efficient (e.g. 
by current_basis, current basis until we reach sqrt(n))\n current_basis = outer_subspace_product(basis_1dim, current_basis)\n\n return current_basis", "def fourierMatrix(n):\n i,j = np.meshgrid(np.arange(n), np.arange(n))\n A = np.multiply.outer(i.flatten(), i.flatten())\n B = np.multiply.outer(j.flatten(), j.flatten())\n omega = np.exp(-2*np.pi*1J/n)\n return np.power(omega, A+B)", "def _compute_ind_mat(n, m, nb_coeff):\r\n\r\n ind_mat = np.zeros((nb_coeff, n))\r\n curr_idx = 0\r\n for indexes in itr.combinations_with_replacement(range(m), n):\r\n ind_mat[curr_idx] = np.array(indexes)\r\n curr_idx += 1\r\n\r\n return ind_mat", "def symv(s, n):\n return sym.symbols(\" \".join([\"%s%i,\" % (s, i) for i in range(n)]))", "def elimination_matrix(n: int) -> Array:\n L = np.zeros((n * (n + 1) // 2, n**2), np.int64)\n\n for i in range(n):\n for j in range(i + 1):\n u = np.zeros((n * (n + 1) // 2, 1), np.int64)\n u[j * n + i - j * (j + 1) // 2] = 1\n\n E = np.zeros((n, n), np.int64)\n E[i, j] = 1\n\n L += u @ vec(E)[None, :]\n\n return L", "def all_matrices(n):\n complete = int(n * (n-1) / 2)\n least = (n-1)*2 - 1 # the number of edges is at least 2(n-1)-1\n all_possible_list = [i for i in itertools.product([0, 1], repeat=complete)\n if sum(i) >= least]\n all_mats = [create_matrix(i, n) for i in all_possible_list]\n return all_mats", "def get_dct_matrix(N):\r\n dct_m = np.eye(N)\r\n for k in np.arange(N):\r\n for i in np.arange(N):\r\n w = np.sqrt(2 / N)\r\n if k == 0:\r\n w = np.sqrt(1 / N)\r\n dct_m[k, i] = w * np.cos(np.pi * (i + 1 / 2) * k / N)\r\n idct_m = np.linalg.inv(dct_m)\r\n return dct_m, idct_m", "def identity_matrix(n):\n data = [[1 if c == r else 0 for c in range(n)] for r in range(n)]\n return Matrix(data)", "def rand_matrix_product_state(n, bond_dim, phys_dim=2, dtype=complex,\n cyclic=False, trans_invar=False):\n from quimb.tensor import MPS_rand_state\n\n mps = MPS_rand_state(n, bond_dim, phys_dim=phys_dim, dtype=dtype,\n cyclic=cyclic, trans_invar=trans_invar)\n return mps.to_dense()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Return a string that describes the convex hull engine.
def convex_hull_engine(self): return self.convex_hull._name
[ "def _repr_(self):\n desc = ''\n if self.n_vertices()==0:\n desc += 'The empty polyhedron'\n else:\n desc += 'A ' + repr(self.dim()) + '-dimensional polyhedron'\n desc += ' in '\n if self.field()==QQ: desc += 'QQ'\n else: desc += 'RDF'\n desc += '^' + repr(self.ambient_dim()) \n\n if self.n_vertices()>0:\n desc += ' defined as the convex hull of ' \n desc += repr(self.n_vertices())\n if self.n_vertices()==1: desc += ' vertex'\n else: desc += ' vertices'\n \n if self.n_rays()>0:\n if self.n_lines()>0: desc += \", \"\n else: desc += \" and \"\n desc += repr(self.n_rays())\n if self.n_rays()==1: desc += ' ray'\n else: desc += ' rays'\n \n if self.n_lines()>0:\n if self.n_rays()>0: desc += \", \"\n else: desc += \" and \"\n desc += repr(self.n_lines())\n if self.n_lines()==1: desc +=' line'\n else: desc +=' lines'\n\n return desc + \".\\n\";", "def _repr_(self):\n return \"Projective hypersurface defined by %s in %s\"%(\n self.defining_polynomial(), self.ambient_space())", "def _repr_(self):\n return \"Affine hypersurface defined by %s in %s\"%(\n self.defining_polynomial(), self.ambient_space())", "def save_convex_hulls():\n\timport json\n\tfrom scipy.spatial import ConvexHull\n\tqhull = {}\n\tfor dom in ['lsl', 'lsp', 'mtl_lano']:\n\t\tx, y = GLSLio.get_scen(8, dom, ('X', 'Y'))\n\t\tpts = np.array([x,y]).T\n\t\tqh = ConvexHull( pts )\n\t\tqhull[dom] = pts[qh.vertices].tolist()\n\t\t#T = get_tesselation(dom)\n\t\t#qhull[dom] = np.array([T.x, T.y]).T[T.convex_hull].reshape(-1,2).tolist()\n\n\n\twith open('../analysis/qhulls.json', 'w') as f:\n\t\tjson.dump(qhull, f)", "def convex_hull(self):", "def convex_hull(self):\n return self._topology(capi.geos_convexhull(self.ptr))", "def __str__(self):\n return \"%s: \\t %s (%s), hull: %s\" % (self, self.name, str(self.ship_class), str(self.hull))", "def convex_hull(self):\n nodes = self._datacontroller.get_data('nodes')\n scale = self._datacontroller.get_data('scale')\n hull = tsputil.convex_hull_helper(nodes)\n if hull:\n result = construct_step(hull, 'Most Top Left Node', 'Clockwise', nodes, scale)\n self._datacontroller.commit_change('path', result)", "def hull(self):\n capacity = self._getAttribute(Attribute.hullCapacity)\n em = self._getAttribute(Attribute.hullEM)\n explosive = self._getAttribute(Attribute.hullExplosive)\n kinetic = self._getAttribute(Attribute.hullKinetic)\n thermal = self._getAttribute(Attribute.hullThermal)\n\n em = 1.0 - em\n explosive = 1.0 - explosive\n kinetic = 1.0 - kinetic\n thermal = 1.0 - thermal\n\n return {\n \"capacity\": capacity,\n \"resists\": {\n \"em\": em,\n \"explosive\": explosive,\n \"kinetic\": kinetic,\n \"thermal\": thermal\n }\n }", "def create_hull(self):\n if self.args.get(\"uniq\"):\n from matador.utils.cursor_utils import filter_unique_structures\n\n if self.args.get(\"uniq\") is True:\n sim_tol = 0.1\n else:\n sim_tol = self.args.get(\"uniq\")\n print_notify(\"Filtering for unique structures...\")\n self.cursor = filter_unique_structures(\n self.cursor,\n args=self.args,\n quiet=True,\n sim_tol=sim_tol,\n hull=True,\n energy_key=self.energy_key,\n )\n\n self.construct_phase_diagram()\n\n if not self.hull_cursor:\n print_warning(\"No structures on hull with chosen chemical potentials.\")\n else:\n print_notify(\n \"{} structures found within {} eV of the hull, including chemical potentials.\".format(\n len(self.hull_cursor), self.hull_cutoff\n )\n )\n\n display_results(\n self.hull_cursor, hull=True, energy_key=self.energy_key, **self.args\n )\n\n if self.compute_voltages:\n print(\n 
\"Constructing electrode system with active ion: {}\".format(\n self.species[0]\n )\n )\n self.voltage_curve()\n\n if self.compute_volumes:\n self.volume_curve()\n\n if not self.args.get(\"no_plot\"):\n if self.compute_voltages and self.voltage_data:\n self.plot_voltage_curve(show=False)\n if self.compute_volumes and self.volume_data:\n self.plot_volume_curve(show=False)\n\n self.plot_hull(\n **self.args[\"plot_kwargs\"], debug=self.args.get(\"debug\"), show=True\n )", "def __str__(self):\n vertices = []\n for idx in range(3):\n v = self.vertices[idx]\n if v is not None:\n vertices.append(str(v))\n else:\n orig_idx, dest_idx = (idx - 1) % 3, (idx + 1) % 3\n orig, dest = self.vertices[orig_idx], self.vertices[dest_idx]\n halfway = (orig.x + dest.x) * .5, (orig.y + dest.y) * .5\n# print(halfway)\n d = orig.distance(dest)\n dx = dest.x - orig.x\n# print(d)\n# print(dx)\n dx /= d\n dy = dest.y - orig.y\n# print(dy)\n dy /= d\n dx *= d\n dy *= d\n pt_halfway = halfway[0] + dy, halfway[1] - dx\n# print(\"outside\", orig_idx, dest_idx, pt_halfway)\n vertices.append(\"{0[0]} {0[1]}\".format(pt_halfway))\n vertices.append(vertices[0])\n return \"POLYGON(({0}))\".format(\", \".join(vertices))", "def __str__(self):\n if not self.exterior.coords:\n return \"POLYGON EMPTY\"\n rings = [ring.coords for ring in chain((self.exterior,), self.interiors)]\n rings = (\"(%s)\" % ', '.join(\"%r %r\" % (x, y) for x, y in ring) for ring in rings)\n return \"POLYGON (%s)\" % ', '.join(rings)", "def give_convex_hull(rand_points):\n return ConvexHull(rand_points)", "def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull \n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list. 
\n return lower[:-1] + upper[:-1]", "def convex_hull(self):\n\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._convex_hull", "def convex_hull(self):\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._convex_hull", "def _repr_(self):\n return \"Polygon defined by %s points\"%len(self)", "def display_and_label_hulls(self, hulls, src):\n \n labels = []\n\n for hull in hulls:\n\n angle = 0\n MA = 1\n ma = 1\n try:\n _,(MA,ma),angle = cv.fitEllipse(hull)\n except:\n pass\n cosAngle = np.abs(np.cos(angle*np.pi/180))\n\n # Only human-classify hulls if it is reasonably a vertically oriented rectangle\n # This is a hueristic to not have to waste time clasifying hulls clearly not poles\n if (cosAngle < 1.75) and (cosAngle > 0.85) and (MA/ma < 0.28):\n cpy = src.copy()\n hull_img = cv.polylines(cpy, [hull], True, (0,0,255), 3)\n cv.imshow(\"Hull\", hull_img)\n keycode = cv.waitKey(0)\n if keycode == 49:\n labels.append((hull, 0))\n print(\"Not a Pole\")\n elif keycode == 50:\n labels.append((hull, 1))\n print(\"A Pole!\")\n else:\n raise Exception(\"Unexpected Key Pressed\")\n else:\n labels.append((hull, 0))\n cv.destroyAllWindows()\n return labels", "def str_geometry_code(self, cmtext='') :\n hat = '\\n#----------\\n'+\\\n '##### from utils import object_from_python_code\\n'+\\\n '##### SENS2X1 = object_from_python_code(\"geoseg_cspad2x1.py\", \"SENS2X1\")\\n'+\\\n '#####\\n'+\\\n '#####\\n'+\\\n '#####\\n'+\\\n '##### global SENS2X1\\n'+\\\n '##### SENS2X1 = geoseg_cspad2x1()\\n'+\\\n '#----------\\n\\n'+\\\n 'from geometry_access import geometry_access\\n'+\\\n 'class %s(geometry_access) :' % self.__class__.__name__\n doc = self.__doc__ + cmtext\n hdr = self.list_of_geos[0].str_header()\n body = self.str_geometry_code_body()\n return '%s\\n \"\"\"%s\"\"\"\\n #%s\\n detparts = (\\n%s\\n )'%\\\n (hat, doc, hdr, body)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Return the list of equal coefficients between self and other.
def equal_coefficients(self, other): d = self._d return [(i,j) for i in range(d) for j in range(d) \ if self[i][j] == other[i][j]]
[ "def __xor__(self, other):\n\n sym_diff = [value for value in self if value not in other]\n sym_diff.extend([value for value in other if value not in self])\n\n return sym_diff", "def GetEqualConstrains(self):\n return _gmat_py.Spacecraft_GetEqualConstrains(self)", "def coefficients(self) :\n raise NotImplementedError", "def __coeffM__(self, p1, p2):\r\n return np.array([self.__coeff__(point1, point2) for point1, point2 in zip(p1, p2)])", "def coefficients(self):\n\t\tpolynomials=self.dom.getElementsByTagName('polynomial')\n\t\tassert len(polynomials)==2, \"Was expecitng two sets of coefficients\"\n\t\t# I'm assuming the low T comes first. \n\t\tassert (float(polynomials[0].getElementsByTagName('bound')[0].firstChild.data) < \n\t\t float(polynomials[1].getElementsByTagName('bound')[0].firstChild.data) ) # check ordering assumption\n\t\thighTcoeffNodes=polynomials[1].getElementsByTagName('coefficient')\n\t\tlowTcoeffNodes=polynomials[0].getElementsByTagName('coefficient')\n\t\thighTcoeffs=[]\n\t\tfor coef in highTcoeffNodes:\n\t\t\tname=coef.getAttribute('label')\n\t\t\tvalue=float(coef.firstChild.data)\n\t\t\thighTcoeffs.append(value)\n\t\tlowTcoeffs=[]\n\t\tfor coef in lowTcoeffNodes:\n\t\t\tname=coef.getAttribute('label')\n\t\t\tvalue=float(coef.firstChild.data)\n\t\t\tlowTcoeffs.append(value)\n\t\tallCoeffs=numpy.array([lowTcoeffs, highTcoeffs])\n\t\treturn allCoeffs", "def coefficients(self) :\n return self.__coefficients", "def get_difference(self, other: \"EpsilonNFA\")\\\n -> \"EpsilonNFA\":\n other = other.copy()\n for symbol in self._input_symbols:\n other.add_symbol(symbol)\n return self.get_intersection(other.get_complement())", "def __eq__(self, p):\n\n new_list1 = self.coefficients[:]\n new_list2 = p.coefficients[:]\n\n while new_list1[-1] == 0:\n if len(new_list1) == 1:\n break\n else:\n del new_list1[-1]\n\n while new_list2[-1] == 0:\n if len(new_list2) == 1:\n break\n else:\n del new_list2[-1]\n\n return new_list1 == new_list2", "def coeffs(self):\r\n return self.onefun.coeffs", "def getEquates(self) -> Iterator[ghidra.program.model.symbol.Equate]:\n ...", "def coefficients(self):\n\t\tcoefficientsets=self.dom.getElementsByTagName('coefficients')\n\t\tassert len(coefficientsets)==1, \"there should only be one set of coefficients\"\n\t\tcoeffs=coefficientsets[0]\n\t\thighTcoeffNodes=coeffs.getElementsByTagName('range_1000_to_Tmax')[0].getElementsByTagName('coef')\n\t\tlowTcoeffNodes=coeffs.getElementsByTagName('range_Tmin_to_1000')[0].getElementsByTagName('coef')\n\t\thighTcoeffs=[]\n\t\tfor coef in highTcoeffNodes:\n\t\t\tname=coef.getAttribute('name')\n\t\t\tvalue=float(coef.firstChild.data.replace(' ','').replace('D','E'))\n\t\t\thighTcoeffs.append(value)\n\t\tlowTcoeffs=[]\n\t\tfor coef in lowTcoeffNodes:\n\t\t\tname=coef.getAttribute('name')\n\t\t\tvalue=float(coef.firstChild.data.replace(' ','').replace('D','E'))\n\t\t\tlowTcoeffs.append(value)\n\t\tallCoeffs=numpy.array([lowTcoeffs, highTcoeffs])\n\t\treturn allCoeffs", "def compute_cosets(self):\n cosets = []\n\n if self.mode == \"general\":\n # In the general mode, for a system p^mn, we can really only take\n # out one copy of p in the most general case by using this method.\n # However, we can also apply the general mode to square dimensions\n # By choosing a 'small' basis and use the subfield elements\n # as coset representatives.\n first_coset = []\n\n # First generate all combinations of the coefficients with the\n # subfield except the first element (which is 1 in most cases).\n coefficient_lists = 
product(self.subfield, repeat = len(self.basis) - 1) \n for c in coefficient_lists:\n l_comb = [c[i-1] * self.basis[i] for i in range(1, len(c) + 1)]\n s = self.field[0]\n for el in l_comb:\n s += el\n first_coset.append(s)\n\n for i in range(len(self.subfield)):\n next_coset = [self.subfield[i] * self.basis[0]\n + first_coset[t] for t in range(len(first_coset))]\n cosets.append(next_coset)\n\n else:\n l_om = self.field[1] # Primitive element of original field\n b_om = self.subfield[1] # Primitive element of subfield\n\n for c_idx in range(len(self.subfield)):\n if c_idx == 0:\n cosets.append(self.subfield)\n elif c_idx == 1: # Special case because of how we defined pow\n next_coset = [x + l_om for x in self.subfield]\n cosets.append(next_coset)\n else:\n next_coset = [x + l_om * (b_om ** (c_idx - 1)) \\\n for x in self.subfield]\n cosets.append(next_coset)\n\n return cosets", "def find_coefficients(self):\n self.make_matrix()\n self.coeffs = np.linalg.solve(self.global_matrix,self.global_vector)\n self.coeffs = np.append(self.coeffs, self.D) #Initial condition", "def __eq__(self, other):\n if self.coeff != other.coeff:\n return False\n \n if self.GetKeggID() != other.GetKeggID():\n return False\n \n if self.phase.Name() != other.phase.Name():\n return False\n \n return True", "def coefficients(self):\n\t return self.coef_['x']", "def eq_prices(self) -> List[float]:\n return self._eq_prices", "def __add__(self, other):\n # Two cases:\n #\n # self: X X X X X X X\n # other: X X X\n #\n # or:\n #\n # self: X X X X X\n # other: X X X X X X X X\n\n # Start with the longest list and add in the other\n if len(self.coeff) > len(other.coeff):\n result_coeff = self.coeff[:] # copy!\n for i in range(len(other.coeff)):\n result_coeff[i] += other.coeff[i]\n else:\n result_coeff = other.coeff[:] # copy!\n for i in range(len(self.coeff)):\n result_coeff[i] += self.coeff[i]\n return Polynomial(result_coeff) #return a Polynomial (not a list of coeff.)", "def test_coefficient_orders(self):\n for i in range(2, 5):\n spec = {2*j: 0 for j in range(i)}\n bcs_ref = BoundaryConditions(spec, 2*i-2)\n bcs_main = BoundaryConditions(spec, 2*i)\n\n coeffs_ref = get_ext_coeffs(bcs_ref)[i-1]\n coeffs_main = get_ext_coeffs(bcs_main)[i-1]\n\n assert coeffs_ref == coeffs_main", "def get_coefficients(self):\n coefficients = self.regression_model.coef_.copy()\n # if bias included change first coeficient (bias) to the intercept\n if self.include_bias:\n coefficients[0] = self.regression_model.intercept_\n # get features names\n index_list = self.polynomial_feature.get_feature_names(self.independent_values)\n # return a series with the coefficients and their names\n return pd.Series(coefficients, index = index_list )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Evaluates this symbolic matrix at the integer point ``p``.
def eval(self, p): from max_plus.max_plus_int import minus_infinity, IntegerMaxPlusMatrix F = FreeModule(ZZ, self._nvars) p = F(p) mat = [] d = self.dim() for i in range(d): row = [] for j in range(d): pts = self[i,j] row.append(minus_infinity() if not pts else max(p.dot_product(v) for v in pts)) mat.append(row) return IntegerMaxPlusMatrix(self._d, self._d, mat)
[ "def eval_poly(self, p):\n A = self\n m, n = A.shape\n\n if m != n:\n raise DMNonSquareMatrixError(\"Matrix must be square\")\n\n if not p:\n return self.zeros(self.shape, self.domain)\n elif len(p) == 1:\n return p[0] * self.eye(self.shape, self.domain)\n\n # Evaluate p(A) using Horner's method:\n # XXX: Use Paterson-Stockmeyer method?\n I = A.eye(A.shape, A.domain)\n p_A = p[0] * I\n for pi in p[1:]:\n p_A = A*p_A + pi*I\n\n return p_A", "def linearMethod(self):\n P = self.getTransitionMatrix()\n size = P.shape[0]\n dP = P - eye(size)\n A = vstack([np.ones(size), dP.T[1:,:]]).tocsr()\n rhs = np.zeros((size,))\n rhs[0] = 1\n \n self.pi = spsolve(A, rhs)", "def Eval(self, T, ip):\n return _fancy_coefficient.PiecewiseLinearMaterialProperty_Eval(self, T, ip)", "def EvaluateLocation(self, p_int, , p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def _eval_coeff(self, pt):\n val = 1\n for a in self.args:\n val *= a.coeff(pt)\n return val", "def __pow__(self, p):\n return Matrix(*[Vector(*i) for i in\n np.linalg.matrix_power(self._columns, p).T])", "def resolves_matrix(self):\n self.P = np.linalg.solve(self.M, self.f)", "def eval_perm(p):\r\n t0 = sum([p[0], p[6], p[7]]) # initial sum\r\n for (i,j,k) in IDX:\r\n if sum([p[i], p[j], p[k]]) != t0:\r\n return False # p fails!\r\n\r\n return p # p is valid!\r", "def cost_function(H, n_qubits, p, params):\n ini_state=plus_state(n_qubits)\n for i in range(p):\n ini_state=qaoa_step(ini_state,H,n_qubits,params=[params[2*i],params[2*i+1]])\n return ((sparse.spmatrix.getH(ini_state)).dot(H.dot(ini_state))).real, ini_state", "def mat_prod(p):\n gram_sqrt = getattr(p, savefield).flatten(end_dim=start_dim - 1)\n\n letters = get_letters(mat.dim() + gram_sqrt.dim() - 1)\n sum_idx = letters[0]\n out1_idx = letters[1:-1]\n out2_idx = letters[-1]\n\n equation = f\"{sum_idx}{out1_idx},{sum_idx}{out2_idx}->{out1_idx}{out2_idx}\"\n\n return torch.einsum(equation, gram_sqrt, mat)", "def p(self, *, pr, ix, ix2=None, k=None):\n if pr == True:\n return self.pr(row=ix, row2=ix2, k=k)\n else:\n return self.pc(col=ix, col2=ix2, k=k)", "def Eval(self, M, T, ip):\n return _fancy_coefficient.ComplexInverseMatrixCoefficient_Eval(self, M, T, ip)", "def find_mult_inverse(self, p):\n for i in self.table:\n prod = self.mult(p,i)\n while prod and prod[-1] == 0:\n prod.pop()\n if prod == [1]:\n return self.simplified(i)\n break\n return None", "def __calc_p_vector(self):\n for j in range(len(self.__table[0])):\n s = 0\n for i in range(len(self.__table)):\n s -= self.__table[i][j]\n self.__table[-1][j] = s", "def __evaluate(self, point):\n assert len(point) == len(self.weight)-1\n result = self.weight[0]\n for i in range(0,len(point)):\n result += self.weight[i+1] * point[i]\n return result", "def _eval_coeff(self, pt):\n return sum(a.coeff(pt) for a in self.args)", "def __imul__(self, s):\n val = _hypre.HypreParMatrix___imul__(self, s)\n\n # val.thisown = 0\n return self\n\n\n return val", "def primitive_root(p):\n pd = FactoredInteger(p - 1).proper_divisors()\n for i in bigrange.range(2, p):\n for d in pd:\n if pow(i, (p - 1)//d, p) == 1:\n break\n else:\n return i", "def _precompute_xl(self, p: int) -> List[int]:\n res = [1]\n val = 1\n for _ in range(len(self._s)):\n val = (val * self.X) % p\n res.append(val)\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Perform a cyclic swap on the vertices. This is used in multiplication of symbolic upper matrices. Currently it is suboptimal but on the other hand, this cost much less than whatever convex hull computation.
def vertex_cyclic_swap(nvars, l, i):
    if i == 0 or not l:
        return l
    ll = []
    F = l[0].parent()
    for v in l:
        assert not v[-i:]
        ll.append(F(tuple(v[-i:]) + tuple(v[:-i])))
    for v in ll:
        v.set_immutable()
    return tuple(ll)
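The helper above rotates each exponent vector by i positions after checking that the trailing i coordinates vanish. A plain-Python analogue of just the rotation step, with ordinary tuples standing in for the immutable Sage vectors (an illustrative simplification, not the package's actual data type):

def cyclic_swap(vectors, i):
    # move the last i coordinates of each vector to the front
    if i == 0 or not vectors:
        return tuple(vectors)
    return tuple(v[-i:] + v[:-i] for v in vectors)

# Example: rotating (1, 2, 0) by one position gives (0, 1, 2)
print(cyclic_swap([(1, 2, 0), (3, 4, 0)], 1))  # ((0, 1, 2), (0, 3, 4))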
[ "def swap_vertices(self, i, j):\r\n store_vertex_i = self.vertices[i]\r\n store_vertex_j = self.vertices[j]\r\n self.vertices[j] = store_vertex_i\r\n self.vertices[i] = store_vertex_j\r\n for k in range(len(self.vertices)):\r\n for swap_list in [self.vertices[k].children, self.vertices[k].parents]:\r\n if i in swap_list:\r\n swap_list[swap_list.index(i)] = -1\r\n if j in swap_list:\r\n swap_list[swap_list.index(j)] = i\r\n if -1 in swap_list:\r\n swap_list[swap_list.index(-1)] = j", "def apply_cycle(points, cycle):\n j = cycle[0]\n for i in cycle:\n points[i], points[j] = points[j], points[i] # swap points i and j", "def SwapSides(self):\n for c in self.reactants:\n c.coeff = -c.coeff", "def edge_swap(self,path):\n\t\tfor i in range(len(path)-1):\n\t\t\tself.graph.reverse_edge(path[i+1],path[i])\n\t\t\tself.colour_vertex[path[i+1]]+=[self.graph.edge_label(path[i],path[i+1])]\n\t\t\tif(self.graph.edge_label(path[i],path[i+1]) in self.colour_vertex[path[i]]):\n\t\t\t\tself.colour_vertex[path[i]].remove(self.graph.edge_label(path[i],path[i+1]))\n\t\t\telse:\n\t\t\t\tself.graph.set_edge_label(path[i],path[i+1],self.colour_vertex[path[i]][0])\n\t\t\t\tdel self.colour_vertex[path[i]][0]\n\t\t\tself.graphlst.append(self.graph.copy())\n\t\t\tself.pebs[path[i]]=self.pebs[path[i]]-1\n\t\t\tself.pebs[path[i+1]]=self.pebs[path[i+1]]+1", "def swap(ix, jx, ax, ay):\n tempx, tempy = ax[ix], ay[ix]\n ax[ix] = ax[jx]\n ay[ix] = ay[jx]\n ax[jx] = tempx\n ay[jx] = tempy", "def test_swap(self, dim):\r\n graph = nx.complete_graph(dim)\r\n graph.remove_edge(0, dim - 1)\r\n s = list(range(dim - 1))\r\n assert set(clique.swap(s, graph)) == set(range(1, dim))", "def swap(self, *args):\n return _osgAnimation.VertexList_swap(self, *args)", "def deterministic_swaps(self, path):\n\t\tone_swap = self.get_one_swappable()\n\t\tfor index1 in one_swap:\n\t\t\tfor index2 in one_swap:\n\t\t\t\t# if they are in line and work for swaps\n\t\t\t\tif self.valid_swap(index1, index2):\n\t\t\t\t\tself.swap(index1, index2)\n\t\t\t\t\tpath.append((index1, index2))\n\t\treturn path", "def flip_vert():\r\n pass", "def contiguousFlip(currPath, i, j):\n if i != j and (i+1)%len(currPath)!=j and (j+1)%len(currPath)!=i:\n iP = i\n jP = j\n if (i < j):\n maxx=(j-i+1)//2\n else:\n maxx=(j+1+len(currPath)-i)//2\n for _ in range(maxx):\n temp = currPath[iP]\n currPath[iP] = currPath[jP]\n currPath[jP] = temp\n iP = (iP + 1)%len(currPath)\n jP = (jP - 1)%len(currPath)", "def swap(self, *args):\n return _osgAnimation.mapVertexInfluence_swap(self, *args)", "def test_swap_degree(self, dim):\r\n graph = nx.lollipop_graph(dim, 1)\r\n graph.remove_edge(0, dim - 1)\r\n graph.remove_edge(0, dim - 2)\r\n s = list(range(dim - 2))\r\n result = set(clique.swap(s, graph, node_select=\"degree\"))\r\n expected = set(range(1, dim - 2)) | {dim - 1}\r\n assert result == expected", "def flip_edge(self,j):\n c_left,c_right=self.edges['cells'][j,:]\n self.log.debug(\"Flipping edge %d, with cells %d, %d nodes %d,%d\"%(j,c_left,c_right,\n self.edges['nodes'][j,0],\n self.edges['nodes'][j,1]) )\n assert c_left>=0 # could be relaxed, at the cost of some complexity here\n assert c_right>=0\n # could work harder to preserve extra info:\n #c_left_data = self.cells[c_left].copy()\n #c_right_data = self.cells[c_right].copy()\n\n\n # This is dangerous! 
- deleting the cells means that topo_sort is no good,\n # and that breaks half-edge ops.\n # moving to happen a bit later -\n # self.delete_cell(c_left)\n # self.delete_cell(c_right)\n he_left=unstructured_grid.HalfEdge(self,j,0)\n he_right=unstructured_grid.HalfEdge(self,j,1)\n\n na,nc = self.edges['nodes'][j]\n nd=he_left.fwd().node_fwd()\n nb=he_right.fwd().node_fwd()\n\n # DBG\n if 0:\n for n,label in zip( [na,nb,nc,nd],\n \"abcd\" ):\n plt.text( self.nodes['x'][n,0],\n self.nodes['x'][n,1],\n label)\n # keep the time where the cells are deleted to a minimum\n self.delete_cell(c_left)\n self.delete_cell(c_right)\n \n self.modify_edge(j,nodes=[nb,nd])\n new_left =self.add_cell(nodes=[na,nb,nd])\n new_right=self.add_cell(nodes=[nc,nd,nb])\n return new_left,new_right", "def _cswap(i, j, S):\n N = _rswap(i, j, S.transpose()).transpose()\n return N", "def swap(C):\n \n return [c.swap() for c in C]", "def Swap():\n\n return Operator((1.0 + 0.0j) * np.array([[[[ 1.0, 0.0],\n [ 0.0, 0.0]],\n [[ 0.0, 0.0],\n [ 1.0, 0.0]]],\n [[[ 0.0, 1.0],\n [ 0.0, 0.0]],\n [[ 0.0, 0.0],\n [ 0.0, 1.0]]]]))", "def reduce_sequential(edges, start, end):\n dd = get_degrees_dictionary(edges) # O(len(edges))\n tvs = get_transition_vertexes(dd, start, end) # O(len(dd))\n logger.debug(\"dd: {}\".format(dd))\n logger.debug(\"tvs: {}\".format(tvs))\n\n for v in tvs: # for each vertex in transitional vertexes\n # edges\n ei1 = tvs[v][0]\n ei2 = tvs[v][1]\n\n e1 = edges[ei1] # e1 is going to save resulted edge\n e2 = edges[ei2] # e2 is going to become cycled and then removed\n\n # vertexes\n # v - vertex to be removed\n # v1 - vertex, connected to v by e1 edge (unchanged)\n # v2 - vertex, connected to v by e2 edge\n # will be moved to e1 substituting v there\n # edges list in transitional vertex dictionary will be updated\n\n logger.debug(\"Substituted {}: {}:{}, {}:{} -> \".format(\n v, ei1, e1, ei2, e2))\n\n # v is going to be substituted in e1 by value of \"not v\" vertex in e2\n substitute_index_in_ei2 = 1 - e2.index(v) # if vi=0 s=1; v=1 s=0\n\n # replace v in ei1 by substitute from ei2\n v2 = e2[substitute_index_in_ei2]\n\n e1[e1.index(v)] = v2\n e2[substitute_index_in_ei2] = v\n\n # here we will have 2 edges\n # edges[ei1] -> ['v1', 'v2', ?] #\n # edges[ei2] -> ['v', 'v', 5] # delay not changed\n\n # updated edges for substituted vertex in tvs dict to point to\n # ei1 edge instead of ei2\n # e.g. 
'v2' was connected by ei2, now is connected by ei1\n\n if v2 != start and v2 != end:\n # v2 is not present in tvi and shouldn't be updated\n v2ei = tvs[v2] # list of edges indexes for v2\n vei = tvs[v] # list of edges indexes for v\n v2ei[v2ei.index(ei2)] = ei1\n\n logger.debug(\"tvs[{}][2] = t[1] : {} = {}\".format(\n v2,\n tvs[v2][2],\n t[1]))\n\n # update weight\n new_weight = e1[2] + e2[2]\n e1[2] = new_weight\n\n # normalize result edge\n redirect_edge_alpabetically(e1)\n\n # here we will have 2 edges\n # edges[ei1] -> ['v1', 'v2', 8] #\n # edges[ei2] -> ['v', 'v', 5] # delay not changed\n\n # only thing left is to remove the ei2 edge, this will be done later\n # not to break iteration over edges\n\n logger.debug(\"{}:{}, {}:{}\".format(ei1, e1, ei2, e2))\n\n # get indexes of edges to be removed\n indexes = [i for i in reversed(sorted([tvs[v][1] for v in tvs]))]\n logger.debug(\"Edges index removed after sequential update: {}\".format(\n indexes))\n\n for i in indexes:\n edges.pop(i)\n\n return len(tvs) # amount of edges removed", "def _swap(self, i, j, k):\n\t\tif self.verbose:\n\t\t\tprint(i, k)\n\t\t\tprint(i, j)\n\t\t\tprint(j, k)\n\t\tself.arrangement[i],self.arrangement[k] = self.arrangement[k],self.arrangement[i]\n\t\tself.arrangement[i],self.arrangement[j] = self.arrangement[j],self.arrangement[i]\n\t\tself.arrangement[j],self.arrangement[k] = self.arrangement[k],self.arrangement[j]", "def cw_rotate(self):\n self.grid = [list(x) for x in zip(*self.grid[::-1])]\n self.find_edges()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a dashboard of plots for time steps, potential, kinetic, and total energy
def create_dashboard(h, t, k, p):
    plt.style.use('seaborn')
    # Initialize the dashboard
    fig = plt.figure(figsize=(20, 8))
    ax1 = fig.add_subplot(2, 2, 1)
    ax2 = fig.add_subplot(2, 2, 2)
    ax3 = fig.add_subplot(2, 2, 3)
    ax4 = fig.add_subplot(2, 2, 4)
    # Create individual graphs
    dt_line, = ax1.plot(h, lw=3, c='k')
    total_line, = ax2.plot(t, lw=3, c='#d62728')
    k_line, = ax3.plot(k, lw=3, c='#1f77b4')
    p_line = ax4.plot(p, lw=3, c='#2ca02c')
    ax1.set_title(r'Variation in $\Delta t$')
    ax1.set_ylabel(r'$\Delta t$')
    ax2.set_title(r'Total Energy over Time')
    ax2.set_ylabel('Total Energy')
    ax3.set_title('Kinetic Energy over Time')
    ax3.set_ylabel('Kinetic Energy')
    ax3.set_xlabel('Time Steps')
    ax4.set_title('Potential Energy over Time')
    ax4.set_ylabel('Potential Energy')
    ax4.set_xlabel('Time Steps')
    plt.show()
    """im = ax[0, 0].imshow(model.lattice, cmap='Greys', vmin=-1, vmax=1)
    energy_line, = ax[0, 1].plot([], [], lw=3)
    mag_line, = ax[1, 0].plot([], [], lw=3)
    heat_line, = ax[1, 1].plot([], [], lw=3)
    susceptibility_line, = ax[2, 0].plot([], [], lw=3)
    acceptance_line, = ax[2, 1].plot([], [], lw=3)"""
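A minimal usage sketch for the dashboard above, driving it with synthetic arrays; the numbers are invented purely to show the expected call shape (four equal-length sequences: time-step sizes, total, kinetic, and potential energy):

import numpy as np

steps = np.arange(1000)
h = 0.01 + 0.001 * np.sin(steps / 50.0)   # synthetic time-step sizes
k = 50.0 + 5.0 * np.cos(steps / 100.0)    # synthetic kinetic energy
p = -50.0 + 5.0 * np.sin(steps / 100.0)   # synthetic potential energy
t = k + p                                 # total energy

create_dashboard(h, t, k, p)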
[ "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def return_figures():\n # read the energy data\n energy_df = pd.read_csv(\"data/all_energy_statistics.csv\")\n \n color1 = 'rgb(0,153,0)'\n color2 = 'rgb(02,102,255)'\n color3 = 'rgb(255,204,153)'\n color4 = 'rgb(153,0,153)'\n \n # CHART 1 ================================================\n # select data about Aviation gasoline - Final Consumption\n # as a line chart\n selected_energy_df = energy_df[energy_df[\"commodity_transaction\"].isin([\"Aviation gasoline - Final consumption\"])]\n fr_dt = selected_energy_df[selected_energy_df[\"country_or_area\"].isin([\"France\"])]\n uk_dt = selected_energy_df[selected_energy_df[\"country_or_area\"].isin([\"United Kingdom\"])]\n us_dt = selected_energy_df[selected_energy_df[\"country_or_area\"].isin([\"United States\"])]\n pt_dt = selected_energy_df[selected_energy_df[\"country_or_area\"].isin([\"Portugal\"])]\n mx_dt = selected_energy_df[selected_energy_df[\"country_or_area\"].isin([\"Mexico\"])]\n x1 = fr_dt[\"year\"].values\n y1 = fr_dt[\"quantity\"].values\n x2 = uk_dt[\"year\"].values\n y2 = uk_dt[\"quantity\"].values\n x3 = pt_dt[\"year\"].values\n y3 = pt_dt[\"quantity\"].values\n x4 = mx_dt[\"year\"].values\n y4 = mx_dt[\"quantity\"].values\n\n graph_one = [] \n graph_one.append(\n go.Scatter(\n x = x1,\n y = y1,\n name = 'France',\n mode = 'lines',\n line=dict(color=color1)\n )\n )\n graph_one.append(\n go.Scatter(\n x = x2,\n y = y2,\n name = 'UK',\n mode = 'lines',\n line=dict(color=color2)\n )\n )\n graph_one.append(\n go.Scatter(\n x = x3,\n y = y3,\n name = 'Portugal',\n mode = 'lines',\n line=dict(color=color3)\n )\n )\n graph_one.append(\n go.Scatter(\n x = x4,\n y = y4,\n name = 'Mexico',\n mode = 'lines',\n line=dict(color=color4)\n )\n )\n\n layout_one = dict(title = 'Aviation Gasoline Consumption',\n xaxis = dict(title = 'Years'),\n yaxis = dict(title = 'Metric Tons (thousand)'),\n )\n\n # CHART 2 ================================================\n # select data about Aviation gasoline - Exports\n # as a line chart\n aviation_gas_exp = energy_df[energy_df[\"commodity_transaction\"].isin([\"Aviation gasoline - Exports\"])]\n fr_av_exp = aviation_gas_exp[aviation_gas_exp[\"country_or_area\"].isin([\"France\"])]\n uk_av_exp = aviation_gas_exp[aviation_gas_exp[\"country_or_area\"].isin([\"United Kingdom\"])]\n us_av_exp = aviation_gas_exp[aviation_gas_exp[\"country_or_area\"].isin([\"United 
States\"])]\n pt_av_exp = aviation_gas_exp[aviation_gas_exp[\"country_or_area\"].isin([\"Portugal\"])]\n mx_av_exp = aviation_gas_exp[aviation_gas_exp[\"country_or_area\"].isin([\"Mexico\"])]\n\n x1 = fr_av_exp[\"year\"].values\n y1 = fr_av_exp[\"quantity\"].values\n x2 = uk_av_exp[\"year\"].values\n y2 = uk_av_exp[\"quantity\"].values\n x3 = pt_av_exp[\"year\"].values\n y3 = pt_av_exp[\"quantity\"].values\n x4 = mx_av_exp[\"year\"].values\n y4 = mx_av_exp[\"quantity\"].values\n\n graph_two = [] \n graph_two.append(\n go.Scatter(\n x = x1,\n y = y1,\n name = 'France',\n line=dict(color=color1),\n mode = 'lines'\n )\n )\n graph_two.append(\n go.Scatter(\n x = x2,\n y = y2,\n name = 'UK',\n mode = 'lines',\n line=dict(color=color2)\n )\n )\n graph_two.append(\n go.Scatter(\n x = x3,\n y = y3,\n name = 'Portugal',\n mode = 'lines',\n line=dict(color=color3)\n )\n )\n graph_two.append(\n go.Scatter(\n x = x4,\n y = y4,\n name = 'Mexico',\n mode = 'lines',\n line=dict(color=color4)\n )\n )\n\n layout_two = dict(title = 'Aviation gasoline - Exports',\n xaxis = dict(title = 'Years'),\n yaxis = dict(title = 'Metric Tons (thousand)'),\n )\n\n# CHART 3 ================================================\n # select data about Crude Petroleum - Refinery Capacity\n # as a line chart\n petroleum_ref_cap = energy_df[energy_df[\"commodity_transaction\"].isin([\"Crude petroleum - refinery capacity\"])]\n fr_ref_cap = petroleum_ref_cap[petroleum_ref_cap[\"country_or_area\"].isin([\"France\"])]\n uk_ref_cap = petroleum_ref_cap[petroleum_ref_cap[\"country_or_area\"].isin([\"United Kingdom\"])]\n us_ref_cap = petroleum_ref_cap[petroleum_ref_cap[\"country_or_area\"].isin([\"United States\"])]\n pt_ref_cap = petroleum_ref_cap[petroleum_ref_cap[\"country_or_area\"].isin([\"Portugal\"])]\n mx_ref_cap = petroleum_ref_cap[petroleum_ref_cap[\"country_or_area\"].isin([\"Mexico\"])]\n\n x1 = fr_ref_cap[\"year\"].values\n y1 = fr_ref_cap[\"quantity\"].values\n x2 = uk_ref_cap[\"year\"].values\n y2 = uk_ref_cap[\"quantity\"].values\n x3 = pt_ref_cap[\"year\"].values\n y3 = pt_ref_cap[\"quantity\"].values\n x4 = mx_ref_cap[\"year\"].values\n y4 = mx_ref_cap[\"quantity\"].values\n\n graph_three = [] \n graph_three.append(\n go.Scatter(\n x = x1,\n y = y1,\n name = 'France',\n mode = 'lines',\n line=dict(color=color1)\n )\n )\n graph_three.append(\n go.Scatter(\n x = x2,\n y = y2,\n name = 'UK',\n mode = 'lines',\n line=dict(color=color2)\n )\n )\n graph_three.append(\n go.Scatter(\n x = x3,\n y = y3,\n name = 'Portugal',\n mode = 'lines',\n line=dict(color=color3)\n )\n )\n graph_three.append(\n go.Scatter(\n x = x4,\n y = y4,\n name = 'Mexico',\n mode = 'lines',\n line=dict(color=color4)\n )\n )\n\n layout_three = dict(title = 'Crude Petroleum - Refinery Capacity',\n xaxis = dict(title = 'Years'),\n yaxis = dict(title = 'Metric Tons (thousand)'),\n )\n\n# CHART 4 ================================================\n # select data about Conventional crude oil - total energy supply\n # as a line chart\n petroleum_supply = energy_df[energy_df[\"commodity_transaction\"].isin([\"Conventional crude oil - total energy supply\"])]\n fr_petr_sup = petroleum_supply[petroleum_supply[\"country_or_area\"].isin([\"France\"])]\n uk_petr_sup = petroleum_supply[petroleum_supply[\"country_or_area\"].isin([\"United Kingdom\"])]\n us_petr_sup = petroleum_supply[petroleum_supply[\"country_or_area\"].isin([\"United States\"])]\n pt_petr_sup = petroleum_supply[petroleum_supply[\"country_or_area\"].isin([\"Portugal\"])]\n mx_petr_sup = 
petroleum_supply[petroleum_supply[\"country_or_area\"].isin([\"Mexico\"])]\n\n x1 = fr_petr_sup[\"year\"].values\n y1 = fr_petr_sup[\"quantity\"].values\n x2 = uk_petr_sup[\"year\"].values\n y2 = uk_petr_sup[\"quantity\"].values\n x3 = pt_petr_sup[\"year\"].values\n y3 = pt_petr_sup[\"quantity\"].values\n x4 = mx_petr_sup[\"year\"].values\n y4 = mx_petr_sup[\"quantity\"].values\n\n graph_four = [] \n graph_four.append(\n go.Scatter(\n x = x1,\n y = y1,\n name = 'France',\n mode = 'lines',\n line=dict(color=color1)\n )\n )\n graph_four.append(\n go.Scatter(\n x = x2,\n y = y2,\n name = 'UK',\n mode = 'lines',\n line=dict(color=color2)\n )\n )\n graph_four.append(\n go.Scatter(\n x = x3,\n y = y3,\n name = 'Portugal',\n mode = 'lines',\n line=dict(color=color3)\n )\n )\n graph_four.append(\n go.Scatter(\n x = x4,\n y = y4,\n name = 'Mexico',\n mode = 'lines',\n line=dict(color=color4)\n )\n )\n\n layout_four = dict(title = 'Conventional crude oil - total energy supply',\n xaxis = dict(title = 'Years'),\n yaxis = dict(title = 'Metric Tons (thousand)'),\n )\n\n # append all charts to the figures list\n figures = []\n figures.append(dict(data=graph_one, layout=layout_one))\n figures.append(dict(data=graph_two, layout=layout_two))\n figures.append(dict(data=graph_three, layout=layout_three))\n figures.append(dict(data=graph_four, layout=layout_four))\n\n return figures", "def energies():\n # Hardcoded initial values\n numsteps = 10000\n time_max = 1\n # Running the calculation in the solver class using the velocity verlet method\n # for better accuracy.\n verlet = solver(input_matrix, 'verlet', time_max, numsteps)\n output_matrix, KE, PE, AM = verlet.main()\n # Creating a simple time axis for plotting\n x = np.linspace(0, 1, numsteps+1)\n\n # Plotting kinetic energy over time\n plt.figure(1, figsize=(10, 10))\n plt.plot(x, KE)\n plt.suptitle('Total kinetic energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE'])\n\n # Plotting potential energy over time\n plt.figure(2, figsize=(10, 10))\n plt.plot(x, PE)\n plt.suptitle('Total potential energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE'])\n\n # Plotting total energy against time\n plt.figure(3, figsize=(10, 10))\n plt.plot(x, PE+KE)\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE+PE'])\n\n # Plotting angular momentum against time. 
print the amplitude to terminal\n amplitude = max(AM)-min(AM)\n print('Amplitude of angular momentum during 1 year: %g[AU²/yr²]' %(amplitude))\n plt.figure(4, figsize=(10, 10))\n plt.plot(x, AM)\n plt.suptitle('Total angular momentum in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²/yr²]', fontsize=16)\n plt.legend(['AM'])\n\n # Plotting the kinetic, potential and total energy against time to see\n # how great the variations are\n plt.figure(5, figsize=(10, 10))\n plt.plot(x, PE, x, KE, x, KE+PE)\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE', 'KE', 'KE+PE'])\n plt.show()", "def main():\n save = False\n show = True\n\n #hd_parameter_plots = HDparameterPlots(save=save)\n #hd_parameter_plots.flow_parameter_distribution_for_non_lake_cells_for_current_HD_model()\n #hd_parameter_plots.flow_parameter_distribution_current_HD_model_for_current_HD_model_reprocessed_without_lakes_and_wetlands()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs_no_tuning()\n #ice5g_comparison_plots = Ice5GComparisonPlots(save=save)\n #ice5g_comparison_plots.plotLine()\n #ice5g_comparison_plots.plotFilled()\n #ice5g_comparison_plots.plotCombined()\n #ice5g_comparison_plots.plotCombinedIncludingOceanFloors()\n #flowmapplot = FlowMapPlots(save)\n #flowmapplot.FourFlowMapSectionsFromDeglaciation()\n #flowmapplot.Etopo1FlowMap()\n #flowmapplot.ICE5G_data_all_points_0k()\n #flowmapplot.ICE5G_data_all_points_0k_no_sink_filling()\n #flowmapplot.ICE5G_data_all_points_0k_alg4_two_color()\n #flowmapplot.ICE5G_data_all_points_21k_alg4_two_color()\n #flowmapplot.Etopo1FlowMap_two_color()\n #flowmapplot.Etopo1FlowMap_two_color_directly_upscaled_fields()\n #flowmapplot.Corrected_HD_Rdirs_FlowMap_two_color()\n #flowmapplot.ICE5G_data_ALG4_true_sinks_21k_And_ICE5G_data_ALG4_true_sinks_0k_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_sinkless_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_no_true_sinks_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_HD_as_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplot.Ten_Minute_Data_from_Virna_data_ALG4_corr_orog_downscaled_lsmask_no_sinks_21k_vs_0k_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n flowmapplotwithcatchment = 
FlowMapPlotsWithCatchments(save)\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_virna_data_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.compare_lgm_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.upscaled_rdirs_with_and_without_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #upscaled_rdirs_with_and_without_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_glcc_olson_lsmask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE5G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE6G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_ICE5G_and_ICE6G_with_catchments_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_original_ts()\n flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_new_ts_10min()\n outflowplots = OutflowPlots(save)\n #outflowplots.Compare_Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_sinkless_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_true_sinks_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_sinkless_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_true_sinks_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_downscaled_ls_mask_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_plus_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k()\n outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks_individual_rivers()\n 
#outflowplots.Compare_ICE5G_with_and_without_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #hd_output_plots = HDOutputPlots()\n #hd_output_plots.check_water_balance_of_1978_for_constant_forcing_of_0_01()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data_adding_back_to_discharge()\n #coupledrunoutputplots = CoupledRunOutputPlots(save=save)\n #coupledrunoutputplots.ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.ocean_grid_extended_present_day_rdirs_vs_ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_echam()\n #coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_mpiom_pem()\n #lake_plots = LakePlots()\n #lake_plots.plotLakeDepths()\n #lake_plots.LakeAndRiverMap()\n #lake_plots.LakeAndRiverMaps()\n if show:\n plt.show()", "def show_dashboard():\n script, div = plots.make_plot()\n script_tab, div_tab = plots.make_tabs()\n script_trend, div_trend = plots.make_trend()\n\n return render_template('layout.html',\n script=script,\n div=div,\n script_trend=script_trend,\n div_trend=div_trend,\n script_tab=script_tab,\n div_tab=div_tab)", "def calcRunTime(self):\n \n time = tkSimpleDialog.askfloat(title = \"Time\", prompt = \"Approx Time(min) per Pose: \")\n try:\n import matplotlib.pyplot as plt\n import numpy as np\n #totalNodes=tkSimpleDialog.askinteger(title = \"Nodes\", prompt = \"Total Nodes on cluster\")\n totalNodes = 100\n x_axes = []; y_axes = []\n m = ((time/60)*self.calcTotal())\n for x in range(1, totalNodes+1):\n y = (((time/60)*self.calcTotal())/x)/24\n x_axes.append(x),y_axes.append(y)\n plt.xlabel('Nodes')\n plt.ylabel('Days')\n plt.plot(x_axes, y_axes, 'o')\n plt.grid(True)\n plt.show()\n return\n \n except ImportError:\n self.textHelp.delete(1.0, END)\n nodes = tkSimpleDialog.askinteger(title = \"Nodes\", prompt=\"Approx Number of Nodes: \")\n \n time = time/60\n TotalCpuTime = self.calcTotal() * time\n TotalCpuTimeDays = TotalCpuTime/24\n self.textHelp.insert(1.0, \"Total CPU time is: \"+repr(TotalCpuTime)+\" Hours or \"+repr(TotalCpuTimeDays)+\" Days\")\n TotalTime = TotalCpuTime/nodes\n TotalTimeDays = TotalTime/24\n self.textHelp.insert(1.0, \"Total Time is: \"+repr(TotalTime)+ \" Hours or \"+ repr(TotalTimeDays)+ \" Days\"+\"\\n\")", "def view_grid(self):\n avgs2 = self.smooth(2)\n avgs24 = self.smooth(23)\n row_labels = list('1234')\n column_labels = list('1234')\n arr = []\n mins = []\n maxs = []\n j = 0\n for i in range(len(avgs2[0])):\n j = 0\n darr = [[],[],[],[]]\n for avg in avgs2:\n if j > 3:\n j = 0\n darr[j].append(avg[i][1])\n j+=1\n\n time = avgs2[0][i][0]\n point = (time, np.array(darr))\n arr.append(point)\n\n for time, data in arr:\n mini = data.min()\n maxi = data.max()\n\n mins.append(mini)\n maxs.append(maxi)\n\n minis = np.array(mins)\n maxis = np.array(maxs)\n\n mini = int(minis.min())\n maxi = int(maxis.max())\n fig = plt.figure(figsize=(11, 8))\n fig.suptitle(self._sensor_name, fontsize='x-large')\n sub_plots = []\n grid = (4, 8)\n for i in range(2):\n for j in range(6):\n sub_plot = plt.subplot2grid(grid, (i, j), rowspan=1, colspan=1)\n sub_plots.append(sub_plot)\n \n hmap = plt.subplot2grid(grid, (0, 6), rowspan=2, colspan=2)\n tser = plt.subplot2grid(grid, (2, 0), rowspan=2, colspan=8)\n \n sub_plots.append(hmap)\n sub_plots.append(tser)\n \n i = 0\n for time, data in arr:\n sub_plot = 
sub_plots[i]\n title = str(time.hour) + \":00\"\n sub_plot.pcolor(data, cmap=cmaps.Reds, vmin=mini, vmax=maxi)\n\n sub_plot.set_xticks(np.arange(data.shape[0])+0.5, minor=False)\n sub_plot.set_yticks(np.arange(data.shape[1])+0.5, minor=False)\n\n sub_plot.invert_yaxis()\n sub_plot.xaxis.tick_top()\n\n sub_plot.set_xticklabels(row_labels, minor=False)\n sub_plot.set_yticklabels(column_labels, minor=False)\n sub_plot.set_title(title, y=1.14)\n i+=1\n \n arr1 = []\n mins1 = []\n maxs1 = []\n j = 0\n for i in range(len(avgs24[0])):\n j = 0\n darr = [[],[],[],[]]\n for avg in avgs24:\n if j > 3:\n j = 0\n darr[j].append(avg[i][1])\n j+=1\n\n time = avgs24[0][i][0]\n point = (time, np.array(darr))\n arr1.append(point)\n\n for time, data in arr1:\n mini = data.min()\n maxi = data.max()\n\n mins1.append(mini)\n maxs1.append(maxi)\n\n minis1 = np.array(mins1)\n maxis1 = np.array(maxs1)\n\n mini1 = int(minis1.min())\n maxi1 = int(maxis1.max())\n \n hmap = sub_plots[12]\n time = arr[0][0]\n data = arr[0][1]\n title = str(time.hour) + \":00\"\n hmap.pcolor(data, cmap=cmaps.Reds, vmin=mini1, vmax=maxi1)\n\n hmap.set_xticks(np.arange(data.shape[0])+0.5, minor=False)\n hmap.set_yticks(np.arange(data.shape[1])+0.5, minor=False)\n\n hmap.invert_yaxis()\n hmap.xaxis.tick_top()\n\n hmap.set_xticklabels(row_labels, minor=False)\n hmap.set_yticklabels(column_labels, minor=False)\n hmap.set_title(title, y=1.06)\n \n tser = sub_plots[13]\n self.plot_timeseries(tser)\n plt.subplots_adjust(wspace=0.2, hspace=0.4)\n fig.savefig('IR_Grid.png')\n plt.show()", "def showPlot2():\n interested_in = list(range(1,10))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(item, 1.0, 25, 25, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on number of robots')\n xlabel('number of robots (tiles)')\n ylabel('mean time (clocks)')\n show()", "def performance_plot(df_inv_in, df_wor_in, avg_month):\n\n df_inv = df_inv_in.set_index(c.cols.DATE)\n df_wor = df_wor_in.set_index(c.cols.DATE)\n\n df_inv = u.time_average(df_inv, avg_month)\n df_wor = u.time_average(df_wor, avg_month)\n\n # If indexs have different lenghts, normalize them\n df_inv, df_wor = u.normalize_index(df_inv, df_wor)\n\n data = [\n go.Scatter(\n x=df_wor.index,\n y=df_wor[c.names.TOTAL] / df_inv[c.names.TOTAL],\n marker={\"color\": \"black\"},\n name=c.names.TOTAL,\n )\n ]\n\n # If config file use it\n if c.yml.INVEST in YML:\n for name, config in YML[c.yml.INVEST].items():\n\n # Check that accounts are in the config\n mlist = [x for x in config[c.yml.ACCOUNTS] if x in df_wor.columns]\n color = u.get_colors((config[c.yml.COLOR_NAME], config[c.yml.COLOR_INDEX]))\n\n data.append(\n go.Scatter(\n x=df_wor.index,\n y=df_wor[mlist].sum(axis=1) / df_inv[mlist].sum(axis=1),\n marker={\"color\": color},\n name=name,\n )\n )\n\n # If not, simply plot the present columns\n else:\n for col in df_wor.columns:\n if col != c.names.TOTAL:\n data.append(go.Scatter(x=df_wor.index, y=df_wor[col] / df_inv[col], name=col))\n\n layout = go.Layout(title=\"Total worth evolution\", barmode=\"stack\")\n return go.Figure(data=data, layout=layout)", "def anharm_plot2():\n set_tag(qdt, \"EjdivEc\", log=False)\n set_tag(qdt, \"Ej\", log=False)\n pl=Plotter(fig_width=9.0, fig_height=6.0)\n #qdt.epsinf=qdt.epsinf/3.72\n #qdt.Np=10\n #qdt.Ec=qdt.fq*0.1*h\n print qdt.max_coupling, 
qdt.coupling_approx\n #flux_o_flux0=qdt.call_func(\"flux_over_flux0\", voltage=yoko)\n #Ej=qdt.call_func(\"Ej\", flux_over_flux0=flux_o_flux0)\n #EjdivEc=Ej/qdt.Ec\n anharm=qdt.call_func(\"anharm\", EjdivEc=EjdivEc)\n anharmp=qdt.call_func(\"lamb_shifted_anharm\", EjdivEc=EjdivEc)\n fq=qdt.call_func(\"fq\", Ej=EjdivEc*qdt.Ec)\n ls_fq=qdt.call_func(\"lamb_shifted_fq\", EjdivEc=EjdivEc)\n ls_fq2=qdt.call_func(\"lamb_shifted_fq2\", EjdivEc=EjdivEc)\n #pl, pf=line(fq, anharm/h, linewidth=0.5, color=\"black\", label=r\"$\\Delta_{2,1}-\\Delta_{1,0}$\")\n\n pl, pf=line(EjdivEc, anharmp/h/1e9, linewidth=1.0, color=\"black\", label=r\"$\\Delta_{2,1}-\\Delta_{1,0}$\", plotter=pl)\n line(EjdivEc, anharm/h/1e9, linewidth=1.0, color=\"purple\", label=r\"anharm\", plotter=pl)\n\n line(EjdivEc, (ls_fq-fq)/1e9, plotter=pl, color=\"blue\", linewidth=1.0, label=r\"$\\Delta_{1,0}$\")\n E0, E1, E2=qdt.call_func(\"transmon_energy_levels\", EjdivEc=EjdivEc, n_energy=3)\n fq2=(E2-E1)/h\n line(EjdivEc, (ls_fq2-fq2)/1e9, plotter=pl, color=\"red\", linewidth=1.0, label=r\"$\\Delta_{2,1}$\")\n pl.set_ylim(-2, 1.5)\n #pl.set_xlim(0.0, 70)\n pl.xlabel=r\"$E_j/E_c$\"\n pl.ylabel=r\"$\\Delta (GHz)$\"\n #pl.legend(loc='lower right')\n #fq=qdt.call_func(\"lamb_shifted_fq\", EjdivEc=EjdivEc)\n #line(EjdivEc, fq, plotter=pl, color=\"green\", linewidth=0.5)\n\n #line(EjdivEc, E1p, plotter=pl, color=\"green\", linewidth=0.5)\n #line(EjdivEc, E2p, plotter=pl, color=\"purple\", linewidth=0.5)\n return pl", "def plot_energies(self):\n plt.plot(self.energies[0], self.energies[1])\n plt.xlabel('Time (s)')\n plt.ylabel('Energy (J)')\n plt.show()", "def plot_steps(out_dict, units):\n from bokeh.models import BoxAnnotation\n from bokeh.plotting import figure, show, output_notebook\n import bokeh.models as bmd\n\n tooltips = [\n (\"Step (total)\", \"@index\"),\n (\"Step (stage)\", \"@step\"),\n (\"Energy\", \"@energy eV/atom\"),\n (\"Energy (dispersion)\", \"@dispersion_energy_au Ha\"),\n (\"SCF converged\", \"@scf_converged\"),\n (\"Cell A\", \"@cell_a_angs Angs\"),\n (\"Cell Vol\", \"@cell_vol_angs3 Angs^3\"),\n (\"MAX Step\", \"@max_step_au Bohr\"),\n (\"Pressure\", \"@pressure_bar bar\")\n ]\n hover = bmd.HoverTool(tooltips=tooltips)\n TOOLS = [\"pan\", \"wheel_zoom\", \"box_zoom\", \"reset\", \"save\", hover]\n\n natoms = out_dict['natoms']\n values = [ x/natoms*ha2u[units] for x in out_dict['step_info']['energy_au'] ]\n values = [ x-min(values) for x in values ]\n\n data = bmd.ColumnDataSource(data=dict( index=range(len(values)),\n step=out_dict['step_info']['step'],\n energy=values,\n dispersion_energy_au=out_dict['step_info']['dispersion_energy_au'],\n scf_converged=out_dict['step_info']['scf_converged'],\n cell_a_angs=out_dict['step_info']['cell_a_angs'],\n cell_vol_angs3=out_dict['step_info']['cell_vol_angs3'],\n max_step_au=out_dict['step_info']['max_step_au'],\n pressure_bar=out_dict['step_info']['pressure_bar'],\n ))\n\n p = figure(tools=TOOLS, title='Energy profile of the DFT minimization',\n height=350, width=550)\n\n p.xgrid.grid_line_color=None\n p.xaxis.axis_label = 'Steps'\n p.yaxis.axis_label = 'Energy ({}/atom)'.format(units)\n\n # Colored background\n colors = ['red','orange','green','yellow','cyan','pink','palegreen']\n start = 0\n for i,steps in enumerate(out_dict['stage_info']['nsteps']):\n end = start+steps\n p.add_layout(BoxAnnotation(left=start, right=end, fill_alpha=0.2, fill_color=colors[i]))\n start = end\n\n # Trace line and markers\n p.line('index', 'energy', source=data, line_color='blue')\n 
p.circle('index', 'energy', source=data, line_color='blue', size=3)\n return p", "def page_dashboard(state):\n\n st.title(\":chart_with_upwards_trend: Prediction Results Dashboard\")\n\n st.markdown(\"# Select Stocks to View Results:\")\n if state.finalized_data:\n for stock_data in state.finalized_data:\n st.write(\"---\")\n st.markdown(\"## \" + stock_data[\"stock\"])\n if st.checkbox(\"View Results for \" + stock_data[\"stock\"]):\n\n ############################################\n\n st.markdown(\"### Historical Predictions:\")\n\n df2 = pd.DataFrame.from_dict(stock_data[\"prev_predictions\"])\n\n select_lbl = (\n \"Enter the names of models for \" + stock_data[\"stock\"] + \":\"\n )\n models_selections = st.multiselect(\n label=select_lbl,\n options=df2.columns,\n ) # allow users to display specific model results on dataframe graph\n\n if not models_selections: # if nothing is selected show all models!\n st.line_chart(df2)\n else:\n st.line_chart(df2[models_selections])\n\n st.markdown(\n \"*Note:* 'Prices' are the actual prices for those days. The rest are model predictions for those days.\\nPrices (in USD) are on the y-axis, the day number in the data is on the x-axis.\"\n )\n\n ############################################\n\n st.markdown(\"### Future (Next-Day) Predictions:\")\n\n df = pd.DataFrame()\n df = df.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"swing_predictions\"]]\n )\n )\n df = df.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"next_day_predictions\"]]\n )\n )\n df = df.append(\n pd.DataFrame([stock_data[\"prediction_results\"][\"model_scores\"]])\n )\n\n df.index = [\n \"Swing Predicton\",\n \"Price Prediction ($)\",\n \"Model Fit Score\",\n ]\n df = df.transpose()\n df # display chart\n\n st.markdown(\n \"- The current price of the stock is *$\"\n + str(\n round(stock_data[\"prediction_results\"][\"current_prev_close\"], 2)\n )\n + \"*.\"\n )\n\n if state.period == \"1mo\":\n st.markdown(\"- *Recommended Model (for 1mo):* SVR-RBF\")\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n elif state.period == \"6mo\":\n st.markdown(\n \"- *Recommended Model (for 6mo):* SVR-Poly (most recommended), LR, EN, or Lasso.\"\n )\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n elif state.period == \"1y\":\n st.markdown(\"- *Recommended Model (for 1yr):* SVR-Poly\")\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n else:\n st.markdown(\n \"- *Note:* View the home screen for information about the best models and training data size combinations.\"\n )\n\n ############################################\n st.markdown(\"### View Other Information:\")\n\n if st.checkbox(\n \"View \" + stock_data[\"stock\"] + \"'s Model Efficiency Timings\"\n ):\n st.markdown(\"#### Model Efficiencies:\")\n st.markdown(\n \"Shows the time in seconds it took models to complete specific tasks:\"\n )\n df3 = pd.DataFrame()\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"training_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"testing_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"new_predictions_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"prev_predictions_times\"]]\n )\n )\n df3.index = [\n \"Training\",\n \"Testing/Scoring\",\n 
\"Future Predictions\",\n \"Historical Predictions\",\n ]\n df3 = df3.transpose()\n df3\n\n ############################################\n\n if st.checkbox(\"View \" + stock_data[\"stock\"] + \"'s Information\"):\n st.markdown(\"#### Company Information:\")\n for key in stock_data[\"stock_info\"].keys():\n st.write(\"*\", key + \":\", stock_data[\"stock_info\"][key])\n else:\n st.markdown(\n \"## Generate data to populate and initialize this page by going to the 'Settings' page and running the tool!\"\n )", "def plot_metrics(self, instance_type, directory='.'):\n max_mem_utilization_percent_chunks_all_pts = []\n max_mem_used_MB_chunks_all_pts = []\n min_mem_available_MB_chunks_all_pts = []\n max_cpu_utilization_percent_chunks_all_pts = []\n max_disk_space_utilization_percent_chunks_all_pts = []\n max_disk_space_used_GB_chunks_all_pts = []\n for i in range(0, self.nTimeChunks):\n self.starttime = self.starttimes[i]\n self.endtime = self.endtimes[i]\n # saving all points for the chunck\n max_mem_utilization_percent_chunks_all_pts.append(self.max_memory_utilization_all_pts())\n max_mem_used_MB_chunks_all_pts.append(self.max_memory_used_all_pts())\n min_mem_available_MB_chunks_all_pts.append(self.min_memory_available_all_pts())\n max_cpu_utilization_percent_chunks_all_pts.append(self.max_cpu_utilization_all_pts())\n max_disk_space_utilization_percent_chunks_all_pts.append(self.max_disk_space_utilization_all_pts())\n max_disk_space_used_GB_chunks_all_pts.append(self.max_disk_space_used_all_pts())\n # writing values as tsv\n input_dict ={\n 'max_mem_used_MB': (max_mem_used_MB_chunks_all_pts, 1),\n 'min_mem_available_MB': (min_mem_available_MB_chunks_all_pts, 1),\n 'max_disk_space_used_GB': (max_disk_space_used_GB_chunks_all_pts, 1),\n 'max_mem_utilization_percent': (max_mem_utilization_percent_chunks_all_pts, 1),\n 'max_disk_space_utilization_percent': (max_disk_space_utilization_percent_chunks_all_pts, 1),\n 'max_cpu_utilization_percent': (max_cpu_utilization_percent_chunks_all_pts, 5)\n }\n self.list_files.append(self.write_tsv(directory, **input_dict))\n self.list_files.append(self.write_metrics(instance_type, directory))\n # writing html\n self.list_files.append(self.write_html(instance_type, directory))", "def main():\n max_v = np.amax(vLF1) * 3.6 # in km/h\n tmax_v = tLF1[np.argmax(vLF1)] # in seconds\n final_v = vLF2[-1] # in m/s\n total_time = tLF2[-1] # in seconds\n\n print('The maximum velocity is {:f} km/h'.format(max_v))\n print('The time when max velocity is reached is {:f} s'.format(tmax_v))\n print('The final velocity on the ground is {:f} m/s'.format(final_v))\n print('The total duration of the jump is {:f} s'.format(total_time))\n\n plt.subplot(2, 1, 1)\n plt.plot(tLF1, zLF1, label='Phase 1')\n plt.plot(tLF2, zLF2, label='Phase 2', color='red', linewidth=2.0)\n plt.title('Simulation of Felix Baumgartner\\'s 2012 Jump')\n plt.ylabel('Altitude (m)')\n plt.grid('on')\n plt.legend()\n\n plt.subplot(2, 1, 2)\n plt.plot(tLF1, vLF1, label='Phase 1')\n plt.plot(tLF2, vLF2, label='Phase 2', color='red', linewidth=2.0)\n plt.ylabel('Velocity (m/s) [Down]')\n plt.xlabel('Time (s)')\n plt.grid('on')\n plt.legend()\n\n plt.savefig('skydive.pdf')\n plt.show()\n\n return (max_v, tmax_v, final_v, total_time)", "def render(self, agents, episode):\n fig = plt.figure(figsize=(12, 12))\n fig.suptitle(f\"Episode {episode}\")\n gs = gridspec.GridSpec(2, 2)\n\n a_thousand = 1000\n a_million = 1000000\n\n # Price Process\n ax = plt.subplot(gs[0, 0])\n ax.set(title=\"Price Process\")\n 
plt.plot(self.step_array * self.tau, self.S_tilde, label=\"Price Including Temporary Price Impact\")\n plt.plot(self.step_array * self.tau, self.S, label=\"Price\")\n plt.legend()\n ax.set(ylabel=\"Price\")\n ax.grid(True)\n\n # Revenue Process\n ax = plt.subplot(gs[0, 1])\n ax.set(title=\"Revenue Process\")\n for a in range(len(agents)):\n plt.plot(self.step_array * self.tau, agents[a].R / a_thousand, label=f\"Agent {a+1}\")\n plt.legend()\n ax.grid(True)\n ax.set(ylabel=\"Revenue ($k)\")\n ax.set(xlabel=\"Time Step\")\n\n # Inventory Process\n ax = plt.subplot(gs[1, :])\n ax.set(title=\"Inventory Process\")\n for a in range(len(agents)):\n plt.plot(self.step_array * self.tau, agents[a].x / a_million, label=f\"Agent {a+1}\")\n plt.legend()\n ax.grid(True)\n ax.set(ylabel=\"Inventory (M)\")\n ax.set(xlabel=\"Time Step\")\n\n filename = PPODirectories.tmp + f\"episode-{episode}-simulation.png\"\n\n plt.savefig(filename)\n plt.close()\n\n return filename", "def show_results(self):\n\n N = split_list(self.N)\n # create subplot\n fig = make_subplots(rows=1,cols=2,\n subplot_titles=('Fish population', 'Harvested fish'),\n specs=[[{'type': 'xy'}, {'type': 'pie'}]])\n #Add population line graph\n fig.add_trace(go.Scatter(y=N['odds'], x=np.linspace(1, 11, 6), name='odd year population',\n hovertemplate =\n 'Year: %{x}'+ '<br>Pop: %{y}'),\n row=1, col=1)\n fig.add_trace(go.Scatter(y=N['evens'], x=np.linspace(2, 12, 6), name='even year population',\n hovertemplate =\n 'Year: %{x}'+ '<br>Pop: %{y}'),\n row=1, col=1)\n fig.update_xaxes(title_text=\"year\", row=1, col=1)\n fig.update_yaxes(title_text=\"population\", row=1, col=1)\n\n # cannot use 'paper' as yref due to bug in sublplot.\n fig.add_shape(type='line',\n xref='x', yref='y',\n x0=2.5, y0=-10, x1=2.5, y1=1000,\n line=dict(color='Black', width=3),\n row=1, col=1)\n\n # create pie chart\n colors = ['#636EFA', '#EF553B'] \n labels = ['total odd year harvest', 'total even year harvest']\n M = split_list(self.harvest_record)\n values = [sum(M['odds']), sum(M['evens'])]\n fig.add_trace(go.Pie(labels=labels, values=values, hoverinfo='label', textinfo='value', marker=dict(colors=colors)), \n row=1, col=2)\n\n # add title\n fig.update_layout(title_text='Results') \n fig.write_html(\"fish_trap_simulation.html\")\n\n \n return fig", "def visualise_food_consumption(data: LogData, directory: Path):\n\n figure, axes = plot.subplots()\n\n food_history = get_food_history(data)\n\n axes.plot(food_history.keys(), food_history.values(), label=\"Food\", color=\"blue\", **{\"ls\": \"--\"})\n\n axes.legend(loc=\"upper left\")\n axes.set_xlim(0, data.duration_secs())\n axes.set_xlabel(\"Time (seconds)\")\n axes.set_ylabel(\"Amount\")\n axes.set_title(\"Food availability\")\n\n plot.savefig(directory / Path(\"food_consumption.png\"))\n plot.close()", "def energy_plot(self):\n return az.plot_energy(self.res)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download and unpack the Zenodo minted data for the current stitches distribution.
def fetch_zenodo(self):
    # full path to the stitches root directory where the example dir will be stored
    if self.data_dir is None:
        data_directory = pkg_resources.resource_filename('stitches', 'data')
    else:
        data_directory = self.data_dir
    # build needed subdirectories if they do not already exist
    tas_data_path = os.path.join(data_directory, "tas-data")
    temp_data_path = os.path.join(data_directory, "temp-data")
    if not os.path.exists(tas_data_path):
        os.mkdir(tas_data_path)
    if not os.path.exists(temp_data_path):
        os.mkdir(temp_data_path)
    # get the current version of stitches that is installed
    current_version = pkg_resources.get_distribution('stitches').version
    try:
        data_link = InstallPackageData.DATA_VERSION_URLS[current_version]
    except KeyError:
        msg = f"Link to data missing for current version: {current_version}. Using default version: {InstallPackageData.DEFAULT_VERSION}"
        data_link = InstallPackageData.DEFAULT_VERSION
        print(msg)
    # retrieve content from URL
    print("Downloading example data for stitches version {}. This may take a few minutes...".format(current_version))
    response = requests.get(data_link)
    with zipfile.ZipFile(BytesIO(response.content)) as zipped:
        # extract each file in the zipped dir to the project
        for f in zipped.namelist():
            extension = os.path.splitext(f)[-1]
            # Extract only the csv and nc files
            if all([len(extension) > 0, extension in (".csv", ".nc")]):
                basename = os.path.basename(f)
                # Check to see if tas-data is in the file path
                if "tas-data" in f:
                    basename = os.path.join("tas-data", basename)
                out_file = os.path.join(data_directory, basename)
                # extract to a temporary directory to be able to only keep the file out of the dir structure
                with tempfile.TemporaryDirectory() as tdir:
                    # extract file to temporary directory
                    zipped.extract(f, tdir)
                    # construct temporary file full path with name
                    tfile = os.path.join(tdir, f)
                    print(f"Unzipped: {out_file}")
                    # transfer only the file sans the parent directory to the data package
                    shutil.copy(tfile, out_file)
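The method above follows a common download pattern: stream a zip archive into memory, then extract only the members of interest while flattening away their parent directories. A stripped-down sketch of that pattern with a placeholder URL argument and suffix filter (both hypothetical, not the package's real values):

import os
import shutil
import tempfile
import zipfile
from io import BytesIO

import requests

def fetch_filtered_zip(url, out_dir, keep_suffixes=(".csv", ".nc")):
    response = requests.get(url)
    response.raise_for_status()
    with zipfile.ZipFile(BytesIO(response.content)) as zipped:
        for name in zipped.namelist():
            if not name.endswith(keep_suffixes):
                continue
            out_file = os.path.join(out_dir, os.path.basename(name))
            # extract into a temporary directory so only the file, not its folder structure, is kept
            with tempfile.TemporaryDirectory() as tdir:
                zipped.extract(name, tdir)
                shutil.copy(os.path.join(tdir, name), out_file)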
[ "def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise", "def fetch_zenodo(self):\n\n # full path to the cerf root directory where the example dir will be stored\n if self.data_dir is None:\n data_directory = pkg.get_data_directory()\n else:\n data_directory = self.data_dir\n\n # get the current version of cerf that is installed\n current_version = get_distribution('cerf').version\n\n try:\n data_link = InstallSupplement.DATA_VERSION_URLS[current_version]\n\n except KeyError:\n msg = f\"Link to data missing for current version: {current_version}. Please contact admin.\"\n\n raise KeyError(msg)\n\n # retrieve content from URL\n print(\"Downloading example data for cerf version {}...\".format(current_version))\n r = requests.get(data_link)\n\n with zipfile.ZipFile(BytesIO(r.content)) as zipped:\n\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n\n extension = os.path.splitext(f)[-1]\n\n if len(extension) > 0:\n\n basename = os.path.basename(f)\n out_file = os.path.join(data_directory, basename)\n\n # extract to a temporary directory to be able to only keep the file out of the dir structure\n with tempfile.TemporaryDirectory() as tdir:\n\n # extract file to temporary directory\n zipped.extract(f, tdir)\n\n # construct temporary file full path with name\n tfile = os.path.join(tdir, f)\n\n print(f\"Unzipped: {out_file}\")\n # transfer only the file sans the parent directory to the data package\n shutil.copy(tfile, out_file)", "def fetch_the_data():\n subprocess.run([\"wget\", \"https://storage.googleapis.com/recipe-box/recipes_raw.zip\"])\n subprocess.run([\"unzip\", \"recipes_raw.zip\", \"-d\", RECIPES_DIRPATH])\n subprocess.run([\"rm\", \"recipes_raw.zip\"])", "def x_download():\n\t#_loadconfig()\n\tconf = _get_config()\n\t#print conf['xplane']\n\tdownload_url = conf['xplane']['download']\n\tlocal(\"wget -P %s %s\" % (navimport.conf.work_dir(\"/xplane_zips\"), download_url))", "def download_data():\n url = 'https://www.dropbox.com/s/p9wmkvbqt1xr6lc/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()", "def download_data():\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()", "async def download(self):\n await self.bot.say(\"How to download and play the Infinix ModPack: \\n```markdown\\n1. Download Technic Launcher \\n2. Open Technic Launcher and click on the `ModPacks` tab \\n3. Type `InfiniX Reborn` into the search \\n4. 
Select `InfiniX Reborn` and Click on install \\n5. Click on `Launcher Options` on the top right \\n6. Click on the Java tab \\n7. Set the Memory to a value more than 2.5Gb but less than 6Gb and close the Settings window \\n8. Press play, offical server is already saved under multiplayer \\n```\")\n await self.bot.say(\"Download Technic Launcher: http://www.technicpack.net/download\")\n await self.bot.say(\"Installing Java8 x64 Is Required: http://www.oracle.com/technetwork/java/javase/downloads/jre8-downloads-2133155.html\")\n await self.bot.say(\"Link to pack on technic: http://www.technicpack.net/modpack/radioactive-project-bionix.53041\")", "def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)", "def _download_original_weights(self):\n\n weight_url = \"https://dl.fbaipublicfiles.com/video-pose-3d/pretrained_h36m_detectron_coco.bin\"\n try:\n url_request = request.urlopen(weight_url)\n path = Path(self.CKPT_FILE)\n path.parent.mkdir(exist_ok=True)\n path.write_bytes(url_request.read())\n except URLError:\n print(\"Could not download weight file. Please check your internet \\\n connection and proxy settings\")", "def download():\n basedir = os.path.dirname(os.path.dirname(__file__))\n print(basedir)\n datadir = os.path.join(basedir,\"data/NeonTreeEvaluation/\")\n print(\"Downloading data files to {}\".format(datadir)) \n eval_url = zenodo_url(concept_rec_id=\"3723356\", datadir=datadir)", "def electroweakinos_likelihoods_download():\n oneLbb_HEPData_URL = \"https://www.hepdata.net/record/resource/1267798?view=true\"\n targz_filename = \"oneLbb_workspaces.tar.gz\"\n response = requests.get(oneLbb_HEPData_URL, stream=True)\n assert response.status_code == 200\n with open(targz_filename, \"wb\") as file:\n file.write(response.content)\n assert (\n hashlib.sha256(open(targz_filename, \"rb\").read()).hexdigest()\n == \"64bbbef9f1aaf9e30d75c8975de4789484329b2b825d89331a6f2081661aa728\"\n )\n # Open as a tarfile\n yield tarfile.open(targz_filename, \"r:gz\")\n os.remove(targz_filename)", "def download():\n toydata = requests.get(DATA_URL).json()\n return toydata", "def download_data(origin_time, net, sta, loc, chan):\n \n dataDir_get = '/import/netapp-m-02-bay200/mseed_online/archive/'\n \n fileName = \".\".join((net, sta, \".\" + chan + \".D\",\n origin_time.strftime(\"%Y.%j\")))\n filePath = os.path.join(dataDir_get, origin_time.strftime(\"%Y\"),\n net, sta, chan + '.D', fileName)\n o_time2 = origin_time + 86400\n fileName2 = \".\".join((net, sta, \".\" + chan + \".D\",\n o_time2.strftime(\"%Y.%j\")))\n filePath2 = os.path.join(dataDir_get, o_time2.strftime(\"%Y\"),\n net, sta, chan + '.D', fileName2)\n\n if os.path.isfile(filePath):\n if origin_time.hour > 21:\n st = Stream()\n st.extend(read(filePath, starttime = origin_time - 180,\n endtime = origin_time + 3 * 3600))\n st.extend(read(filePath2, \n starttime = UTCDateTime(o_time2.year, o_time2.month, \n o_time2.day, 0, 0),\n endtime = origin_time + 3 * 3600))\n st.merge(method=-1)\n else:\n st = read(filePath, starttime = origin_time - 180,\n endtime = origin_time + 3 * 3600)\n else:\n 
print \"++++ cannot find the following file: \\n %s \\n++++\" % filePath\n\n if not st:\n raise RotationalProcessingException('Data not available for this'\n ' event...')\n st.trim(starttime=origin_time-180, endtime=origin_time+3*3600)\n\n print 'Download of', st[0].stats.station, st[0].stats.channel, \\\n 'data successful!'\n\n return st", "def download(remove_archive):\n download_url = requests.get('https://cloud-api.yandex.net/v1/disk/public/resources/download', params={\n 'public_key': 'https://yadi.sk/d/TX5k2hkEm9wqZ',\n 'path': '/classification/rtsd-r3.tar.lzma'\n }).json()['href']\n\n archive_path = Path('./rtsd-r3.tar.lzma')\n with open(archive_path, 'wb') as file:\n archive_ = requests.get(download_url).content\n file.write(archive_)\n\n extract_to = Path('./datasets')\n extract_to.mkdir(parents=True, exist_ok=True)\n shutil.unpack_archive(archive_path, extract_to, format='xztar')\n os.remove(extract_to / 'rtsd-r3/.crop.swp')\n if (remove_archive):\n os.remove(archive_path)", "def _download_and_extract(self) -> None:\n\n # To be implemented here, the code to download from self._archive_url and to extract the\n # data into the self._path. This is the code for the case \"b\".\n print(\"Raw dataset downloaded and extracted.\")", "def _download( self ):\n self._system.execute_command( \"git\", [\"clone\", \"git@github.com:snoplus/snogoggles.git\", \n self.get_install_path()], cwd=os.getcwd(), verbose=True)", "def download_mission(self): #--- download the mission\n self.vehicle.commands.download()\n self.vehicle.commands.wait_ready() # wait until download is complete. \n self.mission = self.vehicle.commands", "def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)", "def download_binary(self):\n self.archive = download_file(self.download_dir, self.client_version)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all morbidities by war name.
def get_morbidities_for_war_era(): war_era_name = request.args.get('warEra') if not war_era_name: raise BadRequestError("warEra parameter is missing") return datasources_service.get_morbidities_for_war_era(war_era_name)
[ "def get_clanwar_leagues(self, tag):\n return self.request(\"clanwarleagues/wars/{warTag}\".format(warTag=tag))", "def get_war_eras():\n return datasources_service.get_war_eras()", "def query_tiles(name):\n return db.MapTile.query.filter(\n db.MapTile.name == TileSet[name].name).first()", "def get_clan_war(self, tag):\n return self.request(\"clans/{}/currentwar\".format(self.valid_tag(tag)))", "def get_movies_list(self, world):\n api_url = self.api_url_base + '/api/{}/movies'.format(world)\n movies_dict = self.get_dict_from_apis(api_url)\n ret_dict = {world: None}\n if movies_dict is not None:\n ret_dict[world] = movies_dict['Movies']\n return ret_dict", "def get_weathers():\n names = [\n name for name in dir(carla.WeatherParameters)\n if re.match('[A-Z].+', name)\n ]\n weathers = {x: getattr(carla.WeatherParameters, x) for x in names}\n return weathers", "def get_all_wines():\n wines = []\n for wine in Wine.query.all():\n information = response_builder(wine, Wine)\n wines.append(information)\n return jsonify({'error_code': OK, 'result': wines}), 200", "def _getAllMinistries(date):\n session = Session()\n mfilter=sql.or_( \n sql.between(date, schema.groups.c.start_date, schema.groups.c.end_date),\n sql.and_(\n (schema.groups.c.start_date < date ),\n (schema.groups.c.end_date == None)\n )\n )\n query = session.query(domain.Ministry).filter(mfilter)\n return query.all()", "def load_towns():\n if not hasattr(g, 'towns'):\n #g.towns = run_query('select id, name from municipios')\n g.towns = get_towns()\n return g.towns", "def get_war_cards(self):\n lst = [self.get_card() for _ in range(self.__war_type)]\n if None in lst:\n return None\n else:\n return lst", "def update_weather_for_all_stations():\n\n weather.get_metars(airport_render_config.keys(), logger=LOGGER)", "def get_clan_war(self, tag, league=False):\n league = \"currentwar/leaguegroup\" if league else \"currentwar\"\n return self.request(\"clans/{}/{}\".format(self.valid_tag(tag), league))", "def arms_by_name(self) -> Dict[str, Arm]:\n return self._arms_by_name", "def names(self):\n for wool in self.wools.items():\n yield wool.name", "def query_interstate_wars(req_war_id):\n\tthis_query = Query('interstate', req_war_id)\n\tthis_query.send_query()\n\tresponse = this_query.pull_result()\n\treturn jsonify(response)\n\t#return render_template('response.html', response=response)", "async def fetch_guild_wars(self, name, *, test=False):\n response = await self._request(\"GET\", GuildWars.get_url(name), test=test)\n start_time = time.perf_counter()\n guild_wars = GuildWars.from_content(response.content)\n parsing_time = time.perf_counter() - start_time\n return TibiaResponse(response, guild_wars, parsing_time)", "def get_all_by_name(self, name):\n return [t for t in self if t.name == name]", "def get_all_machines(self) -> List[Dict]:\n cache_key = \"{}_machines\".format(self.cache_key_prefix)\n if self.cache:\n data = self.cache.get(cache_key, {})\n if data:\n return data\n r = self.session.get(\"{}/machines.json\".format(self.BASE_URL))\n if r.status_code != requests.codes.ok:\n logger.error(\n \"Getting list of all pinball map games failed with status code {}\".format(\n r.status_code\n )\n )\n r.raise_for_status()\n data = r.json()[\"machines\"]\n # patch raw data with searchable cleaned names\n for g in data:\n g[\"cleaned_name\"] = clean_name(g[\"name\"])\n if self.cache:\n self.cache.set(cache_key, data, 15 * 60)\n return data", "def retrieve_workspaces(account):\n fp = 
WebServicesAPI(account.server,account.user,account.password,False)\n workspace_data = fp.listWorkspaces() \n workspaces = []\n for data in workspace_data:\n projectid = data['counterid']\n projectname = data['counter'].replace('ProjectName:','')\n workspace = Workspace(projectid, projectname)\n workspaces.append(workspace)\n return workspaces" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get list of all war eras.
def get_war_eras(): return datasources_service.get_war_eras()
[ "def ewriters():\n return dict(_ewriters)", "def get_morbidities_for_war_era():\n war_era_name = request.args.get('warEra')\n if not war_era_name:\n raise BadRequestError(\"warEra parameter is missing\")\n return datasources_service.get_morbidities_for_war_era(war_era_name)", "def list_shelves(self):\n shelflist = []\n for i in self.get_shelves():\n shelflist.append(i)\n return shelflist", "def list_escher_maps():\n import escher\n\n maps = escher.list_available_maps()\n return [entry['map_name'] for entry in maps]", "def archers_list(self):\n return self._archers_list", "def get_all_elbs(f=None):\n cfn_config = get_config()\n elbs = [x.get('name') for x in cfn_config.data.get('elb', {}) if x.get('scheme') in ['internet-facing', 'internal']]\n return filter(f, elbs) if f else elbs", "def get_rw_hosts(self):\n\n rules = self._get_rules()\n return self.filer._xmltree_to_list(rules, 'read-write', 'name')", "def get_all_resellers():\n data = _get_dump()\n resellers = [item for item in data if item['model'] == 'fallballapp.reseller']\n return resellers", "def get_all_ribs_per_router(self):\n return self._get_all_ribs(lambda r: r.name)", "def get_all_environments():\n return ENVIRONMENTS", "def get_all_servers(self) -> List[Server]:", "def get_all_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_all()]", "def find_all():\n return ItopapiPrototype.find_all(ItopapiWebServer)", "def equipments(self):\n try:\n # At first, this variable will not exist... will be created\n return self._list_of_equip\n except AttributeError:\n print('Reading equipments for this site...')\n self._add_equip()\n return self._list_of_equip", "def resources(self):\n return list(self.get_resources_for_type(gdef.ResType_All))", "def all_errata(self):\n return self._all_errata", "def get_all_reporters():\r\n for ep in iter_entry_points('attest.reporters'):\r\n yield ep.name", "def get_all_lights(self):\n obj = self.make_request(relative_url=\"/lights\", method=\"get\")\n return self.parse_response(obj, parser=dict_parser(Light))", "def all(self):\n\n return self.__routes;" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancel a withdraw request.
def post_cancel_withdraw(self, withdraw_id: 'int') -> int: params = { "withdraw-id": withdraw_id } from huobi.service.wallet.post_cancel_withdraw import PostCancelWithdrawService return PostCancelWithdrawService(params).request(**self.__kwargs)
[ "def cancel_draw(self):\n self.accept_draw(clear=True)", "def cancel(self, order_id: int):", "def cancel_drawing(self):\n self.accept_drawing(clear=True)", "def cancel():\n context = {'user': toolkit.g.get('user') or toolkit.g.get('author')}\n organization_id = toolkit.request.args.get('organization_id', None)\n try:\n toolkit.get_action('member_request_cancel')(\n context, {\"organization_id\": organization_id})\n id = 'cancel'\n return toolkit.redirect_to('member_request.mylist', id=id)\n except logic.NotAuthorized:\n toolkit.abort(401, not_auth_message)\n except logic.NotFound:\n toolkit.abort(404, request_not_found_message)", "def create_cancel_request(self):\n req = CancelRequest(\n orderid=self.orderid, symbol=self.symbol, exchange=self.exchange\n )\n return req", "def cancel(request, pk=None):\n # Check request is still valid or not\n friend_request = get_or_none(FriendRequest, pk=pk)\n # if request is not valid\n if friend_request is None:\n return Response({'status': '400', 'code': 'E_REQUEST_NOT_FOUND',\n 'detail': code['E_REQUEST_NOT_FOUND']}, status=400)\n # Delete request\n friend_request.delete()\n return Response({'status': '200', 'code': 'OK_CANCEL_FRIEND_REQUEST',\n 'detail': code['OK_CANCEL_FRIEND_REQUEST']}, status=200)", "def requestCancelled(builder, request):", "def cancel(self, invoice_id, **kwargs):\n url = \"{}/{}/cancel\".format(self.base_url, invoice_id)\n return self.post_url(url, {}, **kwargs)", "def cancel_request(request, id):\n user = get_object_or_404(User, id=id)\n f_request = FriendRequest.objects.filter(\n from_user=request.user,\n to_user=user\n )\n f_request.delete()\n messages.success(\n request,\n f'Your friend request to {user} has cancelled.'\n )\n return redirect('profiles:profile')", "def hook_cancel_assistance(self, data):\n request_id = data[\"request_id\"]\n assignee_chat_id = data[\"volunteer\"]\n log.info(\"CANCEL req:%s\", request_id)\n self.send_message(assignee_chat_id, c.MSG_REQUEST_CANCELED)\n\n self.updater.dispatcher.user_data[assignee_chat_id].update(\n {\"current_request\": None, \"reviewed_request\": None, \"state\": c.State.AVAILABLE}\n )\n del self.updater.dispatcher.bot_data[request_id]\n self.updater.dispatcher.update_persistence()", "def do_cancel(order):\r\n self.gox.cancel(order.oid)", "def cancel(self, membership, callback=None):", "def cancel(self, request, pk=None):\n try:\n batch_obj = Batch.objects.get(pk=pk)\n except Batch.DoesNotExist:\n return Response('Batch Record Not Found', status=status.HTTP_404_NOT_FOUND)\n\n batch_obj.batch_status = \"CANCELLED\"\n batch_obj.cancelled_at = datetime.datetime.now()\n batch_obj.save()\n\n cancel_dict = {\"batch_id\": pk, \"action\": \"cancel\"}\n json_val = JSONRenderer().render(cancel_dict)\n kafka_topic = 'batch'\n\n producer.send(kafka_topic, json_val)\n producer.flush()\n\n serializer = BatchSerializer(batch_obj)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def AbortRequest(self, request_id):\n request_path = self._GetRequestPathname(request_id, self._ABORTING)\n open(request_path, 'w').close()", "def DismissApprovalRequest(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def cancel(self, update, context):\n\n output = \"\"\n user = self.User(update)\n message = update.message.text.lower().split(\" \")\n print(message[1], message[2])\n if user.id not in self.__users.keys():\n output = \"looks like you don't have any 
requests at all.\"\n elif message[1].isnumeric() and message[2].isnumeric():\n user = self.__users[user.id]\n line_num = int(message[1])\n station_num = int(message[2])\n found_match = False\n for station in user.stations:\n if station.line_number == line_num and station.station_number == station_num:\n user.remove_station(station)\n self.bus_controller.remove_person_from_the_station(station)\n output = \"Canceled the request\"\n found_match = True\n break\n if not found_match:\n output = \"this doesn't match with any of your active requests, so you can't cancel it.\\n\" \\\n \"make sure that you don't have any typing mistakes\"\n else:\n output = \"the values you entered seem wrong, the values must be number.\"\n self.data_base.log(user, update.message.text, output)\n user.send_message(output)", "def cancel(self, order: Order):\n pass", "def cancel(self):\n self.cancelled = True", "def cancel_game(self, request):\n\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n if not game:\n raise endpoints.NotFoundException('Game not found!')\n elif game.game_over is True:\n raise endpoints.ForbiddenException('Game already over')\n else:\n game.key.delete()\n return StringMessage(message='Game canceled.')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the withdraw quota for currencies
def get_account_withdraw_quota(self, currency: 'str') -> list: check_should_not_none(currency, "currency") params = { "currency": currency, } from huobi.service.wallet.get_account_withdraw_quota import GetAccountWithdrawQuotaService return GetAccountWithdrawQuotaService(params).request(**self.__kwargs)
[ "def options_to_withdraw(self, amount):\n counter = PaperMoneyCounter() # aux class\n options = [] # options to withdraw\n remaining_cash = 0 # aux var\n\n if (amount % 20 == 0 or amount % 50 == 0) and (amount <= 1000): # is it allowed to withdraw?\n # prioritizing 100-dollar bills\n qtt_100s = counter.how_many_100s(amount)\n remaining_cash = counter.remaining_cash_without_100s(amount)\n\n qtt_50s = counter.how_many_50s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_50s(remaining_cash)\n\n qtt_20s = counter.how_many_20s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_20s(remaining_cash)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n # prioritizing 50-dollar bills\n qtt_100s = 0\n\n qtt_50s = counter.how_many_50s(amount)\n remaining_cash = counter.remaining_cash_without_50s(amount)\n\n qtt_20s = counter.how_many_20s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_20s(remaining_cash)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n if not(options[0] == [qtt_100s, qtt_50s, qtt_20s]):\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n # prioritizing 20-dollar bills\n qtt_100s = 0\n\n qtt_50s = 0\n\n qtt_20s = counter.how_many_20s(amount)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n if not(options[0] == [qtt_100s, qtt_50s, qtt_20s]):\n if not(options[1] == [qtt_100s, qtt_50s, qtt_20s]):\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n return options\n\n return None # if it wasn't allowed to withdraw", "def quota(self) -> 'outputs.CommitmentQuotaResponse':\n return pulumi.get(self, \"quota\")", "def get_quota(self):\n raise NotImplementedError", "def getActiveCurrencies():", "def quota(self) -> int:\n return pulumi.get(self, \"quota\")", "def withdraw(self, currency, amount, address):\n return self.api_query('withdraw', {\"currency\": currency, \"amount\": amount, \"address\": address})", "def getCurrencies():", "def getBaseCurrency():", "def quota_price(self):\n raise NotImplementedError(\"scm PriceManager getQuotaPrice not implemented.\")", "def online_quota(self):\r\n return self.max_contributions - self.num_tickets_total", "def quota_price(self):\n payload = \"scm PriceManager getQuotaPrice\"\n return self.cli_request(payload)", "def GetBuyRate(ticker):\r\n if ticker[\"result\"][\"Ask\"] / ticker[\"result\"][\"Last\"] >= BID_COEFFICIENT:\r\n rate = ticker[\"result\"][\"Last\"] * BID_COEFFICIENT\r\n else:\r\n rate = ticker[\"result\"][\"Ask\"]\r\n return rate", "def withdraw(self, currency, amount, address):\n return self.__call__('balance', 'withdrawcurrency',\n {\"currencyname\": currency, \n \"quantity\": amount, \n \"address\": address})", "def quota_get(self, context, project_id, resource_name):", "def account_space(access_token):\n client = dropbox.client.DropboxClient(access_token)\n account_info = client.account_info()\n quota_info = account_info['quota_info']\n total = quota_info['quota']\n used = quota_info['normal'] + quota_info['shared']\n return total - used", "def progress_get_quota(self):\n\t\t\n\t\treturn self.quota", "def withdraw(self, amount):\n if amount > self.balance:\n raise ValueError('insufficient funds to withdraw $%.2f' % amount)\n self.balance -= amount\n return self.balance", "def getUserCurrency():", "def get_usdt_amount(self):\n spot_json = self._request_without_params(GET, SPOT_ACCOUNT_INFO)\n amount = 0\n for item in spot_json:\n if item['currency'] == 
str.upper(\"usdt\"):\n amount = item['available']\n return float(amount)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parent gets sub user deposit history.
def get_sub_user_deposit_history(self, sub_uid: 'int', currency: 'str' = None, start_time: 'int' = None, end_time: 'int' = None, sort: 'str' = None, limit: 'int' = None, from_id: 'int' = None) -> DepositHistory: check_should_not_none(sub_uid, "sub_uid") params = { "subUid": sub_uid, "currency": currency, "startTime": start_time, "endTime": end_time, "sort": sort, "limit": limit, "fromId": from_id } from huobi.service.wallet.get_sub_user_deposit_history import GetSubUserDepositHistoryService return GetSubUserDepositHistoryService(params).request(**self.__kwargs)
[ "def get_subaccount_transfer_history(self, **params):\n return self._request_margin_api('get', 'sub-account/transfer/subUserHistory', True, data=params)", "def get_sub_account_transfer_history(self, **params):\n return self._request_withdraw_api('get', 'sub-account/transfer/history.html', True, data=params)", "def get_sub_account_transfer_history(self, **params):\n return self._request_margin_api('get', 'sub-account/sub/transfer/history', True, data=params)", "def get_subaccount_deposit_history(self, **params):\n return self._request_margin_api('get', 'capital/deposit/subHisrec', True, data=params)", "def last_history(self, user):\n return History.objects(user=user).order_by('-created_at').first()", "def getHistoryBookingsUser(self,userId):\n url = self.urlBookings + 'history?iduser=' + userId\n return ExecuteQuery().Query(url,'GET')", "def getUsernameHistory(UserId):\n url = f\"https://users.roblox.com/v1/users/{UserId}/username-history\"\n r = requests.get(url)\n j = json.loads(r.text)\n data = j['data']\n return data", "def dl_history(self):\n return self._get_json('user/download-history')", "def get_user_purchase_history_admin(user_name, other_user_name):\n\n user_name = auth.get_username_from_hash(user_name)\n # user_handler.is_permitted_to_do(user_name, None, 1 << Action.USER_PURCHASE_HISTORY.value)\n # check if admin\n return purchase_handler.get_user_purchase_history(other_user_name)", "def CallHistory(self):", "def get_history(self):\n return self.history", "def _UpdateUserHistory(user, t, auth):\r\n ts = _history.get(user, [])[-30:]\r\n ts.append((t, auth))\r\n _history[user] = ts\r\n return ts", "def history():", "def _UpdateUserHistory(user, t, auth):\n ts = _history.get(user, [])[-30:]\n ts.append((t, auth))\n _history[user] = ts\n return ts", "def _get_history_control_owner(self):\n return self.__history_control_owner", "def history(self):\n\n \"\"\"Return history in reverse order.\"\"\"\n history = []\n main = {\n 'state_id': self.id,\n 'state_data': self,\n 'date_edited': None,\n }\n\n def record_dict(log):\n filename = None if not log.import_filename else path.basename(log.import_filename)\n if filename:\n # Attempt to remove NamedTemporaryFile suffix\n name, ext = path.splitext(filename)\n pattern = re.compile('(.*?)(_[a-zA-Z0-9]{7})$')\n match = pattern.match(name)\n if match:\n filename = match.groups()[0] + ext\n return {\n 'state_id': log.state.id,\n 'state_data': log.state,\n 'date_edited': convert_to_js_timestamp(log.created),\n 'source': log.get_record_type_display(),\n 'filename': filename,\n # 'changed_fields': json.loads(log.description) if log.record_type == AUDIT_USER_EDIT else None\n }\n\n log = TaxLotAuditLog.objects.select_related('state', 'parent1', 'parent2').filter(\n state_id=self.id\n ).order_by('-id').first()\n\n if log:\n main = {\n 'state_id': log.state.id,\n 'state_data': log.state,\n 'date_edited': convert_to_js_timestamp(log.created),\n }\n\n # Traverse parents and add to history\n if log.name in ['Manual Match', 'System Match', 'Merge current state in migration']:\n done_searching = False\n\n while not done_searching:\n # if there is no parents, then break out immediately\n if (\n log.parent1_id is None and log.parent2_id is None) or log.name == 'Manual Edit':\n break\n\n # initialize the tree to None everytime. If not new tree is found, then we will not iterate\n tree = None\n\n # Check if parent2 has any other parents or is the original import creation. 
Start with parent2\n # because parent2 will be the most recent import file.\n if log.parent2:\n if log.parent2.name in ['Import Creation', 'Manual Edit']:\n record = record_dict(log.parent2)\n history.append(record)\n elif log.parent2.name == 'System Match' and log.parent2.parent1.name == 'Import Creation' and log.parent2.parent2.name == 'Import Creation':\n # Handle case where an import file matches within itself, and proceeds to match with\n # existing records\n record = record_dict(log.parent2.parent2)\n history.append(record)\n record = record_dict(log.parent2.parent1)\n history.append(record)\n else:\n tree = log.parent2\n\n if log.parent1:\n if log.parent1.name in ['Import Creation', 'Manual Edit']:\n record = record_dict(log.parent1)\n history.append(record)\n elif log.parent1.name == 'System Match' and log.parent1.parent1.name == 'Import Creation' and log.parent1.parent2.name == 'Import Creation':\n # Handle case where an import file matches within itself, and proceeds to match with\n # existing records\n record = record_dict(log.parent1.parent2)\n history.append(record)\n record = record_dict(log.parent1.parent1)\n history.append(record)\n else:\n tree = log.parent1\n\n if not tree:\n done_searching = True\n else:\n log = tree\n elif log.name == 'Manual Edit':\n record = record_dict(log.parent1)\n history.append(record)\n elif log.name == 'Import Creation':\n record = record_dict(log)\n history.append(record)\n\n return history, main", "def history():\n transactions = Transaction.query.filter(Transaction.user_id == session.get('user_id')).all()\n\n return render_template(\"history.html\", transactions=transactions)", "def get_user_purchases_history(user_name):\n user_name = auth.get_username_from_hash(user_name)\n return purchase_handler.get_user_purchase_history(user_name)\n # return user_handler.get_user_purchase_history(user_name)", "def get_history(self):\n return self.ringmaster.historyfile.getvalue()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add an obstacle to the map
def add_obstacle(self, obstacle_to_add): if self.obstacles.size != 0: self.obstacles = np.hstack((self.obstacles, obstacle_to_add)) else: self.obstacles = np.array([obstacle_to_add])
[ "def add_obstacle(self, obstacle):\n color = [1.0, 1.0, 1.0]\n frame_name = \"obstacle{}\".format(len(self._obstacles))\n frame = self._add_polydata(obstacle.to_polydata(), frame_name, color)\n self._obstacles.append((obstacle, frame))\n self._update_moving_object(obstacle, frame)", "def ajout_Obstacle(self,obstacle):\n self._obstacles.append(obstacle)", "def add_obstacle(self, x, y):\n self.BOARD[y][x].traversable = False\n self.board_array[y][x] = 1", "def add_new_obstacle(self, obstacle):\n self.obstacle_list.append(obstacle)\n for edge in self.edge_list:\n if not self.check_collision(edge, [obstacle]):\n node1 = edge.start_node\n node2 = edge.end_node\n # seems a bit redundant to do it twice, but unsure if doing it once is a better way\n self.dist_dict[(node1, node2)] = float(\"inf\")\n self.dist_dict[(node2, node1)] = float(\"inf\")\n if node1.parent == node2:\n self.verify_orphan(node1)\n if node2.parent == node1:\n self.verify_orphan(node2)\n # TODO: if v_bot is in pi(v,u) then pi_bot = None (see line 6 of alg 11)", "def addObstacle(self, coord):\r\n \r\n self.lock.acquire()\r\n self._obstacles.append(coord)\r\n self.updated.set()\r\n self.changes.append('obstacles')\r\n self.lock.release()", "def add_obstacle(self, *points: Tuple[float, float]):\n self.obstacles.append(o.Obstacle(*points))", "def set_obstacle(self, pos: tuple):\n if self.within_map(pos):\n self.map[round(pos[0]), round(pos[1])] = OBSTACLE\n return True\n else:\n return False", "def add_obstacle(self, name, position, influence_radius):\n\t\tif len(self.obstacle_ids) == 0:\n\t\t\tself.obstacle_ids = np.array([name])\n\t\t\tself.obstacle_positions = np.array(position, dtype=np.float64)\n\t\t\tself.obstacle_rois = np.array([influence_radius], dtype=np.float64)\n\t\telse:\n\t\t\tself.obstacle_ids = np.vstack((self.obstacle_ids, name))\n\t\t\tself.obstacle_positions = np.vstack((self.obstacle_positions, position))\n\t\t\tself.obstacle_rois = np.vstack((self.obstacle_rois, influence_radius))", "def update_obstacle_location(self):\n\n # find the previous location of the obstacle\n old_y = self.map_obstacle.y\n old_x = self.map_obstacle.x\n\n # remove it from the main graph\n self.main_graph[old_y][old_x].contents.remove(self.map_obstacle)\n\n # get the latest location\n self.map_obstacle.update_location()\n (new_y, new_x) = (self.map_obstacle.y, self.map_obstacle.x)\n\n # add it back into the main graph\n self.main_graph[new_y][new_x].contents.add(self.map_obstacle)\n\n # update the map obstacle (not necessary, but it doesn't hurt)\n self.map_obstacle.y = new_y\n self.map_obstacle.x = new_x", "def obstacle_cb(self, msg):\n # TODO: Callback for /obstacle_waypoint message. We will implement it later\n pass", "def set_obstacle(self):\n self.state = self.Obstacle", "def update_obstacles(self, new_obs):\n self.obstacles = new_obs", "def cb_obstacle_waypoint(self, msg):\n #rospy.logdebug('WaypointUpdater::obstacle_cb %s', msg)\n # TODO: implement obstacle_cb(...) 
after traffic light detection is implemented", "def __init__(self, map_obstacle, main_graph):\n\n self.map_obstacle = map_obstacle\n self.main_graph = main_graph\n\n self.sight_range = self.calculate_sight_range()\n\n self.top_left_y = None\n self.top_left_x = None\n self.bottom_right_y = None\n self.bottom_right_x = None\n self.height = None\n self.width = None\n self.size = self.calculate_size()\n\n # nodes specific to this threat zone\n self.nodes = []", "def addObstacle2D(self, obstacle3d, nObstacleId):\n if (None != self.aObstacles2D.pop(nObstacleId, None)):\n logger.info(\"INF addObstacle2D, object with same id (%s) already presented in dictionary, overwriting it\" % nObstacleId)\n\n polygon2D = obstacle3d.get2DPolygon()\n if polygon2D is None:\n logger.info(\"INF addObstacle2D: obstacle not crossing the 2D plane\")\n return\n self.aObstacles2D[nObstacleId] = Obstacle2D(polygon2D, rProbability=obstacle3d._rProbability, rTimestamp=obstacle3d._rTimestamp)", "def random_map(self, world):\n obstacles = []\n if self.cfg[\"obstacle\"][\"octagon\"][\"enabled\"]:\n obstacles += self.__generate_octagon_obstacles(world)\n if self.cfg[\"obstacle\"][\"rectangle\"][\"enabled\"]:\n obstacles += self.__generate_rectangle_obstacles(world)\n\n # update the current obstacles and goal\n self.current_obstacles = obstacles\n self.add_new_goal()\n\n # apply the new obstacles and goal to the world\n self.apply_to_world(world)", "def add_dynamic_obstacle(self, pos, radius):\n self.space.add_dynamic_obstacle(pos, radius)\n if self.goal is not None : self.set_goal(self.goal.pos)", "def placeObstacle(self, point, color):\r\n\t\tnode = self.getNodeFromPoint(point)\r\n\r\n\t\t# If the node is not already an obstacle, make it one\r\n\t\tif node.isWalkable:\r\n\t\t\t# Indicate that this node cannot be traversed\r\n\t\t\tnode.isWalkable = False\t\t\r\n\r\n\t\t\t# Set a specific color for this obstacle\r\n\t\t\tnode.color = color\r\n\t\t\tfor neighbor in node.neighbors:\r\n\t\t\t\tneighbor.neighbors.remove(node)\r\n\t\t\tnode.neighbors = []\r\n\t\t\tself.obstacles += [node]", "def place_obstacles():\n #Randomly generate different sized rectangles\n #Soem may overlap, which gives more variety in shape of obstacles\n xvals = np.random.randint(0,self.map_dimensions[1],size=self.N_obstacles)\n yvals = np.random.randint(0,self.map_dimensions[0],size=self.N_obstacles)\n lower_left = zip(xvals,yvals)\n rects = []\n for LL in lower_left:\n x = LL[0]\n y = LL[1]\n wmax = self.map_dimensions[1] - x\n w = np.random.randint(0,wmax,size=1)[0]\n hmax = self.map_dimensions[0] - y\n h = np.random.randint(0,hmax,size=1)[0]\n rects += [(x,y,w,h)]\n self.coordinates__obstacles = rects" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a waypoint to the drone
def add_waypoint(self, waypoint): self.drone.add_waypoint(waypoint)
[ "def waypoint_add_rel(self):\n pass", "def update(self, waypoint):\n self.waypoints.append(waypoint)", "def create_waypoint(self, waypoint):\n connection = self.__create_connection()\n try:\n waypoint_list = list(waypoint)\n key = self.__compound_key(waypoint)\n waypoint_list.insert(0, key)\n\n keyed_waypoint = tuple(waypoint_list)\n\n sql = ''' INSERT INTO waypoints(waypoint_id, x, y, z, distance, heading, visit_count)\n VALUES(?,?,?,?,?,?,?) '''\n cur = connection.cursor()\n cur.execute(sql, keyed_waypoint)\n connection.commit()\n cur.close()\n return\n except sqlite3.Error as e:\n print(e)\n finally:\n connection.close()", "def add_waypoints(self, waypoints):\n while len(waypoints) > 0:\n self.waypoints.append(waypoints.popleft())", "def add_to_path(self,path_name: str,waypoint_str: str):\n self.__paths[path_name].append(self.__way_points[waypoint_str])", "def set_waypoint(self, waypoint: str) -> None:\n self.set_mode('manual')\n data = dict(type='SetAiWaypoint')\n data['target'] = waypoint\n self._send(data).ack('AiWaypointSet')", "def addWaypoints(self, config):\n # TODO: Rename for clarity. Only used for init and goal\n e = \"Loop | f\"\n robot = self.ps.robot\n rank1 = robot.rankInConfiguration[\"talos/arm_left_4_joint\"]\n rank2 = robot.rankInConfiguration[\"talos/arm_right_4_joint\"]\n q = config[::]\n # move elbows\n q[rank1] = -1.7\n q[rank2] = -1.7\n # Project q on state 'free'\n res, wp, err = self.graph.generateTargetConfig(e, config, q)\n if res:\n # test collision for wp\n res, msg = robot.isConfigValid(wp)\n if res:\n # call steering method\n res, p, msg = self.ps.directPath(config, wp, True)\n self.erasePath(p)\n if res:\n # add node and edge\n self.ps.addConfigToRoadmap(wp)\n self.ps.addEdgeToRoadmap(config, wp, p, True)\n # store wp\n return wp\n return config", "def set_waypoint(self,name_str: str):\n self.__way_points[name_str] = WayPoint(self.__longitude,\n self.__latitude)", "def waypoint_callback(self, wp):\n if self.trajectory_constructed == False: \n NextwpPosition = np.array([wp.position.x, wp.position.y, wp.position.z])\n NextwpOrientation = np.array([wp.orientation.x, wp.orientation.y, wp.orientation.z, wp.orientation.w])\n self.pc_x, self.pc_y, self.pc_z, self.seg_times, self.traj_t0 = self.make_trajectory(NextwpPosition, NextwpOrientation) \n self.trajectory_constructed = True", "def create_waypoint(api_data):\r\n #print(api_data)\r\n messages=[]\r\n if 'Info' in api_data and api_data['Info']:\r\n messages.append(api_data['Info'])\r\n for message in api_data.get('Meddelanden', []):\r\n if message.get('Serviceanlaggningsnamn') == 'Sugtömning':\r\n date=message['Datum']\r\n status=\"Trasig\" if message.get('Fungerande')==False else \"Funkar\"\r\n text=message['Meddelande']\r\n messages.append(', '.join([date, status, text]))\r\n messages='\\n'.join(messages)\r\n waypoint = gpxpy.gpx.GPXWaypoint(\r\n latitude=api_data['Latitud'],\r\n longitude=api_data['Longitud'],\r\n name=api_data['Hamn'],\r\n symbol=\"Service-Pump-Out-Facility\",\r\n type='WPT',\r\n description=messages\r\n )\r\n return waypoint", "def waypoint_transition(self):\n print(\"waypoint transition. next = \", self.target_position)\n\n if self.target_position is None:\n print(\"no next waypoint. 
landing\")\n self.landing_transition()\n else:\n print(\"travelling to: \", self.target_position)\n self.cmd_position(self.target_position[0], self.target_position[1], self.target_position[2], 0.0)\n self.flight_state = States.WAYPOINT", "def do_send_waypoint(self, args):\n self.send_waypoint()\n return", "def addPoint(self, nextPoint):\n NewPath = Path(Point(0,0))\n NewPath.head = PathNode.fromNode(self.head)\n NewPath.last = PathNode.fromNode(self.last)\n NewPath.distance = self.distance\n NewPath.size = self.size\n NewPath.degreesTurned = self.degreesTurned\n nextNode = PathNode(nextPoint)\n NewPath.append(nextPoint)\n NewPath.last = nextNode\n NewPath.distance += ROBOT_TRAVEL_DISTANCE\n NewPath.size += 1\n \n return NewPath", "def add_turn_point(self, dir_x, dir_y):\n\n self.turn_points.append(dict(x=self.head.x, y=self.head.y, dir_x=dir_x, dir_y=dir_y,\n from_dir_x=self.head.dir_x, from_dir_y=self.head.dir_y))\n self.head.turn(dir_x, dir_y)", "def onAddButtonPress(self, button):\n\t\twp_x = float(self.traj_to_x_entry.get_text())\n\t\twp_y = float(self.traj_to_y_entry.get_text())\n\t\twp_z = float(self.traj_to_z_entry.get_text())\n\t\twp_yaw = float(self.traj_to_yaw_entry.get_text())\n\n\t\t# add waypoint to list\n\t\twaypoints_gui.append([wp_x, wp_y, wp_z, wp_yaw])\n\n\t\t# reset entry fields\n\t\tself.traj_to_x_entry.set_text('')\n\t\tself.traj_to_y_entry.set_text('')\n\t\tself.traj_to_z_entry.set_text('')\n\t\tself.traj_to_yaw_entry.set_text('')", "def add_traffic_light(self, x, y, direction, turn=''):\n traffic_light = Traffic_light(self.id, self, direction, turn)\n self.grid.place_agent(traffic_light, (x, y))\n self.schedule.add(traffic_light)\n self.id += 1\n self.traffic_lights.append(traffic_light)", "def add_route(self, distance, start, destination):\r\n self.edges[start].append(Edge(distance, start, destination))\r\n self.edges[destination].append(Edge(distance, destination, start))", "def addWay(self,way):\n self.ways[way.osmId] = way", "def add_pathway(self, pathway):\n if pathway not in self._pathways:\n self._pathways.append(pathway)\n #fi" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the drone's location in the map
def set_drone_position(self, new_point): self.drone.set_drone_position(new_point)
[ "def set_location(self, x, y):\r\n self.__location = (x, y)", "def set_coordinate(self):\n airqual_dictionary = self.realtime_data['stations'][0] #get the very first(recent) data/result\n self.latitude = airqual_dictionary['lat']\n self.longitude = airqual_dictionary['lng']", "def set_new_location(self, xPos, yPos):", "def set_location(self, new_x, new_y):\n self.x = new_x\n self.y = new_y", "def setCoordinates(self):\n try:\n place, (lat, lng) = geocoders.Google().geocode(\"%s, %s, %s\" % \n (self.address_street, self.address_city, self.address_state))\n self.longitude = lng\n self.latitude = lat\n except: pass", "def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY", "def __landmark_location( self, data ):\n\t\t\n\t\tself.landmarkLocation = data", "def set_location(self, location):\r\n self._location = location", "def set_onmap(self, mapping):\n mapping.set_bad_guy(self.y_pos, self.x_pos)", "def set_location(self,row,col):\n\n self.row=row\n self.col=col", "def set_location_coord(self, longitude, latitude):\n self._msis.set_location_coord(longitude, latitude)\n self._clear_cache()", "def set_geo(self, **kwargs):\n self.lat = kwargs['lat']\n self.lon = kwargs['lon']", "def set_geo(self, lon: float, lat: float, alt: float) -> None:\n self.lon = lon\n self.lat = lat\n self.alt = alt", "def __ball_location( self, data ):\n\t\t\n\t\tself.ballLocation = data", "def set_coords(self, data):\n try: # Upon setting coords before selecting a variable\n var = self.kwargs['Variables']\n except:\n var = None\n self.data = data\n self.coord_setter.set_coords(self.data)\n self.displayer.set_coords(self.data)\n self.describer.set_coords(self.data, var)\n self.fields.set_coords(self.data, var)", "def configure_location(self):\n # Set floor correctly\n self.floor.set(\"pos\", array_to_string(self.bottom_pos))", "def set_location_coords(self, container, row, col):\n return self.set_location_well(container, '{0}:{1}'.format(row, col))", "def setLoc(self, loc): # loc must be a tuple or list of x,y coordinates\r\n self.loc = loc\r\n self.rect[0] = self.loc[0]\r\n self.rect[1] = self.loc[1]", "def setLoc(self, loc): # loc must be a tuple or list of x,y coordinates\n self.loc = loc\n self.rect[0] = self.loc[0]\n self.rect[1] = self.loc[1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset the obstacles' positions within the map (should be called when map is refreshed to clean the array)
def reset_obstacles(self): self.obstacles = np.array([])
[ "def clearObstacles(self):\r\n \r\n self.lock.acquire()\r\n self._obstacles = []\r\n self.updated.set()\r\n self.changes.append('obstacles')\r\n self.lock.release()", "def reset_map(self):\n self.grid = np.zeros((self.width, self.height), np.uint8)", "def recreate_obstacles(self):\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.obstacles = self.create_obstacles()", "def reset_map(self):\n self.map_buffer = ''\n self.elements = []\n self.load_map()", "def reset_map(self):\n self.x = None\n self.X = None\n self.y = None\n self.Y = None\n self.data = None\n self.sampling = None\n self.size = None", "def reset(self):\n\n #Create a grid of zeros\n self._grid = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n # _available_new_tiles will be refilled every 10 moves\n self._available_new_tiles = TOTAL_AVAILABLE_MOVES[:]\n for dummy_i in range(2):\n self.new_tile()\n self._game_over = False", "def specific_reset(self) -> None:\n\n # first, set agent xy and adjust its height\n self.agent.specific_reset()\n agent_pos = np.zeros(3)\n agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))\n self.agent.set_position(agent_pos)\n\n # second, reset obstacle positions\n if len(self.obstacles) > 0:\n obs_init_pos = env_utils.generate_obstacles_init_pos(\n num_obstacles=len(self.obstacles),\n agent_pos=self.agent.get_position(),\n goal_pos=np.array([]), # no goal in gather task\n world=self.world,\n min_allowed_distance=self.obstacle_obstacle_distance,\n agent_obstacle_distance=self.agent_obstacle_distance\n )\n for i, ob in enumerate(self.obstacles):\n ob.set_position(obs_init_pos[i])\n\n # finally, make all collected objects visible again\n [ob.update_visuals(make_visible=True) for ob in self.obstacles]", "def reset(self):\r\n # replace with your code\r\n for row in range(0, self._grid_height):\r\n for col in range(0, self._grid_width):\r\n self._grid_tile[row][col] = 0\r\n # at this step, all cells should be available\r\n self.new_tile()\r\n self.new_tile()", "def reset(self):\r\n self._initial_grid = [[0 for dummy_col in range(self.get_grid_width())] for dummy_row in range(self.get_grid_height()) ]\r\n for dummy_idx in range(2):\r\n self.new_tile()", "def discretize_positions():\n\trobot_position[0] = int( round( robot_pos_cm[0] / square_size ) )\n\trobot_position[1] = int( round( robot_pos_cm[1] / square_size ) )\n\n\tfor i in range( len(obst_pos_cm) ):\n\t\tobstacles_positions[i][0] = int( round( obst_pos_cm[i][0] / square_size ) )\n\t\tobstacles_positions[i][1] = int( round( obst_pos_cm[i][1] / square_size ) )", "def reset_position(self): \n self.rect.x = 400\n self.rect.y = 400\n \n # Specifies the Player's spawnpoint as maze_arrangement[8][8], representing\n # the tile in the center of the maze \n self.__minotaur_x = 8\n self.__minotaur_y = 8", "def reset(self):\n self._grid = [[0 for col in range(self._grid_width)] for row in range(self._grid_height)]\n self.new_tile()\n self.new_tile()", "def initObstacles(self):\n for obstacle in self.obstacles:\n if type(obstacle.movement) == type([]) and abs(obstacle.movement[TYPE]) != ROTATIONAL:\n obstacle.movementData.assignLinearVelocity(obstacle.movement)\n if obstacle.growth != NONE: obstacle.growthTicks = 0", "def reset(self):\n self._grid = [[0 for dummy_col in range(self.get_grid_width())]\n for dummy_row in range(self.get_grid_height())]\n for dummy in range(2):\n self.new_tile()", "def resetNeighborhood(self, sol):\n if sol not in self.pareto:\n return\n self.pareto[sol] = 1", 
"def reset(self) :\n for i in range(len(self.playerCellList)) :\n for j in range(len(self.playerCellList[i])) :\n self.playerCellList[i][j].hasPawn = False", "def clear_map(self):\n self.mapData = []", "def clear_map(self):\n self.canvas.delete(\"all\")\n self.draw_map()\n self.car = self.build_car(self.car_pos, 12)", "def clear_map(self):\n self.rooms = []\n\n self.dungeon.clear_dungeon()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate possible paths around the passed obstacle
def generate_possible_paths(self, obstacle): if self.does_uav_intersect_obstacle_vertically(obstacle, self.drone.get_point(), self.drone.get_waypoint_holder().get_current_waypoint()): if self.does_path_intersect_obstacle_2d(obstacle, self.drone.get_point(), self.drone.get_waypoint_holder().get_current_waypoint()): new_attempt_pos_points = [ [obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1] + obstacle.get_radius(), self.drone.get_point()[2]], [obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1] - obstacle.get_radius(), self.drone.get_point()[2]], [obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1] - obstacle.get_radius(), self.drone.get_point()[2]], [obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1] + obstacle.get_radius(), self.drone.get_point()[2]], [obstacle.get_point()[0], obstacle.get_point()[1] + obstacle.get_radius(), obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)], [obstacle.get_point()[0], obstacle.get_point()[1] - obstacle.get_radius(), obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)], [obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1], obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)], [obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1], obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)] ] new_paths = [] for new_pos_point in new_attempt_pos_points: if not self.does_path_intersect_obstacle_3d(obstacle, self.drone.get_point(), new_pos_point) and self.flight_boundary.is_point_in_bounds(new_pos_point): for recursive_new_pos_point in new_attempt_pos_points: if self.flight_boundary.is_point_in_bounds(recursive_new_pos_point) and abs(recursive_new_pos_point[2] - new_pos_point[2]) < 5: if recursive_new_pos_point[0] != new_pos_point[0] or recursive_new_pos_point[1] != new_pos_point[1]: if not self.does_path_intersect_obstacle_3d(obstacle, new_pos_point, recursive_new_pos_point) and not self.does_path_intersect_obstacle_3d(obstacle, recursive_new_pos_point, self.drone.get_waypoint_holder().get_current_waypoint()): new_paths.append([new_pos_point, recursive_new_pos_point]) # Uncomment for DEBUGGING ONLY for path in new_paths: print("Point:", str(path)) return new_paths return []
[ "def find_path(self, start_point: Pos, end_point: Pos, obstacles: list) -> list:\n pass", "def get_all_paths(grid):\n\n print(\"Calculating all possible paths ... \")\n all_paths = {}\n\n origins = init_origins(grid) # all not wall and obstacle cells\n destinations = init_destinations(grid) # exits, desks, shelves, and helpDesks\n\n for d in destinations:\n for o in origins:\n path = a_star_search(grid, o, d)\n all_paths[(o, d)] = path\n\n print('All paths have been calculated!')\n return all_paths", "def find_paths(self, mover, sx: int, sy: int) -> List[Path]:\n t0 = time.time()\n self.nodes[sx][sy].cost = 0\n self.nodes[sx][sy].depth = 0\n self.nodes[sx][sy].moves = 0\n self.closed.clear()\n self.open.clear()\n self.open.append(self.nodes[sx][sy]) # Start with starting node.\n\n while len(self.open) > 0:\n current = self.get_first_in_open()\n self.remove_from_open(current)\n self.add_to_closed(current)\n if current.moves == self.max_search_distance:\n continue\n if current.moves >= self.max_search_distance:\n current.parent = None\n continue\n for x in range(-1, 2):\n for y in range(-1, 2):\n if x == 0 and y == 0:\n continue\n\n xp = x + current.x\n yp = y + current.y\n\n if self.is_valid_location(mover, sx, sy, xp, yp):\n mover.cur_depth = current.depth\n next_step_cost = 1.0 - (1.0 - current.cost) * (1.0 - self.get_cost(mover, current.x, current.y, xp, yp))\n next_step_moves = current.moves + self.get_moves(mover, current.x, current.y, xp, yp)\n neighbour = self.nodes[xp][yp]\n self.tile_map.path_finder_visited(xp, yp)\n\n if next_step_cost < neighbour.cost:\n if self.in_open_list(neighbour):\n self.remove_from_open(neighbour)\n\n if self.in_closed_list(neighbour):\n self.remove_from_closed(neighbour)\n if (not self.in_open_list(neighbour)) and (not self.in_closed_list(neighbour)):\n neighbour.cost = next_step_cost\n neighbour.moves = next_step_moves\n neighbour.parent = current\n self.add_to_open(neighbour)\n\n paths = self.create_paths(sx, sy)\n return paths", "def get_path(self, start_idx, goal_idx):\n goal_found = False\n occ_grid = self.occ_grid\n open_list = OpenList()\n\n # get number of rows ni (x) and number of columns nj (y)\n ni, nj = occ_grid.num_idx\n path = []\n\n #print('Map rows, columns: {}'.format((ni, nj)))\n \n # resets h-cost, g-cost, update and occ for all cells\n for i in xrange(ni):\n for j in xrange(nj):\n occ_grid.idx2cell((i,j)).reset_for_planner(goal_idx)\n # !(settled) use occ_grid.idx2cell() and the cell's reset_for_planner()\n # pass\n \n # put start cell into open list\n \n start_cell = occ_grid.idx2cell(start_idx)\n start_cell.set_g_cost(Distance(0, 0))\n open_list.add(start_cell)\n #print('start_cell: ')\n #print(start_cell)\n \n # !(settled) get the start cell from start_idx\n # !(settled) set the start cell distance using set_g_cost and Distance(0, 0)\n # !(settled) add the cell to open_list\n # m = 0\n # now we non-recursively search the map\n while open_list.not_empty():\n # m += 1\n # if m == 1000: \n # print(open_list)\n # raise Exception()\n #print(open_list)\n #print(\"bp0\")\n cell = open_list.remove()\n #print (cell)\n # skip if already visited, bcos a cheaper path was already found\n if cell.visited: \n continue\n \n cell.visited = True \n #print(\"bp1\") \n # !(settled) set the cell as visiteds\n \n if goal_found:\n cell = occ_grid.idx2cell(goal_idx)\n\n # goal\n if cell.idx == goal_idx: \n while True:\n path.append(cell.idx)\n #print(\"bp1.5\")\n cell = cell.parent\n #print(cell)\n if cell == None:\n #print(\"bp2\")\n break\n # 
!(settled) append the cell.idx onto path\n # !(settled) let cell = cell's parent\n # !(settled) if cell is None, break out of the while loop\n #pass\n break # breaks out of the loop: while open_list.not_empty()\n \n # if not goal or not visited, we try to add free neighbour cells into the open list\n #print(\"bp2.5\")\n for nb_cell in self.get_free_neighbors(cell):\n change_g = Distance.from_separation(cell.idx, nb_cell.idx)\n \n\n if nb_cell.is_inflation():\n change_g = Distance(change_g.axes[0]*30, change_g.axes[1]*30)\n \n tent_g_cost = cell.g_cost + change_g\n #print(\"bp3\")\n #print (cell)\n if tent_g_cost < nb_cell.g_cost:\n nb_cell.set_g_cost(tent_g_cost)\n nb_cell.parent = cell\n open_list.add(nb_cell)\n #print(\"bp4\")\n idx = nb_cell.idx; ni = idx[0]; nj = idx[1]\n if (ni == goal_idx[0] and nj == goal_idx[1]):\n goal_found = True\n break\n # !(unsettled) calculate the tentative g cost of getting from current cell (cell) to neighbouring cell (nb_cell)...\n # !(unsettled) use cell.g_cost and Distance.from_separation()\n # !(settled) if the tentative g cost is less than the nb_cell.g_cost, ...\n # !(settled) 1. assign the tentative g cost to nb_cell's g cost using set_g_cost\n # !(settled) 2. set the nb_cell parent as cell\n # !(settled) 3. add the nb_cell to the open list using open_list.add()\n #pass\n \n return path", "def calc_hybrid_astar_path(sx, sy, syaw, gx, gy, gyaw, ox, oy, xyreso, yawreso, obreso):\n\n syaw, gyaw = reeds_shepp.pi_2_pi(syaw), reeds_shepp.pi_2_pi(gyaw)\n\n c = calc_config(ox, oy, xyreso, yawreso, obreso)\n data = []\n for i in zip(ox, oy):\n data.append({0: i[0], 1: i[1]})\n root = kdtree.create(data, dimensions=2)\n\n obmap, gkdtree = calc_obstacle_map(ox, oy, c)\n # obmap = loaddata('obmap.pkl')\n # gkdtree = loaddata('gkdtree.pkl')\n\n nstart = Node(round(sx/xyreso), round(sy/xyreso), round(syaw/yawreso), True, [sx], [sy], [syaw], 0.0, 0.0, -1)\n ngoal = Node(round(gx/xyreso), round(gy/xyreso), round(gyaw/yawreso), True, [gx], [gy], [gyaw], 0.0, 0.0, -1)\n\n if USE_HOLONOMIC_WITH_OBSTACLE_HEURISTIC:\n h_dp = calc_holonomic_with_obstacle_heuristic(ngoal, ox, oy, xyreso)\n # h_dp = loaddata('h_dp.pkl')\n else:\n h_dp = []\n\n if USE_NONHOLONOMIC_WITHOUT_OBSTACLE_HEURISTIC:\n h_rs = calc_nonholonomic_without_obstacle_heuristic(ngoal, c)\n else:\n h_rs = []\n\n open, closed = {}, {}\n open[calc_index(nstart, c)] = nstart\n pq = queue.PriorityQueue()\n pq.put((calc_cost(nstart, h_rs, h_dp, ngoal, c), calc_index(nstart, c)))\n\n u, d = calc_motion_inputs()\n nmotion = len(u)\n\n while True:\n if len(open) == 0:\n print(\"Error: Cannot find path, No open set\")\n return None, None, None\n\n c_v, c_id = pq.get()\n current = open[c_id]\n\n isupdated, current = update_node_with_analystic_expantion(current, ngoal, obmap, c, root, ox, oy)\n\n if isupdated:\n closed[calc_index(ngoal, c)] = current\n break #goal\n\n #move current node from open to closed\n open.pop(c_id)\n closed[c_id] = current\n\n for i in range(nmotion):\n node = calc_next_node(current, c_id, u[i], d[i], c, gkdtree)\n\n if not verify_index(node, obmap, c, root, ox, oy):\n continue\n\n node_ind = calc_index(node, c)\n\n # If it is already in the closed set, skip it\n if node_ind in closed.keys():\n continue\n\n if node_ind not in open.keys():\n open[node_ind] = node\n pq.put((calc_cost(node, h_rs, h_dp, ngoal, c), calc_index(node, c)))\n\n # closed = loaddata('closed.pkl')\n rx, ry, ryaw = get_final_path(closed, ngoal, nstart, c)\n\n return rx, ry, ryaw", "def construct_path(aux_structures, 
node_ids): \n nodes, ways, max_speed_dic = aux_structures\n path = []\n for id_ in node_ids:\n path.append(map_node_id_to_coordinates(aux_structures, id_))\n return path", "def allPaths(graph, start, end, maxTotalDist, maxDistOutdoors, path = []):\n\n path = path + [start]\n\n if start == end:\n totLength, outLength = pathLength(graph, path)\n if (totLength <= maxTotalDist) and (outLength <= maxDistOutdoors):\n return [path]\n if not (graph.hasNode(start)):\n return []\n paths = []\n for node in graph.childrenOf(start):\n if node[0] not in path:\n #print \"current path \" + str(path)\n extended_paths = allPaths(graph, node[0], end, maxTotalDist, maxDistOutdoors, path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def bfs(obstacles, start, end_coords):\n\n\n\n seen = set([start])\n queue = deque([[start]])\n end_coords = set(end_coords)\n paths = {}\n\n\n\n while queue and end_coords:\n path = queue.popleft()\n current = path[-1]\n\n # Found an objective, yield objective and path\n if current in end_coords:\n end_coords.remove(current)\n paths[current] = path\n\n # Try each direction\n for coord in [tuple(map(add, current, i)) for i in MOVE_COORDS]:\n if not (0 <= coord[0] < FIELD_HEIGHT and 0 <= coord[1] < FIELD_WIDTH):\n continue\n if coord in seen or coord in obstacles:\n continue\n\n queue.append(path + [coord])\n seen.add(coord)\n\n\n return paths", "def obstacles(self):\r\n\r\n #Radious arround the head\r\n limit_sight = self.snake_sight\r\n head = self.body[0].position\r\n binary_map_complete = self.complete_mapping()\r\n map_matrix = np.matrix(binary_map_complete)\r\n obstacles = []\r\n\r\n #limits in all directions\r\n left_x = head[0] - limit_sight\r\n right_x = head[0] + limit_sight\r\n up_y = head[1] - limit_sight\r\n down_y = head[1] + limit_sight\r\n\r\n #submatrix with limits size\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:right_x+1]\r\n\r\n #Special cases where the snake approximates to the borders\r\n ##Corners\r\n if left_x < 0 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[temporal, snake_sight] \r\n return snake_sight\r\n \r\n if left_x < 0 and down_y > self.limits[1] - 1:\r\n snake_sight = map_matrix[up_y:self.limits[1], 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = 
map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[temporal, snake_sight]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n\r\n ##Middle\r\n if left_x < 0:\r\n snake_sight = map_matrix[up_y:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n return snake_sight\r\n\r\n if right_x > self.limits[0]-1:\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n return snake_sight\r\n\r\n if up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:right_x+1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[interval_y_matrix, snake_sight]\r\n return snake_sight\r\n \r\n if down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:right_x+1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[snake_sight, interval_y_matrix]\r\n return snake_sight\r\n\r\n return snake_sight", "def walk(self) -> List[Tuple[Cell, Direction]]:\n start_cell = self.get_random_unvisited_cell()\n visits = {}\n cell = start_cell\n\n while True:\n neighbor = random.choice(list(self.maze.neighbors(cell))) # pick a random neighbor\n direction = Direction.between(cell, neighbor)\n visits[cell] = direction\n if neighbor in self.included_cells:\n break\n cell = neighbor\n\n path = []\n cell = start_cell\n while cell in visits:\n direction = visits[cell]\n path.append((cell, direction))\n cell = self.maze.neighbor(cell, direction)\n return path", "def find_good_paths(self):\n return self.robot_step((0,0),[])", "def find_path(self,enemy_type):\n visited = {self.destination:()} #Stores already visited positions on the terrain.\n\n #distance_from_end = distance from the closest terrain vertex.\n distance_from_end = min([self.destination[0],self.destination[1],\n len(self.terrain)-self.destination[0],len(self.terrain[1])-self.destination[1]])\n start_node = (self.destination[0],self.destination[1],0,distance_from_end+0) #Creates start node.\n\n processing_queue = [start_node] #Priority queue containing all nodes that should be processed.\n\n while 
processing_queue:\n #Sorts the processing_queue so nodes with lowest heuristic values are at the end.\n processing_queue.sort(key=itemgetter(3))\n processing_queue.reverse()\n\n current_node = processing_queue.pop() #Takes the node with lowest heuristic value from the queue.\n\n if ((current_node[0] == 0 or current_node[0] == len(self.terrain)-1) and\n (current_node[1] == 0 or current_node[1] == len(self.terrain[0])-1)):\n #Current node is at the vertex of the terrain, what means that goal is reached.\n #Pathfinding cycle will be ended because it is no longer required.\n break\n\n #Tuple of neighbouring nodes is created.\n neighbouring_nodes = ((current_node[0]-1,current_node[1]),(current_node[0]+1,current_node[1]),\n (current_node[0],current_node[1]-1),(current_node[0],current_node[1]+1))\n\n for node in neighbouring_nodes: #Iterates through all neighbouring nodes.\n if not node in visited: #Checks if the node hasn't been already visited\n #Checks if the node position can be passed by that enemy type\n if self.terrain_encoding[self.terrain[node[0]][node[1]]] in self.ENEMY_TYPES[enemy_type][\"Can_cross\"]:\n #Obtains distance of the new node from the closest terrain vertex.\n distance_from_end = min([node[0],node[1],\n len(self.terrain)-node[0],len(self.terrain[1])-node[1]])\n \n #Creates new node for processing_queue.\n new_node = (node[0],node[1],current_node[2]+1,distance_from_end+current_node[2]+1)\n\n visited[node] = (current_node[0],current_node[1])\n processing_queue.append(new_node)\n\n #Creates the path itself.\n current_node = (current_node[0],current_node[1])\n path = []\n\n while current_node: #While the destination hasn't been reached.\n path.append(current_node) #Adds current_node to the path.\n current_node = visited[current_node] #Selects next node on the way to the destination.\n\n return path", "def _get_path(self, pos, controls):\n \n path = []\n\n for phi, m, steps in controls:\n for _ in range(steps):\n car_state = self.get_car_state(pos, phi)\n path.append(car_state)\n pos = self.step(pos, phi, m)\n \n car_state = self.get_car_state(pos, phi)\n path.append(car_state)\n\n return path", "def traverse_path(self, right, down):\n objects_along_path = []\n number_of_iterations = ceil(len(self)/down)\n for n in range(1, number_of_iterations):\n objects_along_path.append(\n toboggan_map.get_object(right * n + 1, down * n + 1)\n )\n return objects_along_path", "def _build_path(self):\n for point_3d in self.path_coordinates:\n self.connect_point_with_neighbors(point_3d)", "def test_multiple_working_paths(self):\n # (1) A 5x5 dungeon\n rooms = list(range(25))\n hallways = [(22, 23), (5, 6), (15, 20), (23, 22), (9, 8), (2, 1), (10, 15), (8, 9), (1, 6), (16, 21), (21, 16),\n (18, 19), (9, 4), (14, 9), (1, 2), (18, 23), (21, 20), (19, 18), (4, 9), (20, 21), (19, 14), (5, 0),\n (16, 15), (3, 4), (24, 23), (22, 21), (3, 2), (24, 19), (9, 14), (14, 19), (15, 10), (19, 24),\n (0, 5), (2, 3), (1, 0), (23, 24), (6, 5), (0, 1), (10, 5), (8, 3), (5, 10), (21, 22), (6, 1),\n (3, 8), (4, 3), (20, 15), (15, 16), (23, 18)]\n dungeon: Dungeon = Dungeon(rooms, hallways)\n\n \"\"\"\n Dungeon looks like:\n \n -----\n --W--\n -W-W-\n --W--\n -----\n \n where \"-\" is an empty room (connected vertically and horizontally NOT diagonally to other available rooms) \n and where the \"W\" is a locked room (unavailable from all sides)\n \n The S is where the start point is, the E is where the endpoint is (these are a \"-\").\n \"\"\"\n\n start_room = 0\n end_room = 24\n stamina_limit = 100\n\n path, 
student_stamina = dungeon_escape(dungeon, start_room, end_room, stamina_limit)\n self.path_checker(dungeon, path, stamina_limit, start_room, end_room, student_stamina)\n\n # (2) A 10x10 dungeon\n rooms = list(range(60))\n hallways = [(24, 30), (54, 55), (55, 54), (35, 41), (32, 31), (52, 58), (11, 5), (44, 45), (52, 53), (57, 56),\n (47, 53), (53, 59), (12, 6), (54, 48), (17, 23), (27, 28), (4, 10), (3, 2), (42, 36), (5, 11), (4, 5),\n (6, 0), (14, 15), (49, 48), (15, 14), (55, 56), (0, 1), (21, 15), (56, 55), (27, 21), (2, 1), (29, 23),\n (45, 44), (27, 33), (11, 10), (24, 18), (58, 59), (55, 49), (12, 13), (21, 27), (53, 52), (17, 11),\n (28, 27), (5, 4), (10, 4), (12, 18), (1, 0), (42, 48), (57, 58), (48, 54), (49, 55), (30, 36),\n (36, 42), (28, 29), (33, 27), (30, 31), (35, 29), (31, 30), (36, 30), (32, 33), (13, 12), (18, 12),\n (23, 17), (41, 47), (30, 24), (59, 53), (2, 3), (48, 42), (15, 21), (0, 6), (3, 4), (29, 28), (10, 11),\n (14, 13), (29, 35), (33, 32), (31, 32), (1, 2), (56, 57), (47, 41), (59, 58), (18, 24), (23, 29),\n (41, 35), (6, 12), (58, 57), (48, 49), (11, 17), (53, 47), (58, 52), (13, 14), (4, 3)]\n dungeon: Dungeon = Dungeon(rooms, hallways)\n\n \"\"\"\n Dungeon looks like:\n \n S-----\n -WWW--\n ----W-\n -WW-W-\n -WW---\n ----W-\n -WWWW-\n -W--W-\n --WWE-\n ------\n \n Where \"-\" is an empty room (connected vertically and horizontally NOT diagonally to other available rooms) \n and where the \"W\" is a locked room (unavailable from all sides)\n \n The S is where the start point is, the E is where the endpoint is (these are a \"-\").\n \"\"\"\n\n start_room = 0\n end_room = 53\n stamina_limit = 100\n\n path, student_stamina = dungeon_escape(dungeon, start_room, end_room, stamina_limit)\n self.path_checker(dungeon, path, stamina_limit, start_room, end_room, student_stamina)\n\n # (3) A 25x15 dungeon\n rooms = list(range(375))\n hallways = [(31, 6), (125, 126), (156, 157), (356, 357), (257, 232), (18, 19), (222, 223), (206, 181), (155, 130),\n (205, 180), (264, 239), (336, 337), (113, 114), (77, 76), (213, 212), (144, 145), (299, 298),\n (186, 161), (332, 333), (350, 325), (138, 163), (3, 2), (130, 105), (4, 5), (229, 204), (197, 172),\n (46, 45), (98, 97), (105, 106), (55, 56), (107, 108), (180, 155), (371, 372), (132, 157), (33, 34),\n (273, 274), (181, 182), (109, 110), (211, 186), (272, 297), (361, 336), (169, 144), (42, 67),\n (215, 190), (23, 24), (274, 249), (325, 326), (254, 253), (258, 257), (172, 147), (211, 236),\n (19, 44), (230, 205), (366, 341), (203, 178), (29, 4), (111, 110), (21, 46), (211, 210), (19, 18),\n (58, 59), (235, 260), (198, 199), (185, 210), (20, 21), (64, 39), (332, 357), (237, 262), (31, 56),\n (336, 335), (144, 143), (367, 368), (10, 9), (251, 250), (63, 38), (225, 250), (195, 170), (329, 330),\n (1, 0), (255, 254), (187, 162), (139, 140), (229, 254), (324, 349), (231, 206), (199, 174), (205, 206),\n (345, 344), (335, 360), (143, 168), (132, 131), (182, 157), (248, 247), (124, 123), (34, 59),\n (100, 75), (357, 356), (35, 34), (251, 276), (182, 183), (214, 215), (42, 43), (228, 229), (160, 185),\n (36, 37), (265, 264), (190, 215), (78, 77), (253, 278), (33, 8), (302, 303), (354, 355), (352, 351),\n (160, 159), (303, 302), (209, 208), (351, 352), (337, 336), (26, 25), (230, 229), (305, 306),\n (212, 187), (235, 234), (363, 362), (164, 189), (148, 123), (299, 324), (209, 234), (337, 362),\n (230, 255), (30, 5), (167, 168), (231, 256), (39, 64), (157, 158), (8, 33), (334, 359), (204, 179),\n (140, 115), (156, 181), (18, 17), (209, 184), 
(337, 312), (145, 120), (297, 298), (232, 231), (8, 7),\n (108, 107), (148, 147), (253, 228), (61, 36), (154, 179), (113, 112), (18, 43), (53, 78), (139, 164),\n (336, 361), (144, 169), (231, 230), (150, 175), (278, 303), (300, 325), (205, 230), (188, 187),\n (374, 349), (3, 4), (335, 336), (132, 107), (143, 144), (71, 72), (138, 139), (50, 25), (5, 6),\n (321, 346), (105, 104), (131, 156), (256, 255), (46, 21), (33, 32), (223, 222), (273, 272), (52, 77),\n (129, 130), (181, 180), (331, 330), (107, 132), (109, 108), (197, 222), (247, 272), (305, 330),\n (364, 365), (273, 298), (265, 240), (164, 163), (214, 189), (216, 215), (24, 23), (299, 274),\n (354, 329), (68, 69), (28, 3), (220, 195), (334, 335), (204, 203), (308, 307), (123, 98), (211, 212),\n (19, 20), (232, 207), (58, 57), (192, 193), (320, 321), (97, 98), (198, 197), (154, 155), (358, 359),\n (138, 113), (21, 22), (198, 223), (185, 186), (62, 37), (159, 134), (123, 148), (189, 190), (348, 349),\n (190, 189), (322, 321), (350, 351), (110, 111), (270, 245), (198, 173), (333, 308), (177, 178),\n (369, 370), (205, 204), (96, 97), (6, 7), (183, 208), (311, 336), (122, 123), (52, 53), (181, 156),\n (7, 6), (199, 198), (96, 71), (173, 198), (248, 249), (345, 320), (130, 155), (35, 36), (182, 181),\n (214, 213), (266, 291), (42, 41), (259, 258), (248, 223), (322, 297), (260, 261), (161, 186),\n (325, 300), (357, 332), (35, 10), (110, 135), (226, 225), (354, 353), (361, 362), (37, 38), (169, 170),\n (174, 173), (114, 115), (305, 304), (173, 148), (183, 184), (235, 236), (311, 312), (363, 364),\n (256, 257), (175, 150), (303, 278), (39, 14), (143, 118), (26, 1), (173, 174), (373, 374), (24, 49),\n (208, 183), (240, 215), (56, 55), (234, 259), (263, 264), (157, 156), (99, 98), (106, 107), (22, 23),\n (9, 34), (215, 214), (297, 296), (30, 55), (23, 22), (300, 275), (60, 35), (157, 182), (232, 233),\n (8, 9), (189, 214), (275, 300), (65, 40), (358, 333), (97, 72), (367, 366), (145, 144), (65, 66),\n (149, 174), (203, 202), (11, 10), (234, 209), (362, 337), (177, 202), (236, 237), (12, 13), (145, 170),\n (192, 167), (159, 184), (319, 344), (359, 360), (138, 137), (374, 373), (5, 4), (55, 30), (319, 294),\n (71, 96), (72, 97), (177, 152), (369, 344), (40, 39), (72, 71), (43, 18), (50, 75), (250, 275),\n (331, 332), (212, 237), (38, 39), (333, 334), (39, 38), (240, 239), (162, 187), (164, 139), (163, 162),\n (349, 348), (28, 29), (43, 42), (334, 333), (349, 374), (129, 104), (30, 31), (78, 53), (31, 30),\n (97, 96), (276, 251), (358, 357), (208, 209), (288, 289), (16, 17), (21, 20), (58, 33), (298, 273),\n (185, 184), (288, 263), (12, 37), (158, 133), (189, 188), (67, 66), (167, 192), (0, 25), (41, 66),\n (332, 307), (110, 109), (348, 373), (273, 248), (2, 3), (179, 204), (140, 139), (372, 371), (177, 176),\n (335, 334), (317, 292), (119, 120), (6, 5), (40, 15), (4, 29), (309, 334), (369, 368), (331, 356),\n (53, 54), (6, 31), (333, 358), (233, 208), (41, 16), (126, 101), (106, 131), (259, 260), (135, 136),\n (260, 235), (324, 299), (351, 326), (37, 36), (104, 129), (232, 257), (133, 134), (261, 262),\n (361, 360), (40, 65), (114, 113), (169, 168), (38, 13), (174, 149), (75, 50), (257, 258), (325, 350),\n (180, 205), (173, 172), (70, 71), (373, 372), (25, 50), (116, 117), (310, 335), (56, 57), (278, 253),\n (156, 155), (356, 355), (208, 207), (16, 15), (330, 355), (239, 240), (56, 31), (106, 105), (22, 21),\n (127, 102), (59, 58), (263, 288), (101, 102), (60, 61), (261, 236), (67, 42), (99, 74), (9, 10),\n (62, 63), (332, 331), (251, 226), (63, 
62), (275, 276), (151, 152), (203, 204), (11, 12), (225, 226),\n (353, 354), (335, 310), (65, 64), (71, 46), (122, 147), (330, 305), (48, 49), (13, 14), (120, 145),\n (147, 122), (248, 273), (48, 23), (207, 232), (15, 40), (4, 3), (54, 29), (120, 119), (98, 123),\n (151, 126), (54, 55), (100, 101), (32, 57), (55, 54), (224, 249), (124, 99), (40, 41), (34, 35),\n (238, 239), (32, 31), (262, 237), (224, 223), (139, 114), (223, 224), (38, 37), (352, 353), (228, 253),\n (179, 178), (333, 332), (250, 251), (134, 135), (160, 161), (36, 61), (76, 77), (180, 181), (212, 213),\n (262, 263), (38, 63), (44, 45), (160, 135), (170, 145), (25, 26), (373, 348), (163, 164), (29, 30),\n (131, 106), (163, 138), (206, 231), (30, 29), (28, 53), (220, 245), (242, 243), (370, 371), (20, 19),\n (125, 100), (346, 371), (17, 18), (148, 173), (135, 110), (151, 176), (203, 228), (154, 129), (11, 36),\n (208, 233), (365, 366), (16, 41), (13, 38), (185, 160), (184, 183), (312, 311), (207, 208), (15, 16),\n (67, 68), (150, 151), (234, 235), (362, 363), (1, 26), (137, 162), (69, 70), (158, 183), (2, 1),\n (188, 163), (225, 200), (193, 168), (3, 28), (195, 220), (321, 296), (5, 30), (321, 322), (53, 52),\n (119, 144), (122, 97), (179, 154), (118, 143), (256, 231), (76, 101), (137, 112), (36, 35), (266, 265),\n (133, 132), (183, 158), (135, 160), (261, 260), (34, 9), (200, 225), (105, 130), (233, 258), (37, 12),\n (206, 207), (306, 307), (66, 67), (168, 167), (257, 256), (70, 69), (245, 270), (166, 167), (202, 177),\n (167, 166), (57, 58), (59, 60), (356, 331), (101, 100), (155, 154), (330, 331), (298, 299), (210, 185),\n (370, 345), (253, 254), (61, 62), (9, 8), (236, 211), (62, 61), (289, 264), (158, 159), (210, 211),\n (334, 309), (70, 45), (353, 352), (159, 158), (13, 12), (115, 114), (43, 68), (149, 148), (249, 274),\n (53, 28), (245, 220), (71, 70), (45, 70), (127, 152), (54, 53), (131, 130), (258, 233), (132, 133),\n (33, 58), (264, 289), (34, 33), (238, 237), (233, 234), (41, 42), (274, 273), (307, 332), (126, 127),\n (130, 131), (35, 60), (264, 263), (364, 363), (237, 212), (45, 20), (179, 180), (43, 44), (134, 133),\n (262, 261), (161, 162), (37, 62), (112, 113), (168, 143), (247, 248), (274, 299), (309, 310),\n (237, 238), (45, 46), (134, 159), (108, 133), (44, 69), (111, 136), (239, 264), (254, 229), (68, 67),\n (170, 195), (29, 28), (118, 119), (168, 193), (296, 321), (370, 369), (29, 54), (104, 105), (147, 172),\n (64, 63), (157, 132), (239, 238), (31, 32), (63, 64), (289, 288), (17, 16), (308, 333), (213, 238),\n (365, 364), (186, 187), (108, 109), (17, 42), (148, 149), (49, 74), (184, 185), (231, 232), (152, 127),\n (184, 159), (234, 233), (362, 361), (255, 230), (187, 186), (69, 68), (229, 230), (188, 189), (14, 39),\n (46, 71), (137, 138), (124, 149), (49, 24), (178, 179), (115, 140), (318, 319), (193, 192), (319, 318),\n (186, 211), (114, 139), (321, 320), (176, 177), (304, 305), (359, 334), (184, 209), (112, 137),\n (275, 250), (312, 337), (214, 239), (240, 265), (176, 151), (144, 119), (260, 259), (112, 111),\n (226, 251), (228, 203), (36, 11), (202, 203), (310, 311), (183, 182), (311, 310), (206, 205),\n (306, 305), (66, 65), (168, 169), (161, 136), (296, 297), (162, 163), (366, 367), (77, 52), (349, 324),\n (200, 175), (136, 111), (292, 317), (346, 345), (307, 306), (204, 205), (308, 309), (57, 56),\n (263, 262), (294, 319), (172, 173), (249, 248), (329, 304), (222, 197), (155, 156), (10, 35),\n (330, 329), (291, 292), (298, 297), (61, 60), (66, 41), (136, 161), (101, 76), (259, 234), (291, 266),\n 
(69, 44), (158, 157), (210, 209), (14, 15), (276, 275), (360, 359), (136, 135), (236, 235), (207, 206),\n (12, 11), (210, 235), (189, 164), (15, 14), (115, 116), (0, 1), (181, 206), (263, 238), (49, 48),\n (117, 118), (359, 358), (149, 124), (176, 175), (304, 303), (131, 132), (7, 8), (355, 354), (223, 198),\n (129, 154), (197, 198), (233, 232), (41, 40), (126, 125), (130, 129), (264, 265), (44, 19), (102, 77),\n (238, 213), (351, 350), (161, 160), (133, 158), (309, 308), (237, 236), (45, 44), (250, 225),\n (182, 207), (204, 229), (109, 134), (25, 0), (150, 125), (152, 151), (292, 291), (111, 112),\n (118, 117), (123, 122), (162, 137), (320, 345), (97, 122), (133, 108), (156, 131), (99, 124),\n (127, 126), (59, 34), (75, 76), (306, 331), (326, 325), (360, 335), (101, 126), (186, 185), (207, 182),\n (239, 214), (215, 216), (147, 148), (77, 78), (213, 214), (326, 351), (187, 188), (229, 228),\n (317, 318), (137, 136), (119, 118), (126, 151), (178, 177), (318, 317), (125, 150), (98, 99),\n (134, 109), (255, 256), (102, 101), (100, 125), (32, 33), (107, 106), (243, 242), (371, 370),\n (272, 273), (42, 17), (113, 138), (345, 370), (135, 134), (102, 127), (32, 7), (224, 199), (223, 248),\n (175, 176), (303, 304), (310, 309), (39, 40), (344, 319), (170, 169), (297, 322), (238, 263),\n (162, 161), (366, 365), (254, 255), (258, 259), (163, 188), (116, 115), (307, 308), (368, 369),\n (20, 45), (75, 100), (215, 240), (346, 321), (23, 48), (152, 177), (344, 369), (336, 311), (77, 102),\n (236, 261), (57, 32), (172, 197), (175, 200), (297, 272), (60, 59), (249, 224), (68, 43), (155, 180),\n (10, 11), (187, 212), (329, 354), (151, 150), (222, 247), (14, 13), (360, 361), (136, 137), (320, 319),\n (331, 306), (159, 160), (319, 320), (1, 2), (341, 366), (139, 138), (117, 116), (372, 373), (371, 346),\n (345, 346), (355, 356), (74, 99), (247, 222), (7, 32), (199, 224), (357, 358), (355, 330), (174, 199),\n (188, 213), (265, 266), (76, 75), (180, 179), (212, 211), (44, 43), (235, 210), (178, 203), (209, 210),\n (64, 65), (230, 231), (74, 49), (304, 329), (213, 188), (272, 247), (344, 345), (368, 367), (123, 124)]\n dungeon: Dungeon = Dungeon(rooms, hallways)\n\n \"\"\"\n The dungeon looks like:\n \n -------------------------\n --W-------------------W--\n -W---------------------W-\n ----WWWWWWWWWWWWWWWWW----\n --EW-----------------W---\n ---W------------WW---W---\n ---W-----------W-----W---\n ----------------W--W-W---\n -W---------------WWW-W---\n --W-------------W--W-W---\n --W--------------WWW-W---\n --W-WWWWWWWWW--W--W-W----\n -W-----------WW-W------W-\n --WW---------WWW-WW---W--\n ------------------------S\n \n Where \"-\" is an empty room (connected vertically and horizontally NOT diagonally to other available rooms) \n and where the \"W\" is a locked room (unavailable from all sides)\n \n The S is where the start point is, the E is where the endpoint is (these are a \"-\").\n \"\"\"\n\n start_room = 374\n end_room = 183\n stamina_limit = 500\n\n path, student_stamina = dungeon_escape(dungeon, start_room, end_room, stamina_limit)\n self.path_checker(dungeon, path, stamina_limit, start_room, end_room, student_stamina)", "def Dijkstra(grid, source, destination, obstacles):\r\n\r\n unvisited = copy.deepcopy(grid) # unvisited wordt een copy van het grid, wanneer een coordinaat is bezocht door het\r\n # algoritme wordt dit coordinaat uit unvisited gehaald.\r\n grid_Ext = copy.deepcopy(grid) # Dit wordt het grid, maar met extra informatie belangrijk voor het algoritme,\r\n # een coordinaat/vertex staat hier in 
de form:\r\n # [xCor, Ycor, DistFromSource, OptimalPrevVertex, [neighbourVertex[xCor, yCor, dist]], visited, obstacle]\r\n # Wanneer een coordinaat de extra informatie bevat, wordt het een vertex genoemd in de code\r\n sourceIndex = grid_Ext.index(source) # De index van het doel in grid_Ext.\r\n # Hieronder wordt de extra informatie toegevoegd aan elk coordinaat/vertex, waarbij:\r\n # DistFromSource=-1, OptimalPrevVertex=[-1,-1], De lijst van buren is leeg, visited=0, obstacle=0\r\n for vertex in grid_Ext:\r\n vertex.append(10000000000)\r\n vertex.append([-1,-1])\r\n vertex.append([])\r\n vertex.append(0)\r\n vertex.append(0)\r\n vertex[4] = neighboursCal(vertex, grid[-1][0], grid[-1][1])\r\n # print(grid[-1])\r\n # print(grid[-1][0])\r\n # print(grid[-1][1])\r\n\r\n # Zet waarde obstacle in alle obstacle vertexen op 1\r\n for obstacle in obstacles:\r\n obstacleVertex = FindVertexFromCor(grid_Ext, obstacle)\r\n grid_Ext[grid_Ext.index(obstacleVertex)][6] = 1\r\n\r\n grid_Ext[sourceIndex][2] = 0 # zet de afstand tot de bron van de bron op 0\r\n\r\n currentNeighbourIndex = 0 # De index van de buur waar de afstand tot de bron wordt berekend\r\n currenVertexCor = copy.deepcopy(source) # CurrenVertexCor is het coordinaat van de huidige vertex\r\n listUnVisitedNeighbours = []# Een lijst met coordinaten van buren met visited=0\r\n # dit wordt gebruikt tijdens het bepalen welke volgende vertex bezocht moet worden\r\n step = 0\r\n\r\n # Dit is het Algoritme\r\n while len(unvisited) != 0: # Het algorimte moet stoppen wanneer er geen onbezochte vertexen meer zijn.\r\n currenVertex = FindVertexFromCor(grid_Ext, currenVertexCor) # currenVertex: de vertex van het huidige coordinaat\r\n for neighbour in grid_Ext[grid_Ext.index(currenVertex)][4]: # For buren in de lijst met buren in currenVertex:\r\n currentNeighbourCor = [] # Wat is het coordinaat van de buur?\r\n currentNeighbourCor.append(neighbour[0])\r\n currentNeighbourCor.append(neighbour[1])\r\n\r\n currentNeighbour = FindVertexFromCor(grid_Ext, currentNeighbourCor) # Dit is de vertex van de buur\r\n\r\n currentNeighbourIndex = grid_Ext.index(currentNeighbour) # Wat is de index van de buur in grid_ext\r\n\r\n if grid_Ext[currentNeighbourIndex][5] == 1: # als de buur visited=1 heeft,\r\n continue # hoeft hij niet opnieuw berekend te worden\r\n\r\n newDistance = grid_Ext[grid_Ext.index(currenVertex)][2] + neighbour[2] # Het berekenen van de afstand van de buur\r\n # tot de bron via het huidige coordinaat\r\n if newDistance < grid_Ext[currentNeighbourIndex][2]: # Als deze afstand korter is dan de afstand bij de\r\n grid_Ext[currentNeighbourIndex][2] = newDistance # huidige kortste route, dan is deze route dus\r\n grid_Ext[currentNeighbourIndex][3] = currenVertexCor# sneller en moet dit de nieuwe korste route worden\r\n listUnVisitedNeighbours.append(currentNeighbourCor) # De huidige buur moet wel bij listUnVisitedNeighbours\r\n try:\r\n unvisited.remove(currenVertexCor) # Wanneer de afstand tot alle buren berekend is, is deze coordinaat bezocht\r\n except ValueError:\r\n print(\"Value Error!\\n{}\\nStep = {}\".format(currenVertexCor, step))\r\n print(\"Value? 
= {}\".format(unvisited[25]))\r\n print(currenVertexCor)\r\n print(\"step = \" + str(step))\r\n break\r\n grid_Ext[grid_Ext.index(currenVertex)][5] = 1 # Zet visited=1\r\n\r\n if (step % 20) == 0:\r\n print(\"Current step = \" + str(step))\r\n if currenVertexCor == destination:\r\n print(\"Destination reached\")\r\n print(\"End step = \" + str(step))\r\n break\r\n\r\n # Welke coordinaat moet nu worden bezocht? Dit wordt de coordinaat die het dichts bij de bron ligt, visited=0,\r\n shortestDistanceToSource = 10000 # En obstacle=0 heeft.Om er voor te zorgen dat niet alle afstanden\r\n for vertexxCor in listUnVisitedNeighbours: # van alle coordinaten worden berekend, wordt het alleen berekend\r\n vertexx = FindVertexFromCor(grid_Ext, vertexxCor) # uit de lijst listUnVisitedNeighbours\r\n if vertexx[5]== 1 or vertexx[6] == 1: # Als die al bezocht is of een obstacle is...\r\n listUnVisitedNeighbours.remove(vertexxCor)\r\n continue\r\n if grid_Ext[grid_Ext.index(vertexx)][2] < shortestDistanceToSource:\r\n shortestDistanceToSource = grid_Ext[grid_Ext.index(vertexx)][2]\r\n currenVertexCor = vertexxCor\r\n step += 1\r\n\r\n # Nu moet de route berekend worden\r\n route_step = 0\r\n route_currentVertexCor = copy.deepcopy(destination)\r\n route_routeCors = []\r\n while(True):\r\n route_routeCors.append(route_currentVertexCor)\r\n route_currentVertex = FindVertexFromCor(grid_Ext, route_currentVertexCor)\r\n route_currentVertexCor = route_currentVertex[3]\r\n if route_currentVertexCor == source:\r\n route_routeCors.append(route_currentVertexCor)\r\n break\r\n route_step += 1\r\n if route_step > 1000:\r\n print(\"Infinate loop? Something went wrong!\")\r\n break\r\n return route_routeCors", "def solution_path(self) -> list[State]:", "def plan_path(self, msg):\n # Request the map\n # In case of error, return an empty path\n mapdata = PathPlanner.request_map()\n\n if mapdata is None:\n return Path()\n # Calculate the C-space and publish it\n cspacedata = self.calc_cspace(mapdata, 3)\n # Execute A*\n start = PathPlanner.world_to_grid(mapdata, msg.start.pose.position)\n goal = PathPlanner.world_to_grid(mapdata, msg.goal.pose.position)\n \n path = self.a_star(cspacedata, start, goal) #, self.c_space_array, self.frontier, self.expanded)\n \n # Optimize waypoints\n waypoints = PathPlanner.optimize_path(path)\n # print waypoints\n waypoints.remove(waypoints[0])\n # print waypoints\n\n self.path_pub.publish(self.path_to_message(cspacedata, waypoints))\n # Return a Path message\n return self.path_to_message(cspacedata, waypoints)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine if the UAV intersects an obstacle on the vertical axis
def does_uav_intersect_obstacle_vertically(self, obstacle, drone_point, waypoint): if isinstance(obstacle, StationaryObstacle): if drone_point[2] < obstacle.height + Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS: return True return False
[ "def check_collision(self, auv_pos):\n for obs in self.obstacle_array:\n distance = self.calculate_range(auv_pos, obs)\n # obs[3] indicates the size of the obstacle\n if distance <= obs[3]:\n print(\"Hit an obstacle\")\n return True\n return False", "def intersects(self, x1, y1, x2, y2, ox1, oy1, ox2, oy2, oh):\n critical_x1 = max(x1, ox1)\n critical_x2 = min(x2, ox2)\n\n # Check if the agent is ahead of behind the obstacle at all times\n if critical_x1 > critical_x2:\n return False\n \n def trisect(a, l, r):\n if a < l: return -1\n if l <= a and a < r: return 0\n return 1\n\n def interpolate(x1, y1, x2, y2, x):\n if x1 == x2:\n assert x == x1, \"x1 == x2 ({}) but x is {}\".format(x1, x)\n return y1\n return y1 + (y2-y1) / (x2 - x1) * (x - x1)\n\n # compute positions of the agent and the obstacle at the critical points\n critical_y1 = interpolate(x1, y1, x2, y2, critical_x1)\n critical_oy1 = interpolate(ox1, oy1, ox2, oy2, critical_x1)\n critical_y2 = interpolate(x1, y1, x2, y2, critical_x2)\n critical_oy2 = interpolate(ox1, oy1, ox2, oy2, critical_x2)\n\n # If the agent stayed on one side of obstacle in these critical points,\n # it didn't collide.\n side1 = trisect(critical_y1, critical_oy1 - oh, critical_oy1)\n side2 = trisect(critical_y2, critical_oy2 - oh, critical_oy2)\n return abs(side1 + side2) != 2", "def inside_obstacle(point, obstacle):\r\n for obs in obstacle:\r\n if point[0] > obs[0][0] and point[0] < obs[0][2] and point[1] > obs[1][0] and point[1] < obs[1][2]:\r\n return 1\r\n return 0", "def _in_obstacle(point, obstacle):\n y_positions = collections.deque(p.y - point.y for p in obstacle)\n colinear = False\n inside = False\n intersects = False\n previous_point = obstacle[-1]\n for current_point in obstacle:\n if previous_point.y - current_point.y != 0:\n # Parallel lines have zero or inifinte intersections.\n # Either way, do not count them.\n if (previous_point.y <= point.y <= current_point.y or\n previous_point.y >= point.y >= current_point.y):\n # One point above the ray and one point below the ray.\n if previous_point.x - current_point.x == 0:\n # Vertical line segment.\n x = current_point.x\n else:\n a = _calculate_slope(previous_point, current_point)\n b = _calculate_y_intercept(current_point, a)\n x = (point.y - b) / a\n if math.isclose(x, point.x):\n # If point on line segment, then colinear.\n colinear = True\n else:\n # Next statement is safe, since values are not close.\n intersects = point.x < x\n elif not colinear:\n # If point on parallel line segment, then colinear.\n colinear = point.y == current_point.y and (\n previous_point.x <= point.x <= current_point.x or\n previous_point.x >= point.x >= current_point.x\n )\n\n # A point on a ray is only an intersection if the clostest\n # neighboring points are on opposite sides of the ray.\n if intersects and y_positions[-1] == 0:\n # Already encountered this intersection.\n intersects = False\n elif intersects and y_positions[0] == 0:\n next_y = next(\n (i for i in y_positions if i != 0),\n y_positions[-1]\n )\n intersects = (y_positions[-1] > 0) ^ (next_y > 0)\n if intersects:\n inside = not inside\n intersects = False\n previous_point = current_point\n y_positions.rotate(-1)\n return colinear or inside", "def interior_contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n elif Vobj.is_vertex(): \n return 
self.polyhedron()._is_positive( self.eval(Vobj) ) \n else: # Vobj.is_ray()\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )", "def does_path_intersect_obstacle_2d(self, obstacle, uav_point, waypoint):\n drone_point = uav_point[:-1]\n waypoint = waypoint[:-1]\n obstacle_point = obstacle.get_point()[:-1]\n\n waypoint_vector = np.subtract(waypoint, drone_point)\n obstacle_vector = np.subtract(obstacle_point, drone_point)\n obstacle_vector_magnitude = VectorMath.get_vector_magnitude(obstacle_vector)\n rejection_vector = VectorMath.get_vector_rejection(obstacle_vector, waypoint_vector)\n rejection_vector_magnitude = VectorMath.get_vector_magnitude(rejection_vector)\n\n # Uncomment for DEBUGGING ONLY\n print(\"Waypoint Vector: \" + str(waypoint_vector))\n print(\"Obstacle Vector: \" + str(obstacle_vector))\n print(\"Rejection Vector: \" + str(rejection_vector))\n print(\"Rejection Vector Magnitude: \" + str(rejection_vector_magnitude))\n print(\"Obstacle Radius: \" + str(obstacle.get_radius()))\n print(\"Distance From Obstacle: \" + str(VectorMath.get_vector_magnitude(np.subtract(uav_point, obstacle.get_point()))))\n\n if self.is_obstacle_in_path_of_drone(obstacle_vector, waypoint_vector):\n return rejection_vector_magnitude < obstacle.get_radius()\n\n return False", "def obstacle_in_sight(self):\n\n sensor_dist = np.asarray(self.robot.sense(), dtype=np.float)\n\n threshold = 2.0 * self.robot.getSize()\n\n # Get front sensors of the robot\n front_sensors = self.robot.getFrontSensors()\n\n # Calculate distance to next point -> threshold\n # next_point = self.carrot_donkey.get_next_point()\n next_point = self.carrot_donkey.get_carrot_position()\n robot_point = self.robot_loc.get_robot_point()\n target_dist = Calc.get_dist_from_point_to_point(robot_point, next_point)\n\n # check if there is a obstacle in front of robot\n # if target is nearer than obstacle -> ignore obstacle\n sensor_dist_min = np.nanmin(sensor_dist[front_sensors])\n if sensor_dist_min < (target_dist + threshold):\n obstacle_detected = True\n else:\n obstacle_detected = False\n\n return obstacle_detected", "def inside(self, uv):\n result = self._trimmed.Perform(gp_Pnt2d(uv[0], uv[1]))\n return result == TopAbs_IN", "def check_occlusion(self, x0: int, y0: int, x1: int, y1: int) -> int:\n \n if self.obstacles is None:\n return 0\n\n if x0 == x1 and y0 == y1:\n return 0\n\n if x0 == x1:\n if y1 < y0:\n y0, y1 = y1, y0\n\n for y in range(y0, y1+1):\n if (x0, y) in self.obstacles:\n return 1\n return 0\n\n if y0 == y1:\n if x1 < x0:\n x0, x1 = x1, x0\n\n for x in range(x0, x1+1):\n if (x, y0) in self.obstacles:\n return 1\n return 0\n \n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n # run on x\n if (dx > dy):\n if (x1 < x0):\n x0, y0, x1, y1 = x1, y1, x0, y0\n m = (y1 - y0) / (x1 - x0)\n y = y0\n for x in range(x0, x1+1):\n y_tag = round(y)\n if (x, y_tag) in self.obstacles:\n return 1\n y += m\n # run on y\n else:\n if (y1 < y0):\n x0, y0, x1, y1 = x1, y1, x0, y0\n m = (x1 - x0) / (y1 - y0)\n x = x0\n for y in range(y0, y1+1):\n x_tag = round(x)\n if (x_tag, y) in self.obstacles:\n return 1\n x += m\n\n return 0", "def check_obstructed(r1,r2): \n \n if r1==r2:\n return False\n \n #Densely sample line connecting r1 and r2.\n #If any of those sampled points is inside the rectangle, then the \n #line of sight intersects the rectangle and the tower's view is\n #obstructed.\n NP = 1000\n sampled_x = np.linspace(r1[0],r2[0],NP)\n sampled_y = np.linspace(r1[1],r2[1],NP)\n for x,y,w,h in self.coordinates__obstacles:\n for pt in xrange(NP):\n 
if (sampled_x[pt] > x) and (sampled_x[pt] < x+w) and \\\n (sampled_y[pt] > y) and (sampled_y[pt] < y+h):\n return True\n return False", "def check_for_collisions(self):\n \n # INSERT HERE - check for collisions with edge of window\n \n # INSERT HERE - check for collisions with obstacles ", "def has_intersection(self, obj):\r\n obj_x = obj.get_x()\r\n obj_y = obj.get_y()\r\n distance = math.sqrt((obj_x[0]-self.__x[0])**2 + (obj_y[0] - self.__y[0])**2)\r\n if distance <= (self.radius() + obj.get_radius()):\r\n return True\r\n return False", "def checkObjectColision(self):\n coords = self.stage.can.coords(self.cobj)\n if len(coords) == 4:\n overlappers = self.stage.can.find_overlapping(coords[0], coords[1], coords[2], coords[3])\n \n ovr_objs = []\n for obj in overlappers:\n search_id = str(obj)\n if search_id in self.stage.world_objects:\n ovr_objs.append(self.stage.world_objects[search_id])\n \n if \"collisionHandler\" in dir(self):\n self.collisionHandler(ovr_objs)", "def is_linearly_independent_2x2(u, v):\n uv = get_uv(u, v)\n if uv[0][0] * uv[1][1] - uv[1][0] * uv[0][1] != 0:\n return True\n else:\n return False", "def is_in_obstacle(self, x: float, y: float) -> bool:\n for obstacle in self.obstacles:\n if obstacle.contains_point((x, y)):\n return True\n return False", "def is_collision(self, s_start, s_end):\n point1 = (s_start[0], s_start[1])\n point2 = (s_end[0], s_end[1])\n in_cnt1 = cv2.pointPolygonTest(np.asarray([self.env.outpool_points]), point1, True)\n in_cnt2 = cv2.pointPolygonTest(np.asarray([self.env.outpool_points]), point2, True)\n # 安全距离\n if in_cnt1 < self.search_safe_pix or in_cnt2 < self.search_safe_pix:\n return True\n else:\n return False\n # if s_start in self.obs or s_end in self.obs:\n # return True\n # if s_start[0] != s_end[0] and s_start[1] != s_end[1]:\n # if s_end[0] - s_start[0] == s_start[1] - s_end[1]:\n # s1 = (min(s_start[0], s_end[0]), min(s_start[1], s_end[1]))\n # s2 = (max(s_start[0], s_end[0]), max(s_start[1], s_end[1]))\n # else:\n # s1 = (min(s_start[0], s_end[0]), max(s_start[1], s_end[1]))\n # s2 = (max(s_start[0], s_end[0]), min(s_start[1], s_end[1]))\n # if cv2.pointPolygonTest(np.asarray([self.env.outpool_points]), s1, True)>0:\n # return True\n # if cv2.pointPolygonTest(np.asarray([self.env.outpool_points]), s2, True)>0:\n # return True\n # # if s1 in self.obs or s2 in self.obs:\n # # return True", "def _in_huc(shply, huc_shply):\n if huc_shply.contains(shply):\n return 2\n elif huc_shply.intersects(shply):\n return 1\n else:\n return 0", "def collide(obj1, obj2):\n offset_x = obj2.x - obj1.x #The difference between obj1 and obj 2\n offset_y = obj2.y - obj1.y \n return obj1.mask.overlap(obj2.mask, (int(offset_x), int(offset_y))) != None # (x,y)", "def check_collision(obj0, obj1):\n # student implements it.\n if obj0.coord[0] + obj0.w / 2 < obj1.coord[0] - obj1.w / 2:\n return False\n elif obj0.coord[0] - obj0.w / 2 > obj1.coord[0] + obj1.w / 2:\n return False\n elif obj0.coord[1] + obj0.w / 2 < obj1.coord[1] - obj1.w / 2:\n return False\n elif obj0.coord[1] - obj0.w / 2 > obj1.coord[1] + obj1.w / 2:\n return False\n else:\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine if the vector between a UAV's position and the current waypoint intersects an obstacle.
def does_path_intersect_obstacle_2d(self, obstacle, uav_point, waypoint): drone_point = uav_point[:-1] waypoint = waypoint[:-1] obstacle_point = obstacle.get_point()[:-1] waypoint_vector = np.subtract(waypoint, drone_point) obstacle_vector = np.subtract(obstacle_point, drone_point) obstacle_vector_magnitude = VectorMath.get_vector_magnitude(obstacle_vector) rejection_vector = VectorMath.get_vector_rejection(obstacle_vector, waypoint_vector) rejection_vector_magnitude = VectorMath.get_vector_magnitude(rejection_vector) # Uncomment for DEBUGGING ONLY print("Waypoint Vector: " + str(waypoint_vector)) print("Obstacle Vector: " + str(obstacle_vector)) print("Rejection Vector: " + str(rejection_vector)) print("Rejection Vector Magnitude: " + str(rejection_vector_magnitude)) print("Obstacle Radius: " + str(obstacle.get_radius())) print("Distance From Obstacle: " + str(VectorMath.get_vector_magnitude(np.subtract(uav_point, obstacle.get_point())))) if self.is_obstacle_in_path_of_drone(obstacle_vector, waypoint_vector): return rejection_vector_magnitude < obstacle.get_radius() return False
[ "def does_uav_intersect_obstacle_vertically(self, obstacle, drone_point, waypoint):\n if isinstance(obstacle, StationaryObstacle):\n if drone_point[2] < obstacle.height + Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS:\n return True\n\n return False", "def inside_obstacle(point, obstacle):\r\n for obs in obstacle:\r\n if point[0] > obs[0][0] and point[0] < obs[0][2] and point[1] > obs[1][0] and point[1] < obs[1][2]:\r\n return 1\r\n return 0", "def is_obstacle_in_path_of_drone(self, obstacle_vector, waypoint_vector):\n obstacle_list = obstacle_vector.tolist()\n waypoint_list = waypoint_vector.tolist()\n\n for index in range(len(obstacle_list)):\n if all(item > 0 for item in [-1.0 * obstacle_list[index], waypoint_vector[index]]) or all(item < 0 for item in [-1.0 * obstacle_list[index], waypoint_vector[index]]):\n return False\n\n return True", "def is_behind(obj1, obj2):\n separation = get_location_vector(obj1) - get_location_vector(obj2)\n separation /= np.linalg.norm(separation)\n temp = get_velocity_vector(obj2).getA1()\n if np.dot(separation, temp) > 0:\n return False\n else:\n return True", "def check_collision(self, auv_pos):\n for obs in self.obstacle_array:\n distance = self.calculate_range(auv_pos, obs)\n # obs[3] indicates the size of the obstacle\n if distance <= obs[3]:\n print(\"Hit an obstacle\")\n return True\n return False", "def _in_obstacle(point, obstacle):\n y_positions = collections.deque(p.y - point.y for p in obstacle)\n colinear = False\n inside = False\n intersects = False\n previous_point = obstacle[-1]\n for current_point in obstacle:\n if previous_point.y - current_point.y != 0:\n # Parallel lines have zero or inifinte intersections.\n # Either way, do not count them.\n if (previous_point.y <= point.y <= current_point.y or\n previous_point.y >= point.y >= current_point.y):\n # One point above the ray and one point below the ray.\n if previous_point.x - current_point.x == 0:\n # Vertical line segment.\n x = current_point.x\n else:\n a = _calculate_slope(previous_point, current_point)\n b = _calculate_y_intercept(current_point, a)\n x = (point.y - b) / a\n if math.isclose(x, point.x):\n # If point on line segment, then colinear.\n colinear = True\n else:\n # Next statement is safe, since values are not close.\n intersects = point.x < x\n elif not colinear:\n # If point on parallel line segment, then colinear.\n colinear = point.y == current_point.y and (\n previous_point.x <= point.x <= current_point.x or\n previous_point.x >= point.x >= current_point.x\n )\n\n # A point on a ray is only an intersection if the clostest\n # neighboring points are on opposite sides of the ray.\n if intersects and y_positions[-1] == 0:\n # Already encountered this intersection.\n intersects = False\n elif intersects and y_positions[0] == 0:\n next_y = next(\n (i for i in y_positions if i != 0),\n y_positions[-1]\n )\n intersects = (y_positions[-1] > 0) ^ (next_y > 0)\n if intersects:\n inside = not inside\n intersects = False\n previous_point = current_point\n y_positions.rotate(-1)\n return colinear or inside", "def is_approaching(self, other_particle):\n if self.pos_x < other_particle.pos_x:\n d_v_x = self.velocity_x - other_particle.velocity_x\n else:\n d_v_x = other_particle.velocity_x - self.velocity_x\n\n if self.pos_y < other_particle.pos_y:\n d_v_y = self.velocity_y - other_particle.velocity_y\n else:\n d_v_y = other_particle.velocity_y - self.velocity_y\n\n return d_v_x > 0 or d_v_y > 0", "def check_position(self, player):\n\n # Mid point of the segment defining the goal\n mid 
= Point.mid_point(self.s_pos, self.e_pos)\n\n # Transposition of this point by the direction vector of the goal\n # to get the direction vector with its origin in the center of the goal\n mid_prime = self.dir + mid\n\n # Creating both needed vectors\n v1 = Vector.v_from_pp(mid, player.pos)\n v2 = Vector.v_from_pp(mid, mid_prime)\n\n # Getting the angle and checking if it is a valid one\n angle = v1.angle(v2)\n\n return self.is_in_interval(-math.pi / 2, math.pi / 2, angle)", "def has_uav_reached_current_waypoint(self):\n return self.drone.has_reached_waypoint()", "def check_close_to_obstacles(self, auv_pos):\n for obs in self.obstacle_array:\n distance = self.calculate_range(auv_pos, obs)\n # obs[3] indicates the size of the obstacle\n if distance <= (obs[3] + OBSTACLE_ZONE):\n if DEBUG: \n print(\"Close to an obstacles\")\n return True\n return False", "def obstacle_in_sight(self):\n\n sensor_dist = np.asarray(self.robot.sense(), dtype=np.float)\n\n threshold = 2.0 * self.robot.getSize()\n\n # Get front sensors of the robot\n front_sensors = self.robot.getFrontSensors()\n\n # Calculate distance to next point -> threshold\n # next_point = self.carrot_donkey.get_next_point()\n next_point = self.carrot_donkey.get_carrot_position()\n robot_point = self.robot_loc.get_robot_point()\n target_dist = Calc.get_dist_from_point_to_point(robot_point, next_point)\n\n # check if there is a obstacle in front of robot\n # if target is nearer than obstacle -> ignore obstacle\n sensor_dist_min = np.nanmin(sensor_dist[front_sensors])\n if sensor_dist_min < (target_dist + threshold):\n obstacle_detected = True\n else:\n obstacle_detected = False\n\n return obstacle_detected", "def check_for_obstacles(self):\n obs = False\n obs_p = []\n for point in self.obstacles:\n if -0.15 <= point[1] <= 0.15: # robot is 178mm wide\n # Obstacles should be less than or equal to 0.2 m away before being detected\n if 0 <= point[0] <= .2:\n obs_p.append(point)\n obs = True\n if obs:\n pos = self.determine_pos_of_obstacle(obs_p)\n data = Obstacle()\n data.x = pos[0]\n data.y = pos[1]\n data.obstacle = True\n self.obs_pub.publish(data)", "def is_in_obstacle(self, x: float, y: float) -> bool:\n for obstacle in self.obstacles:\n if obstacle.contains_point((x, y)):\n return True\n return False", "def browse_vector(self, pose: list, vector: tuple, player: str) -> bool:\n while self.is_on_board([pose[0] - vector[0], pose[1] - vector[1]]) and self.grid[pose[0]][pose[1]].status != '.':\n pose = [pose[0] - vector[0], pose[1] - vector[1]]\n if self.grid[pose[0]][pose[1]].status == player:\n return True\n return False", "def doesArmTouchObstacles(armPos, obstacles): \n \n for obstacle in obstacles:\n for arm in armPos:\n x = arm[0]\n y = arm[1]\n dist = abs((y[1] - x[1])*(obstacle[0]) - (y[0] - x[0])*(obstacle[1]) + (x[1]*y[0]) - (y[1]*x[0]))/math.sqrt((y[1] - x[1])**2 + (y[0] - x[0])**2)\n dist_x = math.sqrt((x[0] - obstacle[0])**2 + (x[1] - obstacle[1])**2)\n dist_y = math.sqrt((y[0] - obstacle[0])**2 + (y[1] - obstacle[1])**2)\n if y[0] == x[0]:\n i = x[0]\n j = obstacle[1]\n else: \n m = (y[1] - x[1]) / (y[0] - x[0])\n k = x[1] - m*x[0]\n i = (obstacle[0] + m*obstacle[1] - m*k) / (m**2 + 1)\n j = m*(i) + k\n if y[0] > x[0]:\n test_i = i >= x[0] and i <= y[0]\n else:\n test_i = i >= y[0] and i <= x[0]\n if y[1] > x[1]:\n test_j = j >= x[1] and j <= y[1]\n else:\n test_j = j >= y[1] and j <= x[1]\n if (dist <= obstacle[2] and test_i and test_j) or dist_x <= obstacle[2] or dist_y <= obstacle[2]:\n return True \n return False", "def 
through_obstacle(line, obstacles):\r\n noofpoints = 20\r\n for i in range(noofpoints):\r\n if inside_obstacle((line[0]+(i*(line[2]-line[0])/noofpoints), line[1]+(i*(line[3]-line[1])/noofpoints)), obstacles) == 1:\r\n return 1\r\n return 0", "def isInVol(self,pt):\n\t\tu = self.s1[0] - self.s2[3]\n\t\tv = self.s1[0] - self.s1[1]\n\t\tw = self.s1[0] - self.s1[3]\n\t\tif np.dot(u,self.s1[0]) > np.dot(u,pt) > np.dot(u,self.s2[3]) and \\\n\t\t\tnp.dot(v,self.s1[0]) > np.dot(v,pt) > np.dot(v,self.s1[1]) and \\\n\t\t\tnp.dot(w,self.s1[0]) > np.dot(w,pt) > np.dot(w,self.s1[3]):\n\t\t\treturn True\n\t\treturn False", "def is_obstacle(self, x, y, l):\n return self.at(x, y, l).cost == -1", "def goal_occupied(self, view):\n for line in view.obstacles:\n if linesegdist2(line.p1, line.p2, self.goal) < self.radius ** 2:\n return True\n\n for p in view.pedestrians:\n if p.velocity.length2() == 0.0:\n if p.position.distance_to2(self.goal) < p.radius:\n return True\n\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Looks at the signs of the vector components to determine whether the obstacle lies in the same direction (quadrant) as the waypoint
def is_obstacle_in_path_of_drone(self, obstacle_vector, waypoint_vector): obstacle_list = obstacle_vector.tolist() waypoint_list = waypoint_vector.tolist() for index in range(len(obstacle_list)): if all(item > 0 for item in [-1.0 * obstacle_list[index], waypoint_vector[index]]) or all(item < 0 for item in [-1.0 * obstacle_list[index], waypoint_vector[index]]): return False return True
[ "def test_determine_direction(self):\n directions = range(1, 10)\n expected = [(-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 0), (0, 1),\n (1, -1), (1, 0), (1, 1)]\n for index, direction in enumerate(directions):\n self.assertEqual(quiver_vectors[direction], expected[index])", "def does_path_intersect_obstacle_2d(self, obstacle, uav_point, waypoint):\n drone_point = uav_point[:-1]\n waypoint = waypoint[:-1]\n obstacle_point = obstacle.get_point()[:-1]\n\n waypoint_vector = np.subtract(waypoint, drone_point)\n obstacle_vector = np.subtract(obstacle_point, drone_point)\n obstacle_vector_magnitude = VectorMath.get_vector_magnitude(obstacle_vector)\n rejection_vector = VectorMath.get_vector_rejection(obstacle_vector, waypoint_vector)\n rejection_vector_magnitude = VectorMath.get_vector_magnitude(rejection_vector)\n\n # Uncomment for DEBUGGING ONLY\n print(\"Waypoint Vector: \" + str(waypoint_vector))\n print(\"Obstacle Vector: \" + str(obstacle_vector))\n print(\"Rejection Vector: \" + str(rejection_vector))\n print(\"Rejection Vector Magnitude: \" + str(rejection_vector_magnitude))\n print(\"Obstacle Radius: \" + str(obstacle.get_radius()))\n print(\"Distance From Obstacle: \" + str(VectorMath.get_vector_magnitude(np.subtract(uav_point, obstacle.get_point()))))\n\n if self.is_obstacle_in_path_of_drone(obstacle_vector, waypoint_vector):\n return rejection_vector_magnitude < obstacle.get_radius()\n\n return False", "def is_behind(obj1, obj2):\n separation = get_location_vector(obj1) - get_location_vector(obj2)\n separation /= np.linalg.norm(separation)\n temp = get_velocity_vector(obj2).getA1()\n if np.dot(separation, temp) > 0:\n return False\n else:\n return True", "def check_position(self, player):\n\n # Mid point of the segment defining the goal\n mid = Point.mid_point(self.s_pos, self.e_pos)\n\n # Transposition of this point by the direction vector of the goal\n # to get the direction vector with its origin in the center of the goal\n mid_prime = self.dir + mid\n\n # Creating both needed vectors\n v1 = Vector.v_from_pp(mid, player.pos)\n v2 = Vector.v_from_pp(mid, mid_prime)\n\n # Getting the angle and checking if it is a valid one\n angle = v1.angle(v2)\n\n return self.is_in_interval(-math.pi / 2, math.pi / 2, angle)", "def sameDirection(cls, *vectors, e=10e-10):\n l = len(vectors)\n if l == 2:\n v1 = vectors[0]\n v2 = vectors[1]\n return (abs(v1.angle - v2.angle) % (2 * math.pi)) < e\n else:\n for i in range(l):\n for j in range(i + 1, l):\n if not cls.sameDirection(vectors[i], vectors[j]):\n return False\n return True", "def checkDirection(neighbour, current_point, end):\n\n for i in range(3):\n delta = abs(end[i] - current_point[i])\n if abs(end[i] - neighbour[i]) < delta and delta >= 0:\n return True, i\n\n return False, None", "def triangle_isInside(trig: Tuple[sg.Point2, sg.Point2, sg.Point2], point: sg.Point2) -> bool:\n d1 = sign((point, trig[0], trig[1]))\n d2 = sign((point, trig[1], trig[2]))\n d3 = sign((point, trig[2], trig[0]))\n has_neg = (d1 < 0) or (d2 < 0) or (d3 < 0)\n has_pos = (d1 > 0) or (d2 > 0) or (d3 > 0)\n return not (has_neg and has_pos)", "def is_same_waypoint(self, wp1, wp2, max_d=0.5, max_v=0.5):\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n ddif = dl(wp1.pose.pose.position, wp2.pose.pose.position)\n if ddif < max_d:\n return True\n return False", "def at_goal(self):\n return (linalg.norm(np.array([self.x-self.x_g, self.y-self.y_g])) < self.near_thresh and abs(wrapToPi(self.theta - self.theta_g)) < self.at_thresh_theta)", "def 
_hasChangedDirection(motionPts: list) -> tuple:\n dispPts = Ball._getDisplacements(motionPts)\n xDir = yDir = None\n xChange = yChange = False\n for dispPt in dispPts:\n # Compute differences\n xDirNow = RIGHT if dispPt[0] > 0 else LEFT\n yDirNow = DOWN if dispPt[1] > 0 else UP\n # Look for x changes\n if xDir is None:\n xDir = xDirNow\n elif xDirNow != xDir:\n xChange = True\n # Look for y changes\n if yDir is None:\n yDir = yDirNow\n elif yDirNow != yDir:\n yChange = True\n return xChange, yChange", "def vector_equal(v1,v2):\n if (v2.x - 0.001 <= v1.x <= v2.x + 0.001) and \\\n (v2.y - 0.001 <= v1.y <= v2.y + 0.001) and \\\n (v2.z - 0.001 <= v1.z <= v2.z + 0.001):\n return True", "def check_directions(self, taken_flights):\n if self.source != taken_flights[-1].destination:\n return False\n for flight in taken_flights:\n if self.source == flight.source and self.destination == flight.destination:\n return False\n return True", "def near_goal(self):\n return linalg.norm(np.array([self.x-self.x_g, self.y-self.y_g])) < self.near_thresh", "def reached_destination(self):\n return self.rect.centerx > self.coordinates_to_move[0] - 10 and\\\n self.rect.centerx < self.coordinates_to_move[0] + 10 and\\\n self.rect.centery > self.coordinates_to_move[1] - 10 and\\\n self.rect.centery < self.coordinates_to_move[1] + 10", "def check_shot_direction(self, shot):\n return Vector.v_from_a(shot.angle) * self.dir < 0", "def R_will_change_direction(point0, point1, point2):\n\n x0, y0 = point0[0], point0[1]\n x1, y1 = point1[0], point1[1]\n x2, y2 = point2[0], point2[1]\n\n try:\n m1 = (x1 - x2) / (y2 - y1)\n m2 = (y2 - y1) / (x2 - x1)\n x3 = ((m2 * x1) - (m1 * x0) - y1 + y0) / (m2 - m1)\n y3 = m1 * (x3 - x0) + y0\n except ZeroDivisionError:\n (x3, y3) = (x0, y1) if y1 == y2 else (x1, y0)\n\n return ((min(x1, x2) <= x3 <= max(x1, x2)) and (min(y1, y2) <= y3 <= max(y1, y2))), (x3, y3)", "def isInPlane(self, p) -> bool:\n # Testing for zero is done with math.isclose, to avoid rounding/floating point errors.\n # Since we are testing near zero, abs_tol is set to 1e-09\n return math.isclose(\n math.fabs(\n dot(\n self.normal(),\n Vector.connect(p.x, p.y, p.z, self.p0.x, self.p0.y, self.p0.z),\n )\n ),\n 0,\n rel_tol=1e-09,\n abs_tol=1e-09,\n )", "def is_perpendicular_to(self, vector):\n\n if abs(self.dot(vector)) < 0.01:\n return True\n return False", "def on_plane(self, plane):\n v = plane.point - self\n return 0 == v.mag2 or plane.normal.isperpendicular(v)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the shortest path from the paths provided. This function assumes that the paths are possible waypoint paths calculated by the is_obstacle_in_path() function.
def get_min_path(self, paths): shortest_path = paths[0] shortest_distance = self.get_path_distance(paths[0]) for path in paths[1:]: distance = self.get_path_distance(path) if distance < shortest_distance: shortest_path = path shortest_distance = distance return shortest_path
[ "def get_lowest_cost_path(paths): \n \n return min(paths, key = lambda t: t[1])", "def shortest_path(self):\r\n return min([Graph.get_total_path_distance(path) for path in self.path_list])", "def shortest_path(graph, source, destination):\n pass", "def path(self, source, target, path=[]):\n path = path + [source]\n if source == target:\n return path\n if source not in self.rooms:\n return None\n shortest = None\n for room in source.get_targets():\n if room not in path:\n newpath = self.path(room, target, path)\n if newpath:\n if not shortest or len(newpath) < len(shortest):\n shortest = newpath\n return shortest", "def shortest_path_example():\n graph = Graph({\n (0, 1): 4,\n (0, 7): 8,\n (1, 2): 8,\n (1, 7): 11,\n (2, 3): 7,\n (2, 5): 4,\n (2, 8): 2,\n (3, 4): 9,\n (3, 5): 14,\n (5, 4): 10,\n (6, 5): 2,\n (7, 6): 1,\n (6, 8): 6,\n (7, 8): 7,\n })\n src = 0\n V_sorted = sorted(graph.vertices)\n E_sorted = sorted(graph.edge_weights.keys())\n v = len(V_sorted)\n e = len(E_sorted)\n c = [-1] * v\n c[0] = 0\n A_ub = [\n [\n {u: -1, v: 1}[w] if w in (u, v) else 0\n for w in V_sorted\n ]\n for u, v in E_sorted\n ]\n b_ub = [\n graph.edge_weights[(u, v)]\n for u, v in E_sorted\n ]\n A_eq = [\n [\n 1 if w == src else 0\n for w in V_sorted\n ]\n for u, v in E_sorted\n ]\n b_eq = [0] * e\n return linprog(\n c, A_ub, b_ub, A_eq, b_eq, method='interior-point')", "def shortest_path(N, a_0, a_1=None):\n path = HJ_path(a_1*N, a_0*N)\n path = [c/d/N for c, d in path]\n return path", "def shortest_path(self, s, t):\n return self.model.shortest_path[s][t]", "def shortest_path_search(start, successors, is_goal):\r\n if is_goal(start):\r\n return [start]\r\n\r\n explored = set() # set of states we have visited\r\n frontier = [ [start] ] # ordered list of paths we have blazed\r\n while frontier:\r\n path = frontier.pop(0)\r\n s = path[-1]\r\n for (state, action) in successors(s).items():\r\n if state not in explored:\r\n explored.add(state)\r\n path2 = path + [action, state]\r\n if is_goal(state):\r\n return path2\r\n else:\r\n frontier.append(path2)\r\n return Fail", "def dijkstra_shortest_path(grid_obs, source, dest):\n #------------------------------------\n #\n # Fill and submit this code\n #\n predecessors = {source: float('inf')}\n visited_blocks = {source: 0}\n queue = PQ()\n queue.__setitem__(source, 0)\n goodIndices = []\n\n print len(grid_obs)\n\n for index in range(len(grid_obs)):\n if grid_obs[index] != \"air\":\n goodIndices.append(index)\n\n for index in goodIndices:\n if index != source:\n visited_blocks[index] = float('inf')\n\n while queue:\n blocks_to_go = []\n current_position = queue.smallest()\n del queue[current_position]\n\n for difference in [-81, -1, 1, 81]:\n if (current_position + difference) in goodIndices:\n blocks_to_go.append(current_position + difference)\n\n for block_Index in blocks_to_go:\n gap = visited_blocks[current_position] + 1\n if gap < visited_blocks[block_Index]:\n visited_blocks[block_Index] = gap\n predecessors[block_Index] = current_position\n queue.__setitem__(block_Index, gap)\n\n shortest_paths = []\n while dest != source:\n shortest_paths.append(dest)\n dest = predecessors[dest]\n shortest_paths.append(source)\n shortest_paths.reverse()\n\n return shortest_paths\n #-------------------------------------", "def minimumCostPath(analyzer, destStation):\n path = djk.pathTo(analyzer['paths'], destStation)\n return path", "def _shortest_path(G, start, end, sp_cache):\n if (start, end) in SP_TABLE:\n return sp_cache[(start, end)]\n elif (end, start) in SP_TABLE:\n 
return sp_cache[(end, start)]\n else:\n D, P = _dijkstra(G, start, end)\n path = []\n temp = end\n while 1:\n path.append(end)\n if end == start: break\n end = P[end]\n path.reverse()\n sp_cache[(start, temp)] = path\n return path", "def minimal_paths(self):\n\n try:\n if self.__correct:\n # the set of the nodes to which the minimal path has not been found\n # at the beginning all the nodes are unmarked\n unmarked_nodes = set(self.__graph.get_vertices())\n # the dictionary that contains pairs like (n : c) where n is the id of the node and c is the cost to\n # reach the node n form the source node\n self.__costs = {}\n # the dictionary that contains pairs like (n : p) where n is the id of the node and p is the id of the\n # previous node on the minimal path from the source node to the n node\n self.__previous = {}\n\n for v in self.__graph.get_vertices():\n # at the begging cost to all unmarked nodes is infinite\n self.__costs[v] = math.inf\n # at the begging no precedence relationship is defined\n self.__previous[v] = None\n\n # by the convention the cost to reach the source node is equal to 0\n self.__costs[self.__source_node] = 0\n # by the convention the preceding node to the source node is the source node\n self.__previous[self.__source_node] = self.__source_node\n\n # do while the set of the unmarked nodes is not empty\n while len(unmarked_nodes) > 0:\n # get the element with the lowest cost from among all the unmarked nodes\n minimum = math.inf\n minimum_node = None\n for node in unmarked_nodes:\n if self.__costs[node] < minimum:\n minimum = self.__costs[node]\n minimum_node = node\n v = self.__graph.get_vert_dict().get(minimum_node)\n\n # remove the element with the lowest cost\n unmarked_nodes.remove(v.get_id())\n\n # do for each arch originating form v\n for w in v.get_connections():\n # update the dictionaries if new minimal path has been found\n if self.__costs[v.get_id()] + v.get_weight(w) < self.__costs[w.get_id()]:\n self.__previous[w.get_id()] = v.get_id()\n self.__costs[w.get_id()] = self.__costs[v.get_id()] + v.get_weight(w)\n\n else:\n raise IncorrectParametersError\n\n except IncorrectParametersError:\n print(\"Error: The minimal paths cannot be calculated due to incorrect algorithm's parameters.\")\n\n except Exception:\n print(\"Error: The minimal paths cannot be calculated.\")", "def shortest_path(maze):\n\tpath = []\n\tqueue = []\n\tvisited = set()\n\tqueue.append((0, 0))\n\tvisited.add((0, 0))\n\tparent_node = {}\n\twhile queue:\n\t\tcur_i, cur_j = queue.pop(0)\n\t\tif maze[cur_i][cur_j] == 'G':\n\t\t\twhile cur_i + cur_j:\n\t\t\t\tpath.append((cur_i, cur_j))\n\t\t\t\tcur_i, cur_j = parent_node[(cur_i, cur_j)]\n\t\t\treturn len(path)\n\t\tfor i, j in [(cur_i - 1, cur_j), (cur_i + 1, cur_j), (cur_i, cur_j - 1), (cur_i, cur_j + 1)]:\n\t\t\tif 0 <= i < len(maze) and 0 <= j < len(maze) and maze[i][j] != 'X' and (i, j) not in visited:\n\t\t\t\tqueue.append((i, j))\n\t\t\t\tparent_node[(i, j)] = (cur_i, cur_j)\n\t\t\t\tvisited.add((i, j))\n\treturn -1", "def dijkstra_shortest_path(grid_obs, source, dest):\r\n\r\n direction = [21, -1, -21, 1]\r\n vertexdict = dict()\r\n unvisited = []\r\n for i in range(len(grid_obs)):\r\n if grid_obs[i] != 'air': #<----------- Add things to avoid here\r\n vertexdict[i] = [1, 999, -999] #key = index, value = (cost, shortest dist from start, prev vert)\r\n unvisited.append(i) #add to unvisited list\r\n\r\n #set source vertex cost and shortest_dist_from_start to 0\r\n if source in vertexdict:\r\n vertexdict[source][0] = 0\r\n 
vertexdict[source][1] = 0\r\n else:\r\n return np.zeros(99)\r\n\r\n while len(unvisited) != 0:\r\n #find curVert - lowest shortest dist vertex\r\n lowestDist = float('inf')\r\n curVert = None\r\n for i in unvisited:\r\n if vertexdict[i][1] < lowestDist:\r\n curVert = i\r\n lowestDist = vertexdict[i][1]\r\n\r\n #examine neighbors of curVert\r\n for i in direction:\r\n adjVert = curVert + i\r\n if adjVert in unvisited:\r\n #newcost = (cost of adjVert) + (shortest dist from curVert)\r\n newCost = vertexdict[adjVert][0] + vertexdict[curVert][1]\r\n if newCost < vertexdict[adjVert][1]:\r\n vertexdict[adjVert][1] = newCost\r\n vertexdict[adjVert][2] = curVert\r\n unvisited.remove(curVert)\r\n\r\n backtrack = dest\r\n path_list = []\r\n path_list.append(dest)\r\n while backtrack != source:\r\n path_list.insert(0, vertexdict[backtrack][2])\r\n backtrack = vertexdict[backtrack][2]\r\n return path_list", "def get_shortest(args_array):\n\n node, G, paths_list = args_array\n shortest_score = float(\"inf\")\n path = None\n for pred in G.predecessors(node):\n try:\n path_len,shortest_path = nx.bidirectional_dijkstra(G, node, pred, weight='cost')\n if path_len < shortest_score:\n path = shortest_path\n shortest_score = path_len\n except nx.exception.NetworkXNoPath:\n continue\n if path is not None: paths_list.append(path)\n # done", "def get_shortest_path(self) -> list:\r\n curr_vertex = self._goal\r\n ret_val = [curr_vertex]\r\n while curr_vertex:\r\n curr_vertex = self._came_from.get(curr_vertex)\r\n if curr_vertex:\r\n ret_val.append(curr_vertex)\r\n ret_val.reverse()\r\n return ret_val", "def get_closest_distance_to_path(self, path):\n min_distance_to_line = float(\"inf\")\n for p in path:\n game_path = p[:]\n\n game_path.sort(key = lambda coord: calculate_distance(self, coord))\n point_A = game_path[0] # Closest point out of all the points on the path to to the tower\n\n try:\n point_after_A = p[p.index(point_A) + 1]\n point_before_A = p[p.index(point_A) - 1]\n closest_to_A = min(point_after_A, point_before_A, key = lambda point: calculate_distance(point_A, point))\n except:\n if p.index(point_A) == 0:\n closest_to_A = p[p.index(point_A) + 1]\n \n elif p.index(point_A) == len(p) - 1:\n closest_to_A = p[p.index(point_A) - 1]\n finally:\n if closest_to_A[0] != point_A[0]:\n m = (closest_to_A[1] - point_A[1]) / (closest_to_A[0] - point_A[0])\n else:\n m = 2\n\n b = point_A[1] - m * point_A[0]\n\n closest_distance = abs(-m * self.x + self.y - b) / math.sqrt((-m) ** 2 + 1)\n min_distance_to_line = min(closest_distance, min_distance_to_line)\n \n return min_distance_to_line", "def shortest_path(M: object, start: int, goal: int) -> list:\n \n # dictionary of Map intersections coordinates (x,y)\n nodes_xy = M.intersections\n \n # destination coordinates\n goal_xy = nodes_xy[goal]\n \n # list of lists of adjacent nodes\n nodes_connections = M.roads\n \n # Set of integers to track the visited nodes in the search\n visited = set()\n \n # Dictionary to track the predecessors in the path\n come_from = dict()\n \n # initialize single nodes distance from the origin\n nodes_id = nodes_xy.keys() \n dist_from_start = {node_id: math.inf for node_id in nodes_id}\n dist_from_start[start] = 0\n \n # create frontier to be expanded as a PriorityQueue and add the origin to the PriorityQueue\n frontier = PriorityQueue()\n frontier.add(start)\n \n # loop until all the nodes in the frontier have been explored\n while frontier:\n \n # retrieve the node with the minimum cost from frontier\n node = frontier.pop()\n \n # check if 
the node has been processed\n if node in visited:\n continue\n \n # goal node found: Terminate search\n if node == goal:\n return reconstruct_path(come_from, start, node)\n \n # mark the current node as visited\n visited.add(node)\n \n # examine all possible neighbors and update the cost function f\n # for nodes near the current node\n \n node_xy = nodes_xy[node] # current node's coordinates\n \n for neighbor in nodes_connections[node]:\n \n neighbor_xy = nodes_xy[neighbor] # neighbor's coordinates\n \n # total distance travelled to reach the neighbor\n cost_g = dist_from_start[node] + distance(node_xy, neighbor_xy)\n \n # estimated distance for the remaining distance to goal\n cost_h = distance(neighbor_xy, goal_xy)\n \n # estimated cost of the path from start to goal via neighbor's position \n cost_f = cost_g + cost_h\n \n # check if the actual distance travelled is better of the distance calculated previously.\n if cost_g < dist_from_start[neighbor]:\n \n # update neighbor's distance, precedessor. And add it to the Priority Queue\n dist_from_start[neighbor] = cost_g\n come_from[neighbor] = node\n frontier.add(neighbor, priority = cost_f)\n \n # there is no solution, the graph may be not fully connected\n return None", "def shortest_path_search(start, successors, is_goal):\r\n if is_goal(start):\r\n return [start]\r\n explored = set()\r\n frontier = [[start]]\r\n while frontier:\r\n path = frontier.pop(0)\r\n s = path[-1]\r\n for (state, action) in successors(s).items():\r\n if state not in explored:\r\n explored.add(state)\r\n path2 = path + [action, state]\r\n if is_goal(state):\r\n return path2\r\n else:\r\n frontier.append(path2)\r\n return Fail" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the obstacles in the map
def get_obstacles(self):
    return self.obstacles
[ "def getObstacles(self):\r\n ausgabeObstacle = self.globalObstaclesList + self.globalHardObstaclesList\r\n self.globalObstaclesList = []\r\n return(ausgabeObstacle)", "def obstacles(self):\r\n\r\n #Radious arround the head\r\n limit_sight = self.snake_sight\r\n head = self.body[0].position\r\n binary_map_complete = self.complete_mapping()\r\n map_matrix = np.matrix(binary_map_complete)\r\n obstacles = []\r\n\r\n #limits in all directions\r\n left_x = head[0] - limit_sight\r\n right_x = head[0] + limit_sight\r\n up_y = head[1] - limit_sight\r\n down_y = head[1] + limit_sight\r\n\r\n #submatrix with limits size\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:right_x+1]\r\n\r\n #Special cases where the snake approximates to the borders\r\n ##Corners\r\n if left_x < 0 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[temporal, snake_sight] \r\n return snake_sight\r\n \r\n if left_x < 0 and down_y > self.limits[1] - 1:\r\n snake_sight = map_matrix[up_y:self.limits[1], 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[temporal, snake_sight]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n\r\n ##Middle\r\n if left_x < 0:\r\n snake_sight = map_matrix[up_y:down_y+1, 0:right_x+1]\r\n interval_x = 
[self.limits[0] + left_x, self.limits[0]]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n return snake_sight\r\n\r\n if right_x > self.limits[0]-1:\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n return snake_sight\r\n\r\n if up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:right_x+1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[interval_y_matrix, snake_sight]\r\n return snake_sight\r\n \r\n if down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:right_x+1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[snake_sight, interval_y_matrix]\r\n return snake_sight\r\n\r\n return snake_sight", "def get_map(self) -> list:\n return self.map_obstacle", "def get_obstacles_map(obstacles, placed_pecies):\n \n #create a mask image to draw the obstacles on\n blocks = np.zeros(ARENA_SIZE[::-1], np.uint8)\n\n #get the grid points where the robot needs to placed\n grid = get_grid(ARENA_SIZE)\n\n #draw the obstacles and their safety region on the map\n for i in obstacles.keys():\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(obstacles[i][0]/4), i[1]-int(obstacles[i][1]/4)), (i[0]+int(obstacles[i][0]/4), i[1]+int(obstacles[i][1]/4)), 255, -1)\n\n #draw the obstacles and their safety region on the map\n for i in placed_pecies.keys():\n try:\n if not i == grid[5]:\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n else:\n cv2.rectangle(blocks, (int(i[0]-7.4*placed_pecies[i][0]/4), int(i[1]-7.4*placed_pecies[i][1]/4)),\n (int(i[0]+7.4*placed_pecies[i][0]/4), int(i[1]+7.4*placed_pecies[i][1]/4)), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(placed_pecies[i][0]/4), i[1]-int(placed_pecies[i][1]/4)), (i[0]+int(placed_pecies[i][0]/4), i[1]+int(placed_pecies[i][1]/4)), 255, -1)\n except Exception as e:\n print(e)\n\n return cv2.bitwise_not(blocks)", "def create_obstacles(self) -> list:\n pass", "def getObjects(self):\r\n obstacles=[]\r\n for obst in self.listObstacles:\r\n obstacles.append(obst)\r\n for voya in self.listVoyageurs:\r\n obstacles.append(voya)\r\n return obstacles", "def place_obstacles():\n #Randomly generate different sized rectangles\n #Soem may overlap, which gives more variety in shape of obstacles\n xvals = np.random.randint(0,self.map_dimensions[1],size=self.N_obstacles)\n yvals = np.random.randint(0,self.map_dimensions[0],size=self.N_obstacles)\n lower_left = zip(xvals,yvals)\n rects = []\n for LL in lower_left:\n x = LL[0]\n y = LL[1]\n wmax = self.map_dimensions[1] - x\n w = np.random.randint(0,wmax,size=1)[0]\n hmax = self.map_dimensions[0] - y\n h = np.random.randint(0,hmax,size=1)[0]\n rects += [(x,y,w,h)]\n self.coordinates__obstacles = rects", "def generate_obstacles(self):\r\n obstacles = self.get_obstable_metrics\r\n obstacle_arrays = []\r\n\r\n for nb_obstacle in obstacles:\r\n empty_array = np.zeros(shape=(self.WINDOW_HEIGHT,\r\n self.WINDOW_WIDTH))\r\n start_location = 0 if nb_obstacle[2] == 1 else self.WINDOW_HEIGHT\r\n y, x = start_location 
- 1, nb_obstacle[3]\r\n empty_array[y, x] = -1\r\n\r\n for w_value in range(nb_obstacle[0]):\r\n x_updated = x + w_value\r\n\r\n for h_value in range(nb_obstacle[1]):\r\n if nb_obstacle[2] == 1:\r\n y_updated = y + h_value\r\n else:\r\n y_updated = y - h_value\r\n # Replace Value\r\n empty_array[y_updated, x_updated] = -1\r\n\r\n new_array = self.trim_whitespace(empty_array,\r\n nb_obstacle[2],\r\n self.MIN_GAP)\r\n obstacle_arrays.append(new_array)\r\n\r\n return obstacle_arrays", "def get_obstacles(image):\n\n ih, iw = image.shape[:2]\n image_copy = image.copy()\n\n #resize the image to the size of arena\n image = cv2.resize(image, ARENA_SIZE, interpolation=cv2.INTER_CUBIC)\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\n #replace all black pixels to white pixels\n gray[np.where(gray == 0)]= 255\n\n #get the thresholded binary image\n ret,threshold = cv2.threshold(gray,200,255,cv2.THRESH_BINARY_INV)\n\n #find all the countours in the binary image\n _, contours, heiarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n cont = []\n\n #create a mask to draw contours on\n blocks = mask = np.zeros(threshold.shape[:2], np.uint8)\n\n #create a dictionary to hold image roi of all puzzle peices\n blocks_roi = {}\n\n #iterate through all contours\n for i, c in enumerate(contours[1:]):\n\n #find the minimum area fitting rectangle of the contour\n rect = cv2.minAreaRect(c)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n\n #create the copy of the mask\n mask_copy = mask.copy()\n\n #draw the rectangle on the mask\n cv2.drawContours(mask_copy, [box], -1, (255,255,255), 3)\n\n #floodfill the rectangle\n cv2.floodFill(mask_copy, None, (0,0), 255)\n mask_inv = cv2.bitwise_not(mask_copy)\n blocks = cv2.add(blocks, mask_inv)\n\n _, contours, heiarchy = cv2.findContours(blocks, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n obstacles = {}\n\n for c in contours:\n x,y,w,h = cv2.boundingRect(c)\n obstacles.update({(int(x+w/2), int(y+h/2)): BLOCK_SIZE})\n #obstacles.update({(int(x+w/2), int(y+h/2)): (w, h)}) # for unknown block sizes\n bottom_r = remap((x+w, y+h), ARENA_SIZE, (iw,ih))\n top_l = remap((x, y), ARENA_SIZE, (iw,ih))\n blocks_roi.update({(int(x+w/2), int(y+h/2)): image_copy[top_l[1]:bottom_r[1], top_l[0]:bottom_r[0]]})\n\n return obstacles, blocks_roi", "def check_for_obstacles(self):\n obs = False\n obs_p = []\n for point in self.obstacles:\n if -0.15 <= point[1] <= 0.15: # robot is 178mm wide\n # Obstacles should be less than or equal to 0.2 m away before being detected\n if 0 <= point[0] <= .2:\n obs_p.append(point)\n obs = True\n if obs:\n pos = self.determine_pos_of_obstacle(obs_p)\n data = Obstacle()\n data.x = pos[0]\n data.y = pos[1]\n data.obstacle = True\n self.obs_pub.publish(data)", "def draw_obstacles(self):\n for obstacle in self.obstacles:\n obstacle.draw(self.window, Colors.BLACK.value)", "def _find_obstacle(self, obstacle_type='*traffic_light*'): \r\n obst = list()\r\n \r\n _actors = self._world.get_actors()\r\n _obstacles = _actors.filter(obstacle_type)\r\n\r\n\r\n for _obstacle in _obstacles:\r\n trigger = _obstacle.trigger_volume\r\n\r\n _obstacle.get_transform().transform(trigger.location)\r\n \r\n distance_to_car = trigger.location.distance(self._vehicle.get_location())\r\n\r\n a = np.sqrt(\r\n trigger.extent.x ** 2 +\r\n trigger.extent.y ** 2 +\r\n trigger.extent.z ** 2)\r\n b = np.sqrt(\r\n self._vehicle.bounding_box.extent.x ** 2 +\r\n self._vehicle.bounding_box.extent.y ** 2 +\r\n self._vehicle.bounding_box.extent.z ** 2)\r\n\r\n s = a + b + 10\r\n 
\r\n if distance_to_car <= s:\r\n # the actor is affected by this obstacle.\r\n obst.append(_obstacle)\r\n\r\n \"\"\"self._debug.draw_box(carla.BoundingBox(_obstacle.get_transform().location, carla.Vector3D(0.5,0.5,2)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,255,0,0),\r\n 0\r\n )\"\"\"\r\n \"\"\"self._debug.draw_box(carla.BoundingBox(trigger.location, carla.Vector3D(0.1,0.1,10)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n \r\n \"\"\"self._debug.draw_box(carla.BoundingBox(trigger.location, carla.Vector3D(0.1,0.1,2)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n \"\"\"self._debug.draw_box(trigger,\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n\r\n return obst", "def updateObstacleRects(self):\n\n # global castle\n\n self.obstacle_rects = [self.castle.rect]\n\n for tile in self.mapr:\n if tile[0] in (self.TILE_BRICK, self.TILE_STEEL, self.TILE_WATER):\n self.obstacle_rects.append(tile[1])", "def obstacle_iterator(self):\n for obstacle in self.tmx_data.get_layer_by_name(\"obstacles\"):\n yield obstacle", "def create_obstacles(self) -> List[Square]:\n obstacles_number = random.randint(1, self.maximum_obstacles_on_board)\n obstacles = list()\n\n while len(obstacles) < obstacles_number:\n\n obstacle_x_pos = random.randint(0, Dimension.board_width() - 1)\n obstacle_y_pos = random.randint(0, Dimension.board_height() - 1)\n obstacle = Square(obstacle_x_pos, obstacle_y_pos)\n if obstacle not in obstacles:\n self.board_matrix[obstacle_y_pos][obstacle_x_pos] = 0\n obstacles.append(obstacle)\n\n return obstacles", "def __draw_obstacles(self):\n self.canvas.delete(OBSTACLES_TAG)\n for i in range(0, PACMAN_BOARD_SIDE_SQUARES_NUMBER):\n for j in range(0, PACMAN_BOARD_SIDE_SQUARES_NUMBER):\n if self.board[j][i] == 1:\n self.canvas.create_rectangle(\n PACMAN_BOARD_WINDOW_MARGIN + PACMAN_BOARD_SQUARE_SIDE_LENGTH * i,\n PACMAN_BOARD_WINDOW_MARGIN + PACMAN_BOARD_SQUARE_SIDE_LENGTH * j,\n PACMAN_BOARD_WINDOW_MARGIN + PACMAN_BOARD_SQUARE_SIDE_LENGTH * (i + 1),\n PACMAN_BOARD_WINDOW_MARGIN + PACMAN_BOARD_SQUARE_SIDE_LENGTH * (j + 1),\n fill=BOARD_OBSTACLES_COLOR,\n tag=OBSTACLES_TAG\n )", "def calcGlobalObstaclePosition(self, obstacles): \r\n global_obstacle_list = []\r\n for obstacle in obstacles: \r\n #Wandeln Winkeldaten für Globalberechnung: -90zu+90 und +90zu-90 0=0\r\n #ScanList[i][0]=degrees(asin(sin(radians(ScanList[i][0])+radians(180))))\r\n\r\n Dx = obstacle[0]\r\n Dy = obstacle[1]\r\n\r\n #Drehmatrix für X, Returns Global Hindernis Position\r\n X=(Dx*cos(radians(self.global_kurs))+Dy*(-sin(radians(self.global_kurs))))+self.RoboPosX\r\n #Drehmatrix für Y, Returns Global Hindernis Position\r\n Y=(Dx*sin(radians(self.global_kurs))+Dy*(cos(radians(self.global_kurs))))+self.RoboPosY\r\n\r\n global_obstacle_list.append([int(X),int(Y)])\r\n return(global_obstacle_list)", "def get_obstacle_info(self):\n obstacle_info = []\n\n if self.story_index <= 3:\n pass\n\n elif self.story_index == 4:\n obstacle_info.append([0, 500, 10, False])\n\n elif self.story_index == 5:\n obstacle_info.append([0, 500, 10, False])\n obstacle_info.append([1, 300, 10, False])\n\n elif self.story_index == 6:\n obstacle_info.append([0, 300, 10, False])\n obstacle_info.append([1, 500, 10, False])\n\n elif self.story_index == 7:\n obstacle_info.append([1, 300, 10, True])\n obstacle_info.append([2, 500, 5, True])\n obstacle_info.append([0, 
700, 8, False])\n\n return obstacle_info", "def init_map(self, obstacle_rate=0.9):\n n = self.size()\n\n map_obstacles = [] # np.zeros((n, n)) # 1: obstacle, 0: non-obstacle\n \n for i in range(n):\n # We only need 2 bit to encode 1/0 for each element of NumberArray\n row = NumberArray(2, n)\n for j in range(n):\n if i == j:\n # map_obstacles[i][j] = 0\n row[j] = 0\n elif i > j:\n # map_obstacles[i][j] = map_obstacles[j][i]\n row[j] = map_obstacles[j][i]\n else:\n # map_obstacles[i][j] = 1 if random.random() > 0.9 else 0\n row[j] = 1 if random.random() > obstacle_rate else 0\n map_obstacles.append(row)\n\n self.map_obstacle = map_obstacles" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if the UAV has reached the current waypoint and False if not
def has_uav_reached_current_waypoint(self):
    return self.drone.has_reached_waypoint()
[ "def check_reached_waypoint_goal(self):\n return self.control_instance.check_reached_waypoint_goal()", "def at_goal(self):\n return self.distance_from_goal < self.robot.wheels.base_length/2", "def passed_waypoint(self, waypoint_num):\n bools = self.ros_node.get_data('/diff_drive/waypoints_achieved', simple_data = False)\n # Waits for the data\n if bools is not None:\n if len(bools.bools) >= waypoint_num:\n return bools.bools[waypoint_num -1]\n \n rospy.logerr_throttle(15, \"Checking Waypoint Failed. Did not find a waypoint with the number '%s' in the path\" %(waypoint_num))\n return False\n else:\n return False", "def goingToBreak(self):\n \n if (\n (self.current_loc == 0 and not self.direction_forward) or\n (self.current_loc == len(self.destinations)-1 and self.direction_forward)\n ):\n return True\n return False", "def _update_next_waypoint(self):\n if not self.base_waypoints:\n #rospy.logwarn(\"Waypoints not updated: base_waypoints not available yet.\")\n return False\n\n if not self.current_pose:\n #rospy.logwarn(\"Waypoints not updated: current_pose not available yet.\")\n return False\n\n # Get ego car variables\n ego_x = self.current_pose.position.x\n ego_y = self.current_pose.position.y\n ego_theta = math.atan2(self.current_pose.orientation.y, self.current_pose.orientation.x)\n\n # If I do have a next_waypoint, I will start looking from it, and stop looking\n # as soon as get a local minimum. Otherwise I will do a full search across the whole track\n t = time.time()\n wp = None\n yaw = 0\n dist = 1000000 # Long number\n if self.next_waypoint:\n idx_offset = self.next_waypoint\n full_search = False\n else:\n idx_offset = 0\n full_search = True\n num_base_wp = len(self.base_waypoints)\n\n for i in range(num_base_wp):\n idx = (i + idx_offset)%(num_base_wp)\n wp_x = self.base_waypoints[idx].pose.pose.position.x\n wp_y = self.base_waypoints[idx].pose.pose.position.y\n wp_d = math.sqrt((ego_x - wp_x)**2 + (ego_y - wp_y)**2)\n\n if wp_d < dist:\n dist = wp_d\n wp = idx\n if debugging:\n # Angle betwee car heading and waypoint heading\n yaw = math.atan2(wp_y - ego_y, wp_x - ego_x) - ego_theta\n elif not full_search:\n # Local minimum. If the waypoint makes sense, just use it and break\n if dist < max_local_distance:\n break; # Found a point\n else:\n # Seem to have lost track. Do search again\n rospy.logwarn(\"Waypoint updater lost track (local min at %.1f m after %d waypoints). 
Going back to full search.\", dist, i+1)\n full_search = True\n\n if debugging:\n rospy.loginfo(\"New next wp [%d] -> (%.1f,%.1f) after searching %d points in %fs\", wp, dist * math.cos(yaw), dist * math.sin(yaw), i, time.time()-t)\n\n if wp is None:\n rospy.logwarn(\"Waypoint updater did not find a valid waypoint\")\n return False\n\n self.next_waypoint = wp\n return True", "def goal_reached(self, position):\n return position >= self.goal", "def reached_goal(self):\n for i in range(self.simulator_.num_agents):\n if rvo_math.abs_sq(self.simulator_.agents_[i].position_ - self.goals_[i]) > self.simulator_.agents_[i].radius_ * self.simulator_.agents_[i].radius_:\n return False\n\n return True", "def _is_ahead(wp, target_pos):\n wp_pos = _pos(wp)\n orientation = math.radians(wp.transform.rotation.yaw)\n target_vector = np.array([target_pos[0] - wp_pos[0], target_pos[1] - wp_pos[1]])\n forward_vector = np.array([np.cos(orientation), np.sin(orientation)])\n d_angle = math.degrees(math.acos(_cos(forward_vector, target_vector)))\n return d_angle < 90", "def reached_angle(self, angle):\n if self.ros_node.get_data(\"/auto/hood/current/angle\") == angle:\n return True\n return False", "def is_jumping(self):\n if(self.going_down or self.going_up or self.mid_air):\n return True\n else:\n return False", "def reached_destination(self):\n return self.rect.centerx > self.coordinates_to_move[0] - 10 and\\\n self.rect.centerx < self.coordinates_to_move[0] + 10 and\\\n self.rect.centery > self.coordinates_to_move[1] - 10 and\\\n self.rect.centery < self.coordinates_to_move[1] + 10", "def is_goal_reached(self, current, goal):\n dis = self.distance_between(current, goal)\n # if self.neigh_range[0] <= dis <= self.neigh_range[1]:\n if dis <= self.neigh_range[1]:\n return True", "def check_at_goal(self, currentLocation):\n pass", "def check_waypoint_reached(self, pos_tol=0.3, head_tol=0.01):\n self.local_pos_pub.publish(self.waypoint_g)\n\n dx = abs(\n self.waypoint_g.pose.position.x - self.current_pose_g.pose.pose.position.x\n )\n dy = abs(\n self.waypoint_g.pose.position.y - self.current_pose_g.pose.pose.position.y\n )\n dz = abs(\n self.waypoint_g.pose.position.z - self.current_pose_g.pose.pose.position.z\n )\n\n dMag = sqrt(pow(dx, 2) + pow(dy, 2) + pow(dz, 2))\n\n cosErr = cos(radians(self.current_heading_g)) - cos(\n radians(self.local_desired_heading_g)\n )\n\n sinErr = sin(radians(self.current_heading_g)) - sin(\n radians(self.local_desired_heading_g)\n )\n\n dHead = sqrt(pow(cosErr, 2) + pow(sinErr, 2))\n\n if dMag < pos_tol and dHead < head_tol:\n return 1\n else:\n return 0", "def reached_dest(self) -> bool:\n return self.base_route[-1] == self.traveled_nodes[-1][self.NODE_INDEX]", "def ismoving(self):\n return not self.get_par(\"done_moving\")", "def is_goal(self) -> bool:\n return self.done and self.reward > 0", "def is_goal_reached(self,x):\n\t\treturn x==self.goal_state", "def _ismoving(self):\n return self.dp.state()==PyTango.DevState.MOVING" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Hook to be invoked before the test method is executed. May perform expensive setup here.
def before_test(self, func, *args, **kwargs):
    pass
[ "def before_tester_run(self) -> None:", "def before_test_run(self) -> None:", "def _on_test_begin(self):\n pass", "def prepare_test(self):\n pass", "def after_tester_run(self) -> None:", "def setUp(self):\n\n self._set_up()", "def _pre_init(self):\n pass", "def setUp(self) -> None:\n self.dummy_function = lambda x: x", "def _fixture_setup(self):\n pass", "def pre_test_response(self):", "def setUp(self):\n self.context = Context()", "def custom_setup(self):\r\n pass", "def pre_execute(self):", "def after_successful_test_run(self) -> None:", "def setUp(self) -> None:\n self.manager = Manager()", "def setUp(self):\n self.setup_beets()", "def setUpFixture(self):\n pass", "def test_init(self):\n pass", "def start_test_run(self):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates fixture objects from the given response and stores them in the application-specific cache.
def execute(self, response):
    if not has_request_context:
        return self._fallback_fixture_names()
    try:
        app = self.auto_fixture.app

        # Create response fixture
        fixture = Fixture.from_response(response, app, self.response_name)
        self.auto_fixture.add_fixture(fixture)

        # Create request fixture
        if request.data:
            fixture = Fixture.from_request(request, app, self.request_name)
            self.auto_fixture.add_fixture(fixture)
    except TypeError:  # pragma: no cover
        warnings.warn("Could not create fixture for unsupported mime type")
    return response
[ "def test_template_source_data_response_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n env_variable_response_model = {} # EnvVariableResponse\n env_variable_response_model['hidden'] = True\n env_variable_response_model['name'] = 'testString'\n env_variable_response_model['secure'] = True\n env_variable_response_model['value'] = 'testString'\n\n workspace_variable_response_model = {} # WorkspaceVariableResponse\n workspace_variable_response_model['description'] = 'testString'\n workspace_variable_response_model['name'] = 'testString'\n workspace_variable_response_model['secure'] = True\n workspace_variable_response_model['type'] = 'testString'\n workspace_variable_response_model['value'] = 'testString'\n\n # Construct a json representation of a TemplateSourceDataResponse model\n template_source_data_response_model_json = {}\n template_source_data_response_model_json['env_values'] = [env_variable_response_model]\n template_source_data_response_model_json['folder'] = 'testString'\n template_source_data_response_model_json['has_githubtoken'] = True\n template_source_data_response_model_json['id'] = 'testString'\n template_source_data_response_model_json['template_type'] = 'testString'\n template_source_data_response_model_json['uninstall_script_name'] = 'testString'\n template_source_data_response_model_json['values'] = 'testString'\n template_source_data_response_model_json['values_metadata'] = [{ 'foo': 'bar' }]\n template_source_data_response_model_json['values_url'] = 'testString'\n template_source_data_response_model_json['variablestore'] = [workspace_variable_response_model]\n\n # Construct a model instance of TemplateSourceDataResponse by calling from_dict on the json representation\n template_source_data_response_model = TemplateSourceDataResponse.from_dict(template_source_data_response_model_json)\n assert template_source_data_response_model != False\n\n # Construct a model instance of TemplateSourceDataResponse by calling from_dict on the json representation\n template_source_data_response_model_dict = TemplateSourceDataResponse.from_dict(template_source_data_response_model_json).__dict__\n template_source_data_response_model2 = TemplateSourceDataResponse(**template_source_data_response_model_dict)\n\n # Verify the model instances are equivalent\n assert template_source_data_response_model == template_source_data_response_model2\n\n # Convert model instance back to dict and verify no loss of data\n template_source_data_response_model_json2 = template_source_data_response_model.to_dict()\n assert template_source_data_response_model_json2 == template_source_data_response_model_json", "def yield_fixture():\n print(\"\\n\\n(Initializing yield_fixture)\")\n x = {\"foo\": \"bar\"}\n\n # Remember, unlike generators, fixtures should only yield once (if at all)\n yield x\n\n print(\"\\n(Cleaning up yield_fixture)\")\n del (x)", "def _post_load(self, response, verbose):\n try:\n if verbose:\n print response.content\n log.debug(response.content)\n except Exception, e:\n raise e\n \n if response is not None and response.status_code == 200:\n types = helpers.pluralize(self.resource_type)\n #print \"types %s\" % types\n body = json.loads(response.content, encoding='utf-8')\n self.total_entries = body['collection']['total_entries']\n self.total_pages = body['collection']['total_pages']\n self.current_page = body['collection']['current_page']\n ## now get the items from the class factory\n if self.total_entries != 0:\n for 
response_item in body[types]:\n obj = self._response_item_to_object(response_item)\n ## add the items\n self._items.append(obj)\n \n else:\n msg = u\"Fetching failed, an error happend\"\n raise SalesKingException(\"LOAD_ERROR\", msg, response)\n \n return self", "def store_response_in_cache(responsefile, response):\n global __response_cache\n log.debug(\"Storing data from flats (%s) in cache\" % responsefile)\n __response_cache[responsefile] = {}\n modtime = str(os.path.getmtime(responsefile))\n __response_cache[responsefile][modtime] = response", "def test_create_from_response(self):\n objects = Statistic.objects.create_from_stats_response(\n endpoint=self.endpoint, body=self.response_body, fetched_at=self.then\n )\n\n # The right number were created\n self.assertEqual(len(objects), 6)\n\n # The objects exist in the database\n for o in objects:\n self.assertIsNotNone(o.pk)\n\n # Test an object was created\n qs = Statistic.objects.filter(key='asset_counts.by_institution.INSTA.total')\n self.assertEqual(qs.count(), 1)\n o = qs.first()\n self.assertEqual(o.endpoint, self.endpoint)\n self.assertEqual(o.fetched_at, self.then)\n self.assertEqual(o.numeric_value, 7)", "def get_fixture_data(self):\n if not self.fixture_data:\n self.fixture_data = requests.get(self.FPL_FIXTURE_URL).json()\n else:\n pass\n return self.fixture_data", "def mock_api():\n with open(os.path.join(HERE, 'response.json'), 'r') as fp:\n webargs_response = fp.read()\n # A valid package with a proper response\n responses.add(\n responses.GET,\n 'https://pypi.python.org/pypi/webargs/json',\n body=webargs_response,\n content_type='application/json'\n )\n # A valid package with no releases\n with open(os.path.join(HERE, 'response_noreleases.json'), 'r') as fp:\n foo_response = fp.read()\n\n responses.add(\n responses.GET,\n 'https://pypi.python.org/pypi/foo/json',\n body=foo_response,\n content_type='application/json'\n )\n\n # An invalid package name\n responses.add(\n responses.GET,\n 'https://pypi.python.org/pypi/nope/json',\n status=404\n )\n responses.start()\n\n yield responses\n\n responses.stop()", "def load_fixture(request):\n if not settings.ALLOW_TEST_FIXTURE_SETUP:\n logger.warning(\n 'The `load_fixture` endpoint is not enabled. The ALLOW_TEST_FIXTURE_SETUP environment'\n ' variable is not set.',\n )\n raise Http404\n\n fixture = request.data['fixture']\n\n fixture_info = json.dumps(fixture, indent=4, sort_keys=True)\n logger.info(f'Loading fixture: {fixture_info}')\n\n with NamedTemporaryFile(suffix='.json') as tmp_file:\n tmp_file.write(json.dumps(fixture).encode())\n tmp_file.seek(0)\n\n call_command('loaddata', tmp_file.name)\n\n return Response(status=status.HTTP_201_CREATED)", "def fill_from_api_response(self, api_response):\n pass", "def test_fetch_templates__repeated_same_version(self) -> None:\n expected_data = {\n 'schema-version': 'v1',\n DOCUMENT_VERSION_KEY: 'x1x',\n 'gateway-templates': [],\n 'service-templates': [],\n }\n invoker = RunnableInvoker(self._tempdir)\n runner = data_store.DataStoreRunner(invoker.prepare_runnable([0, 30]), self._tempdir)\n with open(os.path.join(self.template_fetch_file), 'w') as f:\n json.dump(expected_data, f)\n\n # Make the call with no cached version. The call will exit with 0.\n data = runner.fetch_document('templates')\n self.assertEqual(expected_data, data)\n self.assertFalse(os.path.isfile(self.template_fetch_file))\n self.assertTrue(os.path.isfile(self.template_cache_file))\n\n # Make the call again with version x1x. 
The call will exit with 30.\n data = runner.fetch_document('templates')\n self.assertEqual(expected_data, data)\n self.assertFalse(os.path.isfile(self.template_fetch_file))\n self.assertTrue(os.path.isfile(self.template_cache_file))\n self.assertEqual(\n [\n [\n '--document=templates',\n '--action=fetch',\n '--previous-document-version=',\n '--action-file=' + self.template_fetch_file,\n '--api-version=1',\n ],\n [\n '--document=templates',\n '--action=fetch',\n '--previous-document-version=x1x',\n '--action-file=' + self.template_fetch_file,\n '--api-version=1',\n ],\n ],\n invoker.get_invoked_arguments(),\n )", "def fixture_candy():\n yield Person(name=\"Candy\", age=13, hobbies=[\"Gardening\"])", "def parse(response):\n\n soup = bs.BeautifulSoup(response.text, 'lxml')\n title = str(soup.title.string)\n\n if title.find('Problem') == -1:\n raise Exception('Problem could not be found')\n\n test_cases_html = soup.find('div', class_='sample-test')\n\n # stores all input test cases.\n input_cases = get_sample_cases(test_cases_html, 'input')\n\n # stores all corresponding output test cases.\n output_cases = get_sample_cases(test_cases_html, 'output')\n\n # for a json with input and output test cases.\n test_cases = {}\n test_cases['input'] = input_cases\n test_cases['output'] = output_cases\n\n return test_cases", "def caches_mock(request):\n\n from unittest import mock\n from contextlib import ExitStack\n from dogpile.cache import make_region\n\n caches_to_mock = []\n expiration_time = 600\n\n params = __get_fixture_param(request)\n if params:\n caches_to_mock = params.get(\"caches_to_mock\", caches_to_mock)\n expiration_time = params.get(\"expiration_time\", expiration_time)\n\n with ExitStack() as stack:\n mocked_caches = []\n for module in caches_to_mock:\n region = make_region().configure('dogpile.cache.memory', expiration_time=expiration_time)\n stack.enter_context(mock.patch(module, new=region))\n mocked_caches.append(region)\n\n yield mocked_caches", "def load_test_data(self):\n last_update = datetime.strptime(\n '2020-08-04T16:32:38.390390Z', DATETIME_FORMAT)\n self.task_data = [\n {\n 'id': '0xfakeTaskId',\n 'instance': 'MyTurbiniaInstance',\n 'last_update': last_update,\n 'name': 'TaskName',\n 'evidence_name': 'EvidenceName',\n 'report_data': '#### Fake Low priority Report\\n* Fake Bullet',\n 'report_priority': 80,\n 'request_id': '0xFakeRequestId',\n 'run_time': timedelta(minutes=1),\n 'saved_paths': ['/no/path/', '/fake/path'],\n 'status': 'This fake task executed',\n 'successful': True,\n 'requester': 'myuser',\n 'worker_name': 'fake_worker'\n }, {\n 'id': '0xfakeTaskId2',\n 'instance': 'MyTurbiniaInstance',\n 'last_update': last_update + timedelta(minutes=20),\n 'name': 'TaskName2',\n 'evidence_name': 'EvidenceName2',\n 'report_data': '#### Fake High priority Report\\n* Fake Bullet',\n 'report_priority': 10,\n 'request_id': '0xFakeRequestId',\n 'run_time': timedelta(minutes=5),\n 'saved_paths': ['/no/path/2', '/fake/path/2'],\n 'status': 'This second fake task executed',\n 'successful': True,\n 'requester': 'myuser',\n 'worker_name': 'fake_worker2'\n }, {\n 'id': '0xfakeTaskId3',\n 'instance': 'MyTurbiniaInstance',\n 'last_update': last_update,\n 'name': 'TaskName3',\n 'evidence_name': 'EvidenceName3',\n 'report_data': '',\n 'report_priority': 80,\n 'request_id': '0xFakeRequestId2',\n 'run_time': timedelta(minutes=3),\n 'saved_paths': ['/no/path/3', '/fake/path/3'],\n 'status': 'Third Task Failed...',\n 'successful': False,\n 'requester': 'myuser2',\n 'worker_name': 
'fake_worker'\n }\n ] # yapf: disable", "def test_fetch_templates__success(self) -> None:\n expected_data = {\n 'schema-version': 'v1',\n DOCUMENT_VERSION_KEY: '1',\n 'gateway-templates': [],\n 'service-templates': [],\n }\n invoker = RunnableInvoker(self._tempdir)\n runner = data_store.DataStoreRunner(invoker.prepare_runnable([0]), self._tempdir)\n with open(self.template_fetch_file, 'w') as f:\n json.dump(expected_data, f)\n data = runner.fetch_document('templates')\n self.assertEqual(expected_data, data)\n self.assertEqual(\n [[\n '--document=templates',\n '--action=fetch',\n '--previous-document-version=',\n '--action-file=' + self.template_fetch_file,\n '--api-version=1',\n ]],\n invoker.get_invoked_arguments(),\n )\n self.assertTrue(os.path.isfile(self.template_cache_file))\n with open(self.template_cache_file, 'r') as f:\n self.assertEqual(expected_data, json.load(f))", "def create_fake(cls):\n source = pkg_resources.open_text('baseball_id', 'sample.master.csv',\n encoding='iso-8859-1')\n c = lookup.Cache(source)\n return c", "def test_guidanceresponse_1(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"guidanceresponse-example.json\"\n inst = guidanceresponse.GuidanceResponse.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"GuidanceResponse\" == inst.resource_type\n\n impl_guidanceresponse_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"GuidanceResponse\" == data[\"resourceType\"]\n\n inst2 = guidanceresponse.GuidanceResponse(**data)\n impl_guidanceresponse_1(inst2)", "def mock_load_entities(tmpdir, mocker, study_generator):\n\n def _dry_load_entities(*args, **kwargs):\n \"\"\"\n Inserts KF IDs into output of _load_entities\n \"\"\"\n from kf_lib_data_ingest.app.settings.base import TARGET_API_CONFIG\n\n entity_type = args[0]\n loader = LoadStage(\n TARGET_API_CONFIG,\n settings.DATASERVICE_URL,\n [entity_type],\n study_generator.study_id,\n cache_dir=os.path.join(tmpdir, \"temp\"),\n dry_run=True,\n )\n loader.run({entity_type: args[1]})\n return loader.uid_cache\n\n mock = mocker.patch(\n \"creator.ingest_runs.genomic_data_loader.GenomicDataLoader.\"\n \"_load_entities\",\n side_effect=_dry_load_entities,\n )\n return mock", "def fixture_chunked_json_data(tmp_path_factory, request):\n # Make root dir\n root = tmp_path_factory.mktemp(\"data\")\n\n # Set params\n num_chunks = request.param.num_chunks\n chunk_size = request.param.chunk_size\n\n # Seed JSON data\n paths = [root / Path(f\"{idx}.json\") for idx in range(num_chunks)]\n for chunk_idx, path in enumerate(paths):\n if not path.parent.exists():\n path.parent.mkdir(parents=True)\n\n content = {str(chunk_idx + idx): chunk_idx + idx for idx in range(chunk_size)}\n with path.open(\"w\") as file:\n json.dump(content, file)\n\n return root" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Falls back to the default fixture names if no names could be determined up to this point.
def _fallback_fixture_names(self):
    if not self.request_name or not self.response_name:
        warnings.warn(
            "No name was specified for the recorded fixture. Falling "
            "back to default names.")
    if not self.request_name:
        self.request_name = __default_names__[0]
    if not self.response_name:
        self.response_name = __default_names__[1]
[ "def test_get_all_pytest_fixture_names(request):\n fixture_names = get_all_pytest_fixture_names(request.session, filter=test_get_all_pytest_fixture_names.__module__)\n clear_environment_fixture(fixture_names)\n assert fixture_names == ['make_synthesis', 'a_number_str', 'dummy']\n\n fixture_names = get_all_pytest_fixture_names(request.session, filter=test_foo)\n clear_environment_fixture(fixture_names)\n assert fixture_names == ['make_synthesis', 'a_number_str', 'dummy']", "def populate_fixtures():\n languages()\n words()", "def test_load_fixtures_again():\n test_load_fixtures()", "def pytest_runtest_setup(item):\n if hasattr(item, 'fixturenames') and LOOP_KEY not in item.fixturenames:\n item.fixturenames.append(LOOP_KEY)", "def _fixture_setup(self):\n pass", "def clear_environment_fixture(fixture_names):\n try:\n fixture_names.remove('environment')\n except ValueError:\n pass", "def load_fixtures(self):\n for fixture_dir in settings.FIXTURE_DIRS:\n fixture_dir = os.path.join(fixture_dir, self.filesystem_name)\n for (root, dirs, files) in os.walk(fixture_dir):\n for file in files:\n full_file_path = os.path.join(root, *dirs, file)\n with open(full_file_path, 'rb') as f:\n self.save(os.path.relpath(full_file_path, fixture_dir), f)", "def setUp(self):\n\n fq_dataset_name = self.fq_table_names[0].split('.')\n self.fq_dataset_name = '.'.join(fq_dataset_name[:-1])\n\n fq_sandbox_name = self.fq_sandbox_table_names[0].split('.')\n self.fq_sandbox_name = '.'.join(fq_sandbox_name[:-1])\n\n super().setUp()", "def start_fixture(self):\n pass", "def fixture_name(self):\n return \"genomic_delins\"", "def test_setup_fixture(fixture_setup_fail):\n pass", "def load_fixtures(fixtures=None):\n to_load = fixtures.split(' ') if fixtures else env.fixtures\n for fixture in to_load:\n manage(\"loaddata %s\" % fixture)", "def setUpFixture(self):\n pass", "def fixture_name(self):\n return \"coding_dna_substitution\"", "def generate_tests(self, fixture):\n if fixture.startswith(\"splunk_searchtime_fields\"):\n yield from self.dedup_tests(\n self.fieldtest_generator.generate_tests(fixture),\n fixture\n )\n elif fixture.startswith(\"splunk_searchtime_cim\"):\n yield from self.dedup_tests(\n self.cim_test_generator.generate_tests(fixture),\n fixture\n )", "def _find_first_full_test(self):\n for test in self._tests:\n if hasattr(test, 'fixtures'):\n return test\n raise AttributeError('no fixtures found')", "def fixture_microbial_sample_name():\n return \"microbial_name_test\"", "def fixture_name(self):\n return \"genomic_substitution\"", "def create_fixtures(self, app_name):\n\t\twith open(self.core_src_path + '/app/' + 'site_additional.json', 'r') as f:\n\t\t\tsite_additional = f.read()\n\t\twith open(self.project_path + '/' + app_name + '/' + 'fixtures/site_additional.json', 'w') as f:\n\t\t\tf.write(Template(site_additional).substitute(user_firstname=self.admin_name))\n\t\t# Home: Group, Application, Service, View, XpTemplate\n\t\t# Write fixture based on data\n\t\t# project_name, app_name, date_now, user_id, project_title, app_slug, group_id\n\t\t# date: \"2013-06-16T16:34:32\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate an ICS calendar file from days.
def generate_ics(days: Sequence[dict], filename: Text) -> None:
    cal = Calendar()
    cal.add("X-WR-CALNAME", "中国法定节假日")
    cal.add("X-WR-CALDESC", "中国法定节假日数据,自动每日抓取国务院公告。")
    cal.add("VERSION", "2.0")
    cal.add("METHOD", "PUBLISH")
    cal.add("CLASS", "PUBLIC")
    cal.add_component(_create_timezone())

    days = sorted(days, key=lambda x: x["date"])
    for fr, to in _iter_date_ranges(days):
        start = _cast_date(fr["date"])
        end = _cast_date(to["date"]) + datetime.timedelta(days=1)
        name = fr["name"] + "假期"
        if not fr["isOffDay"]:
            name = "上班(补" + name + ")"
        cal.add_component(_create_event(name, start, end))

    with open(filename, "wb") as f:
        f.write(cal.to_ical())
[ "def make_ics(occurrences=None,title=None):\n tz = pytz.timezone(settings.TIME_ZONE)\n\n name = \"%s @ %s\"%(title,settings.SITE_NAME)\n calObj = icalendar.Calendar()\n calObj.add('method', 'PUBLISH') # IE/Outlook needs this\n calObj.add('version','2.0')\n calObj.add('prodid', '-//%s calendar product//mxm.dk//'%settings.SITE_NAME)\n\n calObj.add('x-wr-calname', name)\n calObj.add('name', name)\n\n for occ in occurrences:\n vevent = icalendar.Event()\n start_dt = tz.localize(occ.start)\n start_dt = start_dt.astimezone(pytz.utc)\n\n vevent['uid'] = '%s%d'%(slugify(settings.SITE_NAME),occ.id)\n vevent.add('dtstamp', start_dt)\n vevent.add('dtstart', start_dt)\n if occ.end:\n end_dt = tz.localize(occ.end)\n end_dt = end_dt.astimezone(pytz.utc)\n vevent.add('dtend', end_dt)\n\n vevent.add('summary', occ.name)\n vevent.add('url', '%s%s'%(settings.SITE_URL, occ.get_absolute_url()))\n vevent.add('class', 'PUBLIC')\n vevent.add('x-microsoft-cdo-importance', '1')\n vevent.add('priority', '5')\n vevent.add('description', occ.description)\n vevent.add('room', str(occ.get_room()))\n\n calObj.add_component(vevent)\n\n return calObj", "def ics_generator(new_class):\n total_dates = new_class.date()\n new_cal = Calendar()\n new_cal.add('summary', new_class.code + \" \" + new_class.name)\n counter = 1\n for date in total_dates:\n event = Event()\n event[\"dtstart\"] = date + \"T\" + new_class.start_time + \"00\"\n event[\"dtend\"] = date + \"T\" + new_class.end_time + \"00\" \n event[\"summary\"] = new_class.code + \" \" + new_class.name + \" lesson \" + str(counter)\n new_cal.add_component(event)\n counter += 1\n f = open('course_schedule.ics', 'wb')\n f.write(new_cal.to_ical())\n f.close()", "def generate_days(ndays, year=2022, month=1, day=1):\n # NOTE: This method is more efficient than the \"string parsing\"\n # method used by generate_months() and generate_years(),\n # but this only matters if generating a lot of entries\n # and it only works if the datetime64-represented\n # distance between units to generate is constant\n day_indexes = np.arange(ndays, dtype=np.int64) # 0, 1, ..., [ndays-1]\n startdate = np.datetime64(f'{year:02d}-{month:02d}-{day:02d}T00:00:00.000000', 'us')\n usec_per_day = int(1e6) * 86400 # 86.4k sec per day = 60*60*24s\n usec_offsets = day_indexes * usec_per_day\n return usec_offsets + startdate", "def as_ical(self):\n if self.date_is_approximate:\n return None\n\n ymd = (self.date.year, self.date.month, self.date.day)\n event_date = date(*ymd)\n event = icalendar.Event()\n event.add(\"dtstart\", event_date)\n event.add(\"dtend\", event_date + timedelta(days=1))\n event.add(\"uid\", self.ical_uid)\n event.add(\"summary\", \"Django Girls %s\" % self.city)\n event.add(\"location\", f\"{self.country}, {self.city}\")\n return event", "def list_days_generator(year, month, iday, fday):\n ldays = []\n for v in range(iday, fday+1):\n ldays.append(\"%d%d%02d\" % (year, month, v))\n return ldays", "def gen_dates(train_per_start, hours_inc=48, n_inc=10, hours1_inc=6, n1_inc=4):\n dates = []\n train_per = train_per_start[:]\n for i_inc in range(n_inc):\n # '2014-06-24 01:00:00','2014-06-30 00:00:00'\n train_per1 = train_per[:]\n for i1_inc in range(n1_inc):\n dates.append(train_per1[:])\n train_per1[0] = add_hour(train_per1[0], hours1_inc)\n train_per1[1] = add_hour(train_per1[1], hours1_inc)\n train_per[0] = add_hour(train_per[0], hours_inc)\n train_per[1] = add_hour(train_per[1], hours_inc)\n return dates", "def generate_days(self, nr_of_days):\n log = []\n names = 
self.load_names()\n\n for i in range(0, nr_of_days):\n log.extend(self.generate_day_cycle(names))\n\n return log", "def generate_days_list():\n\n seven_days = []\n\n for i in xrange(1, 8):\n seven_days.append([i, 0])\n\n return seven_days", "def generate_day_cycle(self, names):\n day_log = []\n time_delta = timedelta(days=1)\n\n for i in range(0, len(self.HOUR_SHEET)):\n if self.is_time_for_bruteforce(i):\n day_log.extend(self.generate_brute_force_log(i, names))\n\n day_log.extend(self.generate_hour_cycle(i, names))\n\n day_log.sort()\n\n self.date += time_delta\n\n return day_log", "def get_day_iterations(cls, day=None):\n if day is None:\n day = date.today()\n\n return Word.select().join(Iteration).where(\n Iteration.date == day,\n ).group_by(Word)", "def make_group(self, prefix, rotation_days=7, n=None):\n doctors = []\n if n is None:\n l = len(self.daypattern)\n if l % rotation_days != 0:\n raise ValueError(\n \"daypattern length ({}) is not a multiple of \"\n \"rotation_days ({})\".format(l, rotation_days))\n n = l // rotation_days\n for i in range(n):\n doctors.append(self.copy(prefix + str(i + 1), rotate=i * 7))\n return doctors", "def calendar_list(self, start_date=\"\", end_date=\"\", weekday_str=\"\"):\n\n try:\n return [\n int(weekday_str[0]), # Monday\n int(weekday_str[1]), # Tuesday\n int(weekday_str[2]), # Wednesday\n int(weekday_str[3]), # Thursday\n int(weekday_str[4]), # Friday\n int(weekday_str[5]), # Saturday\n int(weekday_str[6]), # Sunday\n start_date, # Start\n end_date, # End\n ]\n\n except ValueError:\n logging.error(\n \"Failed to create service calendar on line %s of %s\",\n self.line_num,\n self.base_filename,\n )\n return ([0] * 7) + ([(\" \" * 8)] * 2)", "def daily_entropy(days, g):\n if g == 'good':\n color = 'rgba(119, 152, 191, .5)'\n else:\n color = 'rgba(223, 83, 83, .5)'\n de = list()\n for dev, dates in days.items():\n for d in dates:\n e = get_entropy(d[1])\n d[1] = e\n e_sorted = sorted(dates, key=lambda x: datetime.strptime(x[0], '%Y/%m/%d'))\n de.append({'name' : dev, 'color' : color, 'data' : e_sorted})\n return de", "def draw_day(day):\n\n day_drawing = \"\"\n for i in day:\n for j in i:\n day_drawing += j\n return day_drawing", "def generate_date_set(self, year, month, days):\n dates = set()\n for day in days:\n dates.add(date(year, month, day))\n return dates", "def write_ascii_days(self,dpath,fname_fmt='%Y%m%d.dat'):\n days = np.unique(self.t.date)\n print('Days in data:',[str(day) for day in days])\n surface_date = self.surface.index.date\n profile_date = self.profile.index.date\n for day in days:\n surf = self.surface.loc[surface_date == day]\n prof = self.profile.loc[profile_date == day]\n fname = day.strftime(fname_fmt)\n fpath = os.path.join(dpath,fname)\n if os.path.isfile(fpath):\n print('Skipping existing file',fname)\n continue\n self.write_ascii(fpath, surface=surf, profile=prof)", "def daily_images(self, day, **kwargs):\n start = datetime(day.year, day.month, day.day)\n end = datetime(day.year, day.month, day.day, 23, 59, 59, 999999)\n for img in self.iter_images(start, end, **kwargs):\n yield img", "def _getCalendarEntries(self, days=1):\n Outlook = win32com.client.Dispatch(\"Outlook.Application\")\n ns = Outlook.GetNamespace(\"MAPI\")\n appointments = ns.GetDefaultFolder(9).Items\n #print(\"{appointments} : {ns} : {ns.GetDefaultFolder(9)}\")\n appointments.Sort(\"[Start]\")\n appointments.IncludeRecurrences = \"True\"\n today = datetime.today()\n begin = today.date().strftime(\"%d/%m/%Y\")\n tomorrow= timedelta(days=days)+today\n 
end = tomorrow.date().strftime(\"%d/%m/%Y\")\n appointments = appointments.Restrict(\"[Start] >= '\" +begin+ \"' AND [END] <= '\" +end+ \"'\")\n def convert_time(wintime):\n # Apparently the timezone information is incorrect. It is always +00:00 even thou the time\n # is in local time\n return LOCAL_TIMEZONE.localize(datetime.fromisoformat(str(wintime)[:-6])).timestamp()\n return [(convert_time(a.Start), a.Subject, a.Sensitivity) for a in appointments]", "def visualize_days():\n\t\n\t#grab our parsed data that we parsed earlier\n\tdata_file = parse(MY_FILE, \",\")\n\t\n\t#make a new variable, counter, from iterating through each line of\n\t#data in the parsed data, and count how many incidents happen on each\n\t#day of the week\n\tcounter = Counter(item[\"DayOfWeek\"] for item in data_file)\n\t\n\t#separate the x-axis data (days of the week) from the counter variable\n\t#from the y-axis (number of incidents each day)\n\tdata_list = [\n\t\t\t\tcounter[\"Monday\"],\n\t\t\t\tcounter[\"Tuesday\"],\n\t\t\t\tcounter[\"Wednesday\"],\n\t\t\t\tcounter[\"Thursday\"],\n\t\t\t\tcounter[\"Friday\"],\n\t\t\t\tcounter[\"Saturday\"],\n\t\t\t\tcounter[\"Sunday\"]\n\t\t\t\t]\n\tday_tuple = tuple([\"Mon\", \"Tues\", \"Wed\", \"Thurs\", \"Fri\", \"Sat\", \"Sun\"])\n\t\n\t#with y-axis data, assign it to a matplotlib plot instance\n\tplt.plot(data_list)\n\t\n\t#create amount of ticks need for x and y axes and assign labels\n\tplt.xticks(range(len(day_tuple)), day_tuple)\n\t\n\t#save the plot\n\tplt.savefig(\"Days.png\")\n\t\n\t#close plot file\n\tplt.clf()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get user profile. Fetches from the user collection by using the user's email as the key.
def get_user_profile(email):
    # GET
    # NOTE: This method previously called LCS with director credentials in order to retrieve the user's name
    # We will update TeamRU to store names along with our user objects, saving the need to call LCS again
    user_profile = coll("users").find_one({"_id": email})
    if not user_profile:
        return {"message": "User not found"}, 404
    user_profile["user_id"] = user_profile.pop("_id")
    return user_profile, 200
[ "def _getProfileFromUser(self):\n # Make sure user is authenticated\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n # Get Profile from datastore\n user_id = user.email()\n p_key = ndb.Key(Profile, user_id)\n profile = p_key.get()\n # Create new Profile if not there\n if not profile:\n profile = Profile(\n key = p_key,\n displayName = user.nickname(),\n mainEmail= user.email(),\n teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),\n )\n profile.put()\n return profile", "def get_user(cls, email=None, user_id=None):\n\n params = {'email': email, 'user_id': user_id}\n user_dict = cls._do_call(\n 'GET', cls.api_endpoint + 'users', params=params)\n return user_dict", "def get_user_by_email(self, strategy, email):\r\n return strategy.storage.user.user_model().objects.get(email=email)", "def _getProfileFromUser(self):\n # make sure user is authed\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n\n # get Profile from datastore\n user_id = getUserId(user)\n p_key = ndb.Key(Profile, user_id)\n profile = p_key.get()\n # create new Profile if not there\n if not profile:\n profile = Profile(\n key = p_key,\n displayName = user.nickname(),\n mainEmail= user.email(),\n teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),\n )\n profile.put()\n\n return profile # return Profile", "def get_user_by_email(self, email):\n query = \"SELECT * FROM users WHERE email=%s\"\n response = Database().fetch_one(query, email)\n return response", "def get_user_by_email(email: str) -> dict:\n for user in get_current_users():\n if user['email'] == email:\n return user\n return {}", "def get_user(self, email):\n return run_transaction(\n self.sessionfactory,\n lambda session: get_user_txn(session, email))", "def get_user(self, email) -> object:\n # TODO: select all items that are needed, except pwd, and don't delete pwd later\n cursor, connection = self.get_cursor()\n del cursor\n connection.row_factory = sqlite3.Row\n\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM users WHERE email=?\", (email,))\n udict = dict(cursor.fetchall()[0])\n\n del connection.row_factory\n cursor.close()\n connection.close()\n del udict[\"pwd_and_salt\"]\n\n return User(udict.values())", "def get_user_from_current_db(self, email):\n return self.db['users'].find_one({'email': email})", "def get_user_by_email(email):\n for user in USERS:\n if user.email == email:\n return user", "def get_user_by_email(email):\r\n\r\n return User.query.filter(User.email == email).first()", "def retrieve_user_from_db_by_email(email):\n # Query db for user with those params\n query = \"\"\"\n SELECT user_id, username, email, password FROM users\n WHERE users.email = '{}'\"\"\".format(email)\n\n return database.select_from_db(query)", "def get_my_profile(self):\n\n url = self.api_base_url + \"user/profile\"\n\n try:\n raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url)\n except RequestFailed:\n raise\n\n jsonified_response = json.loads(raw_response.text)\n user_profile = jsonified_response\n\n return user_profile", "def get_user_profile(self):\n return self.user.profile", "def get_profile(self):\n user = (User.objects.filter(Q(email=self.username) |\n Q(username=self.username)))[:1]\n\n if user:\n # Yes, sometimes the User exists but the UserProfile doesn't.\n # See: https://bugzilla.mozilla.org/show_bug.cgi?id=699234\n try:\n profile = user[0].get_profile()\n except ObjectDoesNotExist, e:\n 
statsd.incr('user.errors.profile_doesnotexist')\n log.warning(e)\n\n profile = UserProfile.objects.create(user=user[0])\n else:\n statsd.incr('user.errors.doesnotexist')\n log.warning('No user with email %s' % self.username)\n\n user = User(username=self.username, email=self.username)\n user.set_unusable_password()\n user.save()\n\n profile = user.get_profile()\n\n return profile", "def _getProfileFromUser(self):\n # make sure user is authed\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n\n # get Profile from datastore\n user_id = getUserId(user)\n p_key = ndb.Key(Profile, user_id)\n profile = p_key.get(use_cache=False, use_memcache=False)\n \n # create new Profile if not there\n if not profile:\n\n #generate initial BalanceHistory \n mrbhk = self._generateInitialBalanceHistory(p_key)\n \n #pupulate new ndb Profile entity\n profile = Profile(\n key = p_key,\n userId = user_id,\n displayName = user.nickname(),\n mainEmail= user.email(),\n teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),\n MostRecentBalanceHistoryKey = mrbhk,\n )\n \n #if profile exists make it's balance history current \n #and update it's mosrt recent balance history key\n else:\n\n mrbh = self._MakeBalanceHistCurrent(profile)\n\n profile.MostRecentBalanceHistoryKey = mrbh.key\n\n #save Profile entity to Datastore \n profile.put() \n\n # return ndb Profile entity object\n return profile", "def find_by_email(cls, email):\n for user in cls.all:\n if user.email == email:\n return user", "def get_user_by_email(data, email):\n\n if isinstance(data, list):\n for user in data:\n if user.get(\"email\") == email:\n return user\n else:\n for key, val in data.items():\n if key == \"email\" and val == email:\n return data\n return None", "def lookup_email(email):\n user = User.objects(email=email).first()\n return user" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create user profile. Creates a new user profile from the user's email, skills, prizes, and other fields.
def create_user_profile(email, **kwargs):
    # POST
    user_exists = coll("users").find_one({"_id": email})
    if user_exists:
        return {"message": "User already exists"}, 400
    # NOTE Doesn't make sense for a person to have prizes only a team should have this
    coll("users").insert_one(
        {
            "_id": email,
            "skills": kwargs["skills"],
            "prizes": kwargs["prizes"],
            "bio": kwargs["bio"],
            "github": kwargs["github"],
            "interests": kwargs["interests"],
            "seriousness": kwargs["seriousness"],
            "team_id": "",
            "hasateam": False,
        }
    )
    return {"message": "User profile successfully created"}, 201
[ "def create_profile():\n if g.user is not None or 'openid' not in session:\n return redirect(url_for('index'))\n if request.method == 'POST':\n name = request.form['name']\n email = request.form['email']\n if not name:\n flash(u'Error: you have to provide a name')\n elif '@' not in email:\n flash(u'Error: you have to enter a valid email address')\n else:\n user = User(name=name, email=email, openid=session['openid'])\n user.put()\n flash(u'Profile successfully created')\n return redirect(oid.get_next_url())\n return render_template('/login/create_profile.html', next_url=oid.get_next_url())", "def profile_create(faker_obj=fake_init()):\n profile = faker_obj.simple_profile()\n user = User.objects.create(\n username=profile[\"username\"],\n email=profile[\"mail\"],\n password=profile[\"username\"][::-1],\n )\n return user.id", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n profile = UserProfile()\n profile.user = instance\n profile.save()", "def create_user_profile(instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)", "def CreateProfile(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def create_profile(sender, **kwargs):\n user = kwargs[\"instance\"]\n if kwargs[\"created\"]:\n user_profile = Profile(user=user)\n user_profile.save()", "def create_user(self, *args, **kwargs):\n user = User.objects.create_user(*args, **kwargs)\n return get_profile(user)", "def create_profile(self, user):\r\n salt = sha_constructor(str(random.random())).hexdigest()[:5]\r\n activation_key = sha_constructor(salt+user.username).hexdigest()\r\n return self.create(user=user,\r\n activation_key=activation_key)", "def create_my_profile(\n body: Optional[UserProfilePrivateCreate] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = CreateMyProfile.create(\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def create_profile(request):\n try:\n profile_obj = request.user.profile\n return redirect(\"profiles_edit_profile\")\n except ObjectDoesNotExist:\n pass\n\n form = ProfileForm(request.POST or None, request.FILES or None, request=request)\n if form.is_valid():\n profile_obj = form.save(commit=False)\n profile_obj.user = request.user\n profile_obj.save()\n\n if hasattr(form, \"save_m2m\"):\n form.save_m2m()\n return redirect(\"profiles_my\")\n return {\"form\": form}", "def create_profile(self, user, invitation_hash = None):\n salt = sha.new(str(random.random())).hexdigest()[:5]\n activation_key = sha.new(salt+user.username).hexdigest()\n invitation_hash = invitation_hash\n \n return self.create(user=user,\n activation_key=activation_key,invitation_hash=invitation_hash)", "def create_profile(self,user):\n salt= sha.new(str(random.random())).hexdigest()[:5]\n activation_key = sha.new(salt+user.username).hexdigest()\n\n return RegistrationProfile(user=user,\n activation_key=activation_key)", "def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def createUserProfile(sender, instance, **kwargs):\n UserProfile.objects.get_or_create(user=instance)", "def create(cls, **kwargs):\n if \"user\" not in kwargs:\n with mute_signals(post_save):\n profile = 
ProfileFactory.create()\n kwargs[\"user\"] = profile.user\n return super().create(**kwargs)", "def create_user(request, client, email):\n username = slugify(client.name)\n password = generate_password()\n user_count = User.objects.filter(username__icontains=username).count()\n if user_count:\n # This case may fail when somebody delete a user\n # and the user count is > 1\n username = '{}_{}'.format(username, user_count)\n user = User.objects.create(username=username, email=email,\n password=password, is_active=False)\n up = user.userprofile\n up.user_type = u'CL'\n up.employee_id = client.name\n up.client_id = client.id\n up.save()\n email_client_creation(request, client, client.reg_name, username, password)\n return user", "def create_user_profile_callback(sender, instance, created, **kwargs):\n try:\n instance.get_profile()\n except UserProfile.DoesNotExist:\n UserProfile.objects.create(user=instance)", "def create_profile_of_user(sender, instance, created, **kwargs):\n if created:\n RevolvUserProfile.objects.get_or_create(user=instance)", "def create_or_update_user_profile(sender, instance, created, **kwargs):\n _, created = UserProfile.objects.get_or_create(user=instance)\n if created and instance.email != \"\":\n instance.profile.email = instance.email\n instance.profile.save()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resolver should be able to produce a value for a given key. If key doesn't exist, should return None.
def resolve(self, key: str) -> Optional[Any]: pass
[ "def resolve(self, key: str) -> Optional[Any]:\n return self.dict.get(key)", "def _get(self, key):\n key_in_st = []\n self._check_key(key_in_st, key, self._keys, 0, self._size - 1)\n if key_in_st[0]:\n return self._vals[key_in_st[1]]\n else:\n return None", "def get_value(data, key):\n if key in data:\n return data[key]\n return None", "def get(self, key, fallback):\r\n try:\r\n return self[key]\r\n except (KeyError, IndexError):\r\n return fallback", "def try_get(self, key):\n\t\tif key not in self.db:\n\t\t\treturn None\n\n\t\treturn self.db[key]", "def get_value(doc, key):\n\n value = doc.get(key)\n if value is None:\n return None\n elif len(value) == 0:\n return None\n else:\n return value[0]", "def get(self, key):\n index1 = self.hash_index(key)\n if self.data[index1] is not None:\n return self.data[index1].find(key)\n return None", "def get_value(self, key: str) -> Any:\r\n if key is None:\r\n return self.data\r\n try:\r\n return self.data[key]\r\n except KeyError:\r\n return None", "def find_key_value(self, key):\n lst = self.hash_table[self.hash_gen(key)]\n if lst != None:\n return lst.find_value(key)\n return None", "def __getitem__(self, key):\n value = self.get(key, default=None)\n if value == None:\n raise KeyError, key\n else:\n return value", "def __getitem__(self, key):\n try:\n return self.args[key]\n except KeyError:\n return None # does not exist is the same as None, gracefull catch", "def resolve_resolver_value(self, resolver: \"Resolver\") -> Any:\n try:\n return resolver.resolve()\n except RecursiveResolve:\n # Recursive resolve issues shouldn't be masked by a placeholder.\n raise\n except Exception:\n if are_placeholders_enabled():\n placeholder_value = create_placeholder_value(\n resolver, self.placeholder_type\n )\n\n self.logger.debug(\n \"Error encountered while resolving the resolver. This is allowed for the current \"\n f\"operation. Resolving it to a placeholder value instead: {placeholder_value}\"\n )\n return placeholder_value\n raise", "def get_item(self, key: int) -> Any:\n hash_value = key % self.table_size\n vals = self.hash_table[hash_value]\n if self.hash_table[hash_value] is None or not self.hash_table[hash_value]:\n raise LookupError\n for i in vals:\n if i[0] == key:\n return i[1]\n raise LookupError", "def resolve(self, section, key):\n\n return self.sections[section][key]", "def _GetValue(self, key):\n pass", "def get(self, key, default=None):\n key = self._validate_key(key)\n sql = u\"\"\"\n SELECT `value` FROM `{table}` WHERE key = ?\n \"\"\".format(table=self.name)\n\n r = self.conn.execute(sql, (key,)).fetchone()\n\n if r:\n return self.convert_out(r['value'])\n\n return default", "def get_item(self, key):\n try:\n return self.dict[key]\n except KeyError:\n if self.exceptions:\n raise KeyError\n return None", "def get(self, key: str, default=None):\n if key in self.params:\n return self.params.get(key)\n if key in self.results:\n return self.results.get(key).value\n return default", "def lookup(key, default=None):\n def _lookup(mapping):\n return mapping.get(key, default)\n return _lookup" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resolver should be able to produce a value for a given key. If key doesn't exist, should return None.
def resolve(self, key: str) -> Optional[Any]: return self.dict.get(key)
[ "def resolve(self, key: str) -> Optional[Any]:\n pass", "def _get(self, key):\n key_in_st = []\n self._check_key(key_in_st, key, self._keys, 0, self._size - 1)\n if key_in_st[0]:\n return self._vals[key_in_st[1]]\n else:\n return None", "def get_value(data, key):\n if key in data:\n return data[key]\n return None", "def get(self, key, fallback):\r\n try:\r\n return self[key]\r\n except (KeyError, IndexError):\r\n return fallback", "def try_get(self, key):\n\t\tif key not in self.db:\n\t\t\treturn None\n\n\t\treturn self.db[key]", "def get_value(doc, key):\n\n value = doc.get(key)\n if value is None:\n return None\n elif len(value) == 0:\n return None\n else:\n return value[0]", "def get(self, key):\n index1 = self.hash_index(key)\n if self.data[index1] is not None:\n return self.data[index1].find(key)\n return None", "def get_value(self, key: str) -> Any:\r\n if key is None:\r\n return self.data\r\n try:\r\n return self.data[key]\r\n except KeyError:\r\n return None", "def find_key_value(self, key):\n lst = self.hash_table[self.hash_gen(key)]\n if lst != None:\n return lst.find_value(key)\n return None", "def __getitem__(self, key):\n value = self.get(key, default=None)\n if value == None:\n raise KeyError, key\n else:\n return value", "def __getitem__(self, key):\n try:\n return self.args[key]\n except KeyError:\n return None # does not exist is the same as None, gracefull catch", "def resolve_resolver_value(self, resolver: \"Resolver\") -> Any:\n try:\n return resolver.resolve()\n except RecursiveResolve:\n # Recursive resolve issues shouldn't be masked by a placeholder.\n raise\n except Exception:\n if are_placeholders_enabled():\n placeholder_value = create_placeholder_value(\n resolver, self.placeholder_type\n )\n\n self.logger.debug(\n \"Error encountered while resolving the resolver. This is allowed for the current \"\n f\"operation. Resolving it to a placeholder value instead: {placeholder_value}\"\n )\n return placeholder_value\n raise", "def get_item(self, key: int) -> Any:\n hash_value = key % self.table_size\n vals = self.hash_table[hash_value]\n if self.hash_table[hash_value] is None or not self.hash_table[hash_value]:\n raise LookupError\n for i in vals:\n if i[0] == key:\n return i[1]\n raise LookupError", "def resolve(self, section, key):\n\n return self.sections[section][key]", "def _GetValue(self, key):\n pass", "def get(self, key, default=None):\n key = self._validate_key(key)\n sql = u\"\"\"\n SELECT `value` FROM `{table}` WHERE key = ?\n \"\"\".format(table=self.name)\n\n r = self.conn.execute(sql, (key,)).fetchone()\n\n if r:\n return self.convert_out(r['value'])\n\n return default", "def get_item(self, key):\n try:\n return self.dict[key]\n except KeyError:\n if self.exceptions:\n raise KeyError\n return None", "def get(self, key: str, default=None):\n if key in self.params:\n return self.params.get(key)\n if key in self.results:\n return self.results.get(key).value\n return default", "def lookup(key, default=None):\n def _lookup(mapping):\n return mapping.get(key, default)\n return _lookup" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines the number of files each node will process in scatter gather environment
def number_of_files_per_node(files, number_of_nodes): files_per_node = float(len(files))/float(number_of_nodes) if files_per_node > 0.: return int(math.floor(files_per_node)) else: return int(math.ceil(files_per_node))
[ "def get_number_of_processors_used(global_to_local_fpath):\n data = np.loadtxt(global_to_local_fpath, dtype='i4')\n n_proc = np.amax(data[:,1]) + 1\n return n_proc", "def fileCount(self):\n pass", "def num_partitions(self): # -> int:\n ...", "def file_engine_node_count(self) -> int:\n return pulumi.get(self, \"file_engine_node_count\")", "def get_num_files(self):\r\n return self.nfile", "def get_n_files_total(file_list):\n\n\t# Get the base path of the files\n\ts = file_list[0].split('/')\n\tpath = os.path.join(*s[:-2])\n\t# Initialize n_files with 0\n\tnfiles = 0\n\t# Loop over all files in file_list\n\tfor f in file_list:\n\t\t# Split the filename to retrieve the set number and running number\n\t\tsplitted = f.split(\"/\")[-1].split(\".\")[0].split(\"_\")\n\t\t# Reconstruct the file name of the pickle file that was used to create\n\t\t# the hdf5 file\n\t\tfile_name = \"i3Files_MC_{}_{}.pickle\".format(splitted[3], splitted[-1])\n\t\t# Combine base path and pickle name\n\t\tprep_path = os.path.join(path, \"prep_files\", file_name)\n\t\t# Open the pickle file and retrieve the number of i3files\n\t\twith open(prep_path) as p:\n\t\t\td = pickle.load(p)\n\t\t# Add number of i3files to total number of i3 files\n\t\tnfiles += len(d[\"i3_list\"])-1\n\treturn float(nfiles)", "def number_of_workers():\n return (cpu_count() * 2) + 1", "def fileCounter(directory):", "def number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1", "def get_num_chunks(self) -> int:", "def num_chunking_units(self):\n if self._source_paths:\n return len(self._source_paths)\n return 1", "def file_stats(file_pairs):\n loc = 0\n nfiles = 0\n nsuites = 0\n ntests = 0\n for path, filename in file_pairs:\n loc += int(os.popen('wc -l '+path+'/'+filename).read().split()[0])\n nfiles += 1\n if (filename[:4] == 'Test'):\n nsuites+=1\n ntests+= int(os.popen('egrep -c -i \"void\\s+Test\" '+path+'/'+filename).read().split()[0])\n return (nfiles, loc, nsuites, ntests)", "def num_processes():\n return 1", "def get_amount_of_data(directory: str):\n size = sum([os.path.getsize(os.path.join(directory, item)) for item in os.listdir(directory) if os.path.isfile(os.path.join(directory, item))])\n print(size)\n return size", "def n_files(self):\n return len(self._library)", "def total_files_to_process(self) -> float:\n return pulumi.get(self, \"total_files_to_process\")", "def calculate_train_cluster_sizes():\n return calculate_cluster_sizes(os.path.expanduser(\"resources/legal_filter_train\"))", "async def num_fomod_files_to_install(self):\n n = 0\n for f in self.fomod.files_to_install:\n if f.type == \"folder\":\n n += await self.count_folder_contents(f.source)\n else:\n n += 1\n\n return n", "def __computer_number_of_samples__(self):\n\n slice_count = []\n for ii in self.imgs_list:\n with h5py.File(ii, 'r') as f:\n aux = f['data'].shape[0]\n slice_count.append(aux - (self.crop[1] + self.crop[0]))\n\n slice_count = np.array(slice_count)\n return slice_count.sum(),slice_count.cumsum()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a request to Slack and validate the response
def slack_request(url: str, headers: dict, data: dict) -> requests.Response: logger.debug(f'\nSending request to Slack API using {url}') response = requests.post(url=url, headers=headers, data=data) if response.status_code != 200: logger.error(f'Got status {response.status_code} while trying to post to the slack url {url}.') # todo: check for error details, since their response format is not always consistent, so converting to json doesn't work all the time. #data = response.json() #if not data['ok']: # logger.error(f"Got the following errors back from slack: {data}") return response
[ "def slack_slash_request():\n try: \n if not is_slack_request_valid(\n ts=request.headers[\"X-Slack-Request-Timestamp\"],\n body=request.get_data().decode(\"utf-8\"), \n signature=request.headers[\"X-Slack-Signature\"],\n signing_secret=SLACK_SIGNING_SECRET):\n print(\"ERROR: Invalid Slack request\")\n abort(400)\n\n # get token for current workspace\n team_id = request.form.get(\"team_id\")\n user_id = request.form.get(\"user_id\")\n\n if team_id is not None and user_id is not None: \n return Response(\n slack_create_main_menu(team_id, user_id).get_json(), \n mimetype='application/json'\n )\n \n except Exception as error:\n print(\"ERROR: \", error) \n response_json = {\n \"text\": \"An internal error has occurred\"\n }\n raise error\n # return Response(response_json, mimetype='application/json')\n\n return \"\"", "def slash_response(payload):\n\n # get the full request from Slack\n slack_request = request.form\n print(slack_request)\n # starting a new thread for doing the actual processing \n x = threading.Thread(\n target=ai_message,\n args=(slack_request,payload,)\n )\n x.start()\n\n ## respond to Slack with quick message\n # and end the main thread for this request\n return \"Let me think.... Please wait :robot_face:\"", "def test_slackWH_send_good(get_slackwebhook, capsys):\n s = get_slackwebhook\n s.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out", "def __call(self, headers, method, data):\n url = 'https://slack.com/api/'+method\n req = requests.post(\n url=url,\n data=data,\n headers=headers\n )\n return req", "def slack_api_call(self, *args, **kwargs):\n if self.slack_client is None:\n return {\"ok\": False, \"error\": \"Slack client does not exist\"}\n else:\n try:\n resp = self.slack_client.api_call(*args, **kwargs)\n return resp\n except Exception as e:\n # leaving error/warning logging to callers, only debug log here.\n log.debug(f\"Exception encountered when making Slack api call: {e}\")\n return {\"ok\": False, \"error\": f\"{type(e).__name__}: {e}\"}", "def slack_api(params, method='chat.postMessage'):\n SLACK_API = 'https://slack.com/api/' + method\n\n # url = SLACK_API + urllib.urlencode(params)\n #\n # print(\"URL:\", url)\n # content = urllib2.urlopen(url)\n\n r = requests.get(SLACK_API, params=params)\n\n return r.text", "def post(self):\n send_slack_log('Entered /slack/submit')\n send_slack_log('Request info:')\n send_slack_log(str(request.form))\n if request.form.get('payload') is None:\n send_slack_log('Invalid request: no payload')\n return\n else:\n return handle_interaction(json.loads(request.form['payload']))", "def send_message_to_slack(text):\n\n\n try:\n post = {\n \"text\": \":fire: :sad_parrot: An error has occured in the *Athena \\\n Partition Maintenace* pod :sad_parrot: :fire:\",\n \"attachments\": [\n {\n \"text\": \"{0}\".format(text),\n \"color\": \"#B22222\",\n \"attachment_type\": \"default\",\n \"fields\": [\n {\n \"title\": \"Priority\",\n \"value\": \"High\",\n \"short\": \"false\"\n }\n ],\n \"footer\": \"Kubernetes API\",\n \"footer_icon\": \"https://platform.slack-edge.com/img/default_application_icon.png\"\n }\n ]\n }\n\n ssm_param_name = 'slack_notification_webhook'\n ssm = boto3.client('ssm', config=CONFIG)\n try:\n response = ssm.get_parameter(Name=ssm_param_name, WithDecryption=True)\n except ClientError as err:\n if err.response['Error']['Code'] == 'ParameterNotFound':\n LOGGER.info('Slack SSM parameter %s not found. 
\\\n No notification sent', ssm_param_name)\n return\n else:\n LOGGER.error(\"Unexpected error when attempting to get Slack webhook URL: %s\", err)\n return\n if 'Value' in response['Parameter']:\n url = response['Parameter']['Value']\n\n json_data = json.dumps(post)\n req = urllib.request.Request(\n url,\n data=json_data.encode('ascii'),\n headers={'Content-Type': 'application/json'})\n LOGGER.info('Sending notification to Slack')\n response = urllib.request.urlopen(req)\n\n else:\n LOGGER.info('Value for Slack SSM parameter %s not found. \\\n No notification sent', ssm_param_name)\n return\n\n except Exception as err:\n LOGGER.error(\n 'The following error has occurred on line: %s',\n sys.exc_info()[2].tb_lineno)\n LOGGER.error(str(err))", "def is_request_valid(request: request) -> bool:\n \n key = os.environ.get(\"SLACK_SIGNING_SECRET\")\n basestring = 'v0:' + request.headers['X-Slack-Request-Timestamp'] + ':' + str(request.get_data(), 'utf-8')\n\n # Hash the basestring using the signing secret as the key in order to get the signature\n signature = 'v0=' + hmac.new(\n bytes(key, 'utf-8'),\n bytes(basestring, 'utf-8'),\n hashlib.sha256\n ).hexdigest()\n slacksig = request.headers['X-Slack-Signature']\n\n # If the signature is equal to the signature sent by slack, then it is indeed from slack.\n return hmac.compare_digest(slacksig, signature)", "def test_slackP_send(get_slackpost, capsys):\n s = get_slackpost\n s.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out", "def _silent_request(*args, **kwargs):\n try:\n return _request(*args, **kwargs)\n except SlackError:\n logger.exception('Slack API Error')\n except HTTPError as e:\n status_code = e.response.status_code\n # If we're being rate-limited, log the exception but don't fail.\n if status_code == 429:\n logger.exception('Slack API rate limit')\n else:\n raise", "def test_slack_challenge_response(self, verify_signature_mock):\n verify_signature_mock.return_value = True\n challenge = 'CHALLENGE_STRING'\n resp = self.client.post(\n '/events',\n headers=self._headers(),\n json={\n 'token': 'unYFPYx2dZIR4Eb2MwfabpoI',\n 'type': 'challenge',\n 'challenge': challenge,\n },\n )\n # self.assertEqual(resp.status_code, responses.OK.code)\n # self.assertEqual(resp.data.decode('utf-8'), challenge)\n responses.assertResponse(self, ({'challenge': challenge}, 200), resp)", "def slash_command():\n form_text = request.form[\"text\"]\n \n if len(form_text) > 0:\n data = {\n \"response_type\": \"in_channel\",\n \"text\": \"My response\",\n }\n else:\n \"\"\"\n If the user didn't type a message send a note that only\n they see about typing a message\n \"\"\"\n data = {\n \"response_type\": \"ephemeral\",\n \"text\": \"Error: No status message entered. 
Please try again.\",\n }\n\n \"\"\"\n Create the response object to send to Mattermost with the\n data object written as json, 200 status, and proper mimetype\n \"\"\"\n response = app.response_class(\n response=json.dumps(data),\n status=200,\n mimetype='application/json'\n )\n return response", "def bot_request():\n\n logger.debug('Received request, post data: {0}'.format(\n request.get_data()))\n\n if not viber.verify_signature(request.get_data(), request.headers.get(\n 'X-Viber-Content-Signature')):\n return Response(status=403)\n\n viber_request = viber.parse_request(request.get_data())\n\n if isinstance(viber_request, ViberConversationStartedRequest):\n cache.conversation_started(viber_request.user.id,\n viber_request.user.name)\n send_message(viber_request.user.id,\n 'Hello, {}!\\n\\n{}'.format(\n viber_request.user.name, command_help()))\n elif isinstance(viber_request, ViberMessageRequest):\n if not check_user_id(viber_request):\n return Response(status=403)\n handle_viber_request(viber_request)\n elif isinstance(viber_request, ViberSubscribedRequest):\n logger.info('User \"{}\" subscribed as user id \"{}\"'.format(\n viber_request.user.name, viber_request.user.id))\n cache.subscribe_user(viber_request.user.id, viber_request.user.name)\n send_message(viber_request.user.id,\n 'Hello, {}!\\n\\n{}'.format(\n viber_request.user.name, command_help()))\n elif isinstance(viber_request, ViberUnsubscribedRequest):\n logger.info('User id \"{}\" un-subscribed'.format(viber_request.user_id))\n cache.unsubscribe_user(viber_request.user_id)\n elif isinstance(viber_request, ViberFailedRequest):\n logger.warning('Client failed receiving message, failure: '\n '{0}'.format(viber_request))\n\n return Response(status=200)", "def test_bot_message():\n send_json_message_to_bot(request.get_json())\n return \"ok\"", "def slack_message(message, webhook_url):\n webhook = WebhookClient(webhook_url)\n response = webhook.send(text=message)\n return response", "def send_request(self, request):\r\n try:\r\n rv = self.api.request(request)\r\n # print(json.dumps(rv, indent=2))\r\n return rv\r\n except oandapyV20.exceptions.V20Error as err:\r\n print(request.status_code, err)\r\n return 1", "def flask_slack_test():\n _log('@channel: slack is working?')\n return 'slack test'", "def do_GET(self): # pylint: disable=invalid-name\n parsed_url = urlparse(self.path)\n parsed_query = parse_qs(parsed_url.query)\n\n helper.log_info(f'Incoming request from {self.client_address[0]} - {self.path}')\n\n # Strava webhook expects a reply with the hub.challenge parameter\n challenge = parsed_query['hub.challenge'][0]\n request_verify_token = parsed_query['hub.verify_token'][0]\n\n # Respond with hub.challenge parameter if verify_token is correct\n if request_verify_token == verify_token:\n self.write_response(200, {\"hub.challenge\": challenge})\n else:\n self.write_empty_response(400)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a report about stale branches for a list of repositories.
def check_stale_branches(event: dict, context) -> dict: ssm_parameters = load_params('dev_tools', 'dev') if 'jira_statuses_for_task_completion' in ssm_parameters and ssm_parameters['jira_statuses_for_task_completion']: jira_statuses_for_task_completion = ssm_parameters['jira_statuses_for_task_completion'] else: jira_statuses_for_task_completion = ('Resolved', 'Closed') repository_names = ssm_parameters['github_repository_names'] github_repository_names = repository_names.split(',') jira_oauth_dict = { 'access_token': ssm_parameters['jira_access_token'], 'access_token_secret': ssm_parameters['jira_access_token_secret'], 'consumer_key': ssm_parameters['jira_consumer_key'], 'key_cert': ssm_parameters['jira_private_key'] } auth_jira = JIRA(ssm_parameters['jira_url'], oauth=jira_oauth_dict) # Github authentication setup g = Github(ssm_parameters['github_access_token']) # Look for stale branches for all the specified repos total_stale_branches = 0 general_report = '' author_count = defaultdict(int) for repo_name in github_repository_names: logger.debug(f'\nChecking repo: {repo_name}') try: repo = g.get_repo(f"{ssm_parameters['github_account']}/{repo_name}") except GithubException: logger.error(f"Github repository '{ssm_parameters['github_account']}/{repo_name}' not found!") continue repo_report = '' # confirm the name for the main develop branch main_develop_branch = 'develop' try: _ = repo.get_branch('develop') except GithubException: main_develop_branch = 'master' logger.debug('Develop branch not found, using master as the main develop branch.') branches = repo.get_branches() for branch in branches: # only check feature and hotfix branches if not branch.name.startswith('feature/') and not branch.name.startswith('hotfix/'): continue # compare the branch against the main develop branch try: comparison = repo.compare(main_develop_branch, branch.name) except GithubException as error: logger.error(f'GithubException: Error while trying to compare {main_develop_branch} and {branch.name}.') logger.error(f'GithubException: {error}.') continue if comparison.behind_by == 0: # the branch is up to date, nothing to do continue # try to get the jira ticket number from the branch name ticket = None issue = None result = re.search(r'feature/(?P<ticket>[a-zA-Z]+-[0-9]+).*', branch.name) if result: ticket = result.groupdict()['ticket'].upper() try: issue = auth_jira.issue(ticket) except jira_exceptions.JIRAError: logger.debug(f"The ticket {ticket} specified in the branch name doesn't exist in Jira.") if issue and issue.fields.status.name not in jira_statuses_for_task_completion: # the issue hasn't been marked as resolved in jira, so the branch may still be needed continue author = branch.commit.author.login if branch.commit.author else 'unknown' author_count[author] += 1 repo_report += f'Branch: {branch.name}\nComparison status: {comparison.status}\nAuthor: {author}\n' if ticket: repo_report += f'Ticket status: {issue.fields.status.name}\n' repo_report += '\n' total_stale_branches += 1 if repo_report: general_report += f'Repo: {repo_name}, develop branch name: {main_develop_branch}\n{repo_report}' if total_stale_branches: count_by_author = '' for author, count in sorted(author_count.items(), key=operator.itemgetter(1), reverse=True): count_by_author += f'{author}: {count}\n' report_overview = f'Current number of stale branches: {total_stale_branches}\n\n'\ f'Count by author:\n{count_by_author}\n' report_details = f'Details:\n\n{general_report}' _ = slack_request(url=ssm_parameters['slack_webhook_url'], headers={'Content-type': 'application/json', 'Authorization': f"Bearer {ssm_parameters['slack_access_token']}"}, data=json.dumps({'text': report_overview}) ) _ = slack_request(url='https://slack.com/api/files.upload', headers={'Content-type': 'application/x-www-form-urlencoded'}, data={'token': ssm_parameters['slack_access_token'], 'channels': 'GE8NS0FT5', 'content': report_details, 'title': 'Stale branches details'} )
[ "def stale_pr_branches(config, args):\n repo = config.repo\n for pr in repo.pull_requests(state=\"closed\"):\n if pr.head.repo == pr.base.repo and repo.branch(pr.head.ref):\n yield {\n \"html_url\": pr.html_url,\n \"base_branch\": pr.base.ref,\n \"head_branch\": pr.head.ref,\n }", "def ensure_tracking_branches(args):\n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n branch_missing = repo.command(\n [\"rev-parse\", \"--verify\", \"-q\", project.refspec],\n capture_stdout=True)\n \n if branch_missing:\n logging.warn(\"Branch %s does not exist in project %s. checking out.\" %\n (project.refspec, name))\n repo.command([\"branch\", \"--track\",\n project.tracking_branch, project.remote_refspec])", "def get_updated_all_commits(self, old_commits): # pylint: disable=too-many-locals\n\n # get new list of branches\n newest_branches_names = [branch_info['name'] for branch_info in self.get_branches()]\n\n # get old list of branches from old metadata\n old_branches_names = list(old_commits['metadata'].keys())\n\n # get old metadata\n old_commits_metadata = old_commits['metadata']\n result = {}\n\n # delete all items in old metadata where branch name is not exist in new list of branches\n for old_branch_name in old_branches_names:\n if not newest_branches_names.count(old_branch_name):\n old_commits_metadata.pop(old_branch_name)\n\n checked_commits_metadata = old_commits_metadata\n # add to checked_commits_metadata all metadata that is not exist in old_commits_metadata\n for branch in newest_branches_names:\n if not old_branches_names.count(branch):\n checked_commits_metadata[branch] = None\n\n # get dict of old commits with key - hash of commit for further mapping by branch\n repo_commits = {commit['hash']: commit for commit in old_commits['data']}\n\n # get list of new commits from all branches in repository\n for branch_name, newest_commit in checked_commits_metadata.copy().items():\n updated_list_of_branch_commits = \\\n self.get_updated_commits_by_branch(branch_name, newest_commit, only_new=True)\n if updated_list_of_branch_commits is None:\n return None\n\n # adds key 'branches' with branch name in list to every commit in branch,\n # or if key 'branches' is existing add branch name to existing branches list\n for commit_in_branch in updated_list_of_branch_commits:\n commit = repo_commits.get(commit_in_branch['hash'])\n if commit:\n commit['branches'].append(branch_name)\n else:\n commit_in_branch['branches'] = [branch_name]\n repo_commits[commit_in_branch['hash']] = commit_in_branch\n\n # add new metadata to method response for further updates by get_updated_all_commits\n if updated_list_of_branch_commits:\n checked_commits_metadata[branch_name] = updated_list_of_branch_commits[0]\n else:\n # if given old commit is the newest - add it to new metadata. 
P.S unnecessary ???\n checked_commits_metadata[branch_name] = newest_commit[0]\n\n updated_list_of_branch_commits.clear()\n\n # sorts all commits in repository by date in reverse order\n updated_sorted_commits = sorted(list(repo_commits.values()), key=lambda x: x['date'],\n reverse=True)\n\n result['data'] = updated_sorted_commits\n result['metadata'] = checked_commits_metadata\n\n return result", "def get_historical_commits(api, repo):\n data = []\n result = []\n page = 1\n while True:\n tries = 0\n print(\"Commits page {}\".format(page))\n while tries < 10:\n try:\n result = api.operations.list_commits(repo, page).body\n time.sleep(RATE_SLEEP_LIMIT)\n break\n except:\n print(\"Commits tries {}\".format(tries))\n time.sleep(10)\n tries = tries + 1\n\n page = page + 1\n if not result:\n break\n data.extend([(e['commit']['committer']['date'][:10]) for e in result])\n data.sort()\n return data", "def list_commits(self, repos: WildcardFilter, n: int = 10) -> pd.DataFrame:\n repo_names = self._list_repo_names(repos)\n if len(repo_names) == 0:\n raise PachydermClientException(f'No repos matching \"{repos}\" were found.')\n df = pd.concat([self.adapter.list_commits(repo=repo, n=n) for repo in repo_names])\n return df.set_index(['repo', 'commit'])[['size_bytes', 'started', 'finished', 'parent_commit']]", "def get_repos_to_freeze(repo_root):\n schedule = _get_schedule_file(repo_root)\n repos_to_freeze = []\n link_re = re.compile(r'<a\\s+(?:[^\\s]+\\s+)?href=\"(?P<url>[^\"]+)\"')\n for line in schedule:\n start = 0\n while start >= 0:\n match = link_re.search(line, start)\n if match:\n # Found a match - store the url found\n url = urllib.parse.urlparse(match.group('url').strip())\n if url.netloc.endswith('.github.io'):\n url = _github_io_to_github_com(url)\n\n repos_to_freeze.append( (match.group('url'), url.geturl()) )\n # Check the rest of the string for another url\n start = match.end()\n else:\n start = -1 # No match, so exit loop\n logger.debug(\"get_repos_to_freeze found: %s\", repos_to_freeze)\n return repos_to_freeze", "def fetch_branches(self):\n for jrepo in self.json_repos['repos']:\n title = str(jrepo[\"title\"])\n self.branches[title] = str(jrepo['current'])", "async def fetch_commits(self):\n for repo in self.config['repos'].split(','):\n since = datetime.min\n async for msg in self.channel.history(limit=None):\n if not msg.embeds:\n continue\n e = msg.embeds[0]\n if e.title == 'github commit' and e.timestamp and repo in e.description: # type: ignore\n since = e.timestamp\n break\n \n await self.update_commit_activity(repo, since)", "def list_repos(self, repos: WildcardFilter = '*') -> pd.DataFrame:\n df = self.adapter.list_repos()\n if repos is not None and repos != '*':\n df = df[df.repo.isin(set(_wildcard_filter(df.repo, repos)))]\n df['is_tick'] = df['repo'].str.endswith('_tick')\n df['created'] = _tz_localize(df['created'])\n return df.set_index('repo')[['is_tick', 'branches', 'size_bytes', 'created']].sort_index()", "def __lastrepocommits(self, args, since):\n pushed = self.__has_been_pushed(args.repo, since.to_datetime())\n if pushed is not None:\n self._output.echo(pushed)\n for commit in self.__get_last_commits(args.repo, since.to_datetime()):\n self._output.echo(commit)", "def expireStaleLocks(ctx):\n desiredBuckets = ctx.desiredBuckets\n cutoff = datetime.utcnow() - LOCK_EXPIRE_TIMEDELTA\n for bkt in desiredBuckets:\n lockSel = {\n 'op': LOCK_OP_REPLICATE, 'dstBucket': bkt, 'ts': { '$lt': cutoff },\n }\n blobSel = {\n 'locks': { '$elemMatch': lockSel },\n }\n for blob in 
ctx.db.blobs.find(blobSel):\n bid = blob['_id']\n ctx.db.blobs.update_one(\n {\n '_id': bid,\n 'locs': { '$elemMatch': {\n 'bucket': bkt, 'status': 'copying'\n } },\n },\n { '$set': { 'locs.$.status': 'missing' } },\n )\n ctx.db.blobs.update_one(\n { '_id': bid },\n {\n '$pull': { 'locks': lockSel },\n '$currentDate': { 'mtime': True },\n },\n )\n msg = 'Expired stale lock {}/dstBucket={},op={}.'\n logger.info(msg.format(bid, bkt, LOCK_OP_REPLICATE))\n nogdStaleLocksExpiredTotal.labels(op=LOCK_OP_REPLICATE).inc()", "def extract_branch_names_hashes_from_report(reports):\n branch_names = []\n\n for command in reports['commands']:\n if command['command'].startswith('git rev-parse refs/heads/') and command['result']['status'] == 0:\n branch = {}\n branch['name'] = command['command'][25:].strip()\n branch['commit_hash'] = command['result']['out'].strip()\n branch_names.append(branch)\n return branch_names", "def get_branches(self, path):\n def _wrapper(callback=None):\n if path:\n root = findparent(self, RepoWatcher)\n text = root.get_activebranch(path)\n os.chdir(path)\n\n script = \"git for-each-ref --format='%(committerdate:rfc2822)=date\"\n script += \"%(authorname)=commiter%(refname:short)=branch\"\n script += \"%(objectname:short)=sha%(subject)=message'\"\n script += \"--sort=refname refs/heads/\"\n out = run_syscall(script).strip()\n\n remotes = run_syscall(\"git remote\").strip().split('\\n')\n pushed_script = \"git for-each-ref --format='%(refname:short);\"\n pushed_script += \"%(objectname:short)' --sort=refname refs/remotes/\"\n pushed_branches = []\n for remote in remotes:\n data = run_syscall(pushed_script + remote).strip()\n data = map(lambda x: x.strip().split(remote + '/')[1].split(';'),\n filter(lambda x: x.strip(), data.split('\\n')))\n pushed_branches.extend(data)\n\n pushed_branches = dict(pushed_branches)\n\n self.branches = []\n for l in out.split(\"\\n\"):\n tmp = dict(date=\"\", name=\"\", sha=\"\", commiter=\"\",\n subject=\"\", published=False, merge=self.readymerge)\n c, l = l.strip().rsplit(\"=date\", 1)\n tmp['date'] = \" \".join(\n c.split(\",\")[1].strip().split(\" \")[:3])\n tmp['commiter'], l = l.strip().rsplit(\"=commiter\", 1)\n tmp['name'], l = l.strip().rsplit(\"=branch\", 1)\n tmp['sha'], l = l.strip().rsplit(\"=sha\", 1)\n tmp['subject'], l = l.strip().rsplit(\"=message\", 1)\n\n tmp['published'] = tmp['name'] in pushed_branches\n tmp['republish'] = False\n if tmp['published']:\n sha = pushed_branches[tmp['name']]\n if sha != tmp['sha']:\n tmp['republish'] = True\n if text and text == tmp['name']:\n self.name = tmp['name']\n self.subject = tmp['subject']\n self.sha = tmp['sha']\n self.commiter = tmp['commiter']\n self.date = tmp['date']\n self.published = tmp['published']\n self.republish = tmp['republish']\n else:\n self.branches.append(tmp)\n os.chdir(settings.PROJECT_PATH)\n else:\n self.branches = []\n self.name = \"\"\n self.subject = \"\"\n self.sha = \"\"\n self.commiter = \"\"\n self.date = \"\"\n\n if callback:\n callback()\n\n return _wrapper", "def test_sort_bzr_latest(self):\n identifiers = [\"master\", \"1.0\", \"2.0\", \"1.1\", \"1.9\", \"1.10\"]\n self.project.repo_type = REPO_TYPE_BZR\n self.project.save()\n self.project.versions.get(slug=LATEST).delete()\n\n for identifier in identifiers:\n get(\n Version,\n project=self.project,\n type=BRANCH,\n identifier=identifier,\n verbose_name=identifier,\n slug=identifier,\n )\n\n versions = list(Version.objects.filter(project=self.project))\n self.assertEqual(\n [\"2.0\", \"1.10\", \"1.9\", 
\"1.1\", \"1.0\", \"master\"],\n [v.slug for v in sort_version_aware(versions)],\n )", "def step_create_unique_list_commits(self, project_info):\n del project_info\n hash_dict = {}\n commits_to_check = []\n for branch in self.branches_data:\n for commit in branch['commits']:\n if commit['hash'] == branch['commit_hash']:\n commits_to_check.append(commit)\n continue\n\n if commit['hash'] == branch['merge_target']['fork_point']:\n commits_to_check.append(commit)\n continue\n\n if commit['hash'] in hash_dict:\n continue\n\n if commit['hash'] in self.known_commit_hashes:\n continue\n\n self.unique_commmits.append(commit)\n hash_dict[commit['hash']] = commit['hash']\n\n # All commits directly pointed by a Branch will be allways feeded\n for commit in commits_to_check:\n if commit['hash'] in hash_dict:\n continue\n\n self.unique_commmits.append(commit)\n hash_dict[commit['hash']] = commit['hash']\n\n return True", "def clean_branches(self):\n git = which(\"git\")\n for branch in git.collect(\"branch --merged | grep -v '*' | grep -v master\"):\n git(\"branch -d %s\" % branch)", "def requeue_all_repositories():\n for repository in Repository.objects.filter(first_fetch_done=True):\n # check if we can have a token for this repository\n if repository.private and not Token.get_one_for_repository(repository.pk, permission='pull', available=False):\n maintenance_logger.info('(no token for private repository %s)', repository.full_name)\n continue\n # we can have a token so we add the jobs\n CheckRepositoryEvents.add_job(repository.id)\n CheckRepositoryHook.add_job(repository.id, delayed_for=30)\n FetchForUpdate.add_job(repository.id)", "def checkout_branches(args):\n\n ensure_tracking_branches([])\n if check_dirty([]) and '-f' not in args:\n raise Exception(\"Cannot checkout new branches with dirty projects.\")\n \n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n print >>sys.stderr, \"Checking out tracking branch in project: %s\" % name\n repo = GitRepo(workdir_for_project(project))\n # Check that sucker out\n repo.check_command([\"checkout\", project.tracking_branch])", "def reap_broken_repos():\n token = os.environ.get('GITHUB_TOKEN', None)\n if token is None:\n print('MISSING GITHUB_TOKEN')\n return\n\n # Do graphql nonsense\n query = '''\n query{\n organization(login:\"os3224\"){\n repositories(first:100,orderBy:{field:CREATED_AT,direction:DESC}){\n nodes{\n ref(qualifiedName:\"master\") {\n target {\n ... 
on Commit {\n history(first: 20) {\n edges { node { oid } }\n }\n }\n }\n }\n name \n url\n }\n }\n }\n }\n '''\n url = 'https://api.github.com/graphql'\n json = {'query': query}\n headers = {'Authorization': 'token %s' % token}\n\n # Make the graph request over http\n try:\n r = requests.post(url=url, json=json, headers=headers)\n data = r.json()['data']\n organization = data['organization']\n repositories = organization['repositories']['nodes']\n except Exception as e:\n print(traceback.format_exc())\n print(f'Request to github api Failed {e}')\n return\n\n # Running map of unique_code -> assignment objects\n assignments = dict()\n\n # Parse out repo name and url from graphql response\n repos = map(lambda node: (node['name'], node['url'], node['ref']), repositories)\n for repo_name, repo_url, ref in repos:\n assignment = None\n\n # Try to get the assignment object from running map\n for code in repo_name.split('-'):\n assignment = assignments.get(code, None)\n\n # If not in the map, then try to get from the database\n if assignment is None:\n assignment = Assignment.query.filter(\n Assignment.unique_code.in_(repo_name.split('-'))\n ).first()\n\n if assignment is not None:\n assignments[assignment.unique_code] = assignment\n\n # If not in database or map, then eject\n if assignment is None:\n print(f'Could not find assignment for {repo_name}')\n continue\n\n # Guess github username, then create the repo if it doesn't yet exist\n user, github_username = guess_github_username(assignment, repo_name)\n repo = check_repo(assignment, repo_url, github_username, user)\n\n if user is None:\n continue\n\n # Check for broken submissions\n submissions = []\n for submission in Submission.query.filter(Submission.assignment_repo_id == repo.id).all():\n if submission is None:\n continue\n if submission.owner_id != user.id:\n print(f'found broken submission {submission.id}')\n submission.owner_id = repo.owner_id\n submissions.append(submission.id)\n db.session.commit()\n for sid in submissions:\n enqueue_autograde_pipeline(sid)\n\n # Check for missing submissions\n for commit in map(lambda x: x['node']['oid'], ref['target']['history']['edges']):\n submission = Submission.query.filter(\n Submission.commit == commit\n ).first()\n if submission is None:\n print(f'found missing submission {github_username} {commit}')\n submission = Submission(\n commit=commit,\n owner=user,\n assignment=assignment,\n repo=repo,\n state=\"Waiting for resources...\",\n )\n db.session.add(submission)\n db.session.commit()\n init_submission(submission)\n enqueue_autograde_pipeline(submission.id)\n\n r = AssignmentRepo.query.filter(AssignmentRepo.repo_url == repo_url).first()\n if r is not None:\n if r.owner_id != user.id:\n print(f'fixing broken repo owner {r.id}')\n r.owner_id = user.id\n submissions = []\n for submission in Submission.query.filter(\n Submission.assignment_repo_id == r.id\n ).all():\n submission.owner_id = user.id\n submissions.append(submission.id)\n\n db.session.commit()\n for sid in submissions:\n enqueue_autograde_pipeline(sid)\n\n if repo:\n print(f'checked repo: {repo_name} {github_username} {user} {repo.id}')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset this node's (and its children's) state to ready
def reset(self): self.state = EvaluationState.ready for child in self.children: if hasattr(child, "reset"): child.reset()
[ "def reset(self):\n # print \"Node \" + self.name_ + \" resetting.\"\n self.reset_self()\n for C in self.children_:\n C.reset()", "def reset_tree(self):\n self.root = None\n self.action = None\n self.dist_probability = None", "def reinitialize(self):\n # TODO: seems like a strange name...\n self.initialize()\n for c in self.get_children():\n c.reinitialize()", "def _reset_traversal_state(self):\n for n in self.nodes.values():\n n.reset_traversal_state()", "def reset(self):\n self._trees = []", "def clear(self):\n self.nodes = {}\n self.root = None", "def reset(self):\n\n self.color = COLORS['NODE']\n self.down_weight = 1\n self.up_weight = 1\n self.right_weight = 1\n self.left_weight = 1", "def reset_tree(self):\n for i in self.tree.get_children():\n self.tree.delete(i)", "def _reallocate(self):\n self._debug_tree(\"Re-alloc\")\n for node in self._iter_children():\n node._reallocate()", "def update(self, state: State):\n\n for child in self.root.children:\n if child.state == state:\n self.root = child\n break\n else:\n self.root = Monte_Carlo_Tree.Node(state)", "def reset(self):\n self.current_branch = self.root", "def reset_mcts(self, root_state: np.ndarray) -> None:\n self.mcts.root_node = None\n self.mcts.root_state = root_state", "def _initialize_children_if_not_initialized(self) -> None:\r\n if hasattr(self, '_children'):\r\n return\r\n self._children = Array([])", "def clear(self):\n self.children = []\n return self", "def reset(self):\r\n self.states = []", "def clear(self):\n self.root = None\n for leaf in self.leaves:\n leaf.p, leaf.sib, leaf.side = (None, ) * 3", "def reset_states(self) -> None:\n self._metric.reset_states()\n # for each child log\n for child in self.children_real_fake:\n child[0].reset_states()\n child[1].reset_states()", "def reset_tree(self):\r\n self.treeWidget.clear()\r\n self.treeWidget.hide()", "def clear(self):\n self.root = CobwebPlusNode()\n self.root.tree = self\n #self.attr_scales = {}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluates the node's (and its children's) state.
def evaluate(self, blackboard): success = EvaluationState.success state = success for child in self.children: state = child.__call__(blackboard) if state != success: break return state
[ "def evaluate(self):\n\n def backtrack():\n # Move Tree backward\n self.remove_from_trackers(self.tree.current_node)\n # remove the current node from its parent's valid options\n self.tree.current_node.parent_Node.remove_current_child()\n self.tree.backtrack_node()\n\n def forward():\n # Move Tree forward\n self.tree.forward_node()\n self.add_to_trackers(self.tree.current_node)\n\n if not self.tree.current_node.state.expanded:\n # Check constraints for Tree's current_Node\n result = self.constraint_check()\n\n # According to constraint results, either backtrack, or forward Tree's current_Node\n if result:\n # set this node to expanded before moving to the next\n self.tree.current_node.state.expanded = True\n # Expand current node by adding on children nodes\n self.check_adj()\n if len(self.tree.current_node.children) > 0:\n forward()\n else:\n # no options after check_adj, so we need to backtrack unless we are finished\n if not self.finished:\n backtrack()\n else:\n backtrack()\n else:\n if len(self.tree.current_node.children) > 0:\n forward()\n else:\n backtrack()", "def evaluate(self, state):\n abstract", "def evaluate(self, strategy, tree, state):\n if tree:\n state[0] = tree\n return True\n return False", "def test_get_node_state(self):\n pass", "def compute(self, *args, **kwargs):\n for node in self.evaluation_sequence:\n node.evaluate()", "def evaluate(self, blackboard):\n success = EvaluationState.success\n\n for child in self.children:\n state = child.__call__(blackboard)\n\n if state == success:\n return success\n\n return EvaluationState.failure", "def leafEvaluation(self, state):\n\n \"\"\"\n Use random generated values for now\n \"\"\"\n z = np.random.randint(2)\n v = random.uniform(0, 1) \n return (1-LAMBDA) * v + LAMBDA * z", "def evaluate(self, state):\n\n fitness = np.sum(state)\n self.num_evals += 1\n #print(self.num_evals)\n return fitness", "def sample_state(self):\n state_node = self.parent.perform(self, self.action)\n\n if state_node not in self.children:\n self.children[state_node] = state_node\n\n return self.children[state_node]", "def eval(self, variable_assignments):\n\n try:\n # Calculate values of children nodes\n children_results = [child.eval(variable_assignments) for child in self.children]\n\n # Apply function to children_results. 
* unpacks the list of results into\n # arguments to self.function.\n return self.function(*children_results)\n except ValueError as e:\n print(\"----------\\nWeird value error:\", e)\n print(\"Node causing it:\", self)\n raise", "def eval_heuristic(self,state):\n return 0", "def update(self, state: State):\n\n for child in self.root.children:\n if child.state == state:\n self.root = child\n break\n else:\n self.root = Monte_Carlo_Tree.Node(state)", "def solveOneStep(self):\n ### Student code goes here\n cs = self.currentState\n self.visited[cs] = True\n\n # If already in correct state then return True\n if self.currentState.state == self.victoryCondition:\n return True\n\n moves = self.gm.getMovables()\n if not cs.children:\n if moves: # Check if there is indeed moves\n for move in moves:\n self.gm.makeMove(move)\n ns = GameState(self.gm.getGameState(), cs.depth + 1, move)\n if ns not in self.visited:\n ns.parent = cs\n cs.children.append(ns)\n self.visited[ns] = False\n self.gm.reverseMove(move)\n\n # Make our Queue\n queue = [self.currentState]\n\n while self.currentState in self.visited:\n self.currentState = queue.pop(0)\n # Queue new\n for children in self.currentState.children:\n queue.append(children)\n if children not in self.visited:\n queue.append(children)\n self.visited[children] = True\n\n next_state = self.currentState\n temp_state = next_state\n\n # now we need to get to the current state from the node\n path = []\n while temp_state.parent:\n path.insert(0, temp_state.requiredMovable) # add to the path\n temp_state = temp_state.parent\n\n while self.currentState.parent:\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n\n # now we make the moves\n for move in path:\n self.gm.makeMove(move)\n\n self.currentState = next_state\n if self.currentState == self.victoryCondition:\n return True\n else:\n return False", "def test(self, node):\n\n return True", "def eval_logic(self, checkDict):\n result = True\n #gets individual evaluations from children\n passList = []\n for child in self.children:\n myVal = child.eval_comparison(checkDict)\n passList.append(child.eval_comparison(checkDict))\n\n #if only one child returns the only boolean available\n if(len(passList) == 1):\n result = passList[0]\n\n #TODO: Combine following cases possibly\n #print(passList)\n #gets resutl if only 2 simple logics\n elif(len(passList) == 2 and len(self.operators) == 1):\n\n result = self.operators[0](passList[0], passList[1])\n else:\n #combines all children logic using the operators\n firstCheck = True\n opIndex = 0\n for i in range(0,len(passList)):\n if(firstCheck):\n firstCheck = False\n result = self.operators[opIndex](passList[0], passList[1])\n i+=1\n else:\n result = self.operators[opIndex](result,passList[i])\n opIndex += 1\n \"\"\"\n print('----------------------')\n print(result)\n \"\"\"\n return result", "def test_get_node_state_readonly(self):\n pass", "def eval_action(self, state):\n pass", "def score_tree(self, fn: TreeScoring = def_tree_fn()) -> bool:\n\t\t#if self.root.label == \"R\" or self.is_leaf():\n\t\t\t#print(\"scoring the %s tree (%s) : \" % (self.root.id, self.root.label))\n\t\t\t#print(\"----------------\")\n\t\t\t#print(\"tree \" + self.root.id + \" content : \" + str(len(self.enumerate_nodes())) + \" elements\")\n\t\t\t#for e in self.enumerate_nodes():\n\t\t\t#\tif self.is_leaf():\n\t\t\t#\t\tprint(self)\n\t\t\t#\telse :\n\t\t\t#\t\tprint(hashlib.md5(str(e).encode('utf-8')).hexdigest())\n\t\t\n\t\tstatuses = []\n\t\tif 
self.is_leaf():\n\t\t\tself.meta['score'] = fn(self)\n\t\t\ttrace(\"score[ \" + str(self) + \" ] = \" + str(fn(self)), 3)\n\t\t\treturn True\n\t\telse :\n\t\t\tfor (child, modality) in self.children:\n\t\t\t\tstatuses.append(child.score_tree(fn))\n\t\t\tself.meta['score'] = fn(self)\n\t\t\ttrace(\"score[ \" + str(self) + \" ] = \" + str(fn(self)), 3)\n\t\t\treturn not False in statuses", "def check() -> dict:\n resp = dict()\n for node in nodes.list_of_nodes:\n state = node.check_state(node.path)\n resp[node.name] = state\n return resp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluates the node's (and its children's) state. Returns success if any node succeeds, else failure.
def evaluate(self, blackboard): success = EvaluationState.success for child in self.children: state = child.__call__(blackboard) if state == success: return success return EvaluationState.failure
[ "def evaluate(self, blackboard):\n success = EvaluationState.success\n\n state = success\n for child in self.children:\n state = child.__call__(blackboard)\n\n if state != success:\n break\n\n return state", "def evaluate(self):\n\n def backtrack():\n # Move Tree backward\n self.remove_from_trackers(self.tree.current_node)\n # remove the current node from its parent's valid options\n self.tree.current_node.parent_Node.remove_current_child()\n self.tree.backtrack_node()\n\n def forward():\n # Move Tree forward\n self.tree.forward_node()\n self.add_to_trackers(self.tree.current_node)\n\n if not self.tree.current_node.state.expanded:\n # Check constraints for Tree's current_Node\n result = self.constraint_check()\n\n # According to constraint results, either backtrack, or forward Tree's current_Node\n if result:\n # set this node to expanded before moving to the next\n self.tree.current_node.state.expanded = True\n # Expand current node by adding on children nodes\n self.check_adj()\n if len(self.tree.current_node.children) > 0:\n forward()\n else:\n # no options after check_adj, so we need to backtrack unless we are finished\n if not self.finished:\n backtrack()\n else:\n backtrack()\n else:\n if len(self.tree.current_node.children) > 0:\n forward()\n else:\n backtrack()", "def evaluate(self, strategy, tree, state):\n if tree:\n state[0] = tree\n return True\n return False", "def do_check(self):\n res = self.entity.do_check(self.context)\n if res:\n return self.RES_OK, 'Node check succeeded.'\n else:\n return self.RES_ERROR, 'Node check failed.'", "def check() -> dict:\n resp = dict()\n for node in nodes.list_of_nodes:\n state = node.check_state(node.path)\n resp[node.name] = state\n return resp", "def score_tree(self, fn: TreeScoring = def_tree_fn()) -> bool:\n\t\t#if self.root.label == \"R\" or self.is_leaf():\n\t\t\t#print(\"scoring the %s tree (%s) : \" % (self.root.id, self.root.label))\n\t\t\t#print(\"----------------\")\n\t\t\t#print(\"tree \" + self.root.id + \" content : \" + str(len(self.enumerate_nodes())) + \" elements\")\n\t\t\t#for e in self.enumerate_nodes():\n\t\t\t#\tif self.is_leaf():\n\t\t\t#\t\tprint(self)\n\t\t\t#\telse :\n\t\t\t#\t\tprint(hashlib.md5(str(e).encode('utf-8')).hexdigest())\n\t\t\n\t\tstatuses = []\n\t\tif self.is_leaf():\n\t\t\tself.meta['score'] = fn(self)\n\t\t\ttrace(\"score[ \" + str(self) + \" ] = \" + str(fn(self)), 3)\n\t\t\treturn True\n\t\telse :\n\t\t\tfor (child, modality) in self.children:\n\t\t\t\tstatuses.append(child.score_tree(fn))\n\t\t\tself.meta['score'] = fn(self)\n\t\t\ttrace(\"score[ \" + str(self) + \" ] = \" + str(fn(self)), 3)\n\t\t\treturn not False in statuses", "def evaluate(self, state):\n abstract", "def check(self, tree):\n pass", "def test(self, node):\n\n return True", "def test_get_node_state(self):\n pass", "def check(self, tree):\r\n modified = True\r\n while modified:\r\n modified = False\r\n for node in tree.allchildren():\r\n #if node.__class__ == Article:\r\n # log.info(\"checking article:\", node.caption.encode('utf-8'))\r\n for r,cb in self.rules:\r\n passed, errnode = r.test(node)\r\n if not passed and cb:\r\n if cb(r, errnode or node):\r\n modified = True\r\n break\r\n if modified:\r\n break", "def verifyNode():\n return verifyReturnNode() and verifyBreakContinueNode() and verifyDefault()", "def eval_tree(tree: GPTree, dataset: Iterable) -> list:\n results = []\n for data in zip(*dataset):\n try:\n output = tree.compute_tree(data[0])\n results.append(\n 0 if output == data[1] else 1\n ) # right or wrong, but no 
error.\n except Exception:\n results.append(2) # Fails to run.\n\n return results", "def all_nodes_seen( root ):\n if root._Seen:\n a, b = True, True\n if root._Left: a = all_nodes_seen( root._Left )\n if root._Right: b = all_nodes_seen( root._Right )\n return a and b\n else: \n return False", "def compute(self, *args, **kwargs):\n for node in self.evaluation_sequence:\n node.evaluate()", "def eval_logic(self, checkDict):\n result = True\n #gets individual evaluations from children\n passList = []\n for child in self.children:\n myVal = child.eval_comparison(checkDict)\n passList.append(child.eval_comparison(checkDict))\n\n #if only one child returns the only boolean available\n if(len(passList) == 1):\n result = passList[0]\n\n #TODO: Combine following cases possibly\n #print(passList)\n #gets resutl if only 2 simple logics\n elif(len(passList) == 2 and len(self.operators) == 1):\n\n result = self.operators[0](passList[0], passList[1])\n else:\n #combines all children logic using the operators\n firstCheck = True\n opIndex = 0\n for i in range(0,len(passList)):\n if(firstCheck):\n firstCheck = False\n result = self.operators[opIndex](passList[0], passList[1])\n i+=1\n else:\n result = self.operators[opIndex](result,passList[i])\n opIndex += 1\n \"\"\"\n print('----------------------')\n print(result)\n \"\"\"\n return result", "def visit(self, node):\n return self._evaluate_binary_expression(node, self.BOOLEAN_OPERATORS)", "def isCompleteTree(self, root):\n _, ans, _ = self.dfs(root, 0)\n return ans", "def test_bool_descendants(self):\n #self.t3 = DndParser('(((a,b,c),(d)),(e,f))', UniFracTreeNode)\n id_, child = index_tree(self.t3)\n a = zeros((11,3)) + 99 #fill with junk\n bindings = bind_to_array(child, a)\n #load in leaf envs\n a[0] = a[1] = a[2] = a[7] = [0,1,0]\n a[3] = [1,0,0]\n a[6] = [0,0,1]\n bool_descendants(bindings)\n self.assertEqual(a, \\\n array([[0,1,0],[0,1,0],[0,1,0],[1,0,0],[0,1,0],[1,0,0],\\\n [0,0,1],[0,1,0],[1,1,0],[0,1,1],[1,1,1]])\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
imports 'catalog' and creates a pandas.DataFrame containing the columns specified in 'params'. 'catalog' is expected to be in .csv format.
def import_data(catalog='xmatch_TGAS_Simbad.csv', params=None, nrows=None, delimiter=','):
    print "Loading %s and creating DataFrame.." % catalog
    df_imported = pd.read_csv(catalog, delimiter=delimiter, header=0, usecols=params, nrows=nrows)
    print "..Done\n----------"
    return df_imported
[ "def load_catalog(self):\n self.catalog = pd.read_csv(self.catalog_path, \n index_col=0, parse_dates=True)\n self.unique_years = self.catalog.index.year.unique()\n return", "def get_data() -> pd.DataFrame:\n try:\n return pd.read_csv('library.csv')\n except FileNotFoundError as ex:\n return pd.DataFrame(columns=['name', 'author', 'genre', 'copies_total', 'copies_avail', 'reads'])", "def newDataFrame(title):\r\n return pd.read_csv(title)", "def _load_catalog(self):\n self.catalog = pd.read_csv(self.catalog_path)\n nrow, ncol = self.catalog.shape\n logger.info(\"Loaded SNRs catalog data from: {0}\".format(\n self.catalog_path))\n logger.info(\"SNRs catalog data: {0} objects, {1} columns\".format(\n nrow, ncol))\n # Set the units for columns\n self.units = {\n \"glon\": au.deg,\n \"glat\": au.deg,\n \"size\": au.arcmin,\n \"flux\": au.Jy,\n }\n # The flux densities are given at 1 GHz\n self.catalog_flux_freq = 1.0 * au.GHz", "def load(self) -> pd.DataFrame:\n try:\n return pd.read_csv(self._LOCAL_FILE_PATH,\n index_col=0,\n encoding='utf-8')\n except Exception as e:\n self.logger.error(msg=f'Something went wrong when loading data from csv file: {self._LOCAL_FILE_PATH}',\n tags=[self._SERVICE_TAG, 'load', 'csv', 'error'])\n raise Exception(f'Something went wrong when proceeding'\n f' to productions information loading: {e}')", "def load_catalog(path=path_to_cat):\n catalog = pd.read_csv(path,delim_whitespace=True,usecols=(3,6), names = (\"qso_name\",\"redshift\"))\n return catalog[\"qso_name\"],catalog[\"redshift\"]", "def pd_load_acquisition_csv(acquisition_path, **kwargs):\n\n columns = [\n 'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',\n 'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',\n 'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',\n 'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',\n 'relocation_mortgage_indicator', 'year_quarter'\n ]\n dtypes = {\n 'loan_id': np.int64,\n 'orig_channel': CategoricalDtype(['B', 'C', 'R']),\n 'seller_name': str,\n 'orig_interest_rate': np.float64,\n 'orig_upb': np.int64,\n 'orig_loan_term': np.int64,\n 'orig_date': str,\n 'first_pay_date': str,\n 'orig_ltv': np.float64,\n 'orig_cltv': np.float64,\n 'num_borrowers': np.float64,\n 'dti': np.float64,\n 'borrower_credit_score': np.float64,\n 'first_home_buyer': CategoricalDtype(['N', 'U', 'Y']),\n 'loan_purpose': CategoricalDtype(['C', 'P', 'R', 'U']),\n 'property_type': CategoricalDtype(['CO', 'CP', 'MH', 'PU', 'SF']),\n 'num_units': np.int64,\n 'occupancy_status': CategoricalDtype(['I', 'P', 'S']),\n 'property_state': CategoricalDtype(\n ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'HI',\n 'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN',\n 'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH',\n 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VI',\n 'VT', 'WA', 'WI', 'WV', 'WY']),\n 'zip': np.int64,\n 'mortgage_insurance_percent': np.float64,\n 'product_type': CategoricalDtype(['FRM']),\n 'coborrow_credit_score': np.float64,\n 'mortgage_insurance_type': np.float64,\n 'relocation_mortgage_indicator': CategoricalDtype(['N', 'Y']),\n 'year_quarter': np.int64\n }\n\n a = pd.read_csv(acquisition_path, names=columns, delimiter='|', dtype=dtypes, parse_dates=[6,7], error_bad_lines=True, warn_bad_lines=True, 
na_filter=True)\n return a", "def pd_load_acquisition_csv(acquisition_path, **kwargs):\n\n cols = [\n 'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',\n 'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',\n 'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',\n 'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',\n 'relocation_mortgage_indicator'\n ]\n\n dtypes = {\n \"loan_id\": np.int64,\n \"monthly_reporting_period\": str,\n \"servicer\": str,\n \"interest_rate\": np.float64,\n \"current_actual_upb\": np.float64,\n \"loan_age\": np.float64,\n \"remaining_months_to_legal_maturity\": np.float64,\n \"adj_remaining_months_to_maturity\": np.float64,\n \"maturity_date\": str,\n \"msa\": np.float64,\n \"current_loan_delinquency_status\": np.int32,\n \"mod_flag\": CategoricalDtype(['N', 'Y']),\n \"zero_balance_code\": CategoricalDtype(['01', '02', '06', '09', '03', '15', '16']),\n \"zero_balance_effective_date\": str,\n \"last_paid_installment_date\": str,\n \"foreclosed_after\": str,\n \"disposition_date\": str,\n \"foreclosure_costs\": np.float64,\n \"prop_preservation_and_repair_costs\": np.float64,\n \"asset_recovery_costs\": np.float64,\n \"misc_holding_expenses\": np.float64,\n \"holding_taxes\": np.float64,\n \"net_sale_proceeds\": np.float64,\n \"credit_enhancement_proceeds\": np.float64,\n \"repurchase_make_whole_proceeds\": np.float64,\n \"other_foreclosure_proceeds\": np.float64,\n \"non_interest_bearing_upb\": np.float64,\n \"principal_forgiveness_upb\": np.float64,\n \"repurchase_make_whole_proceeds_flag\": CategoricalDtype(['N', 'Y']),\n \"foreclosure_principal_write_off_amount\": np.float64,\n \"servicing_activity_indicator\": CategoricalDtype(['N', 'Y']),\n }\n print(acquisition_path)\n\n #return pd.read_csv(acquisition_path, names=cols, delimiter='|', dtype=dtypes, parse_dates=[6,7])\n return pd.read_csv('acq.csv', names=cols, delimiter='|', dtype=dtypes, parse_dates=[6,7])", "def read_catalog(self, verbose=True):\n if verbose:\n if hasattr(self.param['CATALOG_FILE'], 'colnames'):\n print('CATALOG_FILE is a table')\n else:\n print('Read CATALOG_FILE:', self.param['CATALOG_FILE'])\n \n if hasattr(self.param['CATALOG_FILE'], 'colnames'):\n self.cat = self.param['CATALOG_FILE']\n elif 'fits' in self.param['CATALOG_FILE'].lower():\n self.cat = Table.read(self.param['CATALOG_FILE'], format='fits')\n elif self.param['CATALOG_FILE'].lower().endswith('.csv'):\n self.cat = Table.read(self.param['CATALOG_FILE'], format='csv') \n else:\n self.cat = Table.read(self.param['CATALOG_FILE'], \n format=self.param['CATALOG_FORMAT'])\n \n if verbose:\n print(f' >>> NOBJ = {len(self.cat)}') \n \n # self.NOBJ = len(self.cat)\n self.prior_mag_cat = np.zeros(self.NOBJ)-1\n \n #np.save(self.param['FILTERS_RES']+'.npy', [all_filters])\n\n self.filters = []\n self.flux_columns = []\n self.err_columns = []\n self.f_numbers = []\n \n # Some specific columns\n self.fixed_cols = {'id':'id', \n 'z_spec':'z_spec', \n 'ra':'ra',\n 'dec':'dec', \n 'x':'x_image', \n 'y':'y_image'}\n \n required_cols = ['id']\n warn_cols = ['z_spec','ra','dec']\n \n for k in ['id', 'z_spec', 'ra', 'dec', 'x', 'y']:\n if k in self.cat.colnames:\n self.fixed_cols[k] = k\n else:\n new = None\n for ke in self.translate.trans:\n if self.translate.trans[ke] == k:\n new = ke\n \n if (new is None) | (new not in self.cat.colnames):\n 
col_options = {'id':['ID','OBJID','NUMBER'], \n 'ra':['RA', 'X_WORLD', 'ALPHA_J2000','ALPHA'],\n 'dec':['DEC', 'Y_WORLD', 'DELTA_J2000', \n 'DELTA'],\n 'z_spec':['Z_SPEC','ZSPEC','ZSP'],\n 'x':['X','X_IMAGE'], \n 'y':['Y','Y_IMAGE']}\n \n if k in col_options:\n for ke in col_options[k]:\n for str_method in [str.upper, str.lower, \n str.title]:\n if str_method(ke) in self.cat.colnames:\n new = str_method(ke)\n break\n \n if (new is None) | (new not in self.cat.colnames): \n if (k in required_cols):\n msg = (f'Catalog or translate_file must have a {k} ' +\n f'column')\n raise ValueError(msg)\n \n elif k in warn_cols:\n msg = (f'No {k} column found in catalog. Some ' \n 'functionality might not be available.')\n warnings.warn(msg, AstropyUserWarning)\n \n self.fixed_cols[k] = new\n \n for k in self.cat.colnames:\n if k.startswith('F'):\n try:\n f_number = int(k[1:])\n except:\n continue\n \n ke = k.replace('F','E')\n if ke not in self.cat.colnames:\n continue\n \n self.filters.append(self.RES[f_number])\n self.flux_columns.append(k)\n self.err_columns.append(ke)\n self.f_numbers.append(f_number)\n msg = '{0} {1} ({2:3d}): {3}'\n print(msg.format(k, ke, f_number, \n self.filters[-1].name.split()[0]))\n \n # Apply translation \n for k in self.translate.trans:\n fcol = self.translate.trans[k]\n if fcol.startswith('F') & ('FTOT' not in fcol):\n try:\n f_number = int(fcol[1:])\n except:\n # Has character at the end\n f_number = int(fcol[1:-1])\n \n for ke in self.translate.trans:\n #if self.translate.trans[ke] == 'E{0}'.format(f_number):\n if self.translate.trans[ke] == fcol.replace('F','E'):\n break\n \n if (k in self.cat.colnames) & (ke in self.cat.colnames):\n self.filters.append(self.RES[f_number])\n self.flux_columns.append(k)\n self.err_columns.append(ke)\n self.f_numbers.append(f_number)\n msg = '{0} {1} ({2:3d}): {3}'\n print(msg.format(k, ke, f_number, \n self.filters[-1].name.split()[0]))\n \n self.f_numbers = np.array(self.f_numbers)\n if len(self.f_numbers) == 0:\n msg = ('No valid filters found in {0}! Check that all flux ' +\n 'and uncertainty columns are specified / translated ' + \n 'correctly.')\n \n raise ValueError(msg.format(self.param['CATALOG_FILE']))\n \n # Initialize flux arrays\n self.fnu = np.zeros((self.NOBJ, self.NFILT), dtype=self.ARRAY_DTYPE)\n efnu = np.zeros((self.NOBJ, self.NFILT), dtype=self.ARRAY_DTYPE)\n self.spatial_offset = None\n \n self.fmodel = self.fnu*0.\n self.efmodel = self.fnu*0.\n \n # MW extinction correction: dered = fnu/self.ext_corr\n ext_mag = [f.extinction_correction(self.MW_EBV) \n for f in self.filters]\n self.ext_corr = 10**(0.4*np.array(ext_mag))\n\n # Does catalog already have extinction correction applied?\n # If so, then set an array to put fluxes back in reddened space\n if self.param.params['CAT_HAS_EXTCORR'] in utils.TRUE_VALUES:\n self.ext_redden = self.ext_corr\n else:\n self.ext_redden = np.ones(self.NFILT)\n \n #print(self.flux_columns, self.fnu.shape)\n \n for i in range(self.NFILT):\n self.fnu[:,i] = self.cat[self.flux_columns[i]]*1\n efnu[:,i] = self.cat[self.err_columns[i]]*1\n if self.err_columns[i] in self.translate.error:\n #print('x', efnu[:,i].shape, self.translate.error[self.err_columns[i]], self.err_columns[i])\n efnu[:,i] *= self.translate.error[self.err_columns[i]]\n \n if self.param['MAGNITUDES'] in utils.TRUE_VALUES:\n warnings.warn(f'Catalog photometry is given in (AB) magnitudes.' 
+ \n 'It is **strongly** recommended to measure ' + \n 'photometry in linear flux density units!',\n AstropyUserWarning)\n \n neg_values = (self.fnu < 0) | (efnu < 0)\n \n with warnings.catch_warnings():\n warnings.simplefilter('ignore', AstropyWarning)\n \n fluxes = 10**(-0.4*(self.fnu - self.param['PRIOR_ABZP']))\n unc = np.log(10)/2.5 * efnu * fluxes\n \n fluxes[neg_values] = self.fnu[neg_values]\n unc[neg_values] = efnu[neg_values]\n \n self.fnu = fluxes\n efnu = unc\n \n self.efnu_orig = efnu*1.\n \n self.set_sys_err(positive=True)\n \n self.set_ok_data()\n\n self.lc_zmax = self.zgrid.max()\n\n self.clip_wavelength = None", "def get_data() -> pd.DataFrame:\n print(\"Retrieving case study data from: {}\".format(CASE_STUDY_CSV_DIR))\n return pd.read_csv(CASE_STUDY_CSV_DIR, delimiter=',')", "def _csv_engine(filename, node):\n sep = node.get(\"sep\", \",\")\n header = node.get(\"header\", 0)\n logger.debug(\n \"Parsing CSV '{}'. sep={}, header={}.\".format(filename, sep, header)\n )\n index = node.get(\"index\")\n encoding = node.get(\"encoding\")\n if not index:\n raise InvalidConfig(\"An 'index' column is required. It should \"\n \"be the sample id column.\")\n\n df = pd.read_csv(filename, sep=sep, header=header, encoding=encoding)\n df.set_index(index, verify_integrity=True, inplace=True, drop=True)\n df.index = df.index.astype(str)\n\n return df", "def _create_dataframe(*fields):\n # Create DataFrame from concatenating price columns\n dataframe = pd.concat(fields, axis=1)\n\n return dataframe", "def retrieve_meta_data():\r\n global crm_colnames\r\n\r\n df = pd.read_csv(settings['data_path']+settings['source_data'],\r\n compression='gzip', header=0, sep=',', quotechar='\"',\r\n nrows=10)\r\n crm_colnames = [\r\n \"GivenName\",\"Surname\",\"Gender\",\"NameSet\",\"Title\",\"StreetAddress\",\"City\",\"State\",\"StateFull\",\r\n \"ZipCode\",\"Country\",\"EmailAddress\",\"Username\",\"Password\",\"TelephoneNumber\",\"TelephoneCountryCode\",\r\n \"Birthday\",\"Age\",\"NationalID\",\"Color\",\"Occupation\",\"Company\",\"BloodType\",\"Kilograms\",\"Centimeters\"\r\n ]\r\n return df", "def createDataFrame(path):\n df = pd.read_csv(path)\n df = df[['planet_name', 'planet_mass', 'orbital_radius', 'host_name', \n 'spectral_type', 'stellar_age', 'stellar_radius', \n 'stellar_mass', 'stellar_temperature', 'stellar_luminosity', \n 'optical_magnitude', 'near_ir_magnitude', \n 'stellar_surface_gravity', 'stellar_metallicity']]\n \n df = df.dropna(subset=['spectral_type'])\n df.spectral_type = df.spectral_type.str[0:1]\n df.spectral_type = df.spectral_type.str.strip()\n classification = np.array(['O','B','A','F','G','K','M'])\n df = df[df.spectral_type.isin(classification)]\n df.insert(4, \"amount_of_planets\", 0)\n df.amount_of_planets = df.groupby('host_name')['host_name'].transform('count')\n \n df.planet_mass = np.log10(df.planet_mass)\n df.orbital_radius = np.log10(df.orbital_radius)\n \n df = df.sort_values(by=['host_name'])\n df = df.reset_index(drop=True) \n \n return df", "def _read_csv(self) -> pd.DataFrame:\n\n return pd.concat(\n [\n pd.read_csv(f, usecols=[1, 2, 3, 4, 5])\n for f in self.temp_path.iterdir()\n if f.name.endswith(\".csv\")\n ]\n )", "def load_data(columns=['precTot', 'wetdayTot', 'drizzle']):\n df = pd.concat([load_one(fp, columns=columns) for fp in filepath.values()],\n axis=1, keys=filepath.keys())\n return df", "def prepare_data(base: str) -> pd.DataFrame:\n # read zip\n try:\n with zipfile.ZipFile(base + \"/hotels.zip\") as myzip:\n files = [\n item.filename\n for item in 
myzip.infolist()\n if item.filename.endswith(\".csv\")\n ]\n if not files:\n quit()\n df = pd.concat([pd.read_csv(myzip.open(file)) for file in files])\n except FileNotFoundError:\n quit()\n\n # preprocess dataset\n try:\n df[\"Latitude\"] = pd.to_numeric(df[\"Latitude\"], errors=\"coerce\")\n df[\"Longitude\"] = pd.to_numeric(df[\"Longitude\"], errors=\"coerce\")\n df = df[(abs(df[\"Latitude\"]) < 90) & (abs(df[\"Longitude\"]) < 180)]\n\n df[\"Address\"] = (\n df[\"Latitude\"].astype(\"str\") + \", \" + df[\"Longitude\"].astype(\"str\")\n )\n df.reset_index(inplace=True)\n df.drop([\"Id\", \"index\"], axis=1, inplace=True)\n except KeyError:\n quit()\n return df # [300:310] # SHORTENED!!!!", "def _load_metadata_df(meta_csv_file: Union[str, Path]) -> pd.DataFrame:\n\n metadata_df = pd.read_csv(\n meta_csv_file,\n index_col=0,\n header=[0, 1],\n low_memory=False,\n engine=\"c\")\n\n # parse date cols as datetime\n for col in [\"timerange_from\", \"timerange_to\"]:\n metadata_df[col, \"val\"] = pd.to_datetime(metadata_df[col, \"val\"])\n\n lvars = []\n for c in metadata_df.columns:\n if c[0] not in lvars:\n lvars.append(c[0])\n\n # we assume triples for all vars except these, so they must be at the end\n assert lvars[-2:] == [\n \"file_path\",\n \"file_type\",\n ], \"file_type and file_path must be at the end.\"\n\n metadata_df.index.name = \"idx\"\n\n return metadata_df", "def read_csv():\n df = pandas.read_csv(global_vars.path, sep=';')\n return df" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open a table fits file and convert it to a pandas dataframe.
def import_fits(fitsfile='tgasptyc.fits'):
    if isfile(fitsfile):
        print "Opening %s.." % fitsfile
        table = Table.read(fitsfile)
        pandas_df = table.to_pandas()
    else:
        print "%s not found. Exiting." % fitsfile
        sys.exit()
    print "Converting table to pandas_df.."
    print "..Done"
    return pandas_df
[ "def df_from_fits(filename, i=1):\n return pd.DataFrame.from_records(fitsio.FITS(filename)[i].read().byteswap().newbyteorder())", "def _parse_fits(filepath):\n header, d = rhessi.parse_obssumm_file(filepath)\n data = DataFrame(d['data'], columns=d['labels'], index=d['time'])\n\n return header, data", "def read_fits(dir_fits, file_name, fits_cols):\n file_name = os.path.join(dir_fits, file_name)\n if not os.path.isfile(file_name):\n print('File not found: {}'.format(file_name))\n MPI.COMM_WORLD.Abort()\n\n fits = fitsio.FITS(file_name)\n\n #http://stackoverflow.com/questions/30283836/\n # creating-pandas-dataframe-from-numpy-array-leads-to-strange-errors\n fits_read = fits[1].read(columns= fits_cols)\n fits_to_df = {col:fits_read[col].byteswap().newbyteorder() for col in fits_cols}\n df = pd.DataFrame(fits_to_df)\n return df", "def read_table(table_f):\n df = pandas.read_csv(table_f, sep=\"\\t\", header=0, index_col=0)\n\n # convert potentially numerical row names into strings\n df.index = [str(i) for i in df.index]\n\n return df", "def load_file_to_dataframe(self, file_path: str) -> pd.DataFrame:\n return pd.read_csv(file_path, sep=\"\\t\")", "def read_table(\n filename,\n dataset_class=dataset.pandas_dataset.PandasDataset,\n expectation_suite=None,\n profiler=None,\n *args, **kwargs\n):\n df = pd.read_table(filename, *args, **kwargs)\n df = _convert_to_dataset_class(\n df, dataset_class, expectation_suite, profiler)\n return df", "def from_astropy_table(table):\n from vaex.astro.astropy_table import DatasetAstropyTable\n ds = DatasetAstropyTable(table=table)\n return vaex.dataframe.DataFrameLocal(ds)", "def read_table(file_name: Union[str, Path], **kwargs):\n\tfile_name = Path(file_name)\n\textension = file_name.suffix\n\tdefault_args = {\n\t\t'.csv': {'delimiter': ','},\n\t\t'.tsv': {'delimiter': '\\t'}\n\t}\n\n\t# arguments = self._cleanArguments(extension, arguments)\n\tfile_name = str(file_name.absolute())\n\tif extension in {'.xls', '.xlsx', '.xlsm'}: # .xlsm is not a typo.\n\n\t\tdf = pandas.read_excel(file_name, **kwargs)\n\telif extension in {'.csv', '.tsv', '.fsv', '.txt'}:\n\t\targuments = {**default_args.get(extension), **kwargs}\n\t\tif 'sheetname' in arguments: arguments.pop('sheetname')\n\t\tdf = pandas.read_table(file_name, **arguments)\n\telif extension == '.pkl':\n\t\tdf = pandas.read_pickle(file_name)\n\telse:\n\t\traise NameError(\"{} does not have a valid extension!\".format(file_name))\n\treturn df", "def read_pipe_table_to_pandas(filename):\n\n astropy_data = astropy.io.ascii.read(filename)\n data_stream = StringIO()\n astropy_data[2:].write(data_stream, format='ascii.basic', delimiter='|')\n data_stream.seek(0)\n return pandas.read_csv(data_stream,\n comment='#',\n sep='|',\n skipinitialspace=True)", "def _pdread2astrotable(csvgzdir):\n df = pd.read_csv(csvgzdir)\n tb = Table.from_pandas(df)\n return tb", "def read_tables():\n table = pd.read_excel(os.path.join(cwd, FidelityTable))\n return table", "def parse(self):\n if self.filename.endswith('.gz'):\n compression = 'gzip'\n elif self.filename.endswith('.bz2'):\n compression = 'bz2'\n else:\n compression = None\n df = pd.read_table(self.filename, compression=compression)\n\n # drop empty column from extra tab\n df.dropna(axis=1, how='all', inplace=True)\n return df", "def data_from_fits(fits_file):\n hdul = fits.open(fits_file)\n data = hdul[0].data\n return data", "def as_data_frame(self) -> pd.DataFrame:\n rows, cols = len(self.flows), len(self.sectors)\n log.info('convert satellite table to a %sx%s data 
frame', rows, cols)\n data = np.zeros((rows, cols), dtype=np.float64)\n for i, row in self.entries.items():\n for j, entry in row.items():\n data[i, j] = entry.value\n index = [f.key for f in self.flows]\n columns = [s.key for s in self.sectors]\n return pd.DataFrame(data=data, index=index, columns=columns)", "def dataToDataFrame(inFile):\n #df = pd.read_csv(inFile, header=[0,2])\n df = pd.read_csv(inFile)\n return df", "def create_from_fits_file(cls, fits_file, hdu_name='nonlin'):\n hdulist = fits.open(fits_file)\n table = hdulist[hdu_name]\n nl = cls.create_from_table(table)\n hdulist.close()\n return nl", "def run(self) -> pd.DataFrame:\n with open(self.file_path, 'r') as in_file:\n headers = in_file.readline()\n headers = headers.replace(\"\\n\", \"\")\n\n if ',' in headers:\n headers = headers.split(',')\n else:\n headers = headers.split()\n\n if headers == self.NORMAL_HEADERS:\n return self.normal_csv()\n else:\n return self.read_data_columns_to_rows()", "def read_fits_file(fits):\n hdulist = pyfits.open(fits)\n#\n#--- get column names\n#\n cols_in = hdulist[1].columns\n cols = cols_in.names\n#\n#--- get data\n#\n tbdata = hdulist[1].data\n\n hdulist.close()\n\n return [cols, tbdata]", "def read_hdf5(hdf5_filename):\n with tables.open_file(hdf5_filename) as fi:\n results = pandas.DataFrame.from_records(fi.root.summary.read())\n\n return results" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
creates a new column 'tycho2_id' in the tycho2 catalog. This is for comparison with the TGAS catalog.
def create_tycho_id(tycho2df):
    tycho2df['tycho2_id'] = tycho2df.TYC1.astype(str).str.cat(tycho2df.TYC2.astype(str), sep='-')\
        .str.cat(tycho2df.TYC3.astype(str), sep='-')
    tycho2df = tycho2df.rename(columns={'HIP': 'hip'})
    return tycho2df
[ "def l2_id(self, l2_id):\n self._l2_id = l2_id", "def l2_id(self):\n return self._l2_id", "def fill_column_2(self):\n self.report.loc[:,self.fields[\"2\"]] = \\\n int(self.data_bucket.get_shareclass_infos(\"type_tpt\"))", "def get_idx2id(self, id2idx = None):\n if id2idx is None:\n return {v:k for k, v in self.id2idx.items()}\n return {v:k for k, v in id2idx.items()}", "def trilha2(self, trilha2):\n self._trilha2 = trilha2", "def tag_id2(self, tag_id2):\n if self._configuration.client_side_validation and tag_id2 is None:\n raise ValueError(\"Invalid value for `tag_id2`, must not be `None`\") # noqa: E501\n\n self._tag_id2 = tag_id2", "def _getNewCatId(self):\n\n newCatId = COCO_PLUS.CAT_ID\n COCO_PLUS.CAT_ID += 1\n\n return newCatId", "def make_category_table_level2(category_level2_table, category_table):\n # Create a dict mapping 'category_level1_names' to 'category_level1_index'\n category_name2label_level2 = {}\n for item_level2 in category_level2_table.itertuples():\n category_name = item_level2[1]\n category_idx = item_level2[2]\n category_name2label_level2[category_name] = category_idx\n # Create a dict mapping 'category_id' to 'category_level1_index'\n category_id2label_level2 = {}\n for item in category_table.itertuples():\n category_id = item[0]\n category_idx = category_name2label_level2[item[2]]\n category_id2label_level2[category_id] = category_idx\n return category_id2label_level2", "def set_t2(self, t2):\n self.t2 = t2", "def add_column_into_source(self, tap_type, table, new_column):\n run_query_method = getattr(self, f'run_query_tap_{tap_type}')\n run_query_method(\n f'ALTER TABLE {table} ADD {new_column[\"name\"]} int'\n )\n run_query_method(\n f'UPDATE {table} set {new_column[\"name\"]}={new_column[\"value\"]} where 1=1'\n )", "def secondary_id(self, value):\n self._write(MX_SECONDARY_ID, value)", "def entity2(self):\n return Entity(int(self.Entity2[\"id\"]),self.key,data=self.Entity2)", "def catalog_id(self) -> str:\n return pulumi.get(self, \"catalog_id\")", "def catalog_id(self):\n return self._catalog_id", "def newid(self, target_table):\n self.new_id[target_table] += 1\n return self.new_id[target_table]", "def _output_imei_column(self):\n if self._generate_check_digit:\n imei_col_name = sql.Identifier('imei_norm_with_check_digit')\n else:\n imei_col_name = sql.Identifier('imei_norm')\n return imei_col_name", "def _get_country_id(self, code2):\n if not hasattr(self, '_country_codes'):\n self._country_codes = {}\n\n if code2 not in self._country_codes.keys():\n self._country_codes[code2] = Country.objects.get(code2=code2).pk\n return self._country_codes[code2]", "def OBJID(self):\n # No test on validity because `read_catalog` should have failed\n return self.cat[self.fixed_cols['id']]", "def get_catalog_id(self):\n return self._catalog_id" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
select data with relative parallax error less than 'cutoff' and add absolute magnitude columns for plotting. If catalog is not None, the cutoff on BV will not be applied (this ensures the initial variable stars DataFrame is not constrained in magnitudes).
def data_process(df_toprocess=None, cutoff=0.2, bv_cutoff=0.15, catalog=None):
    print "Selecting objects.."
    df_toprocess['sigma_pi/pi'] = df_toprocess.loc[:, 'parallax_error'].astype(float) / df_toprocess.loc[:, 'parallax']\
        .astype(float)
    print "..Done\nCutoff at relative parallax error of %s\n----------" % cutoff
    # only take objects with relative parallax error < cutoff
    df_toprocess = df_toprocess.loc[df_toprocess.loc[:, 'parallax'] / df_toprocess.loc[:, 'parallax_error'] > 1. / cutoff]
    print catalog
    if catalog is None:
        print "Replacing whitespace with nan"
        df_toprocess = df_toprocess.replace(' ', np.nan)  # some cells are ' ' instead of nan
        print "Converting BTmag and VTmag to floats.."
        df_toprocess.BTmag = df_toprocess.BTmag.astype(float)
        df_toprocess.VTmag = df_toprocess.VTmag.astype(float)
        # Some values are NaN:
        print "Removing objects with missing BT or VT measurements.."
        df_toprocess = df_toprocess[df_toprocess.BTmag.notnull()]
        df_toprocess = df_toprocess[df_toprocess.VTmag.notnull()]
        print "Computing B-V and M_V.."
        df_toprocess['B_V'] = df_toprocess.BTmag - df_toprocess.VTmag
        df_toprocess['M_V'] = df_toprocess.VTmag - 5. * (np.log10(1000. / df_toprocess.parallax) - 1.)
        print "Converting sigma BT and sigma VT to float.."
        df_toprocess.e_BTmag = df_toprocess.e_BTmag.astype(float)
        df_toprocess.e_VTmag = df_toprocess.e_VTmag.astype(float)
        print "Computing sigma B-V.."
        df_toprocess['e_B_V'] = np.sqrt(df_toprocess.e_BTmag.pow(2)+df_toprocess.e_VTmag.pow(2))
        print "Applying selection on sigma BT-VT < %s.." % bv_cutoff
        df_toprocess = df_toprocess[df_toprocess.e_B_V < bv_cutoff]
    if catalog == 'xmatch_TGAS_Simbad.csv':
        df_toprocess = df_toprocess.loc[(df_toprocess['J'] < 11.) & (df_toprocess['K'] < 11.)]
        print "min in J: %s" % np.max(df_toprocess['J'])
        print "max in J: %s" % np.min(df_toprocess['J'])
        df_toprocess.insert(10, 'B_V', df_toprocess.loc[:, 'B'] - df_toprocess.loc[:, 'V'])
        df_toprocess.insert(10, 'J_K', df_toprocess.loc[:, 'J'] - df_toprocess.loc[:, 'K'])
        df_toprocess.insert(10, 'M_G', df_toprocess.loc[:, 'phot_g_mean_mag'] - 5. * (np.log10(1000. / df_toprocess.loc[:, 'parallax']) - 1.))
        df_toprocess.insert(10, 'M_J', df_toprocess.loc[:, 'J'] - 5. * (np.log10(1000. / df_toprocess.loc[:, 'parallax']) - 1.))
        df_toprocess.insert(10, 'M_K', df_toprocess.loc[:, 'K'] - 5. * (np.log10(1000. / df_toprocess.loc[:, 'parallax']) - 1.))
    if catalog == 'xmatch_TGAS_VSX.csv':
        df_toprocess = df_toprocess[df_toprocess.V == 0]
    print "%s objects selected" % len(df_toprocess)
    print "..Done\n----------"
    return df_toprocess
[ "def subcatsMWfootprint_diagnostics(catname='Skelton',plotdir='/Users/kschmidt/work/MUSE/MWv2_analysis/continuum_source_selection/',\n skeltonwhitakermag='814',xrange=None,bins=None,verbose=True):\n\n ids = np.array([])\n mags = np.array([])\n magnames = np.array([])\n\n if (catname.lower() == 'skelton') or (catname.lower() == 'skelton_goodss') or (catname.lower() == 'all'):\n photcat_goodss = '/Users/kschmidt/work/catalogs/MUSE_GTO/goodss_3dhst.v4.1_inMUSEWideFootprint.fits'\n photdat_goodss = afits.open(photcat_goodss)[1].data\n ids = np.append(ids,photdat_goodss['id']+1100000000)\n if skeltonwhitakermag in ['775','606']:\n magcol = 'f_F'+skeltonwhitakermag+'W'\n else:\n magcol = 'f_F'+skeltonwhitakermag+'Wcand'\n mags = np.append(mags,25.0-2.5*np.log10(photdat_goodss[magcol]))\n magnames = np.append(magnames,magcol.replace('_','\\_'))\n\n if (catname.lower() == 'skelton') or (catname.lower() == 'skelton_cosmos') or (catname.lower() == 'all'):\n photcat_cosmos = '/Users/kschmidt/work/catalogs/MUSE_GTO/cosmos_3dhst.v4.1_inMUSEWideFootprint.fits'\n photdat_cosmos = afits.open(photcat_cosmos)[1].data\n ids = np.append(ids,photdat_cosmos['id']+2100000000)\n magcol = 'f_F'+skeltonwhitakermag+'W'\n mags = np.append(mags,25.0-2.5*np.log10(photdat_cosmos[magcol]))\n magnames = np.append(magnames,magcol.replace('_','\\_'))\n\n if (catname.lower() == 'whitaker') or (catname.lower() == 'all'):\n photcat = '/Users/kschmidt/work/catalogs/MUSE_GTO/hlsp_hlf_hst_60mas_goodss_v2.0_catalog_inMUSEWideFootprint.fits'\n photdat = afits.open(photcat)[1].data\n ids = np.append(ids,photdat['id']+1200000000)\n magcol = 'f_f'+skeltonwhitakermag+'w'\n mags = np.append(mags,25.0-2.5*np.log10(photdat[magcol]))\n magnames = np.append(magnames,magcol.replace('_','\\_'))\n\n if (catname.lower() == 'laigle') or (catname.lower() == 'all'):\n photcat = '/Users/kschmidt/work/catalogs/MUSE_GTO/cosmos2015_laigle_v1.1_candelsregion_inMUSEWideFootprint.fits'\n photdat = afits.open(photcat)[1].data\n ids = np.append(ids,photdat['NUMBER']+2200000000)\n magcol = 'V_MAG_ISO'\n mags = np.append(mags,photdat[magcol])\n magnames = np.append(magnames,magcol.replace('_','\\_'))\n\n if len(ids) == 0:\n sys.exit('No IDs available for \"catname='+str(catname)+'\"')\n\n goodent = np.where((mags < 40) & (mags > 5) & np.isfinite(mags))[0]\n mags_good = mags[goodent]\n ids_good = ids[goodent]\n\n Nbad = len(ids) - len(ids_good)\n Ncosmos = len(np.where(ids_good > 1.9e9)[0])\n Ngoodss = len(np.where(ids_good < 1.9e9)[0])\n Ntotal = Ngoodss+Ncosmos\n\n if verbose: print(' - Read the catalog selection \"'+catname+'\" finding the following number of sources:')\n if verbose: print(' (discarding '+str(Nbad)+' sources for not being finite or having poor mags)')\n if verbose: print(' Total : '+str(Ntotal))\n if verbose: print(' GOODS-S : '+str(Ngoodss))\n if verbose: print(' COSMOS : '+str(Ncosmos))\n\n # - - - - - - - - - - - - - - - - - - - - PLOTTING - - - - - - - - - - - - - - - - - - - -\n if catname.lower() == 'all':\n magext = 'm'+skeltonwhitakermag\n else:\n magext = magcol\n plotname = plotdir+'mag_histogram_'+catname.lower()+'_'+magext+'.pdf'\n if verbose: print(' - Setting up and generating histogram of MUSE-Wide sources in \\n '+plotname)\n fig = plt.figure(figsize=(5, 4))\n fig.subplots_adjust(wspace=0.1, hspace=0.1,left=0.2, right=0.95, bottom=0.2, top=0.95)\n Fsize = 14\n lthick = 1.5\n marksize = 3\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif',size=Fsize)\n plt.rc('xtick', labelsize=Fsize)\n plt.rc('ytick', 
labelsize=Fsize)\n plt.clf()\n plt.ioff()\n #plt.title('M^\\star',fontsize=Fsize)\n\n if xrange is None:\n xrange = [np.min(mags_good),np.max(mags_good)]\n\n if (bins is None):\n bin_dz = 0.1\n bins = np.arange(np.min(mags_good),np.max(mags_good)+bin_dz,bin_dz)\n if xrange is not None:\n bins = np.arange(np.min(xrange),np.max(xrange)+bin_dz,bin_dz)\n\n\n magranges = [[0,24],[24,25],[25,26],[26,99],[0,26]]\n colors = ['blue','green','orange','red','black']\n\n for mm, magrange in enumerate(magranges):\n goodent = np.where((mags_good > magrange[0]) & (mags_good <= magrange[1]))[0]\n Ngood = len(goodent)\n\n if Ngood>1:\n goodIDs = ids[goodent]\n goodmag = mags_good[goodent]\n goodcolor = colors[mm]\n magmin = np.min(goodmag)\n magmax = np.max(goodmag)\n\n infostr = ' Histinfo:'\n\n percent = float(Ngood)/float(Ntotal)*100.\n label = str(magrange[0])+'$<$mag$<=$'+str(magrange[1])+' \\n('+str(Ngood)+' obj; '+str('%.2f' % percent)+'\\%)'\n\n if mm < len(magranges)-1:\n fillval = True\n linest = '-'\n else:\n fillval = False\n linest = ':'\n hist = plt.hist(goodmag,color=goodcolor,bins=bins,histtype=\"step\",lw=lthick,label=label,ls=linest,\n fill=fillval,fc=goodcolor,alpha=0.8)\n\n plt.xlim(xrange)\n plt.xlabel('AB magnitude \\n('+', '.join(list(magnames))+')', fontsize=Fsize)\n\n #plt.ylim(yrange)\n plt.ylabel(catname.replace('_','\\_')+' catalog objects\\nover MUSE-Wide 100 field footprint', fontsize=Fsize)\n\n #--------- LEGEND ---------\n anchorpos = (0.5, 1.2)\n leg = plt.legend(fancybox=True,numpoints=1, loc='upper left',prop={'size':Fsize-3},ncol=1)#,\n #bbox_to_anchor=anchorpos) # add the legend\n leg.get_frame().set_alpha(0.7)\n #--------------------------\n\n plt.savefig(plotname)\n plt.clf()\n plt.close('all')", "def pressure_cutoff(amount,pressure,cutoff=450.,commentary=\"\",do_avg=False):\n #amount : 3D grid of amount of substance (vertical, lat, lon)\n #pressure : 3D grid of pressure at the *bottom* of each box (vertical, lat, lon)\n #cutoff : is the pressure cutoff, same units as previous variable.\n #commentary : set to any string to get a printout when this command is running (useful for diagnostic)\n \n #diagnostic\n if commentary != \"\":\n print(\"%s - Calculating pressure cutoff\"%commentary)\n \n #work out shape of array\n (num_levels,num_lats,num_lons) = amount.shape\n \n #prepare output grid\n output = np.zeros((num_lats,num_lons))\n \n for i in range(num_lats):\n for j in range(num_lons):\n pres_col = pressure[:,i,j] #get 1D array of pressures at this i,j point\n amount_col = amount[:,i,j] #get 1D array of amounts at this i,j point\n amount_col = list(np.where(np.isnan(np.array(amount_col)),0.,np.array(amount_col)))\n #for each i,j point, get the sum up to this level (linearlt interpolating using log pressures)\n output[i,j] = float(log_interp_sum_to_level(pres_col,amount_col,cutoff))\n if commentary == \"full\" and i == 20 and j == 20:\n print(pres_col)\n print(amount_col)\n print(output[i,j])\n \n return(output)", "def loadData(catalogfile, vel_err, mag_err, rotate=True):\n d = Table.read(catalogfile)\n \n # Converting from pix to mas, applying mag offsets\n pscale = astrometry.scale['WFC'] * 1e3 # mas/pix, from ACS comparison\n\n d['fit_vx'] *= pscale\n d['fit_vy'] *= pscale\n d['fit_vxe'] *= pscale \n d['fit_vye'] *= pscale\n\n d['fit_x0'] *= pscale / 1e3\n d['fit_y0'] *= pscale / 1e3\n d['fit_x0e'] *= pscale / 1e3\n d['fit_y0e'] *= pscale / 1e3\n\n # Applying the error cuts, only to F153m filter\n lowErr = np.where((d['fit_vxe'] < vel_err) &\n (d['fit_vye'] < 
vel_err) &\n (d['me_2005_F814W'] < mag_err) &\n (d['me_2010_F160W'] < mag_err) &\n (d['me_2013_F160W'] < mag_err))\n \n d_trim = d[lowErr]\n print('Stars in trimmed catalog: {0} out of {1}'.format(len(d_trim), len(d)))\n\n #--- If rotate flag, then rotate to RA/DEC ---#\n if rotate:\n center = np.array([d_trim['fit_x0'].mean(), d_trim['fit_y0'].mean()])\n x0_tmp, y0_tmp = pos_rotate(d_trim['fit_x0'], d_trim['fit_y0'], angle, center) \n x0e_tmp, y0e_tmp = velerr_rotate(d_trim['fit_x0e'], d_trim['fit_y0e'], angle)\n vx_tmp, vy_tmp = vel_rotate(d_trim['fit_vx'], d_trim['fit_vy'], angle) \n vxe_tmp, vye_tmp = velerr_rotate(d_trim['fit_vxe'], d_trim['fit_vye'], angle)\n\n d_trim['fit_vx'] = vx_tmp * -1.0\n d_trim['fit_vy'] = vy_tmp\n d_trim['fit_vxe'] = vxe_tmp\n d_trim['fit_vye'] = vye_tmp\n\n d_trim['fit_x0'] = x0_tmp * -1.0\n d_trim['fit_y0'] = y0_tmp\n d_trim['fit_x0e'] = x0e_tmp\n d_trim['fit_y0e'] = y0_tmp\n \n return d_trim", "def filter(self,prevalence_threshold=0.1, abundance_num=100, variance_num=100):\n temp_col = list(copy.copy(self.df_primary_col))\n temp_df = self.df[temp_col]\n prevalence_threshold = float(prevalence_threshold)\n abundance_num = int(abundance_num)\n variance_num = int(variance_num)\n #prevalence \n for col in self.df_primary_col:\n non_zero_count= 0\n for ele in temp_df[col]:\n if ele > 0:\n non_zero_count += 1\n prevalence_of_col = non_zero_count/len(temp_df[col])\n if prevalence_of_col < prevalence_threshold:\n temp_col.remove(col)\n # abundance\n abundance_dict = {}\n for col in temp_col:\n abundance = 0\n for ele in temp_df[col]:\n if ele > 0:\n abundance += ele\n abundance_dict[col] = abundance\n\n if len(abundance_dict) < abundance_num:\n pass\n else:\n # sort and select abundance_num features\n sort_abun = sorted(abundance_dict,key=abundance_dict.get,reverse=True)\n temp_col = sort_abun[0:abundance_num]\n # variance\n variance_dict = {}\n for col in temp_col:\n std_of_col = np.std(temp_df[col])\n variance_dict[col] = std_of_col\n if len(variance_dict) < variance_num:\n pass\n else:\n sort_vari = sorted(variance_dict,key=variance_dict.get,reverse=True)\n temp_col = sort_vari[0:variance_num]\n self.df = self.df[temp_col]", "def filter_catalog(catalog, row):\n filtered_catalog = catalog[\n (catalog.Lm_OPQ > row.L) & \n (catalog.Lm_OPQ < row.L + dL)\n ]\n filtered_catalog = filtered_catalog[\n (filtered_catalog.MLT_OPQ > row.MLT) & \n (filtered_catalog.MLT_OPQ < row.MLT + dMLT)\n ]\n filtered_catalog = filtered_catalog[\n (filtered_catalog.AE > row.AE) & \n (filtered_catalog.AE < row.AE + dAE)\n ]\n return filtered_catalog", "def _filter_catalog(self):\n cond1 = pd.isnull(self.catalog[\"size_major\"])\n cond2 = pd.isnull(self.catalog[\"size_minor\"])\n cond3 = pd.isnull(self.catalog[\"flux\"])\n cond4 = pd.isnull(self.catalog[\"specindex\"])\n cond_keep = ~(cond1 | cond2 | cond3 | cond4)\n n_total = len(cond_keep)\n n_remain = cond_keep.sum()\n n_delete = n_total - n_remain\n n_delete_p = n_delete / n_total * 100\n self.catalog = self.catalog[cond_keep]\n # Drop the index\n self.catalog.reset_index(drop=True, inplace=True)\n self.catalog_filtered = True\n logger.info(\"SNRs catalog: filtered out \" +\n \"{0:d} ({1:.1f}%) objects\".format(n_delete, n_delete_p))\n logger.info(\"SNRs catalog: remaining {0} objects\".format(n_remain))", "def do_lowzcut_check(cat, subdir):\n lowzcut = cat.lowzcut\n cat.lowzcut = True\n cat.plot_omega_dla(zmax=5,label=\"Cutting\")\n cat.lowzcut = False\n cat.plot_omega_dla(zmax=5,label=\"Not cutting\")\n plt.legend(loc=0)\n 
save_figure(path.join(subdir,\"omega_gp_lowz\"))\n plt.clf()\n\n cat.lowzcut = True\n cat.plot_line_density(zmax=5,label=\"Cutting\")\n cat.lowzcut = False\n cat.plot_line_density(zmax=5,label=\"Not cutting\")\n plt.ylim(0,0.12)\n plt.legend(loc=0)\n save_figure(path.join(subdir,\"dndx_gp_lowz\"))\n plt.clf()\n cat.lowzcut = lowzcut", "def filter_and_augment_variants(vars_df, min_allele_freq, min_alt_depth, min_depth, max_num_het, tissue, add_joint_cols=True):\n\n # Filter using allele freq, alt depth, and depth.\n vars_df = vars_df[(vars_df.allele_freq >= min_allele_freq) &\n (vars_df.alt_depth >= min_alt_depth) &\n (vars_df.depth >= min_depth)]\n\n # Update num_het, add num_het_by_id, germline\n vars_df = gem_ops.update_num_het(vars_df)\n vars_df = gem_ops.update_num_het_by_id(vars_df)\n vars_df = gem_ops.update_germline(vars_df)\n vars_df = gem_ops.update_mean_af(vars_df)\n\n # Filter by max_num_het and tissue.\n vars_df = vars_df[(vars_df.num_het <= max_num_het) & (vars_df.tissue).isin(tissue)]\n\n # Add joint columns.\n if add_joint_cols:\n vars_df = add_joint_cols(vars_df)\n\n return vars_df", "def cut_fdr(df: pd.DataFrame, fdr_level:float=0.01, plot:bool=True) -> (float, pd.DataFrame):\n\n df[\"target\"] = ~df[\"decoy\"]\n\n df = df.sort_values(by=[\"score\",\"decoy\"], ascending=False)\n df = df.reset_index()\n\n df[\"target_cum\"] = np.cumsum(df[\"target\"])\n df[\"decoys_cum\"] = np.cumsum(df[\"decoy\"])\n\n df[\"fdr\"] = df[\"decoys_cum\"] / df[\"target_cum\"]\n df[\"q_value\"] = get_q_values(df[\"fdr\"].values)\n\n last_q_value = df[\"q_value\"].iloc[-1]\n first_q_value = df[\"q_value\"].iloc[0]\n\n if last_q_value <= fdr_level:\n logging.info('Last q_value {:.3f} of dataset is smaller than fdr_level {:.3f}'.format(last_q_value, fdr_level))\n cutoff_index = len(df)-1\n\n elif first_q_value >= fdr_level:\n logging.info('First q_value {:.3f} of dataset is larger than fdr_level {:.3f}'.format(last_q_value, fdr_level))\n cutoff_index = 0\n\n else:\n cutoff_index = df[df[\"q_value\"].gt(fdr_level)].index[0] - 1\n\n cutoff_value = df.loc[cutoff_index][\"score\"]\n cutoff = df[df[\"score\"] >= cutoff_value]\n\n targets = df.loc[cutoff_index, \"target_cum\"]\n decoy = df.loc[cutoff_index, \"decoys_cum\"]\n\n fdr = df.loc[cutoff_index, \"fdr\"]\n\n\n logging.info(f\"{targets:,} target ({decoy:,} decoy) of {len(df)} PSMs. 
fdr {fdr:.6f} for a cutoff of {cutoff_value:.2f} (set fdr was {fdr_level})\")\n\n if plot:\n import matplotlib.pyplot as plt\n plt.figure(figsize=(10, 5))\n plt.plot(df[\"score\"], df[\"fdr\"])\n plt.axhline(0.01, color=\"k\", linestyle=\"--\")\n\n plt.axvline(cutoff_value, color=\"r\", linestyle=\"--\")\n plt.title(\"fdr vs Cutoff value\")\n plt.xlabel(\"Score\")\n plt.ylabel(\"fdr\")\n # plt.savefig('fdr.png')\n plt.show()\n\n bins = np.linspace(np.min(df[\"score\"]), np.max(df[\"score\"]), 100)\n plt.figure(figsize=(10, 5))\n plt.hist(df[df[\"decoy\"]][\"score\"].values, label=\"decoy\", bins=bins, alpha=0.5)\n plt.hist(df[~df[\"decoy\"]][\"score\"].values, label=\"target\", bins=bins, alpha=0.5)\n plt.xlabel(\"Score\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Score vs Class\")\n plt.legend()\n plt.show()\n\n cutoff = cutoff.reset_index(drop=True)\n return cutoff_value, cutoff", "def cuts_ana( df ):\n initial_cuts = (df.mll > 12 ) & (df.qT > 30) & (df.METProj > 20) & (df.numbExtraLep == 0)\n same_flavor_cut = (df.lep_Type < 0)\n dif_flavor_cut = (df.lep_Type > 0)\n z_peak_cut = (df.mll > 106) | (df.mll < 76)\n met_proj_cut = (df.METProj > 45)\n\n basic_sf_0j_cuts = initial_cuts & (df.qT > 45) &same_flavor_cut & met_proj_cut & z_peak_cut& (df.numb_jets <= 1)\n basic_df_0j_cuts = initial_cuts & dif_flavor_cut &(df.numb_jets <= 1)\n return pd.concat( [df[basic_sf_0j_cuts], df[basic_df_0j_cuts]] )", "def get_magnitudes(catalog,columns):\n vars,evars,posref,zpe,zpo = get_usefulcolumns(columns)\n data = C.loaddata(catalog)\n mags = data[:,vars] \n return mags", "def vscat(a,fig=None,ls=None,marker='o',nmin=2,mhmin=-3,density=False,out=None) :\n if fig == None : fig,ax=plots.multi(4,6,hspace=0.001,wspace=0.4,figsize=(12,8))\n else : fig,ax=fig\n tbins=[3000,3500,4000,4500,5500,8000,30000] \n hbins=[8,11,12,13,15]\n try: snr = a['SNREV']\n except: snr=a['SNR']\n j=np.where(snr > 300) [0]\n snr[j] = 300\n for i in range(len(tbins)-1) :\n ax[i,0].text(0.9,0.9,'{:d}<=RV_TEFF<{:d}'.format(tbins[i],tbins[i+1]),ha='right',transform=ax[i,0].transAxes,fontsize=8)\n for j in range(len(hbins)-1) :\n ax[0,j].set_title('{:d}<=H<{:d}'.format(hbins[j],hbins[j+1]))\n gd = np.where((a['RV_TEFF']>=tbins[i]) & (a['RV_TEFF']<tbins[i+1]) &\n (a['H']>=hbins[j]) & (a['H']<hbins[j+1]) &\n (a['NVISITS']>nmin) & (a['RV_FEH']>mhmin) & (a['VSCATTER'] > 0)) [0]\n print(tbins[i],tbins[i+1],hbins[j],hbins[j+1],nmin,len(gd))\n try :\n #plots.plotc(ax[i,2],snr[gd],a['VSCATTER'][gd],a['RV_FEH'][gd],marker=marker,xr=[0,310],yr=[0,1],xt='S/N',yt='VSCATTER')\n ax[i,j].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),ls=ls,histtype='step',color=colors[j],normed=density)\n ax[i,j].set_xlabel('VSCATTER (km/s)')\n ax[i,j].plot([0.1,0.1],ax[i,j].get_ylim())\n #ax[i,1].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),histtype='step',cumulative=True,normed=True,ls=ls,color=colors[j])\n #ax[i,1].set_xlabel('VSCATTER')\n except : pass\n\n if out is not None : \n fig.savefig(out+'.png')\n plt.close()\n\n fig.suptitle('NVISITS>{:d} [M/H]>{:6.2f}'.format(nmin,mhmin))\n return fig,ax", "def get_errmagnitudes(catalog,columns):\n vars,evars,posref,zpe,zpo = get_usefulcolumns(columns)\n data = C.loaddata(catalog)\n emags = data[:,evars] \n return emags", "def box_and_whisker_thresholded(data:gpdGeoDataFrame, threshold: int = 3000):\r\n fig, ax = plt.subplots(figsize=(20,4))\r\n d = data[data[DATAFIELDS] > 0.1]\r\n d = d[d[DATAFIELDS] < threshold]\r\n ax =d.boxplot();\r\n ax.set_title('AAL > 0 \\n(outliers removed)')\r\n ax.set_ylabel('Loss in 
Dollars');", "def cos_fit(ftype=\"fitacf\", azbin_nvel_min=10, naz_min=3, az_span_min=30, baseLocation=\"./\",\n dbName=None, sqrt_weighting=True):\n import sqlite3\n import datetime as dt\n import numpy as np \n\n # make db connection for dopsearch\n if dbName is None:\n dbName = \"master_\" + ftype + \".sqlite\"\n conn = sqlite3.connect(baseLocation + dbName)\n cur = conn.cursor()\n\n if sqrt_weighting:\n T1 = \"master_cosfit\"\n else:\n T1 = \"master_cosfit_equal_weighting\"\n \n #T1 = \"master_cosfit\"\n T2 = \"master_summary\"\n\n# # build a new master table where we store the cosfitted velocities at each lat-lon grid points\n# cur.execute(\"CREATE TABLE IF NOT EXISTS {tb1}\\\n# (vel_mag REAL, vel_mag_err REAL, vel_dir REAL, vel_dir_err REAL,\\\n# vel_count INTEGER, gazmc_count INTEGER,\\\n# glatc REAL, glonc REAL,\\\n# PRIMARY KEY (glatc, glonc))\".format(tb1=T1))\n#\n# # select the velocity data grouping by latc-lonc bins for the nigh side\n# command = \"SELECT count(*), glatc, glonc FROM {tb2}\\\n# WHERE (glonc BETWEEN 0 AND 135) OR (glonc BETWEEN 225 AND 360)\\\n# GROUP BY glatc, glonc\\\n# \".format(tb2=T2)\n\n cur.execute(\"CREATE TABLE IF NOT EXISTS {tb1}\\\n (vel_mag REAL, vel_mag_err REAL, vel_dir REAL, vel_dir_err REAL,\\\n vel_count INTEGER, gazmc_count INTEGER,\\\n glatc REAL, glonc REAL,\\\n PRIMARY KEY (glatc, glonc))\".format(tb1=T1))\n\n # select the velocity data grouping by latc-lonc bins. Also each latc-lonc cell should\n #contain gazmc bins that have at least azbin_nvel_min amount of measurements\n command = \"SELECT count(*), glatc, glonc, group_concat(gazmc) FROM\\\n (SELECT * FROM {tb2} WHERE vel_count >= {azbin_nvel_min})\\\n GROUP BY glatc, glonc\\\n \".format(tb2=T2, azbin_nvel_min=azbin_nvel_min)\n #WHERE (glonc BETWEEN 0 AND 135) OR (glonc BETWEEN 225 AND 360)\n\n cur.execute(command)\n rws = cur.fetchall()\n\n # filter out lat-lon grid points that have less than 3 qualifying amimuthal bins \n rws = [x for x in rws if x[0] >= naz_min]\n\n # filter out lat-lon grid points that have less than 30 degrees azimuthal span\n for rwi in rws:\n az_rwi = np.sort(np.array([int(x) for x in rwi[3].split(\",\")]))\n if len(az_rwi) == 3:\n if az_rwi.tolist()==[5, 345, 355] or az_rwi.tolist()==[5, 15, 355]:\n #print az_rwi\n rws.remove(rwi)\n elif az_rwi.tolist()==[az_rwi[0], az_rwi[0]+10, az_rwi[0]+20]:\n #print az_rwi\n rws.remove(rwi)\n else:\n continue\n else:\n continue\n\n azm_count = [x[0] for x in rws]\n lat = [x[1] for x in rws]\n lon = [x[2] for x in rws]\n\n for ii in xrange(len(lat)):\n command = \"SELECT median_vel, vel_count, gazmc FROM {tb2}\\\n WHERE glatc={lat}\\\n AND glonc={lon}\\\n ORDER BY gazmc\"\\\n .format(tb2=T2, lat=lat[ii], lon=lon[ii])\n cur.execute(command)\n rows = cur.fetchall()\n median_vel = np.array([x[0] for x in rows])\n vel_count = np.array([x[1] for x in rows])\n if sqrt_weighting:\n sigma = 1./np.sqrt(vel_count)\n else:\n sigma = np.array([1.0 for x in rows])\n azm = np.array([x[2] for x in rows])\n\n # do cosine fitting with weight\n fitpars, perrs = cos_curve_fit(azm, median_vel, sigma)\n vel_mag = round(fitpars[0],2)\n vel_dir = round(np.rad2deg(fitpars[1]) % 360,1)\n vel_mag_err = round(perrs[0],2)\n vel_dir_err = round(np.rad2deg(perrs[1]) % 360, 1)\n\n # populate the table \n command = \"INSERT OR IGNORE INTO {tb1} (vel_mag,\\\n vel_mag_err, vel_dir, vel_dir_err, vel_count,\\\n gazmc_count, glatc, glonc) VALUES ({vel_mag},\\\n {vel_mag_err}, {vel_dir}, {vel_dir_err}, {vel_count},\\\n {gazmc_count}, {glatc}, 
{glonc})\".format(tb1=T1, vel_mag=vel_mag,\\\n vel_mag_err=vel_mag_err, vel_dir=vel_dir,\\\n vel_dir_err=vel_dir_err, vel_count=np.sum(vel_count),\\\n gazmc_count =azm_count[ii], glatc=lat[ii], glonc=lon[ii])\n cur.execute(command)\n print \"finish inserting cosfit result at \" + str((lat[ii], lon[ii]))\n\n # commit the change\n conn.commit()\n\n # close db connection\n conn.close()\n\n return", "def identify_and_handel_outliers(self):\n col_list = [] # This will hold the column names created for the administration of the modified z-score test\n values_dropped = []\n cont_cols = self.df.select_dtypes(exclude=[\"category\"]).columns # Gets continous columns \n for col in cont_cols:\n#TODO: Add lines to check column len(), if len() == 0, drop drop column, create cont_cols and cat_cols, and drop from there as well. \n df_len = len(self.df)\n top_value = self.df[col].value_counts(normalize=True, ascending=False, dropna=True)\n top_value = top_value.head(1).reset_index().to_numpy()[0] #Gets the top occuring value along with its percentage of occurances\n if top_value[1] > 0.5:#Test if the top occuring value makes up more than 50% of the data\n remaining_col = self.df[col][~self.df[col].isin([top_value[0]])] #Gets all values not within the 50% of single value data\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(remaining_col) #Gets modified z-score for remaining items\n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0) #Fills all missing z-scores\\\n #with zero(because that 50% of data removed would be zero anyways)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n else:\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(self.df[col]) #Gets modified z-score \n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n self.df.drop(columns = col_list, inplace=True)#Removed columns created to test modified z-score\n self.outliers_dropped = values_dropped", "def apply(self, catalog):\n flagField = self.fluxField + \"_flag\"\n if flagField in catalog.schema:\n selected = np.logical_not(catalog[flagField])\n else:\n selected = np.ones(len(catalog), dtype=bool)\n\n signalToNoise = catalog[self.fluxField]/catalog[self.errField]\n selected &= BaseLimit.apply(self, signalToNoise)\n return selected", "def filter_data(df, coords, vel=False, vel_threshold=0.15, radiant_threshold=5):\n\tdf = df[(df.range > 110) & (df.range < 130) \n\t\t& (df.th < 70) # Unphysical, given the range cut.\n\t\t& (df.fl == 0)] # Bad interferometry if fl=1\n\n\t# Velocity cut\n\tif vel:\n\t\tdf = df[(df.new_ptn > vel*(1-vel_threshold)) \n\t\t& (df.new_ptn < vel*(1+vel_threshold))]\n\n\t# Identify the shower radiant\n\tif isinstance(coords, str):\n\t\twavelet = read_wavelet(coords)\n\t\tdf = df.apply(get_wavelet_radiant, axis=1, args=(wavelet,))\n\telse:\n\t\tdf['radiant_ll0'] = coords[0]\n\t\tdf['radiant_beta'] = coords[1]\n\n\tdf['separation'] = df.apply(check_radiant, axis=1)\n\tdf.drop(['radiant_ll0', 'radiant_beta'], axis=1, inplace=True)\n\tdf_shower = df[df['separation'] <= radiant_threshold]\n\treturn df_shower", "def column_cutoff(data_frame, cutoffs):\n index_names = []\n\n dftemp = data_frame.copy() # copy dataframe to 
preserve original\n if dftemp is not None: # check dataframe is not empty\n for x in range(len(cutoffs)):\n column_name = cutoffs[x][0] # extract column name\n min_value = float(cutoffs[x][1]) # extract minimum value\n max_value = float(cutoffs[x][2]) # extract maximum value\n index_names.extend(dftemp[(dftemp[column_name] < min_value) |\n (dftemp[column_name] > max_value)].index) # create index of all values that\n # meet condition\n dftemp.drop(index_names, inplace=True)\n\n return dftemp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
plot the background stars (HR diagram). The plot is a 2D histogram for better readability. Only bins with at least 10 stars are shown.
def plot_hr_diag(hr_df, x='B_V', y='M_V', cutoff=0.2, bvcutoff=0.05):
    plt.figure(figsize=(11., 10.))
    print "Plotting background stars.."
    plt.set_cmap('gray_r')
    plt.hist2d(hr_df[x].tolist(), hr_df[y].tolist(), (200, 200), norm=LogNorm(), cmin=10)
    plt.axis([-0.2, 2.35, -3., 7.])
    plt.gca().invert_yaxis()
    plt.xlabel(r'$BT-VT$ (mag)')
    plt.ylabel(r'$M_{VT}$ (mag)')  # Plotting M_{VT}
    plt.title(r'$\sigma_\pi / \pi < %s, \sigma_{BT-VT}< %s$ mag' % (cutoff, bvcutoff))
    print "..Done"
    return
[ "def histogram():\r\n y = np.random.rand(50)\r\n x = np.linspace(1,50,50)\r\n plt.subplot(1,2,1)\r\n plt.hist(y, bins=5)\r\n plt.subplot(1,2,2)\r\n plt.scatter(x,y)\r\n avg = np.mean(y)\r\n avg = np.ones(50)*avg\r\n plt.plot(x, avg,'r')\r\n plt.axis([0,50,0,1])\r\n plt.show()", "def plot_maxrho_histo(self):\n try:\n self.R_w.mean()\n except:\n self._readwall()\n #ind = np.where(self.data_e['endcond']== 10)[0] #rho=1\n ind = np.where(self.data_e['endcond']== 3)[0] #wall\n #ind = np.arange(len(self.data_e['endcond'])) #all particles\n \n pitchi = self.data_i['pitch'][ind]\n energyi = self.data_i['energy'][ind]\n #pitch = self.data_e['pitch'][ind]\n vr = self.data_e['vR'][ind]\n vz = self.data_e['vz'][ind]\n #vphi = self.data_e['vphi'][ind]\n r = self.data_e['R'][ind]\n z = self.data_e['z'][ind]\n R0=self.infile['misc/geomCentr_rz'][0]\n theta = np.arctan2(z,r-R0)\n phi = self.data_e['phi'][ind]\n x = r*np.cos(phi); y=r*np.sin(phi) \n \n \n \n #plt.close('all')\n plt.figure(); plt.hist(pitchi, bins=20); plt.xlabel('Pitch'); plt.ylabel('Number of particles')\n plt.figure(); plt.hist(energyi, bins=30); plt.xlabel('Energy'); plt.ylabel('Number of particles')\n plt.figure(); plt.hist(vr*1e-3, bins=20); plt.xlabel(r'$v_r$ [km/s]'); plt.ylabel('Number of particles')\n plt.figure(); plt.hist(vz*1e-3, bins=20); plt.xlabel(r'$v_z$ [km/s]'); plt.ylabel('Number of particles')\n plt.figure(); plt.hist(phi, bins=20); plt.xlabel('Phi (toroidal angle)'); plt.ylabel('Number of particles')\n plt.figure(); plt.hist(theta, bins=20); plt.xlabel('theta (poloidal angle)'); plt.ylabel('Number of particles')\n\n plt.figure(); plt.scatter(x,y); plt.grid('on'); plt.xlabel(r'x'); plt.ylabel('y')\n plt.tight_layout()", "def plot_histogram(img):\n rgb_hist = rgb_histogram(img)\n plt.figure()\n for color, hist in rgb_hist.items():\n plt.plot(hist, color=color)\n plt.xlim([0, 256])", "def histogram() -> None:\n\n\tcol_h, gray_h = analyser.histogram_gen(color = color, gray = gray_scale)\n\timage_process_end(col = col_h, gray = gray_h, mode = 6)", "def histogram_ratings(data):\n ratings = [review.rating for review in data]\n plt.hist(ratings, bins=6, normed=True)\n plt.title(\"Ratings histogram\")\n plt.show()", "def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)", "def plot_histo_rho(self):\n try:\n self.R_w.mean()\n except:\n self._readwall() \n ind = np.where(self.data_e['endcond']== 3)[0] #wall\n x = self.data_i['rho'][ind]\n _plot_1d(x, xlabel=r'$\\rho$', ylabel=r'# markers', hist=1, ylim=[0, 2500])\n ax=plt.gca()\n ax.axvline(1.,color='k', lw=2.3)", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def _plot_ring_hist(data, bins=None, density=True, saveas='tmp.png'):\n\n hst = [el for sublist in data.to_list() for el in sublist]\n # print(hst)\n if bins:\n his = np.histogram(hst, bins=bins)\n else:\n his = np.histogram(hst, bins=range(max(hst) + 2), density=density)\n\n plt.figure(figsize=(15, 10))\n plt.xlabel('ring number')\n plt.ylabel('number of occurences')\n plt.title(\"\"\"Voronoi cell ring 
histogram for a GPS position\n averaged over 5 minute intervals. Number of MPN positions is %i, number of timestamps is %i.\"\"\" %\n (len(hst), data.shape[0]))\n plt.bar(his[1][:-1], his[0], width=0.8 * (his[1][1] - his[1][0]))\n\n plt.savefig(os.path.join(PICS_LOCATION, saveas), bbox_inches=\"tight\")", "def entries_histogram(turnstile_weather):\n\n plt.figure()\n turnstile_weather[turnstile_weather.rain == 0][\n 'ENTRIESn_hourly'].hist() # your code here to plot a historgram for hourly entries when it is raining\n turnstile_weather[turnstile_weather.rain == 1][\n 'ENTRIESn_hourly'].hist() # your code here to plot a historgram for hourly entries when it is not raining\n return plt", "def plot_hist(self):\n \n plt.figure();\n self.dist_frame.plot(kind='hist',legend=False,orientation='horizontal')", "def plot_star_classes(obj_catalog):\n\n fig = plt.figure(num=None,figsize=(8,8), dpi=100)\n ax = fig.add_subplot(1,1,1)\n\n phot_class = obj_catalog.phot_star_class\n sclass = obj_catalog.star_class\n phot_class_num = np.zeros(obj_catalog.shape[0])\n sclass_num = np.zeros(obj_catalog.shape[0])\n\n star_classes = ['WD',\\\n 'O','O8','O9','OB','B0','B1','B2','B3','B5','B6','B7','B8','B9',\\\n 'A0','A1','A2','A3','A4','A5','A6','A8','A9',\\\n 'F0','F2','F3','F5','F6','F8','F9',\\\n 'G0','G1','G2','G3','G4','G5','G8','G9',\\\n 'K0','K1','K2','K3','K4','K5','K7',\\\n 'M0','M1','M2','M3','M4','M5','M6','M7','M8','M9', \\\n 'L0','L1','L2','L3','L4','L5','L9','Ldwarf', \\\n 'T','other','C']\n print len(star_classes)\n\n star_dict = dict(zip(star_classes,np.arange(len(star_classes))))\n\n # print phot_class.value_counts()\n\n for i in range(len(phot_class)):\n print phot_class[i], star_dict[phot_class[i]], sclass[i],star_dict[sclass[i]]\n phot_class_num[i] = star_dict[phot_class[i]]\n sclass_num[i] = star_dict[sclass[i]]\n\n #ax.plot(sclass_num,phot_class_num,'.')\n\n cmap = plt.cm.Blues\n cmap.set_bad('0.85',1.0)\n\n cax = plt.hist2d(sclass_num,phot_class_num, bins=65,range = [[0,65], [0,65]], norm = LogNorm(), cmap=cmap, zorder=0)\n cbar = plt.colorbar(ticks=[1,5,10,15,20,25,30,40])\n cbar.ax.set_yticklabels([1,5,10,15,20,25,30,40],fontsize=12)\n\n ax.plot(np.arange(65),np.arange(65),'r')\n\n plt.xticks(np.arange(len(star_classes)),star_classes,fontsize=8,rotation='vertical')\n plt.yticks(np.arange(len(star_classes)),star_classes,fontsize=8)\n\n plt.grid(True)\n return plt", "def plotAGNfraction(pos_z_AGN, pos_z_gal, redshift_limit_agn, bin_size):\n fig, ax = plt.subplots(1,2,figsize=(19,7))\n \n # getting the useful histogram properties\n counts_agn, redshift_bins_agn = np.histogram(pos_z_AGN[2], bins = bin_size)\n counts_gal, redshift_bins_gal = np.histogram(pos_z_gal[2], bins = bin_size)\n \n # plotting the galaxy and agn distribution as a function of redshift \n ax[0].plot(redshift_bins_gal[1:], counts_gal, 'ks', ms=4, label=r'DM Halos')\n ax[0].plot(redshift_bins_agn[1:], counts_agn, 'bs', ms=4, label=r'AGNs')\n \n # axis properties - 0\n xlim = [np.min(redshift_bins_agn[1:]), np.max(redshift_bins_agn[1:])]\n setLabel(ax[0], r'Redshift$_R$', 'Counts','', xlim, 'default', legend=True)\n ax[0].set_yscale(\"log\")\n\n # agn fraction as a function of redshift\n f_agn, idx = [], []\n for c, c_gal in enumerate(counts_gal):\n if c_gal != 0:\n f_agn.append(((counts_agn[c]*100)/c_gal))\n idx.append(c)\n z_bin_modified = redshift_bins_gal[1:][np.array(idx)]\n \n # plot agn fraction\n ax[1].plot(z_bin_modified, f_agn, 's', color='#6b0385', ms=4)\n \n # axis properties - 1\n xlim = 
[np.min(redshift_bins_agn[1:])-0.02, np.max(redshift_bins_agn[1:])]\n setLabel(ax[1], r'Redshift$_R$', r'$f_{AGN}$ (%s)'%\"%\", '', xlim, 'default', legend=False)\n ax[1].set_yscale(\"log\")\n \n plt.savefig('figures/agn_frac.pdf', facecolor='w', edgecolor='w')\n print( 'Reddhift z<%.2f'%redshift_limit_agn )\n return redshift_bins_gal[1:]", "def create_star_background(ai_settings, screen, stars):\n star = Star(ai_settings, screen)\n star_y = 0\n max_number_stars_x = get_max_number_stars_x(ai_settings, star.rect.width)\n max_number_stars_y = get_max_number_stars_y(ai_settings, star.rect.height)\n #Generates a random number of rows to appear in the background\n number_rows = randint(5, max_number_stars_y)\n for row_number in range(number_rows):\n star_y += (ai_settings.screen_height) / number_rows\n #Generates a random number of stars to appear in each row\n number_stars_x = randint(4, max_number_stars_x)\n star_x = 0\n for star_number in range(number_stars_x):\n star_x += (ai_settings.screen_width) / number_stars_x\n create_star(ai_settings, screen, stars, star_x, star_y)", "def het_hist(het_check_df: pd.DataFrame):\n\n fig = plt.figure(figsize=(8,6))\n plt.hist(het_check_df['het_rate'])\n plt.axvline(het_check_df['low_limit'][0], c='red', ls='--')\n plt.axvline(het_check_df['up_limit'][0], c='red', ls='--')\n plt.xlabel(\"Heterozygosity Rate\")\n plt.ylabel(\"Number of Samples\")\n plt.title(\"Heterozygosity Distribution of All Samples\\n (< {:.3f} or > {:.3f} are removed)\".format(het_check_df['low_limit'][0], het_check_df['up_limit'][0]))\n return fig", "def plotblackbody(_zband, _yband, _jband, _hband, _kband, _parallax, _perr):\n # Set pyplot style to be consistent within the program\n plt.style.use('seaborn-whitegrid')\n # Import raw data to plot Hertzsprung-Russell diagram\n _hrdata = inithr('hr.dat')\n # Determine distance in parsecs\n _distance = 1 / np.tan(_parallax * 10**-3)\n _derr = (_perr * 10**-3) / ((_parallax * 10**-3)**2)\n # Create single data array with all bands\n _bands = [_zband, _yband, _jband, _hband, _kband]\n _lambda = [0.9, 1.02, 1.22, 1.63, 2.2]\n # Set up empty arrays for each star\n _largestar = np.zeros((1, 2))\n _smallstar = np.zeros((1, 2))\n\n # Determine the spectral flux density from the large star\n i = 0\n while i < 5:\n # Determine the maximum and minimum values of the observed band\n _max, _min = lightcurve.maxminvals(_bands[i])\n # The large star uses the maximum flux value (smallest magnitude)\n _largestar = np.append(_largestar, np.array([_lambda[i], (magtoflux(_min, i))], ndmin=2), axis=0)\n i += 1\n # Delete first empty row of the array\n _largestar = np.delete(_largestar, 0, axis=0)\n\n # Determine the spectral flux density from the small star\n i = 0\n while i < 5:\n # Determine the maximum and minimum values of the observed band\n _max, _min = lightcurve.maxminvals(_bands[i])\n # Smaller star flux value is combined value minus the large star\n _smallstar = np.append(_smallstar, np.array([_lambda[i], (magtoflux(_max, i) -\n magtoflux(_min, i))], ndmin=2), axis=0)\n i += 1\n # Delete the first empty row of the array\n _smallstar = np.delete(_smallstar, 0, axis=0)\n\n # Determine the luminosity and effective temperature of each star\n _luma, _lumaerr, _wiena = getwientemp(_largestar, _distance, _derr, 1)\n _lumb, _lumberr, _wienb = getwientemp(_smallstar, _distance, _derr, 2)\n\n # Calculate luminosities in solar units\n _solluma = _luma / (3.828*10**26)\n _sollumb = _lumb / (3.828*10**26)\n _lumaerr = _lumaerr / (3.828*10**26)\n _lumberr = 
_lumberr / (3.828*10**26)\n\n # Calculate masses using the mass/luminosity relation in solar mass units\n # N.B. only works as an approximation for main sequence stars, giants and dwarfs are not sutiable for this\n # approximation\n _solmassa = np.power(_solluma, 1/3.5)\n _solmassaerr = ((_solmassa * (1/3.5) * _lumaerr) / _solluma)**2\n _solmassb = np.power(_sollumb, 1/3.5)\n _solmassberr = ((_solmassb * (1 / 3.5) * _lumberr) / _sollumb) ** 2\n\n # Calculate stellar radius in solar radii using the relationship between luminosity, surface area and temperature\n _solrada = np.sqrt(_solluma / np.power(_wiena / 5778, 4))\n _solradb = np.sqrt(_sollumb / np.power(_wienb / 5778, 4))\n _solradaerr = ((_solrada * 0.5 * _lumaerr) / _solluma)**2\n _solradberr = ((_solradb * 0.5 * _lumberr) / _sollumb)**2\n\n # Output determined values to the screen and write to file\n print('Values for the large star:')\n print('Effective temperature: ' + str(round_sig(_wiena)))\n print('Solar luminosities: ' + str(round_sig(_solluma)) + ', error: ' + str(round_sig(_lumaerr)))\n print('Solar radii: ' + str(round_sig(_solrada)) + ', error: ' + str(round_sig(_solradaerr)))\n print('Solar masses: ' + str(round_sig(_solmassa)) + ', error: ' + str(round_sig(_solmassaerr)))\n print('-----------------------------------------------------')\n print('Values for the small star:')\n print('Effective temperature: ' + str(round_sig(_wienb)))\n print('Solar luminosities: ' + str(round_sig(_sollumb)) + ', error: ' + str(round_sig(_lumberr)))\n print('Solar radii: ' + str(round_sig(_solradb)) + ', error: ' + str(round_sig(_solradberr)))\n print('Solar masses: ' + str(round_sig(_solmassb)) + ', error: ' + str(round_sig(_solmassberr)))\n\n # Convert from luminosity to magnitude in solar units\n _luma = -2.5 * np.log10(_luma / (3.0128 * 10**28))\n _lumb = -2.5 * np.log10(_lumb / (3.0128 * 10**28))\n\n # Plot Hertzsprung-Russell diagram using provided array\n plt.scatter(_hrdata[:, 1], _hrdata[:, 0], s=0.5)\n # Plot determined values for each star\n plt.scatter(_wiena, _luma, s=16, c='red', label='Larger Star')\n plt.scatter(_wienb, _lumb, s=16, c='green', label='Smaller Star')\n # Set the x and y axis limits to sensible values\n plt.legend()\n plt.xlim(3000, 10000)\n plt.ylim(-10, 20)\n # Invert both axes as convention\n plt.gca().invert_xaxis()\n plt.gca().invert_yaxis()\n # Save figure to current folder\n plt.savefig('hr.png')\n # Display to screen\n plt.show()", "def plot_values_histogram(values):\n hist, _, _ = plt.hist(values, np.linspace(0, MAX_GHF, 61), normed=True,\n lw=1, alpha=.7, color='k', edgecolor='k')\n plt.xlabel('GHF (mW m$^{-2}$)')\n plt.ylabel('Normalized Frequency')\n plt.grid(linestyle='dotted')\n plt.xlim([0, MAX_GHF])\n plt.ylim([0, max(hist) * 1.1])", "def plot_redshifthistograms(plotname='/Users/kschmidt/work/MUSE/MUSEWide_sourcehistogram.pdf',\n zranges=[[0,2.5],[2.5,7]],colors=['black','red'],labels=None,fill=False,\n bins=None,xrange=None,ylabel=' ',verbose=True):\n if verbose: print(' - Loading source catalogs')\n IDlist, ra, dec, redshifts = mwp.loadcatalogs(verbose=verbose)\n\n if verbose: print(' - Setting up and generating histogram of MUSE-Wide sources in \\n '+plotname)\n fig = plt.figure(figsize=(10, 3))\n fig.subplots_adjust(wspace=0.1, hspace=0.1,left=0.1, right=0.98, bottom=0.2, top=0.95)\n Fsize = 15\n lthick = 2\n marksize = 3\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif',size=Fsize)\n plt.rc('xtick', labelsize=Fsize)\n plt.rc('ytick', labelsize=Fsize)\n plt.clf()\n plt.ioff()\n 
#plt.title('M^\\star',fontsize=Fsize)\n\n if xrange is None:\n xrange = [np.min(redshifts),np.max(redshifts)]\n\n if bins is None:\n bin_dz = 0.1\n bins = np.arange(np.min(redshifts),np.max(redshifts)+bin_dz,bin_dz)\n\n outID = []\n outz = []\n for zz, zrange in enumerate(zranges):\n goodent = np.where((redshifts > zrange[0]) &(redshifts <= zrange[1]))[0]\n goodIDs = IDlist[goodent]\n goodz = redshifts[goodent]\n goodcolor = colors[zz]\n zmin = np.min(goodz)\n zmax = np.max(goodz)\n outID.append(goodIDs)\n outz.append(goodz)\n\n infostr = ' Histinfo:'\n if labels is not None:\n goodlabel = labels[zz]\n infostr = infostr+' label = '+goodlabel\n\n infostr = infostr+' zrange = '+str(zrange)+' Nobj = '+str(len(goodent))+' color = '+goodcolor+\\\n ' zmin = '+str(\"%.5f\" % zmin)+' zmax = '+str(\"%.5f\" % zmax)\n if verbose: print(infostr)\n\n hist = plt.hist(goodz,color=goodcolor,bins=bins,histtype=\"step\",lw=lthick,label=goodlabel,\n fill=fill,fc=goodcolor)\n\n plt.xlim(xrange)\n # xticksstrings = ['A','B','C','D','E','F']\n # plt.xticks(xvec, xticksstrings)\n plt.xlabel(r'redshift', fontsize=Fsize)\n\n #plt.ylim(yrange)\n plt.ylabel(ylabel, fontsize=Fsize)\n\n if labels is not None:\n #--------- LEGEND ---------\n anchorpos = (0.5, 1.2)\n leg = plt.legend(fancybox=True,numpoints=1, loc='upper center',prop={'size':Fsize-3},ncol=len(labels))#,\n #bbox_to_anchor=anchorpos) # add the legend\n leg.get_frame().set_alpha(0.7)\n #--------------------------\n\n if verbose: print(' Saving plot... ')\n plt.savefig(plotname)\n plt.clf()\n plt.close('all')\n\n return outID, outz", "def _plot_ring_histogram_by_group(data, size_borders, title, label, saveas):\n\n n_hist = data.shape[1]\n width_factor = 0.8 / n_hist\n shift_factor = (n_hist - 1) / 2.0\n\n plt.figure(figsize=(15, 10))\n plt.xlabel('ring number')\n plt.ylabel('number of occurences')\n\n for key in range(n_hist - 1):\n hst = [el for sublist in data[data.columns[key + 1]].to_list() for el in sublist]\n # print(hst)\n\n his = np.histogram(hst, bins=range(max(hst) + 2), density=True)\n\n plt.bar(his[1][:-1] + width_factor * (key - shift_factor), his[0], width=width_factor * (his[1][1] - his[1][0]),\n # label=\"area is within \"+ str(size_borders[key])+ ' $km^2$',\n label=label % str(size_borders[key]),\n linewidth=2, alpha=0.6, edgecolor='black'\n )\n\n hst = [el for sublist in data[data.columns[0]].to_list() for el in sublist]\n his = np.histogram(hst, bins=range(max(hst) + 2), density=True)\n plt.bar(his[1][:-1] + width_factor * (key - shift_factor + 1), his[0], width=width_factor * (his[1][1] - his[1][0]),\n label='without dividing to classes',\n linewidth=2, alpha=0.6, edgecolor='black', facecolor='black', hatch=r\"//\"\n )\n plt.legend()\n # plt.title(\"\"\"Voronoi cell ring histogram for a GPS position. Number of MPN positions is %i, number of timestamps is %i.\n # Plots are for different size-based classes of VCs\"\"\"%\n # (len(hst), data.shape[0]))\n plt.title(title % (len(hst), data.shape[0]))\n # https://stackoverflow.com/questions/46913184/how-to-make-a-striped-patch-in-matplotlib\n\n plt.savefig(os.path.join(PICS_LOCATION, saveas), bbox_inches=\"tight\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parent function of get_variable_stars. Sequentially select 'variableTypes' variable stars and plot them on the HR diagram.
def plot_variable_stars(variablesdf, variabletype=None, x='B_V', y='M_V'):
    if variabletype is None:
        variabletype = ['CEP', 'BCEP', 'BCEPS', 'DSCT', 'SR', 'SRA', 'SRB', 'SRC', 'SRD', 'RR', 'RRAB', 'RRC', 'GDOR',
                        'SPB', 'M', 'LPV', 'roAp']
    markers = ['^', 'D', 'D', 'v', 's', 'D', 'D', 'D', 'D', 's', 'D', 'D', 'D', 'o', 'p', 'o', 'o']
    colors = ['k', 'k', 'k', '#00c000', 'r', 'r', 'r', 'r', 'r', 'm', 'm', 'm', '#00c0ff', (1, .7, 0), 'w', 'w', 'r']
    sizes = [50, 40, 40, 40, 50, 40, 40, 40, 40, 50, 50, 50, 40, 40, 45, 40, 40]
    labels = ['', "BCEP, BCEPS", '', 'DSCT', 'SR', "SRA, SRB, SRC, SRD", '', '', '', 'RR', "RRAB, RRC", '', 'GDOR',
              'SPB', '', 'LPV', 'roAp']
    for i in range(len(variabletype)):
        if i in [2, 6, 7, 8, 11]:
            my_label = None
        else:
            my_label = "%s" % labels[i]
        plt.scatter(variablesdf[x].loc[variablesdf.loc[:, 'Type'] == variabletype[i]], variablesdf[y]
                    .loc[variablesdf.loc[:, 'Type'] == variabletype[i]], facecolor=colors[i], marker=markers[i],
                    s=sizes[i], label=my_label, edgecolor='k')
        print "plotting %s as %s%s" % (variabletype[i], colors[i], markers[i])
    return
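A small hand-made example of the variablesdf input, assuming only that it carries the Type column plus the default B_V / M_V colour-magnitude columns the function reads, and that it runs in the same Python 2 module as the definition above.

import pandas as pd
import matplotlib.pyplot as plt

# Toy table with one star per variability class; Type strings must match the
# entries of the default variabletype list for the marker/colour lookup to apply.
variablesdf = pd.DataFrame({
    'B_V':  [0.05, 0.30, 1.45, 0.60, 1.60],
    'M_V':  [1.20, 2.50, -0.50, 0.80, -1.00],
    'Type': ['DSCT', 'GDOR', 'SR', 'RRAB', 'M'],
})

plt.figure(figsize=(8, 7))
plot_variable_stars(variablesdf)     # one scatter call per class; absent classes plot empty sets
plt.gca().invert_yaxis()             # HR-diagram convention: bright end at the top
plt.legend(loc='best', scatterpoints=1)
plt.show()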
[ "def get_variable_stars(df_data, df_variables_names, variabletype=None):\n if variabletype is None:\n variabletype = ['CEP', 'BCEP', 'BCEPS', 'DSCT', 'SR', 'SRA', 'SRB', 'SRC', 'SRD', 'RR', 'RRAB', 'RRC',\n 'GDOR', 'SPB', 'M', 'LPV']\n\n print \"Selecting variable stars..\"\n # create a string \"var_type\" of variabletype separated by or ('|').\n # var_type = \"|\".join(variabletype)\n # check if var_type is contained in Type (any or all, partial or not)\n # are_variables = df_variables_names[df_variables_names.Type.str.contains(var_type) == True] # fails with \"is True\"\n # are_variables.Type = are_variables.Type.str.replace(\".*BCEP.*\", \"BCEP\") # rename all types containing 'BCEP'\n are_variables = df_variables_names[df_variables_names.Type.isin(variabletype)]\n types_df = are_variables[['hip', 'tycho2_id', 'source_id', 'Type', 'Name']]\n print \"..Done\"\n print \"Preparing subselection of initial DataFrame..\"\n print \"..Making Hipparcos list..\"\n hip_list = are_variables.hip.tolist()\n hip_list = np.array(hip_list)\n hip_list = hip_list[~np.isnan(hip_list)] # remove the nans\n hip_list = list(hip_list)\n print \"..Making Tycho2 list..\"\n tycho2_list = are_variables.tycho2_id.tolist()\n tycho2_list = np.array(tycho2_list)\n tycho2_list = tycho2_list[tycho2_list != 'nan'] # tycho2 is str\n tycho2_list = list(tycho2_list)\n print \"..Done\\n----------\"\n\n print \"Getting Hipparcos and Tycho variable objects..\"\n hip_objects = df_data[df_data.hip.isin(hip_list)]\n hip_objects = pd.merge(hip_objects, types_df, on='hip', how='inner')\n if 'tycho2_id_y' in hip_objects.columns:\n hip_objects = hip_objects.drop('tycho2_id_y', axis=1)\n hip_objects = hip_objects.rename(columns={'hip_x': 'hip', 'tycho2_id_x': 'tycho2_id'})\n\n tycho_objects = df_data[df_data.tycho2_id.isin(tycho2_list)]\n tycho_objects = pd.merge(tycho_objects, types_df, on='tycho2_id', how='inner')\n if 'hip_y' in tycho_objects.columns:\n tycho_objects = tycho_objects.drop('hip_y', axis=1)\n tycho_objects = tycho_objects.rename(columns={'hip_x': 'hip', 'tycho2_id_x': 'tycho2_id'})\n print \"..Done\\n----------\"\n\n print \"Getting roAp stars from file..\"\n # roAP_names.csv contains tycho2_id names of roAp stars\n with open('roAP/roAP_names.csv') as roAP_file:\n roap_objects_list = roAP_file.readlines()\n roap_objects_list = [line.rstrip() for line in roap_objects_list]\n roap_objects = df_data[df_data.tycho2_id.isin(roap_objects_list)]\n column_number = len(roap_objects.columns)\n roap_objects.insert(column_number, 'Type', 'roAp')\n print \"..Done\\n----------\"\n\n variable_df = pd.concat([hip_objects, tycho_objects, roap_objects], axis=0, ignore_index=True)\n variable_df.source_id = variable_df.source_id.fillna(-9999).astype(int)\n\n return variable_df", "def example_star(starname=\"star\"):\n\n expected_radii = R\n steps = 100\n step_size = expected_radii / steps\n\n steps = 7000\n step_size = 1400\n\n\n sun_like_star = star.Star(\n step_size=step_size,\n X=0.55,\n Y=0.43,\n Z=0.02,\n cent_density=162200,\n cent_opticaldepth=2/3,\n #Initial guess for total mass and radius of star based on desired position on HR diagram\n #Define M, R above!\n cent_temperature=2*G*M*Mp/(3*Kb*R),\n cent_radii=0.01\n )\n\n sun_like_star.solve()\n print(sun_like_star)\n\n items = [\"density\", \"temperature\", \"mass\", \"luminosity\", \"opticaldepth\"]\n # This nixt line is needed for saving data to a text file\n array2D = [[] for i in range(len(items) + 1)]\n array2D[0] = sun_like_star.properties['radius'] # Units of solar 
radius\n n = 1\n print()\n\n # Get user input on wether or not they want to see the plots\n plotshow = input(\"Show individual plots (y/n): \")\n\n for item in items:\n print(item)\n array2D[n] = sun_like_star.properties[item].data(0)\n plt.plot(\n array2D[0], # Units of solar radius\n array2D[n], # Plots the value of item\n label=item)\n plt.xlabel(\"Solar Radii\")\n plt.title(item)\n if plotshow == 'y':\n plt.show()\n plt.close()\n n += 1\n # Get user decision on saving star data\n save = input(\"Save this star? (y/n): \")\n # Save the data to a text file\n if save == 'y':\n data.array2D2txt(array2D, [\"radius\"] + items, starname)", "def query_variable_star_catalogs(self):\n # tabs = self.query_vizier_param('var')\n # if len(tabs)>1:\n # print(tabs)\n # print(\"***Target has a variable star flag!***\")\n # self.variable_star = True\n all_tabs = self.query_vizier(verbose=False)\n\n keys = [\n \"V/150/variabls\",\n \"J/AcA/66/421/ecl\",\n \"B/gcvs/gcvs_cat\",\n \"B/vsx/vsx\",\n \"J/AJ/156/234/table4\",\n \"J/MNRAS/488/4905/table2\",\n \"J/AJ/155/39/Variables\",\n ]\n for n, tab in enumerate(all_tabs.keys()):\n for key in keys:\n if tab in key:\n d = all_tabs[n].to_pandas().squeeze()\n print(f\"{key}:\\n{d}\")\n self.variable_star = True\n\n # check for `var` in catalog title\n idx = [\n n if \"var\" in t._meta[\"description\"] else False\n for n, t in enumerate(all_tabs)\n ]\n for i in idx:\n if i:\n tab = all_tabs[i]\n s = tab.to_pandas().squeeze().str.decode(\"ascii\")\n print(f\"\\nSee also: {tab._meta['name']}\\n{s}\")\n self.variable_star = True", "def plot_data_types(self, variable, **kwargs):\n return self.visualizer.plot_data_types(variable, **kwargs)", "def stars(self, magnitude=20):\n # Get the stars that are visible within this chart.\n thestars = []\n for s in self.hip_stars:\n if not s: continue\n hip_id, mag, ra, dec, bv = s\n if mag>magnitude: continue\n if dec<min(self.inner_dec, self.outer_dec): continue\n if dec>max(self.inner_dec, self.outer_dec): continue\n thestars.append(s)\n # This should sort them by increasing magnitude (brightest first).\n thestars.sort(key=lambda a:a[1])\n if not thestars: return\n # Set the least bright magnitude.\n self.dimmest_mag = math.floor(thestars[-1][1])\n # Create the star group.\n star_g = self.make_element(self.centered, 'g', (\n 'stroke', 'none'), ('fill', 'black'), (\n 'clip-path', 'url(#innerClipPath)'))\n for hip_id, mag, ra, dec, bv in thestars:\n x, y = self.radec2xy(ra, dec)\n self.make_element(star_g, 'circle', (\n 'cx', x), ('cy', y), ('r', self.starsize(hip_id)))", "def stars(spec_types=(\"A0V\"), mags=(0), filter_name=\"Ks\",\n x=None, y=None, **kwargs):\n\n if isinstance(spec_types, str):\n spec_types = [spec_types]\n\n if isinstance(mags, (int, float)):\n mags = [mags] * len(spec_types)\n\n if len(mags) > 1 and len(spec_types) == 1:\n spec_types *= len(mags)\n elif len(mags) != len(spec_types):\n raise ValueError(\"len(mags) != len(spec_types)\")\n\n mags = np.array(mags)\n\n if x is None:\n x = np.zeros(len(mags))\n if y is None:\n y = np.zeros(len(mags))\n\n # only pull in the spectra for unique spectral types\n\n # assign absolute magnitudes to stellar types in cluster\n unique_types = np.unique(spec_types)\n lam, spec = SED(unique_types, filter_name=filter_name, magnitude=[0]*len(unique_types))\n\n # get the references to the unique stellar types\n ref_dict = {i : j for i, j in zip(unique_types, np.arange(len(unique_types)))}\n if isinstance(spec_types, (list, tuple, np.ndarray)):\n ref = np.array([ref_dict[i] for i 
in spec_types])\n else:\n ref = np.zeros(len(mags))\n\n weight = 10**(-0.4*mags)\n\n units = \"ph/s/m2\"\n\n src = Source(lam=lam, spectra=spec,\n x=x, y=y,\n ref=ref, weight=weight,\n units=units, **kwargs)\n\n src.info[\"object\"] = \"stars\"\n src.info[\"spec_types\"] = spec_types\n src.info[\"magnitudes\"] = mags\n src.info[\"filter_name\"] = filter_name\n\n return src", "def star(spec_type=\"A0V\", mag=0, filter_name=\"Ks\", x=0, y=0, **kwargs):\n\n thestar = stars([spec_type], [mag], filter_name, [x], [y], **kwargs)\n return thestar", "def showstars(self, verbose=True):\n try:\n import f2n\n except ImportError:\n print(\"Couldn't import f2n -- install it !\")\n return\n\n if verbose:\n print(\"Writing png ...\")\n myimage = f2n.fromfits(self.filepath, verbose=False)\n # myimage.rebin(int(myimage.xb/1000.0))\n myimage.setzscale(\"auto\", \"auto\")\n myimage.makepilimage(\"log\", negative=False)\n # myimage.upsample()\n myimage.drawstarlist(self.starlist, r=8, autocolour=\"flux\")\n myimage.writetitle(os.path.basename(self.filepath))\n # myimage.writeinfo([\"This is a demo\", \"of some possibilities\",\n # \"of f2n.py\"], colour=(255,100,0))\n if not os.path.isdir(\"alipy_visu\"):\n os.makedirs(\"alipy_visu\")\n myimage.tonet(os.path.join(\"alipy_visu\", self.name + \"_stars.png\"))", "def create_plot(x_var, y_var):\r\n\r\n FILE_PATH = 'application/star_data.csv'\r\n TARGET_VAR = 'star_type'\r\n SIZE_VAR = 'r_clipped'\r\n WIDTH = 1000\r\n HEIGHT = 600\r\n\r\n # Get the data\r\n df = pd.read_csv(FILE_PATH)\r\n fig = px.scatter(df, x=x_var, y=y_var, color=TARGET_VAR, size=SIZE_VAR, \r\n width=WIDTH, height=HEIGHT)\r\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\r\n\r\n return graphJSON", "def plot_stars(ax, stars, zone_bounds, zbounds): \n\tcmap = plt.get_cmap(CMAP) \n\tstars = stars.filter(\"zone_final\", \">=\", zone_bounds[0]) \n\tstars = stars.filter(\"zone_final\", \"<=\", zone_bounds[1]) \n\tstars = stars.filter(\"abszfinal\", \">=\", zbounds[0]) \n\tstars = stars.filter(\"abszfinal\", \"<=\", zbounds[1]) \n\tstars = stars.filter(\"mass\", \">\", 1) \n\tmed_mass = np.median(stars[\"mass\"]) \n\tstars[\"size\"] = [i[\"mass\"] / med_mass * 20 * (1 - \n\t\tvice.cumulative_return_fraction(i[\"age\"])) for i in stars] \n\treturn ax.scatter(\n\t\tstars[\"[%s/H]\" % (REF_ELEMENT)], \n\t\tstars[\"[%s/%s]\" % (SEC_ELEMENT, REF_ELEMENT)], \n\t\tc = stars[\"age\"], \n\t\ts = stars[\"size\"], \n\t\tcmap = cmap, \n\t\tvmin = 0, \n\t\tvmax = 13.8 \n\t)", "def get_safety_vars_plot(self):\n if 'safety_vars_stats' not in self.stats:\n raise ValueError('No safety vars statistics present in this evaluator.')\n\n safety_vars = self.stats['safety_vars_stats'][0].keys()\n n_plots = len(safety_vars)\n fig, axes = plt.subplots(n_plots, 1, figsize=(8, 6 * n_plots))\n\n for idx, var in enumerate(safety_vars):\n series = collections.defaultdict(list)\n for ep in self.stats['safety_vars_stats']:\n for stat in ep[var]:\n series[stat].append(ep[var][stat])\n ax = axes[idx]\n for stat in ['min', 'max']:\n ax.plot(np.squeeze(np.array(series[stat])), label=stat)\n x = range(len(series['mean']))\n\n mean = np.squeeze(np.array(series['mean']))\n std_dev = np.squeeze(np.array(series['std_dev']))\n ax.plot(x, mean, label='Value')\n ax.fill_between(\n range(len(series['mean'])), mean - std_dev, mean + std_dev, alpha=0.3)\n ax.set_title('Stats for {}'.format(var))\n ax.legend()\n ax.spines['top'].set_visible(False)\n\n ax.xaxis.set_ticks_position('bottom')\n ax.set_xlabel('Episode #')\n 
ax.set_ylabel('Magnitude')\n ax.plot()\n return fig", "def init_variables_page(self):\n # create elements for variables!!\n self.variables = list(set(re.findall(\n r\"([a-zA-Z_]+\\d*\\b)(?:[^(]|$)\",\"+\".join(self.func + ([aux[1] for aux in self.auxiliaries])))))\n print self.variables\n self.variables = filter(lambda x:x not in[\"x\",\"y\"], self.variables)\n print \"Auxiliaries: \" + str(self.auxiliaries)\n self.variables = filter(lambda x:x not in [a[0] for a in self.auxiliaries], self.variables)\n def filter_func(x):\n if x in [\"abs\", \"sin\", \"cos\", \"log\", \"tan\", \"e\", \"ln\"]:\n return False\n if re.match(\"^e[0-9]*$\", x) is not None:\n return False\n return True\n self.variables = filter(filter_func, self.variables)\n print \"Variables: \" + str(self.variables)\n # an auxiliary function to sort the variables properly\n def varkey(x):\n return x\n self.variables.sort(key=varkey)\n self.sliders = []\n self.xradios = []\n self.yradios = []\n for v in self.variables:\n # a whole container for each variable is necessary\n newvbox = gtk.VBox()\n self.variable_hbox.add(newvbox)\n # description\n newvbox.pack_start(gtk.Label(v), False, False)\n # options for x\n group = None if len(self.xradios)==0 else self.xradios[0]\n newx = gtk.RadioButton(group, \"\")\n self.xradios.append(newx)\n newvbox.pack_start(newx, False, False)\n newx.connect(\"toggled\", self.adjust_and_plot)\n # options for x\n group = None if len(self.yradios)==0 else self.yradios[0]\n newy = gtk.RadioButton(group, \"\")\n self.yradios.append(newy)\n newvbox.pack_start(newy, False, False)\n newy.connect(\"toggled\", self.adjust_and_plot)\n # slider\n newadjustment = self.adjustment_from_range(self.urange)\n if v[0].lower() == \"h\":\n newadjustment = self.adjustment_from_range(self.hrange)\n newadjustment.connect(\"value_changed\", self.redraw)\n self.sliders.append(gtk.VScale(newadjustment))\n self.sliders[-1].set_size_request(10,200)\n self.sliders[-1].set_inverted(True)\n newvbox.add(self.sliders[-1])\n self.xvar = self.variables[0]\n self.yvar = self.variables[0]", "def read_stars(self):\n if self.hip_stars: return\n all_stars = list(hipparcos.stars())\n self.hip_stars = [None]*(max(s[0] for s in all_stars)+1)\n for s in all_stars: self.hip_stars[s[0]] = s", "def single_metric_plot(self, df, x_variable, ax, av_method,\n rho = None, markers = True, x_jitter = None):\n v_cols = ['length', 'sample_size', 'tool', 'polytomies']\n v_order = df[v_cols].drop_duplicates() # find unique combinations\n # sort for display\n v_order = v_order.sort_values(v_cols, ascending=[False, True, True, False])\n ss_order = {v:k for k,v in enumerate(v_order.sample_size.unique())}\n l_order = {v:k for k,v in enumerate(v_order.length.unique())}\n for i, r in enumerate(v_order.itertuples()):\n query = []\n query.append(\"length == @r.length\")\n query.append(\"sample_size == @r.sample_size\")\n query.append(\"tool == @r.tool\")\n query.append(\"polytomies == @r.polytomies\")\n line_data = df.query(\"(\" + \") and (\".join(query) + \")\")\n if not line_data.empty:\n if len(v_order.length.unique()) > 1:\n # all tsinfer tools: use colours for length for polytomy format\n colour = self.length_format[r.tool][l_order[r.length]][\"col\"]\n else:\n # no variable lengths: use standard tool colours\n colour = self.tools_format[r.tool][\"col\"]\n x = line_data[x_variable]\n if x_jitter:\n if x_jitter == 'log':\n x *= 1 + (2*i/len(v_order)-1) * (max(x)/min(x))/5000\n else:\n x += (2 * i - 1) * (max(x)-min(x))/400\n ax.errorbar(\n x, 
line_data.treedist_mean,\n yerr=line_data.treedist_se if self.error_bars else None,\n linestyle=self.polytomy_and_averaging_format[r.polytomies][av_method][\"linestyle\"],\n fillstyle=self.sample_size_format[ss_order[r.sample_size]]['fillstyle'],\n color=colour,\n marker=self.tools_format[r.tool]['mark'] if markers else None,\n elinewidth=1)\n if rho is not None:\n ax.axvline(x=rho, color = 'gray', zorder=-1, linestyle=\":\", linewidth=1)\n ax.text(rho, ax.get_ylim()[1]/40, r'$\\mu=\\rho$',\n va=\"bottom\", ha=\"right\", color='gray', rotation=90)\n return v_order", "def measure_star_shapes(df, image_file, noweight, wcs, use_ngmix, fwhm, logger):\n logger.info('Read in stars in file: %s',image_file)\n\n ind = df.index[df['star_flag'] == 1]\n logger.info('ind = %s',ind)\n n_psf = len(ind)\n logger.info('n_psf = %s',n_psf)\n\n df['obs_dx'] = [ -999. ] * len(df)\n df['obs_dy'] = [ -999. ] * len(df)\n df['obs_e1'] = [ -999. ] * len(df)\n df['obs_e2'] = [ -999. ] * len(df)\n df['obs_T'] = [ -999. ] * len(df)\n df['obs_flux'] = [ -999. ] * len(df)\n df['obs_flag'] = [ NOT_STAR ] * len(df)\n df.loc[ind, 'obs_flag'] = 0\n\n if 'reserve' in df:\n df.loc[df['reserve'], 'obs_flag'] |= RESERVED\n df.loc[~df['use'] & ~df['reserve'], 'obs_flag'] |= NOT_USED\n else:\n df.loc[~df['use'], 'obs_flag'] |= NOT_USED\n\n full_image = galsim.fits.read(image_file, hdu=0)\n\n if wcs is not None:\n full_image.wcs = wcs\n\n if not noweight:\n full_weight = galsim.fits.read(image_file, hdu=2)\n full_weight.array[full_weight.array < 0] = 0.\n\n stamp_size = 48\n\n for i in ind:\n x = df['x'].iloc[i]\n y = df['y'].iloc[i]\n\n #print('Measure shape for star at ',x,y)\n b = galsim.BoundsI(int(x)-stamp_size/2, int(x)+stamp_size/2,\n int(y)-stamp_size/2, int(y)+stamp_size/2)\n b = b & full_image.bounds\n im = full_image[b]\n\n if noweight:\n wt = None\n else:\n wt = full_weight[b]\n\n if use_ngmix:\n dx, dy, e1, e2, T, flux, flag = ngmix_fit(im, wt, fwhm, x, y, logger)\n else:\n dx, dy, e1, e2, T, flux, flag = hsm(im, wt, logger)\n #logger.info('ngmix measurement: (%f,%f,%f,%f,%f,%f).',dx,dy,e1,e2,T,flux)\n if np.any(np.isnan([dx,dy,e1,e2,T,flux])):\n logger.info(' *** NaN detected (%f,%f,%f,%f,%f,%f).',dx,dy,e1,e2,T,flux)\n flag |= BAD_MEASUREMENT\n else:\n df.loc[i, 'obs_dx'] = dx\n df.loc[i, 'obs_dy'] = dy\n df.loc[i, 'obs_e1'] = e1\n df.loc[i, 'obs_e2'] = e2\n df.loc[i, 'obs_T'] = T\n df.loc[i, 'obs_flux'] = flux\n df.loc[i, 'obs_flag'] |= flag\n logger.info('final obs_flag = %s',df['obs_flag'][ind].values)\n #print('df[ind] = ',df.loc[ind].describe())\n flag_outliers(df, ind, 'obs', 4., logger)\n\n # Any stars that weren't measurable here, don't use for PSF fitting.\n df.loc[df['obs_flag']!=0, 'use'] = False", "def draw_points(stars, errors, ax):\n\n # Open the file of common star names to HIP numbers. 
Store this in names dictionary.\n names = {}\n with open(os.environ['HOKU_PROJECT_PATH'] + '/data/star-names.dat') as names_f:\n names_f.readline()\n\n for line in names_f:\n n, h = line.split(',')[0].strip(), line.split(',')[1].strip()\n names.update({h: n})\n\n # Plot clean data set as black.\n for star in stars:\n if quiver_flag:\n ax.quiver(0, 0, 0, star[0], star[1], star[2], arrow_length_ratio=0.0000001, alpha=0.2)\n ax.scatter(star[0], star[1], star[2], marker='*', color='k', s=100)\n\n if str(int(star[3])) in names:\n ax.text(star[0], star[1], star[2], names[str(int(star[3]))])\n else:\n ax.text(star[0], star[1], star[2], 'HIP{}'.format(int(star[3])))\n\n # Plot error models with specified colors.\n for model in errors:\n for error in model:\n if quiver_flag:\n ax.quiver(0, 0, 0, error[0], error[1], error[2], arrow_length_ratio=0.0000001, alpha=0.2)\n ax.scatter(error[0], error[1], error[2], marker='*', color=error[4])\n ax.text(error[0], error[1], error[2], 'ERR{}'.format(int(error[3])))", "def get_star_info(star):\n\n return [star.name, str(star.temperature.value),\n str(star.metallicity), str(star.logg),\n str(star.absoluteMagnitude), str(star.apparentMagnitude),\n str(star.color), str(star.numObsPre), str(star.numObsPost)]", "def plot_star_classes(obj_catalog):\n\n fig = plt.figure(num=None,figsize=(8,8), dpi=100)\n ax = fig.add_subplot(1,1,1)\n\n phot_class = obj_catalog.phot_star_class\n sclass = obj_catalog.star_class\n phot_class_num = np.zeros(obj_catalog.shape[0])\n sclass_num = np.zeros(obj_catalog.shape[0])\n\n star_classes = ['WD',\\\n 'O','O8','O9','OB','B0','B1','B2','B3','B5','B6','B7','B8','B9',\\\n 'A0','A1','A2','A3','A4','A5','A6','A8','A9',\\\n 'F0','F2','F3','F5','F6','F8','F9',\\\n 'G0','G1','G2','G3','G4','G5','G8','G9',\\\n 'K0','K1','K2','K3','K4','K5','K7',\\\n 'M0','M1','M2','M3','M4','M5','M6','M7','M8','M9', \\\n 'L0','L1','L2','L3','L4','L5','L9','Ldwarf', \\\n 'T','other','C']\n print len(star_classes)\n\n star_dict = dict(zip(star_classes,np.arange(len(star_classes))))\n\n # print phot_class.value_counts()\n\n for i in range(len(phot_class)):\n print phot_class[i], star_dict[phot_class[i]], sclass[i],star_dict[sclass[i]]\n phot_class_num[i] = star_dict[phot_class[i]]\n sclass_num[i] = star_dict[sclass[i]]\n\n #ax.plot(sclass_num,phot_class_num,'.')\n\n cmap = plt.cm.Blues\n cmap.set_bad('0.85',1.0)\n\n cax = plt.hist2d(sclass_num,phot_class_num, bins=65,range = [[0,65], [0,65]], norm = LogNorm(), cmap=cmap, zorder=0)\n cbar = plt.colorbar(ticks=[1,5,10,15,20,25,30,40])\n cbar.ax.set_yticklabels([1,5,10,15,20,25,30,40],fontsize=12)\n\n ax.plot(np.arange(65),np.arange(65),'r')\n\n plt.xticks(np.arange(len(star_classes)),star_classes,fontsize=8,rotation='vertical')\n plt.yticks(np.arange(len(star_classes)),star_classes,fontsize=8)\n\n plt.grid(True)\n return plt", "def plot_shape(self, theta=0):\n x = np.zeros(self.nz)\n y_re = np.zeros(self.nz)\n y_ri = np.zeros(self.nz)\n for i in range(0, self.nz):\n x[i] = i * self.dz\n y_re[i] = self.re[i][theta]\n y_ri[i] = self.ri[i][theta]\n p = figure(\n title=\"Shapes of stator and rotor along Z; Theta=\" + str(theta),\n x_axis_label=\"Points along Z\",\n y_axis_label=\"Radial direction\",\n )\n p.line(x, y_re, line_width=2, color=\"red\")\n p.line(x, y_ri, line_width=2, color=\"blue\")\n return p" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Child function of plot_variable_stars. Process the DataFrame to select only stars marked as 'var_type' variable stars.
def get_variable_stars(df_data, df_variables_names, variabletype=None):
    if variabletype is None:
        variabletype = ['CEP', 'BCEP', 'BCEPS', 'DSCT', 'SR', 'SRA', 'SRB', 'SRC', 'SRD', 'RR', 'RRAB', 'RRC',
                        'GDOR', 'SPB', 'M', 'LPV']

    print "Selecting variable stars.."
    # create a string "var_type" of variabletype separated by or ('|').
    # var_type = "|".join(variabletype)
    # check if var_type is contained in Type (any or all, partial or not)
    # are_variables = df_variables_names[df_variables_names.Type.str.contains(var_type) == True]  # fails with "is True"
    # are_variables.Type = are_variables.Type.str.replace(".*BCEP.*", "BCEP")  # rename all types containing 'BCEP'
    are_variables = df_variables_names[df_variables_names.Type.isin(variabletype)]
    types_df = are_variables[['hip', 'tycho2_id', 'source_id', 'Type', 'Name']]
    print "..Done"
    print "Preparing subselection of initial DataFrame.."
    print "..Making Hipparcos list.."
    hip_list = are_variables.hip.tolist()
    hip_list = np.array(hip_list)
    hip_list = hip_list[~np.isnan(hip_list)]  # remove the nans
    hip_list = list(hip_list)
    print "..Making Tycho2 list.."
    tycho2_list = are_variables.tycho2_id.tolist()
    tycho2_list = np.array(tycho2_list)
    tycho2_list = tycho2_list[tycho2_list != 'nan']  # tycho2 is str
    tycho2_list = list(tycho2_list)
    print "..Done\n----------"

    print "Getting Hipparcos and Tycho variable objects.."
    hip_objects = df_data[df_data.hip.isin(hip_list)]
    hip_objects = pd.merge(hip_objects, types_df, on='hip', how='inner')
    if 'tycho2_id_y' in hip_objects.columns:
        hip_objects = hip_objects.drop('tycho2_id_y', axis=1)
        hip_objects = hip_objects.rename(columns={'hip_x': 'hip', 'tycho2_id_x': 'tycho2_id'})

    tycho_objects = df_data[df_data.tycho2_id.isin(tycho2_list)]
    tycho_objects = pd.merge(tycho_objects, types_df, on='tycho2_id', how='inner')
    if 'hip_y' in tycho_objects.columns:
        tycho_objects = tycho_objects.drop('hip_y', axis=1)
        tycho_objects = tycho_objects.rename(columns={'hip_x': 'hip', 'tycho2_id_x': 'tycho2_id'})
    print "..Done\n----------"

    print "Getting roAp stars from file.."
    # roAP_names.csv contains tycho2_id names of roAp stars
    with open('roAP/roAP_names.csv') as roAP_file:
        roap_objects_list = roAP_file.readlines()
        roap_objects_list = [line.rstrip() for line in roap_objects_list]
    roap_objects = df_data[df_data.tycho2_id.isin(roap_objects_list)]
    column_number = len(roap_objects.columns)
    roap_objects.insert(column_number, 'Type', 'roAp')
    print "..Done\n----------"

    variable_df = pd.concat([hip_objects, tycho_objects, roap_objects], axis=0, ignore_index=True)
    variable_df.source_id = variable_df.source_id.fillna(-9999).astype(int)

    return variable_df
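A sketch of the two inputs get_variable_stars expects, assuming df_data is a TGAS-style photometric table keyed by hip / tycho2_id / source_id and df_variables_names is a variability catalogue cross-matched on the same identifiers; the roAP/roAP_names.csv file the function reads unconditionally is stubbed out here so the call can run, and the module is Python 2 as above. All names and values below are illustrative, not from the original data.

import os
import numpy as np
import pandas as pd

# The function always opens 'roAP/roAP_names.csv'; create a placeholder so the sketch runs.
if not os.path.isdir('roAP'):
    os.makedirs('roAP')
with open('roAP/roAP_names.csv', 'w') as f:
    f.write('9999-9999-1\n')  # dummy tycho2_id that matches nothing in df_data

# Photometric table: one Hipparcos-identified star, one Tycho-2-identified star.
df_data = pd.DataFrame({
    'hip':       [1234, np.nan],
    'tycho2_id': ['nan', '4109-638-1'],  # missing Tycho-2 ids stored as the string 'nan', as the filter assumes
    'source_id': [11111, 22222],
    'B_V':       [0.71, 1.05],
    'M_V':       [2.40, 1.10],
})

# Variability catalogue with the Type and Name columns the function selects.
df_variables_names = pd.DataFrame({
    'hip':       [1234, np.nan],
    'tycho2_id': ['nan', '4109-638-1'],
    'source_id': [11111, 22222],
    'Type':      ['DSCT', 'CEP'],
    'Name':      ['V1', 'V2'],
})

variable_df = get_variable_stars(df_data, df_variables_names)  # yields one DSCT row and one CEP row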
[ "def plot_variable_stars(variablesdf, variabletype=None, x='B_V', y='M_V'):\n if variabletype is None:\n variabletype = ['CEP', 'BCEP', 'BCEPS', 'DSCT', 'SR', 'SRA', 'SRB', 'SRC', 'SRD', 'RR', 'RRAB', 'RRC', 'GDOR',\n 'SPB', 'M', 'LPV', 'roAp']\n markers = ['^', 'D', 'D', 'v', 's', 'D', 'D', 'D', 'D', 's', 'D', 'D', 'D', 'o', 'p', 'o', 'o']\n colors = ['k', 'k', 'k', '#00c000', 'r', 'r', 'r', 'r', 'r', 'm', 'm', 'm', '#00c0ff', (1, .7, 0), 'w', 'w', 'r']\n sizes = [50, 40, 40, 40, 50, 40, 40, 40, 40, 50, 50, 50, 40, 40, 45, 40, 40]\n labels = ['', \"BCEP, BCEPS\", '', 'DSCT', 'SR', \"SRA, SRB, SRC, SRD\", '', '', '', 'RR', \"RRAB, RRC\", '', 'GDOR',\n 'SPB', '', 'LPV', 'roAp']\n for i in range(len(variabletype)):\n if i in [2, 6, 7, 8, 11]:\n my_label = None\n else:\n my_label = \"%s\" % labels[i]\n plt.scatter(variablesdf[x].loc[variablesdf.loc[:, 'Type'] == variabletype[i]], variablesdf[y]\n .loc[variablesdf.loc[:, 'Type'] == variabletype[i]], facecolor=colors[i], marker=markers[i],\n s=sizes[i], label=my_label, edgecolor='k')\n print \"plotting %s as %s%s\" % (variabletype[i], colors[i], markers[i])\n return", "def query_variable_star_catalogs(self):\n # tabs = self.query_vizier_param('var')\n # if len(tabs)>1:\n # print(tabs)\n # print(\"***Target has a variable star flag!***\")\n # self.variable_star = True\n all_tabs = self.query_vizier(verbose=False)\n\n keys = [\n \"V/150/variabls\",\n \"J/AcA/66/421/ecl\",\n \"B/gcvs/gcvs_cat\",\n \"B/vsx/vsx\",\n \"J/AJ/156/234/table4\",\n \"J/MNRAS/488/4905/table2\",\n \"J/AJ/155/39/Variables\",\n ]\n for n, tab in enumerate(all_tabs.keys()):\n for key in keys:\n if tab in key:\n d = all_tabs[n].to_pandas().squeeze()\n print(f\"{key}:\\n{d}\")\n self.variable_star = True\n\n # check for `var` in catalog title\n idx = [\n n if \"var\" in t._meta[\"description\"] else False\n for n, t in enumerate(all_tabs)\n ]\n for i in idx:\n if i:\n tab = all_tabs[i]\n s = tab.to_pandas().squeeze().str.decode(\"ascii\")\n print(f\"\\nSee also: {tab._meta['name']}\\n{s}\")\n self.variable_star = True", "def plot_data_types(self, variable, **kwargs):\n return self.visualizer.plot_data_types(variable, **kwargs)", "def visual_summary(type_, df, col):\n if df[col].dtype == 'object':\n df[col].value_counts().plot(kind=type_)\n plt.show()\n else:\n df[col].plot(kind=type_)\n plt.show()", "def single_metric_plot(self, df, x_variable, ax, av_method,\n rho = None, markers = True, x_jitter = None):\n v_cols = ['length', 'sample_size', 'tool', 'polytomies']\n v_order = df[v_cols].drop_duplicates() # find unique combinations\n # sort for display\n v_order = v_order.sort_values(v_cols, ascending=[False, True, True, False])\n ss_order = {v:k for k,v in enumerate(v_order.sample_size.unique())}\n l_order = {v:k for k,v in enumerate(v_order.length.unique())}\n for i, r in enumerate(v_order.itertuples()):\n query = []\n query.append(\"length == @r.length\")\n query.append(\"sample_size == @r.sample_size\")\n query.append(\"tool == @r.tool\")\n query.append(\"polytomies == @r.polytomies\")\n line_data = df.query(\"(\" + \") and (\".join(query) + \")\")\n if not line_data.empty:\n if len(v_order.length.unique()) > 1:\n # all tsinfer tools: use colours for length for polytomy format\n colour = self.length_format[r.tool][l_order[r.length]][\"col\"]\n else:\n # no variable lengths: use standard tool colours\n colour = self.tools_format[r.tool][\"col\"]\n x = line_data[x_variable]\n if x_jitter:\n if x_jitter == 'log':\n x *= 1 + (2*i/len(v_order)-1) * (max(x)/min(x))/5000\n else:\n 
x += (2 * i - 1) * (max(x)-min(x))/400\n ax.errorbar(\n x, line_data.treedist_mean,\n yerr=line_data.treedist_se if self.error_bars else None,\n linestyle=self.polytomy_and_averaging_format[r.polytomies][av_method][\"linestyle\"],\n fillstyle=self.sample_size_format[ss_order[r.sample_size]]['fillstyle'],\n color=colour,\n marker=self.tools_format[r.tool]['mark'] if markers else None,\n elinewidth=1)\n if rho is not None:\n ax.axvline(x=rho, color = 'gray', zorder=-1, linestyle=\":\", linewidth=1)\n ax.text(rho, ax.get_ylim()[1]/40, r'$\\mu=\\rho$',\n va=\"bottom\", ha=\"right\", color='gray', rotation=90)\n return v_order", "def filter_only_data_ratios(self, dataframe):\r\n #dataframe df_scoring filtered with only ratios\r\n df_ratios_only = dataframe[[x for x in dataframe.columns \\\r\n if not str(x).count(\"_nb\") or \\\r\n str(x).count(\"Sujet\")]]\r\n return df_ratios_only", "def select_variables(df, dtype=\"numeric\"):\n numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\n if dtype == \"numeric\":\n subset = df.copy().select_dtypes(include = numerics)\n else:\n subset = df.copy().select_dtypes(include != numerics)\n return(subset)", "def plot_variables_nan(self, df):\n\n self.df_nan = pd.DataFrame({'Variables': df.isna().sum().index, 'Number_of_nan': df.isna().sum().values})\n self.df_nan.plot(x='Variables', y='Number_of_nan', kind='bar', legend=False, grid=True, figsize=(10, 5))\n plt.title('NaN for variable in data ')\n plt.ylabel('Count')\n plt.show()", "def create_plot(x_var, y_var):\r\n\r\n FILE_PATH = 'application/star_data.csv'\r\n TARGET_VAR = 'star_type'\r\n SIZE_VAR = 'r_clipped'\r\n WIDTH = 1000\r\n HEIGHT = 600\r\n\r\n # Get the data\r\n df = pd.read_csv(FILE_PATH)\r\n fig = px.scatter(df, x=x_var, y=y_var, color=TARGET_VAR, size=SIZE_VAR, \r\n width=WIDTH, height=HEIGHT)\r\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\r\n\r\n return graphJSON", "def identify_variable(adata):\n sc.pp.highly_variable_genes(adata,\n min_mean=0.0125, max_mean=3, min_disp=0.5)\n sc.pl.highly_variable_genes(adata)", "def display_cols(dataframe, type = 'category', num_samples = 7):\n\tmask = dataframe.dtypes == type\n\treturn dataframe.loc[:, mask].sample(num_samples)", "def plot_stars(ax, stars, zone_bounds, zbounds): \n\tcmap = plt.get_cmap(CMAP) \n\tstars = stars.filter(\"zone_final\", \">=\", zone_bounds[0]) \n\tstars = stars.filter(\"zone_final\", \"<=\", zone_bounds[1]) \n\tstars = stars.filter(\"abszfinal\", \">=\", zbounds[0]) \n\tstars = stars.filter(\"abszfinal\", \"<=\", zbounds[1]) \n\tstars = stars.filter(\"mass\", \">\", 1) \n\tmed_mass = np.median(stars[\"mass\"]) \n\tstars[\"size\"] = [i[\"mass\"] / med_mass * 20 * (1 - \n\t\tvice.cumulative_return_fraction(i[\"age\"])) for i in stars] \n\treturn ax.scatter(\n\t\tstars[\"[%s/H]\" % (REF_ELEMENT)], \n\t\tstars[\"[%s/%s]\" % (SEC_ELEMENT, REF_ELEMENT)], \n\t\tc = stars[\"age\"], \n\t\ts = stars[\"size\"], \n\t\tcmap = cmap, \n\t\tvmin = 0, \n\t\tvmax = 13.8 \n\t)", "def plot_type_of_topic(data_frame: pb.DataFrame) -> None:\n plt.interactive(False)\n plt.figure()\n data_frame.plot(kind='bar', x= data_frame['TopicID'])\n plt.show()", "def measure_star_shapes(df, image_file, noweight, wcs, use_ngmix, fwhm, logger):\n logger.info('Read in stars in file: %s',image_file)\n\n ind = df.index[df['star_flag'] == 1]\n logger.info('ind = %s',ind)\n n_psf = len(ind)\n logger.info('n_psf = %s',n_psf)\n\n df['obs_dx'] = [ -999. ] * len(df)\n df['obs_dy'] = [ -999. ] * len(df)\n df['obs_e1'] = [ -999. 
] * len(df)\n df['obs_e2'] = [ -999. ] * len(df)\n df['obs_T'] = [ -999. ] * len(df)\n df['obs_flux'] = [ -999. ] * len(df)\n df['obs_flag'] = [ NOT_STAR ] * len(df)\n df.loc[ind, 'obs_flag'] = 0\n\n if 'reserve' in df:\n df.loc[df['reserve'], 'obs_flag'] |= RESERVED\n df.loc[~df['use'] & ~df['reserve'], 'obs_flag'] |= NOT_USED\n else:\n df.loc[~df['use'], 'obs_flag'] |= NOT_USED\n\n full_image = galsim.fits.read(image_file, hdu=0)\n\n if wcs is not None:\n full_image.wcs = wcs\n\n if not noweight:\n full_weight = galsim.fits.read(image_file, hdu=2)\n full_weight.array[full_weight.array < 0] = 0.\n\n stamp_size = 48\n\n for i in ind:\n x = df['x'].iloc[i]\n y = df['y'].iloc[i]\n\n #print('Measure shape for star at ',x,y)\n b = galsim.BoundsI(int(x)-stamp_size/2, int(x)+stamp_size/2,\n int(y)-stamp_size/2, int(y)+stamp_size/2)\n b = b & full_image.bounds\n im = full_image[b]\n\n if noweight:\n wt = None\n else:\n wt = full_weight[b]\n\n if use_ngmix:\n dx, dy, e1, e2, T, flux, flag = ngmix_fit(im, wt, fwhm, x, y, logger)\n else:\n dx, dy, e1, e2, T, flux, flag = hsm(im, wt, logger)\n #logger.info('ngmix measurement: (%f,%f,%f,%f,%f,%f).',dx,dy,e1,e2,T,flux)\n if np.any(np.isnan([dx,dy,e1,e2,T,flux])):\n logger.info(' *** NaN detected (%f,%f,%f,%f,%f,%f).',dx,dy,e1,e2,T,flux)\n flag |= BAD_MEASUREMENT\n else:\n df.loc[i, 'obs_dx'] = dx\n df.loc[i, 'obs_dy'] = dy\n df.loc[i, 'obs_e1'] = e1\n df.loc[i, 'obs_e2'] = e2\n df.loc[i, 'obs_T'] = T\n df.loc[i, 'obs_flux'] = flux\n df.loc[i, 'obs_flag'] |= flag\n logger.info('final obs_flag = %s',df['obs_flag'][ind].values)\n #print('df[ind] = ',df.loc[ind].describe())\n flag_outliers(df, ind, 'obs', 4., logger)\n\n # Any stars that weren't measurable here, don't use for PSF fitting.\n df.loc[df['obs_flag']!=0, 'use'] = False", "def get_safety_vars_plot(self):\n if 'safety_vars_stats' not in self.stats:\n raise ValueError('No safety vars statistics present in this evaluator.')\n\n safety_vars = self.stats['safety_vars_stats'][0].keys()\n n_plots = len(safety_vars)\n fig, axes = plt.subplots(n_plots, 1, figsize=(8, 6 * n_plots))\n\n for idx, var in enumerate(safety_vars):\n series = collections.defaultdict(list)\n for ep in self.stats['safety_vars_stats']:\n for stat in ep[var]:\n series[stat].append(ep[var][stat])\n ax = axes[idx]\n for stat in ['min', 'max']:\n ax.plot(np.squeeze(np.array(series[stat])), label=stat)\n x = range(len(series['mean']))\n\n mean = np.squeeze(np.array(series['mean']))\n std_dev = np.squeeze(np.array(series['std_dev']))\n ax.plot(x, mean, label='Value')\n ax.fill_between(\n range(len(series['mean'])), mean - std_dev, mean + std_dev, alpha=0.3)\n ax.set_title('Stats for {}'.format(var))\n ax.legend()\n ax.spines['top'].set_visible(False)\n\n ax.xaxis.set_ticks_position('bottom')\n ax.set_xlabel('Episode #')\n ax.set_ylabel('Magnitude')\n ax.plot()\n return fig", "def var_type_info(var_type):\n pass", "def star(spec_type=\"A0V\", mag=0, filter_name=\"Ks\", x=0, y=0, **kwargs):\n\n thestar = stars([spec_type], [mag], filter_name, [x], [y], **kwargs)\n return thestar", "def explore_data(df: pd.DataFrame):\r\n \r\n class_counts_df = count_column_values(df)\r\n \r\n plot_df_data(class_counts_df)", "def extract_objects(df, header):\n # Check for unhandled source types and emit warning if any are present.\n valid_types = dict(point='pointSource',\n sersic2d='sersic')\n invalid_types = set(df['SOURCE_TYPE']) - set(valid_types)\n if invalid_types:\n warnings.warn(\"Instance catalog contains unhandled source types:\\n%s\\nSkipping 
these.\"\n % '\\n'.join(invalid_types))\n\n columns = ('uniqueId', 'galSimType',\n 'magNorm', 'sedFilepath', 'redshift',\n 'raJ2000', 'decJ2000',\n 'halfLightRadius',\n 'minorAxis',\n 'majorAxis',\n 'positionAngle', 'sindex',\n 'properMotionRa', 'properMotionDec',\n 'parallax', 'radialVelocity')\n\n # Process point sources and galaxies separately.\n source_type = 'point'\n stars = df.query(\"SOURCE_TYPE=='%s'\" % source_type)\n phosim_stars = pd.DataFrame(np.zeros((len(stars), len(columns))),\n index=stars.index,\n columns=columns)\n phosim_stars['uniqueId'] = pd.to_numeric(stars['VALUE']).tolist()\n phosim_stars['galSimType'] = valid_types[source_type]\n phosim_stars['magNorm'] = pd.to_numeric(stars['MAG_NORM']).tolist()\n phosim_stars['sedFilepath'] = stars['SED_NAME'].tolist()\n phosim_stars['redshift'] = pd.to_numeric(stars['REDSHIFT']).tolist()\n phosim_stars['raJ2000'] = pd.to_numeric(stars['RA']).tolist()\n phosim_stars['decJ2000'] = pd.to_numeric(stars['DEC']).tolist()\n phosim_stars['properMotionRa'] = pd.to_numeric(stars['PAR5']).tolist()\n phosim_stars['properMotionDec'] = pd.to_numeric(stars['PAR6']).tolist()\n phosim_stars['parallax'] = pd.to_numeric(stars['PAR7']).tolist()\n phosim_stars['radialVelocity'] = pd.to_numeric(stars['PAR8']).tolist()\n if len(phosim_stars) > 0:\n phosim_stars = extract_extinction(stars, phosim_stars, 1)\n\n mjd = ModifiedJulianDate(TAI=header['mjd'])\n raICRS, decICRS = applyProperMotion(phosim_stars.raJ2000.values,\n phosim_stars.decJ2000.values,\n phosim_stars.properMotionRa.values,\n phosim_stars.properMotionDec.values,\n phosim_stars.parallax.values,\n phosim_stars.radialVelocity.values,\n mjd=mjd)\n\n phosim_stars = phosim_stars.assign(raICRS=raICRS, decICRS=decICRS)\n\n source_type = 'sersic2d'\n galaxies = df.query(\"SOURCE_TYPE == '%s'\" % source_type)\n phosim_galaxies = pd.DataFrame(np.zeros((len(galaxies), len(columns))),\n index=galaxies.index,\n columns=columns)\n phosim_galaxies['uniqueId'] = pd.to_numeric(galaxies['VALUE']).tolist()\n phosim_galaxies['galSimType'] = valid_types[source_type]\n phosim_galaxies['magNorm'] = pd.to_numeric(galaxies['MAG_NORM']).tolist()\n phosim_galaxies['sedFilepath'] = galaxies['SED_NAME'].tolist()\n phosim_galaxies['redshift'] = pd.to_numeric(galaxies['REDSHIFT']).tolist()\n phosim_galaxies['raJ2000'] = pd.to_numeric(galaxies['RA']).tolist()\n phosim_galaxies['decJ2000'] = pd.to_numeric(galaxies['DEC']).tolist()\n phosim_galaxies['majorAxis'] = \\\n radiansFromArcsec(pd.to_numeric(galaxies['PAR1'])).tolist()\n phosim_galaxies['minorAxis'] = \\\n radiansFromArcsec(pd.to_numeric(galaxies['PAR2'])).tolist()\n phosim_galaxies['halfLightRadius'] = phosim_galaxies['majorAxis']\n phosim_galaxies['positionAngle'] = \\\n (np.pi/180.*pd.to_numeric(galaxies['PAR3'])).tolist()\n phosim_galaxies['sindex'] = pd.to_numeric(galaxies['PAR4']).tolist()\n n_gal = len(phosim_galaxies.raJ2000.values)\n phosim_galaxies = phosim_galaxies.assign(raICRS=phosim_galaxies.raJ2000,\n decICRS=phosim_galaxies.decJ2000,\n properMotionRa=np.zeros(n_gal),\n properMotionDec=np.zeros(n_gal),\n parallax=np.zeros(n_gal),\n radialVelocity=np.zeros(n_gal))\n\n if len(phosim_galaxies) > 0:\n phosim_galaxies = extract_extinction(galaxies, phosim_galaxies, 5)\n\n return pd.concat((phosim_stars, phosim_galaxies), ignore_index=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot a Cepheid at its reddened position on the HR diagram (assume that deredden_cepheids() has been used).
def plot_dereddening():
    extinction_coefficients = {'2365-2764-1': np.array([0.2622, 0.844]), '4109-638-1': np.array([0.0524, 0.1576]),
                               '2058-56-1': np.array([0.0751, 0.248]), '3642-2459-1': np.array([0.1907, 0.608]),
                               '3999-1391-1': np.array([0.3911, 1.2480]), '2607-1448-1': np.array([0.0430, 0.1310])}
    cepheids = {'2365-2764-1': np.array([0.959, 2.09]), '4109-638-1': np.array([0.705, 2.385]),
                '2058-56-1': np.array([1.222, 1.333]), '3642-2459-1': np.array([1.088, 2.0518]),
                '3999-1391-1': np.array([1.360, 1.2567]), '2607-1448-1': np.array([1.484, 0.6963])}
    periods = {'2365-2764-1': 1.61, '4109-638-1': 15.31, '2058-56-1': 63.08, '3642-2459-1': 1.86,
               '3999-1391-1': 24.98, '2607-1448-1': 8.54}
    max_periods = max(periods.values())
    new_positions_bv_mv = []  # in M_V vs B-V space
    colors = []
    theoretical_position = []
    for obj in extinction_coefficients.keys():
        # new_positions_bv_mv.append(cepheids[obj]-extinction_coefficients[obj])
        new_positions_bv_mv.append(cepheids[obj])
        colors.append(periods[obj]/max_periods)
        theoretical_position.append(-2.78*np.log10(periods[obj])-1.35)

    for pos in range(len(new_positions_bv_mv)):
        plt.scatter(new_positions_bv_mv[pos][0], new_positions_bv_mv[pos][1], marker='^', facecolor='w', s=40)
        plt.scatter(new_positions_bv_mv[pos][0], theoretical_position[pos], marker='o', facecolor='r', s=50)

    return new_positions_bv_mv, colors
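A short driver for plot_dereddening, assuming numpy (as np) and matplotlib.pyplot (as plt) are imported in the Python 2 module that defines it, as the function body requires. The function takes no arguments: it draws one white triangle per Cepheid at its catalogued colour-magnitude position and a red circle at the period-luminosity prediction, then returns the positions plus period-normalised colour weights.

import numpy as np               # plot_dereddening uses np.array and np.log10
import matplotlib.pyplot as plt

plt.figure(figsize=(8, 7))
positions, period_colours = plot_dereddening()
plt.xlabel(r'$BT-VT$ (mag)')
plt.ylabel(r'$M_{VT}$ (mag)')
plt.gca().invert_yaxis()         # HR-diagram convention: bright end at the top
plt.title('Tycho-2 Cepheids vs. the period-luminosity prediction')
plt.show()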
[ "def anharm_plot():\n set_tag(qdt, \"EjdivEc\", log=False)\n set_tag(qdt, \"Ej\", log=False)\n\n #qdt.epsinf=qdt.epsinf/3.72\n #qdt.Np=10\n #qdt.Ec=qdt.fq*0.1*h\n print qdt.max_coupling, qdt.coupling_approx\n anharm=qdt.call_func(\"anharm\", EjdivEc=EjdivEc)\n anharmp=qdt.call_func(\"lamb_shifted_anharm\", EjdivEc=EjdivEc)\n fq=qdt.call_func(\"fq\", Ej=EjdivEc*qdt.Ec)\n ls_fq=qdt.call_func(\"lamb_shifted_fq\", EjdivEc=EjdivEc)\n ls_fq2=qdt.call_func(\"lamb_shifted_fq2\", EjdivEc=EjdivEc)\n\n pl, pf=line(fq/qdt.f0, (anharmp/h-anharm/h)/(2.0*qdt.max_coupling), linewidth=0.5, color=\"black\", label=r\"$\\Delta_{2,1}-\\Delta_{1,0}$\")\n line(fq/qdt.f0, (ls_fq-fq)/(2.0*qdt.max_coupling), plotter=pl, color=\"blue\", linewidth=0.5, label=r\"$\\Delta_{1,0}$\")\n E0, E1, E2=qdt.call_func(\"transmon_energy_levels\", EjdivEc=EjdivEc, n_energy=3)\n fq2=(E2-E1)/h\n line(fq/qdt.f0, (ls_fq2-fq2)/(2.0*qdt.max_coupling), plotter=pl, color=\"red\", linewidth=0.5, label=r\"$\\Delta_{2,1}$\")\n pl.set_ylim(-1.0, 0.6)\n pl.set_xlim(0.7, 1.3)\n pl.xlabel=r\"$f_{10}/f_{IDT}$\"\n pl.ylabel=r\"$\\Delta/\\Gamma_{10}^{MAX}$\"\n pl.legend(loc='lower left')\n #fq=qdt.call_func(\"lamb_shifted_fq\", EjdivEc=EjdivEc)\n #line(EjdivEc, fq, plotter=pl, color=\"green\", linewidth=0.5)\n\n #line(EjdivEc, E1p, plotter=pl, color=\"green\", linewidth=0.5)\n #line(EjdivEc, E2p, plotter=pl, color=\"purple\", linewidth=0.5)\n return pl", "def plot_ecdf(data, reverse=False):\n cdf = ecdf(data, reverse=reverse)\n\n # Generate plot\n plt.plot(cdf.x, cdf.y, marker=\".\", linestyle=\"none\")\n\n # Make the margins nice\n plt.margins(0.02)\n\n # Label the axes\n plt.xlabel(\"data\")\n plt.ylabel(\"ECDF\")\n\n # Display the plot\n plt.show()", "def plot(self):\n plt.plot(self.dates,self.discharge)\n plt.title('Hidrograph')\n plt.ylabel('Discharge [$m^3$/$s$]')\n plt.show()", "def plot_cpid(ax, times, cpid, mode):\n from davitpy import pydarn\n from matplotlib.ticker import MultipleLocator\n from matplotlib.dates import SecondLocator\n from matplotlib.dates import date2num\n import numpy as np\n oldCpid = -9999999\n\n # Format the yaxis.\n ax.yaxis.tick_left()\n ax.yaxis.set_tick_params(direction='out')\n ax.set_ylim(bottom=0, top=1)\n ax.yaxis.set_minor_locator(MultipleLocator(1))\n ax.yaxis.set_tick_params(direction='out', which='minor')\n\n # Draw the axes.\n ax.plot_date(date2num(times), np.arange(len(times)),\n fmt='w', tz=None, xdate=True, ydate=False, alpha=0.0)\n\n # Label the CPIDs.\n for i in range(0, len(times)):\n if(cpid[i] != oldCpid):\n ax.plot_date([date2num(times[i]),date2num(times[i])],\n [0, 1], fmt='k-', tz=None, xdate=True, ydate=False)\n oldCpid = cpid[i]\n s = ' ' + pydarn.radar.radUtils.getCpName(oldCpid)\n istr = ' '\n if(mode[i] == 1): istr = ' IF'\n if(mode == 0): istr = ' RF'\n ax.text(times[i], .5, ' ' + str(oldCpid) + s + istr, ha='left',\n va='center', size=10)\n\n # Format the xaxis.\n xmin = date2num(times[0])\n xmax = date2num(times[len(times) - 1])\n xrng = (xmax - xmin)\n inter = int(round(xrng / 6. * 86400.))\n inter2 = int(round(xrng / 24. 
* 86400.))\n ax.xaxis.set_minor_locator(SecondLocator(interval=inter2))\n ax.xaxis.set_major_locator(SecondLocator(interval=inter))\n\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(0)\n\n # Identify the CPID axis with a label.\n fig = ax.get_figure()\n bb = ax.get_position()\n x0 = bb.x0\n y0 = bb.y0\n height = bb.height\n width = bb.width\n pos = [x0, y0, width, height]\n fig.text(pos[0] - .07, pos[1] + pos[3] / 2., 'CPID', ha='center',\n va='center', size=8.5, rotation='vertical')\n ax.set_yticks([])", "def anharm_plot2():\n set_tag(qdt, \"EjdivEc\", log=False)\n set_tag(qdt, \"Ej\", log=False)\n pl=Plotter(fig_width=9.0, fig_height=6.0)\n #qdt.epsinf=qdt.epsinf/3.72\n #qdt.Np=10\n #qdt.Ec=qdt.fq*0.1*h\n print qdt.max_coupling, qdt.coupling_approx\n #flux_o_flux0=qdt.call_func(\"flux_over_flux0\", voltage=yoko)\n #Ej=qdt.call_func(\"Ej\", flux_over_flux0=flux_o_flux0)\n #EjdivEc=Ej/qdt.Ec\n anharm=qdt.call_func(\"anharm\", EjdivEc=EjdivEc)\n anharmp=qdt.call_func(\"lamb_shifted_anharm\", EjdivEc=EjdivEc)\n fq=qdt.call_func(\"fq\", Ej=EjdivEc*qdt.Ec)\n ls_fq=qdt.call_func(\"lamb_shifted_fq\", EjdivEc=EjdivEc)\n ls_fq2=qdt.call_func(\"lamb_shifted_fq2\", EjdivEc=EjdivEc)\n #pl, pf=line(fq, anharm/h, linewidth=0.5, color=\"black\", label=r\"$\\Delta_{2,1}-\\Delta_{1,0}$\")\n\n pl, pf=line(EjdivEc, anharmp/h/1e9, linewidth=1.0, color=\"black\", label=r\"$\\Delta_{2,1}-\\Delta_{1,0}$\", plotter=pl)\n line(EjdivEc, anharm/h/1e9, linewidth=1.0, color=\"purple\", label=r\"anharm\", plotter=pl)\n\n line(EjdivEc, (ls_fq-fq)/1e9, plotter=pl, color=\"blue\", linewidth=1.0, label=r\"$\\Delta_{1,0}$\")\n E0, E1, E2=qdt.call_func(\"transmon_energy_levels\", EjdivEc=EjdivEc, n_energy=3)\n fq2=(E2-E1)/h\n line(EjdivEc, (ls_fq2-fq2)/1e9, plotter=pl, color=\"red\", linewidth=1.0, label=r\"$\\Delta_{2,1}$\")\n pl.set_ylim(-2, 1.5)\n #pl.set_xlim(0.0, 70)\n pl.xlabel=r\"$E_j/E_c$\"\n pl.ylabel=r\"$\\Delta (GHz)$\"\n #pl.legend(loc='lower right')\n #fq=qdt.call_func(\"lamb_shifted_fq\", EjdivEc=EjdivEc)\n #line(EjdivEc, fq, plotter=pl, color=\"green\", linewidth=0.5)\n\n #line(EjdivEc, E1p, plotter=pl, color=\"green\", linewidth=0.5)\n #line(EjdivEc, E2p, plotter=pl, color=\"purple\", linewidth=0.5)\n return pl", "def plot_hr_diag(hr_df, x='B_V', y='M_V', cutoff=0.2, bvcutoff=0.05):\n plt.figure(figsize=(11., 10.))\n print \"Plotting background stars..\"\n plt.set_cmap('gray_r')\n plt.hist2d(hr_df[x].tolist(), hr_df[y].tolist(), (200, 200), norm=LogNorm(), cmin=10)\n plt.axis([-0.2, 2.35, -3., 7.])\n plt.gca().invert_yaxis()\n plt.xlabel(r'$BT-VT$ (mag)')\n plt.ylabel(r'$M_{VT}$ (mag)') # Plotting M_{VT}\n plt.title(r'$\\sigma_\\pi / \\pi < %s, \\sigma_{BT-VT}< %s$ mag' % (cutoff, bvcutoff))\n print \"..Done\"\n return", "def plot_1d_path(self):\n\n fig = plt.figure(figsize=(8,5))\n \n matches = (self.a_scale == self.c_scale)\n plt.plot(self.a_scale[matches], self.E_coh[matches])\n plt.xlabel('linear deformation coefficient: 0=fcc, 1=bcc')\n plt.ylabel('Cohesive energy (eV/atom)')\n \n return fig", "def plotEcliptic(maptype=Projection()):\n\n\n ra = np.empty(360)\n dec = np.empty(360)\n for i in np.arange(360):\n ra[i] = i + 2.45*np.sin (2 * i * np.pi/180.)\n dec[i] =23.5*np.sin( i*np.pi/180.)\n\n maptype.plotLine(ra, dec, 'r-', lw=4, label=\"Ecliptic\")", "def plot_disc_walkers(self, id_discs=None):\n # Making sure we have a list\n if not id_discs:\n id_discs = range(len(self.axes))\n elif type(id_discs) == int:\n id_discs = [id_discs]\n \n nplots = len(id_discs)\n fig, axes = 
plt.subplots(nplots, 3, sharex=True, figsize=(20, nplots*5))\n shape = axes.shape\n if len(shape) > 1:\n for axg in axes:\n for ax in axg:\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n else:\n for ax in axes:\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) \n \n \n for disc_id in id_discs:\n axis_name = {\"x\": \"yz\", \"y\": \"xz\", \"z\": \"xy\"}[self.axes[disc_id]]\n param_name = ['a', 'b', 'M']\n for i in range(3):\n pid = disc_id*3+i\n samples = sampler.chain[:,:,pid].T\n if nplots > 1:\n axis = axes[disc_id][i]\n else:\n axis = axes[i]\n \n axis.plot(samples, color='k', alpha=10.0 / self.n_walkers)\n #axis.yaxis.set_major_locator(MaxNLocator(5))\n axis.set_ylabel('$'+param_name[i]+'_{{{0}{1}}}$'.format(axis_name, disc_id))\n axis.set_xlabel('Iteration')\n\n #plt.title('Parameter values for discs : ' + ', '.join(str(x) for x in id_discs))\n\n return fig", "def plot_discharge(self,grid):\n \n grid._setup_active_inlink_and_outlink_matrices()\n outlink = grid.node_active_outlink_matrix\n inlink = grid.node_active_inlink_matrix\n outlink1 = outlink.tolist()\n inlink1 =inlink.tolist()\n newin0, newout0 = change_signs(inlink1[0], outlink1[0])\n newin1, newout1 = change_signs(inlink1[1], outlink1[1])\n in0 = np.array(newin0)\n in1 = np.array(newin1)\n out0 = np.array(newout0)\n out1 = np.array(newout1)\n self.q_node = self.q[in0]+self.q[in1]+self.q[out0]+self.q[out1] #((q[outlink[1]] -q[inlink[1]]))#+((q[outlink[0]]-(q[inlink[0]])))\n fixed_q = grid.zeros(centering='node')\n for each in self.interior_nodes:\n fixed_q[each] = self.q_node[each]\n plt.figure('DISCHARGE')\n hr = grid.node_vector_to_raster(fixed_q)\n palette = pylab.cm.RdYlBu\n im2 = pylab.imshow(hr, cmap=palette, extent=[0, grid.number_of_node_columns *grid.dx,0, grid.number_of_node_rows * grid.dx])\n pylab.clim(vmin=0.000001)#, mx)\n palette.set_under('w', 0.000001)\n cb = pylab.colorbar(im2)\n cb.set_label('DISCHARGE (m)', fontsize=12)\n pylab.title('DISCHARGE')\n plt.show()", "def plot_electrode_spectrum(self, electrode):\n positive_frequencies = self.index >= 0\n plt.plot(self.index[positive_frequencies],\n np.abs(self.ix[positive_frequencies, electrode]))\n plt.xlabel('Frequency')\n plt.ylabel('Magnitude')\n plt.title('Spectrum of {}'.format(electrode))", "def plot_flammenhoehe(self):\n _x, _y = self.bba.get_flammenhoehe_list()\n self.updatePlot(_x,_y)", "def dendogram(self):\r\n \r\n plt.figure(figsize=(20, 7))\r\n dendrogram = sch.dendrogram(sch.linkage(self.X, method='ward'))\r\n plt.title(\"Dendograms\")\r\n plt.axhline(linestyle='--', y=5) \r\n plt.show()", "def shortPlot (erp, cz=None):\r\n erp = check_y (erp , input_name =\"sample of ERP data\")\r\n import matplotlib.pyplot as plt \r\n fig, ax = plt.subplots(1,1, figsize =(10, 4))\r\n leg =[]\r\n ax.scatter (np.arange(len(erp)), erp, marker ='.', c='b')\r\n zl, = ax.plot(np.arange(len(erp)), erp, \r\n c='r', \r\n label ='Electrical resistivity profiling')\r\n leg.append(zl)\r\n if cz is not None: \r\n cz= check_y (cz, input_name =\"Conductive zone 'cz'\")\r\n # construct a mask array with np.isin to check whether \r\n # `cz` is subset array\r\n z = np.ma.masked_values (erp, np.isin(erp, cz ))\r\n # a masked value is constructed so we need \r\n # to get the attribute fill_value as a mask \r\n # However, we need to use np.invert or tilde operator \r\n # to specify that other value except the `CZ` values mus be \r\n # masked. 
Note that the dtype must be changed to boolean\r\n sample_masked = np.ma.array(\r\n erp, mask = ~z.fill_value.astype('bool') )\r\n \r\n czl, = ax.plot(\r\n np.arange(len(erp)), sample_masked, \r\n ls='-',\r\n c='#0A4CEE',\r\n lw =2, \r\n label ='Conductive zone')\r\n leg.append(czl)\r\n\r\n ax.set_xticks(range(len(erp)))\r\n ax.set_xticklabels(\r\n ['S{0:02}'.format(i+1) for i in range(len(erp))])\r\n \r\n ax.set_xlabel('Stations')\r\n ax.set_ylabel('app.resistivity (ohm.m)')\r\n ax.legend( handles = leg, \r\n loc ='best')\r\n \r\n plt.show()", "def plot_ell(SEMA, ECC, INC, PHA, IND=[1]):\n\n len_IND = len(IND)\n if IND:\n cmd = 'sub2ind(size_SEMA, '\n if len_IND == 1:\n titletxt = 'Ellipse '\n else:\n titletxt = 'Ellipse ('\n\n for k in range(len_IND):\n if k == 0:\n cmd = cmd + '[' + str(IND[k])\n else:\n cmd = cmd + ',' + str(IND[k])\n\n if k < len_IND-1:\n titletxt = titletxt + str(IND[k]) + ','\n elif len_IND == 1:\n titletxt = titletxt + str(IND[k])\n else:\n titletxt = titletxt + str(IND[k]) + ')'\n\n cmd = 'n = ' + cmd + '])'\n # This is pretty nasty, but it works.\n exec(cmd)\n\n plt.gcf()\n plt.clf()\n do_the_plot(SEMA.flatten()[n], ECC.flatten()[n], INC.flatten()[n], PHA.flatten()[n])\n titletxt = titletxt + ', (red) green (anti-) clockwise component'\n plt.title(titletxt)\n elif len_IND:\n print('IND input contains zero element(s)!\\nNo ellipse will be plotted.')", "def plot_XDR_PDR():\n\n fig,axes = plt.subplots(nrows=2, ncols=2, squeeze=True, sharex='col', sharey='row', figsize=(6,6))\n fig.subplots_adjust(hspace=0, wspace=0) #, top=0.80, bottom=0.04, left=0.04, right=0.93)\n\n # get data\n sscs = [SSC['no'] for SSC in SSCs]\n colors = [plt.cm.inferno(i/(len(SSCs)+1)) for i in SSCs['no']]\n HCO_HCN, HNC_HCN, HNC_HCO = [],[],[]\n HCO_HCN_err, HNC_HCN_err, HNC_HCO_err = [],[],[]\n for SSC in SSCs:\n try:\n hco_hcn_med = ratios['HCO+/HCN'][str(SSC['no'])]['median']\n hco_hcn_p16 = ratios['HCO+/HCN'][str(SSC['no'])]['16th']\n hco_hcn_p84 = ratios['HCO+/HCN'][str(SSC['no'])]['84th']\n hco_hcn_low = hco_hcn_med-hco_hcn_p16\n hco_hcn_hig = hco_hcn_p84-hco_hcn_med\n HCO_HCN.append( np.log10(hco_hcn_med) )\n HCO_HCN_err.append( [0.434*hco_hcn_low/hco_hcn_med,0.434*hco_hcn_hig/hco_hcn_med] )\n except:\n HCO_HCN.append( np.nan )\n HCO_HCN_err.append( [np.nan,np.nan] )\n try:\n hnc_hcn_med = ratios['HNC/HCN'][str(SSC['no'])]['median']\n hnc_hcn_p16 = ratios['HNC/HCN'][str(SSC['no'])]['16th']\n hnc_hcn_p84 = ratios['HNC/HCN'][str(SSC['no'])]['84th']\n hnc_hcn_low = hnc_hcn_med-hnc_hcn_p16\n hnc_hcn_hig = hnc_hcn_p84-hnc_hcn_med\n HNC_HCN.append( np.log10(hnc_hcn_med) )\n HNC_HCN_err.append( [0.434*hnc_hcn_low/hco_hcn_med,0.434*hnc_hcn_hig/hco_hcn_med] )\n except:\n HCO_HCN.append( np.nan )\n HCO_HCN_err.append( [np.nan,np.nan] )\n try:\n hnc_hco_med = ratios['H15NC/HCO+'][str(SSC['no'])]['median']*ratios['14N/15N'][str(SSC['no'])]['median']\n hnc_hco_p16 = ratios['H15NC/HCO+'][str(SSC['no'])]['16th']*ratios['14N/15N'][str(SSC['no'])]['median']\n hnc_hco_p84 = ratios['H15NC/HCO+'][str(SSC['no'])]['84th']*ratios['14N/15N'][str(SSC['no'])]['median']\n hnc_hco_low = hnc_hco_med-hnc_hco_p16\n hnc_hco_hig = hnc_hco_p84=hnc_hco_med\n HNC_HCO.append( np.log10(hnc_hco_med) )\n HNC_HCO_err.append( [0.434*hnc_hco_low/hnc_hco_med,0.434*hnc_hco_hig/hnc_hco_med] )\n except:\n HCO_HCN.append( np.nan )\n HCO_HCN_err.append( [np.nan,np.nan] )\n\n # comparison from Baan+08\n B_hcn = [318.2, 14]\n B_hnc = [234.0, 7]\n B_hco = [276.1, 14]\n B_hco_hcn = [B_hco[0]/B_hcn[0], 
B_hco[0]/B_hcn[0]*np.sqrt((B_hco[1]/B_hco[0])**2+(B_hcn[1]/B_hcn[0])**2)]\n B_hnc_hcn = [B_hnc[0]/B_hcn[0], B_hnc[0]/B_hcn[0]*np.sqrt((B_hnc[1]/B_hnc[0])**2+(B_hcn[1]/B_hcn[0])**2)]\n B_hnc_hco = [B_hnc[0]/B_hco[0], B_hnc[0]/B_hco[0]*np.sqrt((B_hnc[1]/B_hnc[0])**2+(B_hco[1]/B_hco[0])**2)]\n B_HCO_HCN = [np.log10(B_hco_hcn[0]), 0.434*B_hco_hcn[1]/B_hco_hcn[0]]\n B_HNC_HCN = [np.log10(B_hnc_hcn[0]), 0.434*B_hnc_hcn[1]/B_hnc_hcn[0]]\n B_HNC_HCO = [np.log10(B_hnc_hco[0]), 0.434*B_hnc_hco[1]/B_hnc_hco[0]]\n\n def format_panel(ax):\n ax.xaxis.set_major_locator(MultipleLocator(0.5))\n ax.xaxis.set_minor_locator(MultipleLocator(0.25))\n ax.yaxis.set_major_locator(MultipleLocator(0.5))\n ax.yaxis.set_minor_locator(MultipleLocator(0.25))\n ax.set_axisbelow(True)\n ax.grid(axis='both', which='both')\n\n def label_regions(ax):\n ax.text(0.95, 0.9, 'XDR', color='k', transform=ax.transAxes, ha='right', va='top', weight='bold', fontsize=16)\n ax.text(0.05, 0.1, 'PDR', color='k', transform=ax.transAxes, ha='left', va='bottom', weight='bold', fontsize=16)\n\n # panel 1: HCO+/HCN over HNC/HCO+\n ax = axes[0][0]\n ax.plot([-10,10],[10,-10], ls='-', lw=1, c='grey', zorder=2)\n ax.fill_between([-10,10],[10,-10],[10,10], color='lightgrey', alpha=0.5, zorder=1)\n label_regions(ax)\n for a,b,a_err,b_err,c,s in zip(HNC_HCO, HCO_HCN, HNC_HCO_err, HCO_HCN_err, colors, SSCs):\n if np.isfinite(a) and np.isfinite(b):\n ax.errorbar(a,b, xerr=[[a_err[0]],[a_err[1]]], yerr=[[b_err[0]],[b_err[1]]], marker='o', ms=5, color=c, elinewidth=1, ecolor=c, label='SSC '+str(s['no']), zorder=3)\n ax.errorbar(B_HCO_HCN[0],B_HNC_HCO[0], xerr=B_HCO_HCN[1], yerr=B_HNC_HCO[1], marker='o', ms=5, color='lime', elinewidth=1, ecolor='lime', label=r'NGC 253 (Baan +08)', zorder=4)\n ax.set_xlim(-0.75,0.75)\n ax.set_ylim(-0.85,0.65)\n format_panel(ax)\n ax.set_ylabel(r'log HCO$^+$ / HCN', fontsize=12)\n\n # panel 2: HNC/HCN over HCO/HCN\n ax = axes[0][1]\n ax.plot([0,0],[-10,10], ls='-', lw=1, c='grey', zorder=2)\n ax.fill_between([0,10],[-10,-10],[10,10], color='lightgrey', alpha=0.5, zorder=1)\n label_regions(ax)\n for a,b,a_err,b_err,c in zip(HNC_HCN, HCO_HCN, HNC_HCN_err, HCO_HCN_err, colors):\n if np.isfinite(a) and np.isfinite(b):\n ax.errorbar(a,b, xerr=[[a_err[0]],[a_err[1]]], yerr=[[b_err[0]],[b_err[1]]], marker='o', ms=5, color=c, elinewidth=1, ecolor=c, zorder=3)\n ax.errorbar(B_HNC_HCN[0],B_HCO_HCN[0], xerr=B_HCO_HCN[1], yerr=B_HNC_HCO[1], marker='o', ms=5, color='lime', elinewidth=1, ecolor='lime', zorder=4)\n ax.set_xlim(-0.95,0.55)\n ax.set_ylim(-0.85,0.65)\n format_panel(ax)\n ax.tick_params(labelbottom=True)\n ax.set_xlabel(r'log HNC / HCN', fontsize=12)\n\n # panel 3: HNC/HCO over HNC/HCN\n ax = axes[1][0]\n ax.plot([-10,10],[0,0], ls='-', lw=1, c='grey', zorder=2)\n ax.fill_between([-10,10],[0,0],[10,10], color='lightgrey', alpha=0.5, zorder=1)\n label_regions(ax)\n for a,b,a_err,b_err,c in zip(HNC_HCO, HNC_HCN, HNC_HCO_err, HNC_HCN_err, colors):\n if np.isfinite(a) and np.isfinite(b):\n ax.errorbar(a,b, xerr=[[a_err[0]],[a_err[1]]], yerr=[[b_err[0]],[b_err[1]]], marker='o', ms=5, color=c, elinewidth=1, ecolor=c, zorder=3)\n ax.errorbar(B_HNC_HCO[0],B_HNC_HCN[0], xerr=B_HCO_HCN[1], yerr=B_HNC_HCO[1], marker='o', ms=5, color='lime', elinewidth=1, ecolor='lime', zorder=4)\n ax.set_xlim(-0.75,0.75)\n ax.set_ylim(-1.05,0.45)\n format_panel(ax)\n ax.set_xlabel(r'log HNC$^{**}$ / HCO$^+$', fontsize=12)\n ax.set_ylabel(r'log HNC / HCN', fontsize=12)\n\n # panel 4: legend\n ax = axes[1][1]\n ax.set_axis_off()\n fig.legend(loc=3, 
bbox_to_anchor=(0.55,0.1,0.14,0.3), ncol=1, mode=\"expand\", borderaxespad=0., fontsize=12, frameon=False)\n\n savepath = escape_fname(os.path.join(plotdir, '04.XCLASS_final', 'XDR-PDR_column_density.pdf'))\n os.system('mkdir -p '+os.path.dirname(savepath))\n fig.savefig(savepath, dpi=300, bbox_Nnches='tight')", "def generate_plot():\n # Size of the coordinates\n limit = 3\n\n # Plotting fundamental domain\n fundamental_domain = (\n (0.02, 0.02), (0.02, limit), (limit, limit),\n )\n\n # Probe points\n probes = ({\"label\": \"A\", \"x\": (1, 2), \"color\": \"black\"},)\n\n # Action over probes\n actions = (\n (\n lambda x: (x[0], x[1]),\n lambda x: (x[1], x[0])),\n )\n\n # Identification lines\n id_lines = ({\"begin\": [0.02, 0.02], \"end\": [limit, limit], \"color\": \"blue\"},)\n\n # Execute only if run as a script\n plot_dyad_orbichord(\n x_lim=[0, limit],\n y_lim=[0, limit],\n x_label=\"$x_1$\",\n y_label=\"$x_2$\",\n fundamental_domain=fundamental_domain,\n probes=probes,\n actions=actions,\n id_lines=id_lines\n )", "def get_diagonal_correlationplot(self) -> None:", "def show_dcr_results(dg):\n cycle = dg.fileDB['cycle'].values[0]\n df_dsp = pd.read_hdf(f'./temp_{cycle}.h5', 'opt_dcr')\n # print(df_dsp.describe()) \n\n # compare DCR and A/E distributions\n fig, (p0, p1) = plt.subplots(2, 1, figsize=(8, 8))\n \n elo, ehi, epb = 0, 25000, 100\n \n # aoe distribution\n # ylo, yhi, ypb = -1, 2, 0.1\n # ylo, yhi, ypb = -0.1, 0.3, 0.005\n ylo, yhi, ypb = 0.05, 0.08, 0.0005\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p0.hist2d(df_dsp['trapEmax'], df_dsp['aoe'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n # p0.set_xlabel('Energy (uncal)', ha='right', x=1)\n p0.set_ylabel('A/E', ha='right', y=1)\n\n # dcr distribution\n # ylo, yhi, ypb = -20, 20, 1 # dcr_raw\n # ylo, yhi, ypb = -5, 2.5, 0.1 # dcr = dcr_raw / trapEmax\n # ylo, yhi, ypb = -3, 2, 0.1\n ylo, yhi, ypb = 0.9, 1.08, 0.001\n ylo, yhi, ypb = 1.034, 1.0425, 0.00005 # best for 64.4 us pz\n # ylo, yhi, ypb = 1.05, 1.056, 0.00005 # best for 50 us pz\n # ylo, yhi, ypb = 1.016, 1.022, 0.00005 # best for 100 us pz\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p1.hist2d(df_dsp['trapEmax'], df_dsp['dcr'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n p1.set_xlabel('Energy (uncal)', ha='right', x=1)\n p1.set_ylabel('DCR', ha='right', y=1)\n \n # plt.show()\n plt.savefig(f'./plots/dcr_cyc{cycle}.png', dpi=300)\n plt.cla()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes the connection with IMDb.
def initialize_connection():
    session = imdb.IMDb()
    return session
[ "def _setup_connection(cls):\n try:\n cls.imdb_access = imdb.IMDb()\n except imdb.IMDbError, err:\n print \"Problem with connectivity to imdb.com due to %s \" \\\n % (err)", "def __init__(self):\n\t\tself.obtainDatabaseConnection()", "def init(self):\n self.db.connect()\n try:\n self.db.create_tables([JambiModel], safe=True)\n JambiModel.create(ref='0')\n self.logger.info('Database initialized')\n except IntegrityError:\n self.logger.info('Database was already initialized')\n self.db.close()", "def __post_init__(self):\n self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()", "def init_db(self) -> None:\n self._db = DB()", "def initConnections(self):\n raise NotImplementedError", "def initialize_api(self):\n\n self.check_connection()\n\n # Subscribe to some channels\n self.wss.subscribe_to_trades(self.symbol)\n self.wss.subscribe_to_order_book(pair=self.symbol, len=100)\n\n # Initialize a DataBase object\n self.db = DataBase(symbol=self.symbol)\n\n logger.info('API connection initialized.')", "def init_connection(self):\n self.log('Initializing connection to %s' % (self.bosh_service.netloc))\n self.connection = httplib.HTTPConnection(self.bosh_service.netloc)\n self.log('Connection initialized')\n # TODO add exceptions handler there (URL not found etc)", "def connect(self):\n self.conn.connect()", "async def initialize(self):\n initial_conn = await aiomysql.connect(\n host=config.mysql_host,\n port=config.mysql_port,\n user=config.mysql_user,\n password=config.mysql_password,\n autocommit=True,\n loop=asyncio.get_event_loop()\n )\n\n async with initial_conn.cursor() as cur:\n await cur.execute('CREATE DATABASE IF NOT EXISTS ' +\n config.mysql_database_name + ';')\n initial_conn.close()\n\n self._pool = await aiomysql.create_pool(\n host=config.mysql_host,\n port=config.mysql_port,\n user=config.mysql_user,\n password=config.mysql_password,\n autocommit=True,\n db=config.mysql_database_name,\n loop=asyncio.get_event_loop()\n )", "def init_db(self):\n raise NotImplementedError()", "def initialize(self):\n self.connection = HTTPConnection(self.host, self.port)\n self.fetch_description()", "def __init__(self):\n self.conn = SQLTools().connect(credentials)\n self.db = returnDB()", "def _init_connection(self, settings):\n raise NotImplementedError()", "def setup_connection(self):\n self.db = MySQLdb.connect(self.host, self.usr, self.psw, self.dbname)\n self.cursor = self.db.cursor()", "def connect(self):\n self.engine = create_engine(self.connection_string)\n self.conn = self.engine.connect()\n self.connected = True", "def _connect_to_db(cls):\n cls._cfgdb_map = {}\n if cls._cfgdb is None:\n sip = cfg.CONF.APISERVER.api_server_ip\n sport = cfg.CONF.APISERVER.api_server_port\n # Initialize connection to DB and add default entries\n cls._cfgdb = ctdb.config_db.DBInterface(cls._admin_user,\n cls._admin_password,\n cls._admin_tenant_name,\n sip, sport)\n cls._cfgdb.manager = cls", "def initialize(self) -> None:\n # First, establish a connection to the specified database\n try:\n self._connect_to_db()\n except psycopg2.OperationalError: # specified database does not exist\n with psycopg2.connect(database=DATABASE_ENV[\"POSTGRES_DB\"],\n user=self.dbuser, password=self.dbpassword,\n host=self.dbhost, port=str(self.dbport)) as con:\n with con.cursor() as cur:\n con.autocommit = True # cannot create db inside a transaction\n cur.execute(f'CREATE DATABASE \"{self.dbname}\"')\n con.autocommit = False\n 
self._connect_to_db() # try again\n\n # Second, create the necessary database table, only if required\n with self._connection.cursor() as cur:\n cur.execute(f\"\"\"\n CREATE TABLE IF NOT EXISTS \"{self.MESSAGE_TABLE_NAME}\" (\n id SERIAL PRIMARY KEY,\n key CHAR(4) NOT NULL,\n value REAL NOT NULL,\n ts TIMESTAMP NOT NULL,\n tz TEXT NOT NULL\n );\n \"\"\")\n self._connection.commit()", "def initialize(self):\n if self.real:\n self.agent.connect(self)\n else:\n self.connect() # Connect python client to VREP\n self.agent.connect(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Displays a generic error message when there is a connection error.
def display_error():
    clear_screen()
    line = '#' * 20
    print(f'{line}\n# CONNECTION ERROR #\n{line}')
    exit(1)
[ "def show_connection_failed(self):\n self.set_status_text1(\n '<b>RPC network status:</b> failed connection to %s' % self.dashd_intf.get_active_conn_description(),\n 'error')", "def __display_error(self, socket_error):\r\n\t\tif socket_error == QAbstractSocket.RemoteHostClosedError:\r\n\t\t\tself._window.open_dialog(\"Serveur déconnecté\", \"Le serveur s'est déconnecté !\")\r\n\t\t\t# Add signal to be emitted that pops up a dialog window\r\n\t\telif socket_error == QAbstractSocket.OperationError: # Raised when the socket already is connected\r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\tself._window.open_dialog(\"Erreur de connection\",\r\n\t\t\t\t\t\t\t\t\t \"L'erreur suivante est survenue : {}.\".format(self.__tcpSocket.errorString()),\r\n\t\t\t\t\t\t\t\t\t type=\"error\")", "def on_connect_failed(self):\n print('connect failed...')", "def _connect_failed(self):\n\t\tself.root.stdout.write(\"Error: Connection Failed!\\n\")\n\t\tself.client = False", "def displayError(*args, **kwargs):\n \n pass", "def connection_failed(self, connection, error):\n assert False", "def db_connection_error(error):\n return internal_server_error(error)", "def send_error(error_msg):\n print(error_msg)", "def display_error(self, message):\n self.ui_widget.display_error(message=message)", "def offline_error():\n\n colored('No available internet connection\\n', 'red')", "def send_server_error(self):\n\n self.send_message(\n Message(\n Codes.SERVER_ERROR,\n { 'message': 'The server has encountered an internal error.' }\n )\n )", "def db_error(request, message):\n return render(request, 'error/db.html', {'error': message})", "def onConnectError(self, fetcher, error): #$NON-NLS-1$\r", "def _on_server_error(server, *_):\n exception = sys.exc_info()[1]\n if isinstance(exception, ConnectionError):\n # These are expected errors when the browser closes the connection.\n return\n # Other errors would be unexpected, so print them.\n traceback.print_exc()", "def error(msg):\n\tprint 'Error:', msg", "def on_connection_error(self):\n log.error(\"Stream connection has errored or timed out\")", "def error():\n title = session.get('title', 'Error')\n error_message = session.get('error_message', 'An error has occurred.')\n level = session.get('level', 'error')\n logger.error(\"Displaying error to the user\", error_message=error_message, level=level)\n return render_template('errors/error.html', title=title, error_message=error_message, level=level)", "def error(request):\n return render(request, utils.get_setting('MISFIT_ERROR_TEMPLATE'), {})", "def show_error(self):\n print('LSE Error : {}'.format(self._error))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Displays the ratings that were scraped.
def display_ratings(ratings):
    # only attempt to display the ratings if any were found
    if ratings:
        print('\n[RATINGS]\n')
        for rating in ratings:
            print(f' {rating}', end=' ')
        # needed to get printing back to normal
        print()
[ "def view_ratings(request):\n profs = Professor.objects.all()\n if profs:\n prof_table = \"\"\n for prof in profs:\n prof_ratings = Rating.objects.all().filter(prof=prof.id)\n if prof_ratings:\n average_rating = prof_ratings.aggregate(ave=Avg('rating'))['ave']\n average_rating = int(decimal.Decimal(average_rating)\n .quantize(decimal.Decimal('1'), rounding=decimal.ROUND_HALF_UP))\n\n prof_table += \"{} has a rating of {}\\n\".format(prof.name, \"*\" * average_rating)\n else:\n prof_table += \"{} has no ratings!\\n\".format(prof.name, prof.id)\n\n return Response(prof_table)\n else:\n return Response(\"No lecturers exist!\")", "def print_ratings(all_ratings):\n print(\"\\nRESTAURANT RATINGS:\")\n for restaurant, rating in sorted(all_ratings.items()):\n print(f' {restaurant} is rated at {rating}.')", "def print_ratings(self):\n # Filter out teams not in tournament\n tournament_teams = self.teams.items()\n #tournament_teams = filter(lambda team: team[1]['participating'], self.teams.items())\n\n sorted_ratings = sorted(tournament_teams, key=lambda team: team[1]['rating'], reverse=True)\n\n print 'Rank\\tRating\\tTeam'\n for n,team in enumerate(sorted_ratings):\n if team[1]['participating']:\n print '{}\\t{}\\t({}) {}'.format(\n n+1,\n #len(sorted_ratings)-n,\n int(team[1]['rating']),\n team[1]['seed'],\n team[1]['name'].replace('_', ' '),)", "def display_player_ratings(player_ratings):\r\n print('\\nCLASSEMENT DES PARTICIPANTS:\\n Nom ELO Score')\r\n for i in range(0, len(player_ratings)):\r\n print(players_table.get(doc_id=player_ratings[i][0])['Nom'],\r\n players_table.get(doc_id=player_ratings[i][0])['ELO'],\r\n player_ratings[i][1])", "def ratings(self):\n return self._ratings", "def get_ratings(self):\n return self.ratings", "def printJudgeRatings(self):\n\n try:\n judgeNotesLogger.info(\"printJudgeRatings: Printing out judge ratings from '%s'\\n\", self.notesFile)\n\n # Print Normal List First.\n for ratingTuple in self.judgedSongList:\n if ratingTuple[0][2] != \"\":\n print(\"SONG:\", ratingTuple[0][0], \"{\"+ratingTuple[0][1]+\"}\", \"(\"+ratingTuple[0][2]+\")\",\n \"\\nRATING:\", \"[\"+str(ratingTuple[1])+\"/10]\\n\")\n else:\n print(\"SONG:\", ratingTuple[0][0], \"{\"+ratingTuple[0][1]+\"}\",\n \"\\nRATING:\", \"[\"+str(ratingTuple[1])+\"/10]\\n\")\n\n # Print Special List Second.\n for ratingTuple in self.specialSongList:\n if ratingTuple[0][2] != \"\":\n print(\"SONG:\", ratingTuple[0][0], \"{\"+ratingTuple[0][1]+\"}\", \"(\"+ratingTuple[0][2]+\")\",\n \"\\nRATING:\", \"[\"+str(ratingTuple[1])+\"]\\n\")\n else:\n print(\"SONG:\", ratingTuple[0][0], \"{\"+ratingTuple[0][1]+\"}\",\n \"\\nRATING:\", \"[\"+str(ratingTuple[1])+\"]\\n\")\n \n except:\n judgeNotesLogger.warning(\"printJudgeRatings: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))", "def display_recommendation(self, n=5):\n self.n = n # update the number of recommendations to display\n if len(self.recomm) == 0:\n print(\"Sorry, there is no matching recommendations.\")\n elif self.n < len(self.recomm): # display only the top n from the recommendation list\n print(\"Below is a list of the top {} recommended restaurants for you: \".format(self.n))\n print(self.recomm.iloc[:self.n][self.column_to_display])\n else: # display all if # of recommendations is less than self.n\n print(\"Below is a list of all {} recommended restaurants for you: \".format(len(self.recomm)))\n print(self.recomm[self.column_to_display])", "def display_visual_number_of_ratings_statistics(list_of_number_ratings):\n #get the 
statistical results for number of ratings\n number_ratings_mean = calculate_mean(list_of_number_ratings)\n number_ratings_sd = calculate_standard_deviation(list_of_number_ratings)\n number_ratings_mode = calculate_mode(list_of_number_ratings)\n number_ratings_median = calculate_median(list_of_number_ratings)\n \n #create a dictionary of statistical results\n number_ratings_data = {'Mean':number_ratings_mean,\n 'Standard Deviation':number_ratings_sd,\n 'Mode':number_ratings_mode,\n 'Median':number_ratings_median}\n \n fig, ax = plt.subplots()\n \n #plot number of ratings analysis\n ax.set_title(\"Number of Ratings Analysis\")\n \n y_pos = [i for i in range(len(number_ratings_data))]\n \n ax.set_yticks(y_pos)\n ax.set_yticklabels(number_ratings_data.keys())\n \n ax.set_ylabel(\"Analysis\")\n ax.set_xlabel(\"Result\")\n \n ax.barh(y_pos, number_ratings_data.values(), align=\"center\")\n \n plt.show() \n \n fig.savefig(\"number_of_reviews_statistics.png\", bbox_inches=\"tight\")", "def __extract_ratings(self, soup):\n try:\n review_ratings = []\n added_or_rated = soup.find_all('div', attrs={'class':'reviewHeader uitext stacked'})\n for ar in added_or_rated:\n if 'added it' in ar.get_text().strip():\n rating = 0\n elif 'rated it' in ar.get_text().strip():\n rating_text = ar.find_next('span', attrs={'class':' staticStars'})\n rating_text = rating_text.find_next('span').get_text()\n rating = self.__rating_text_to_num(rating_text)\n else:\n rating = 0\n review_ratings.append(rating)\n\n return review_ratings\n except:\n raise", "def render_rating(context, template_name=DEFAULT_TEMPLATE):\n # gathering the basic information\n request = context['request']\n page_url = request.path\n # this sorts the get parameters alphabetically via keys\n # it's split at \"&\" then sorted and then joined again with \"&\"\n page_query = '&'.join(sorted(request.GET.urlencode().split('&')))\n\n # getting the page rating object and incrementing the view counter\n rating, created = Rating.objects.get_or_create(\n page_url=page_url, page_query=page_query)\n if not created:\n rating.page_views += 1\n rating.save()\n\n # find out how many people found this page helpful\n total_votes = rating.upvotes + rating.downvotes\n try:\n percentage = int(float(rating.upvotes) / float(total_votes) * 100)\n except ZeroDivisionError:\n percentage = None\n\n # get the list of urls, the user already voted on\n rated_pages = request.session.get('rated_pages', None)\n if rated_pages is None or page_url + page_query not in rated_pages:\n already_voted = 0\n else:\n already_voted = 1\n\n # get the user's IP\n # When we are e.g. on a webfaction apache, REMOTE_ADDR will be localhost,\n # so we first look into the HTTP_X_FORWARDED_FOR list and take the first\n # entry we find\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0].strip()\n else:\n ip = request.META.get('REMOTE_ADDR')\n\n context.update({'ip': ip, 'page_query': page_query,\n 'percentage': percentage, 'already_voted': already_voted})\n t = loader.get_template(template_name)\n return t.render(context)", "def printRatingsToSongs(self):\n judgeNotesLogger.info(\"printRatingsToSongs: Printing songs for each rating parsed\")\n try:\n\n # Print out normal ratings first.\n sortedRatings = sorted(self.ratingsToSongs.keys(), key=float)\n for rating in sortedRatings:\n print(\"\") # For neater printing. 
Newline still occurs here\n songsInRating = self.ratingsToSongs[rating]\n print(\"[\"+str(rating)+\"/10]\")\n for song in songsInRating:\n if song[2] != \"\":\n print(\"-->\", song[0], \"{\"+song[1]+\"}\", \"(\"+song[2]+\")\")\n else:\n print(\"-->\", song[0], \"{\"+song[1]+\"}\")\n\n # Print out special ratings after.\n sortedRatings = sorted(self.specialRatingsToSongs.keys(), key=str.lower)\n for rating in sortedRatings:\n print(\"\") # For neater printing. Newline still occurs here\n songsInRating = self.specialRatingsToSongs[rating]\n print(\"[\"+str(rating)+\"]\")\n for song in songsInRating:\n if song[2] != \"\":\n print(\"-->\", song[0], \"{\"+song[1]+\"}\", \"(\"+song[2]+\")\")\n else:\n print(\"-->\", song[0], \"{\"+song[1]+\"}\")\n \n print(\"\") # For neater printing. Newline still occurs here\n except:\n judgeNotesLogger.warning(\"printRatingsToSongs: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))", "def print_recommendations(self):\n\n rec_vector = self.generate_recommendation()\n\n print(\"Recommendations for user {} \".format(self.username))\n\n for ranking, subreddit_name in enumerate(rec_vector, 1):\n print(\"{}.: {}\".format(ranking, subreddit_name))\n\n if ranking%10 == 0 and ranking!=0:\n check_if_move_on = True\n print(\"\\nType c and press enter for the next 10 subreddits.\\n\")\n print(\"Type q and press enter to return to main menu.\\n\")\n\n while check_if_move_on:\n choice = input()\n\n if choice == 'c':\n break\n\n elif choice == 'q':\n break\n\n else:\n print(\"Not a valid entry, please enter again.\")\n\n # break the whole thing if they want to quit\n if choice == 'q':\n break", "def get_ratings(self):\n df = pd.read_csv(IoManager.CARD_RATINGS_FILE_PATH)\n df = IoManager.scale_ratings(df)\n df = IoManager.normalize_ratings_per_archetype(df)\n df = self.add_ratings_sum(df)\n # print(df[[\"name\", \"monogreen\", \"simic_ramp\", \"general\"]].tail(60))\n # print(df[[\"name\", \"general\"]].sort_values(ascending=False, by=\"general\").head(50))\n return df", "def all_prods(request):\n products = Product.objects.all()\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n context = {\n 'products': products,\n 'stars': stars\n }\n return render(request, \"products.html\", context)", "def ratings(self):\n session = Session.object_session(self)\n return session.query(Rating).join(Section).filter(Section.professor == self).all()", "def display_visual_average_rating_statistics(list_of_ratings_info):\n #get the statistical results for average ratings\n av_ratings_mean = calculate_mean(list_of_ratings_info)\n av_ratings_sd = calculate_standard_deviation(list_of_ratings_info)\n av_ratings_mode = calculate_mode(list_of_ratings_info)\n av_ratings_median = calculate_median(list_of_ratings_info)\n \n #create a dictionary of statistical results\n av_ratings_data = {'Mean':av_ratings_mean,\n 'Standard Deviation':av_ratings_sd,\n 'Mode':av_ratings_mode,\n 'Median':av_ratings_median}\n \n fig, ax = plt.subplots()\n \n #plot average rating analysis\n ax.set_title(\"Average Rating Analysis\")\n \n y_pos = [i for i in range(len(av_ratings_data))]\n \n ax.set_yticks(y_pos)\n ax.set_yticklabels(av_ratings_data.keys())\n \n ax.set_ylabel(\"Type of Analysis\")\n ax.set_xlabel(\"Average Rating\")\n \n ax.barh(y_pos, av_ratings_data.values(), align=\"center\")\n \n plt.show() \n \n #save the file as a png\n fig.savefig(\"average_rating_statistics.png\", bbox_inches=\"tight\")", "def display_bestratings(request, count=0, 
template='product/best_ratings.html'):\n if count is None:\n count = config_value('PRODUCT','NUM_DISPLAY')\n \n ctx = RequestContext(request, {\n 'products' : highest_rated(),\n })\n return render_to_response(template, context_instance=ctx)", "def all_ratings(self):\n\n for u, u_ratings in iteritems(self.ur):\n for i, r in u_ratings:\n yield u, i, r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scrapes the plot from the provided URL.
def get_plot(url):
    soup = get_soup(url.rsplit('/', 1)[0])
    if soup:
        # scrape the plot section
        plot_div = soup.find('div', {'id': 'titleStoryLine'})
        # fixes bug where no plot is found
        try:
            plot_class = plot_div.find('span', {'itemprop': 'description'})
            plot = plot_class.text.strip()
            return ' '.join(plot.split())
        except AttributeError:
            return 'The plot was not available.'
    else:
        display_error()
[ "def fn_GetMoviePlot(self, details):\n\n # If the custom url was not actually defined and we had no cached\n # data, then there is nothing to do.\n #\n if details is None:\n return\n\n dom = parseString(details)\n d = dom.firstChild\n self.plot = get_child_data(d, \"plot\", self.plot)\n dom.unlink()", "def get_data(self, url):\n # Initialize the button that needs to be pressed to get download the data\n button = None\n # While this button is of type 'None' we reload the browser\n while button is None:\n try:\n # Navigate to the URL\n self.go_to_url(url)\n # Sleep the code by the defined time plus a random number of seconds between 0s and 2s. This should\n # reduce the likelihood that Google detects us as a scraper\n time.sleep(self.sleep + 2 * np.random.rand(1))\n # Try to find the button and click it\n line_chart = self.browser.find_element_by_css_selector(\n \"widget[type='fe_line_chart']\")\n button = line_chart.find_element_by_css_selector(\n '.widget-actions-item.export')\n button.click()\n except exceptions.NoSuchElementException:\n # If the button cannot be found, try again (load page, ...)\n pass\n # After downloading, wait again to allow the file to be downloaded\n time.sleep(self.sleep)\n # Load the data from the csv-file as pandas.DataFrame object\n data = pd.read_csv(self.filename, skiprows=1)\n # Set date as index:\n if 'Day' in data.columns:\n data.Day = pd.to_datetime(data.Day)\n data = data.set_index(\"Day\")\n frequency = 'Daily'\n elif 'Week' in data.columns:\n data.Week = pd.to_datetime(data.Week)\n data = data.set_index(\"Week\")\n frequency = 'Weekly'\n else:\n data.Month = pd.to_datetime(data.Month)\n data = data.set_index(\"Month\")\n frequency = 'Monthly'\n # Sleep again\n time.sleep(self.sleep)\n # Delete the file\n while os.path.exists(self.filename):\n try:\n os.remove(self.filename)\n except:\n pass\n return data, frequency", "def get(self, url):\n self.browser.get(url)", "def scrape(self):\n self._getContent()\n self._getImageURLs()\n self._getImages()\n \n # Finds the comic's title\n #titles = ComicScraper.titleExpr.findall(html)\n #if titles and len(titles) > 0:\n # self.title = titles[0]", "def get_plot(session_id, test_name):\n return Plot.get_plot(session_id, test_name)", "def load(self, url):\n pass", "def the_scrape(url):\n\n req = requests.get(url)\n url_list = parse_urls(req.text)\n email_list = parse_email_addresses(req.text)\n phone_list = parse_phone_numbers(req.text)\n\n print_list('Emails', email_list)\n print_list('Phone Numbers', phone_list)\n print_list('Urls', url_list)", "def plot_race(url):\n #hey, thanks again for these functions!\n idrace = id_from_url(url)\n xml = get_poll_lxml(idrace) \n colors = plot_colors(xml)\n\n if len(colors) == 0:\n return\n \n #really, you shouldn't have\n result = race_result(url)\n \n poll_plot(idrace)\n plt.xlabel(\"Date\")\n plt.ylabel(\"Polling Percentage\")\n for r in result:\n plt.axhline(result[r], color=colors[_strip(r)], alpha=0.6, ls='--')", "async def breakdown(self, webpage_url, session):\n\n def wrapper(url):\n try:\n new_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(new_loop)\n r = new_loop.run_until_complete(self.extract_info(webpage_url=url, collaborate=False))\n new_loop.close()\n return r\n except:\n traceback.print_exc()\n return False\n\n webpage_content = await self.request(\n url=webpage_url,\n session=session,\n headers=self.general_headers(user_agent=self.random_ua())\n )\n try:\n selector = Selector(text=webpage_content)\n except TypeError:\n return False\n iframe_src = 
selector.css('iframe::attr(src)').extract()\n with futures.ThreadPoolExecutor(max_workers=min(10, os.cpu_count())) as executor: ## set up processes\n executor.submit(wrapper)\n future_to_url = [executor.submit(wrapper, url=iframe) for iframe in iframe_src]\n results = []\n try:\n for f in futures.as_completed(future_to_url, timeout=max([len(iframe_src) * 3, 15])):\n try:\n result = f.result()\n for ele in result:\n ele['playlist_url'] = webpage_url\n results.append(ele)\n except:\n traceback.print_exc()\n continue\n except:\n traceback.print_exc()\n pass\n return results", "def from_url(self) -> PngImagePlugin.PngImageFile:\n response = requests.get(self.url)\n img = Image.open(BytesIO(response.content))\n\n return img", "def fetch_song_data(url):\r\n response = requests.get(url)\r\n return response.text", "def open_url(self, url):\n\n # use quote to deal with arabic/... url, safe ':/' is needed\n url = quote(url, safe = ':/')\n\n try:\n # timeout is necessary here\n response = self.scraper.get(url, timeout = 30, cookies = self.cookies)\n except:\n # catch all Exceptions here\n logger.warning('error when sending request to \"%s\".' % url)\n return None\n\n # checks response's status code\n if response.status_code == 200:\n logger.debug('received response from \"%s\".' % url)\n else:\n logger.warning('request sent to \"%s\" returned error code: %u.' % (url, response.status_code))\n\n # add sleep here to avoid ddos to website\n time.sleep(self.scrapy_interval)\n\n return response.content", "def run(self):\n\n for url in self.urls:\n try:\n # Use requests to retrieve web page data\n print(url)\n response = session.get(url, ) # allow_redirects=True)\n\n if response.status_code != 200:\n print('Failed to retrieve page, URL: {0}, error: {1}\\n'.format(url, response.status_code))\n return\n\n # Get web page data from HTML response\n content = get_json_data(response.text)\n\n # Compile data into dictionary to be used for reporting\n summary_data = generate_report(content)\n\n # Generate/print report\n print_report(summary_data)\n\n except Exception as error:\n print('Scraper failed to run for URL {0}, error: {1}, {2}\\n'.format(\n url, type(error).__name__, error\n ))\n\n # time.sleep(1) # for load concerns", "def scrape_url(url):\n r = requests.get(url)\n url_list = get_urls(r.text)\n email_list = get_email_addresses(r.text)\n phone_list = get_phone_numbers(r.text)\n\n print_list('Urls', url_list)\n print_list('Emails', email_list)\n print_list('Phone Numbers', phone_list)", "def fetch(self, url, listener, useCache = True): #$NON-NLS-1$\r", "def _setContentFromUrl(self, url):\n urlgrabber = UrlGrabber(url)\n self._htmlContent = urlgrabber.get()", "def getData(self, url):\r\n \r\n if self.type == 'unix-socket':\r\n try:\r\n session = requests_unixsocket.Session()\r\n r = session.get(url)\r\n r.raise_for_status()\r\n return r\r\n except requests.exceptions.HTTPError as errorMsg:\r\n print(errorMsg)\r\n exit() \r\n except:\r\n print ('Cannot connect to the Docker daemon at \"{}\"'.format(url[:url.find('.sock')+5])) \r\n print('Is the docker daemon running?')\r\n exit()\r\n \r\n\r\n if self.type == 'http':\r\n try:\r\n r = requests.get(url)\r\n r.raise_for_status()\r\n return r\r\n except requests.exceptions.HTTPError as errorMsg:\r\n print(errorMsg)\r\n exit()\r\n except:\r\n print('Something went wrong ...')\r\n print('URL: ', url)\r\n print(sys.exc_info()[0])\r\n exit()", "def retrieve_episode_html(url):\n response = requests.get(url)\n return response.content", "def open(self):\r\n self._stream = 
chunked_requests.Stream('stream.plot.ly',\r\n 80,\r\n {'Host': 'stream.plot.ly',\r\n 'plotly-streamtoken': self.stream_id})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleans up the given comments.
def cleanup_comments(comments):
    clean_comments = []
    if comments:
        for comment in comments:
            cleaned_up = sub(r'\n\n {8}\n {8}\n {12}\n {16}\n {16}\n {12}\nEdit', '', comment)
            clean_comments.append(cleaned_up)
    return clean_comments
[ "def clear_comments(self):\n content_string_list = self.doc_object.tex_file_contents\n comment_removed_content_list = []\n for cont_str in content_string_list:\n comment_removed_content = strip_comments(cont_str)\n comment_removed_content_list.append(comment_removed_content)\n\n self.doc_object.sanitized_file_strings = comment_removed_content_list", "def del_comments(self):\n self.content = re.sub('!.*|#.*', '', self.content)", "def clean_comments(self):\r\n while True:\r\n logger.info('Starting comment cleaning')\r\n for comment in self.reddit.user.me().comments.new():\r\n logger.info(f'Considering comment {comment.submission}/{comment} with score {comment.score}')\r\n if comment.score < MIN_COMMENT_SCORE:\r\n logger.info(f'Score too low! Deleting comment')\r\n comment.delete()\r\n time.sleep(CLEAN_COMMENT_INTERVAL)", "def del_comm(self, blocks=False):\n logging.debug('Delete comments from text')\n if not(self.check()):\n raise GcodeError(\"Invalid g-codes\")\n temp = []\n comment = re.compile(';\\ .*')\n for line in self.blocks:\n n = comment.search(line)\n if n:\n line = line[:n.span()[0]]\n line = line.strip()\n if line != \"\":\n temp.append(line)\n if blocks:\n return temp\n return \"\\n\".join(temp)", "def delete_all_comments(dom: Any) -> None:\r\n if dom is None:\r\n return\r\n\r\n if isinstance(dom, CommentedMap):\r\n for key, val in dom.items():\r\n Parsers.delete_all_comments(key)\r\n Parsers.delete_all_comments(val)\r\n elif isinstance(dom, CommentedSeq):\r\n for ele in dom:\r\n Parsers.delete_all_comments(ele)\r\n try:\r\n # literal scalarstring might have comment associated with them\r\n attr = \"comment\" if isinstance(dom, ScalarString) \\\r\n else ruamel.yaml.comments.Comment.attrib\r\n delattr(dom, attr)\r\n except AttributeError:\r\n pass", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def remove_comments(doc):\n class Remove(Visitor):\n \"\"\" Visitor implementation of comment removal. \"\"\"\n def raw_line(self, line):\n if line.type == 'comment':\n return []\n else:\n return [line.original]\n\n return Remove().top_level(doc)", "def correct_tokenization(self, comments):\n\t\tself.yap(\"Joining orphaned lines of punctuation...\")\n\t\tcorrected = []\n\t\tfor line in comments:\n\t\t\tif all([w in punct for w in line]):\n\t\t\t\tcorrected[-1] = corrected[-1] + line if corrected else \"\"\n\t\t\telse:\n\t\t\t\tcorrected.append(line)\n\t\t#combine punctuation sequences into a single token\n\t\tself.yap(\"Joining punctuation sequences... 
\")\n\t\tcorrected = [self.joinPunctuationSequence(c) for c in corrected]\n\t\treturn corrected", "def clean(self, file_contents):\n removed_multiline_contents = re.sub(MATCH_MULTILINE_COMMENT, '', file_contents)\n removed_comments_contents = re.sub(MATCH_INLINE_COMMENT, '', removed_multiline_contents)\n return removed_comments_contents", "def _removeComments(code):\r\n # remove all occurance streamed comments (/*COMMENT */) from string\r\n text = re.sub(re.compile('/\\*.*?\\*/', re.DOTALL), '', code)\r\n # remove all occurance singleline comments (//COMMENT\\n ) from string\r\n return re.sub(re.compile('//.*?\\n'), '', text)", "def clean_text(self, comment):\n # convert comment to lower case\n comment = comment.lower()\n\n # remove \\n (new line characters)\n comment = re.sub(\"\\\\n\", \" \", comment)\n\n # remove URLs\n comment = re.sub(\n r'''(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))''',\n \" \", comment)\n\n # remove ip addresses\n comment = re.sub(\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\", \" \", comment)\n\n # remove usernames\n comment = re.sub(\"\\[\\[.*\\]\", \" \", comment)\n\n # remove date time and time zone\n comment = self.remove_dates(comment)\n\n # remove repeating characters in a word ex: abbbbcd ==> abcd\n pattern = re.compile(r\"(.)\\1{2,}\", re.DOTALL)\n comment = pattern.sub(r\"\\1\", comment)\n\n # remove repeating words ex: you said that that that ==> you said that\n comment = re.sub(r'(\\W|^)(.+)\\s\\2', '', comment)\n\n # substitute regex patterns for vulgar words ex: f***k ==> fuck\n for target, patterns in RE_PATTERNS.items():\n for pat in patterns:\n comment = re.sub(pat, target, comment)\n\n # remove if there are any extra spaces in comment\n comment = \" \".join(comment.split())\n\n # perform tokenization\n words = self.tokenizer.tokenize(comment)\n\n # (')aphostophe replacement (ie) you're --> you are\n words = [APOSTROPHE_MAP[word] if word in APOSTROPHE_MAP else word for word in words]\n\n comment = \" \".join(words)\n # remove special chars\n comment = re.sub(r\"[^a-z0-9!#\\$%\\^\\&\\*_\\-,\\.\\'()\\/ ]\", ' ', comment)\n\n # perform lemmatization\n words = [self.lemmatizer.lemmatize(word, \"v\") for word in comment.split()]\n words = [w for w in words if not w in STOPWORDS]\n\n clean_sent = \" \".join(words)\n # remove any non alphanum,digit character\n clean_sent = re.sub(\"\\W+\", \" \", clean_sent)\n clean_sent = re.sub(\" \", \" \", clean_sent)\n return (clean_sent)", "def _strip_comments(text: Text) -> Text:\n return re.sub(comment_regex, \"\", text)", "def test_remove_comments(self):\n \n bufin = \"not comment /*comment\\n\\n*/\\n\\n//comment\\n\\n/*\\nabc\\n*/soemthing//comment\"\n #print \"input buffer:\\n\" + bufin\n output_buffer = saputils.remove_comments(bufin)\n #print \"output buffer:\\n\" + bufout\n \n self.assertEqual(len(output_buffer) > 0, True)", "def _strip_comment_tags(comments, tags):\n def _strip(line):\n for tag in tags:\n if line.startswith(tag):\n return line[len(tag):].strip()\n return line\n comments[:] = map(_strip, comments)", "def strip_comments(tokens):\n prev_typ = None\n prev_end_col = 0\n for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:\n if typ in (tokenize.NL, tokenize.NEWLINE):\n if prev_typ in (tokenize.NL, tokenize.NEWLINE):\n start_col = 0\n else:\n start_col = prev_end_col\n end_col = start_col + 1\n elif typ == tokenize.COMMENT 
and start_row > 2:\n continue\n prev_typ = typ\n prev_end_col = end_col\n yield typ, tok, (start_row, start_col), (end_row, end_col), line", "def remove_comments(source):\n comment_re = r'(/[*].*?[*]/)|(//[^\\n]*)'\n return re.sub(comment_re,'', source, flags=re.MULTILINE | re.DOTALL)", "def handle_comments():\n comments = db.CommentGetNext(limit=5) # Get up to 5 comments\n for comment in comments:\n # Note that notify_bug makes multiple retries\n success = bz.notify_bug(comment.comment, comment.bug)\n if success:\n # Posted. Get rid of it.\n db.CommentDelete(comment)\n elif comment.attempts == 5:\n # 5 attempts have been made, drop this comment as it is\n # probably not going anywhere.\n try:\n with open('failed_comments.log', 'a') as fc_log:\n fc_log.write('%s\\n\\t%s'\n % (comment.bug, comment.comment))\n except IOError, err:\n log.error('Unable to append to failed comments file.')\n log.error(\"Could not post comment to bug %s. Dropping comment: %s\"\n % (comment.bug, comment.comment))\n db.CommentDelete(comment.id)\n else:\n comment.attempts += 1\n db.CommentUpdate(comment)", "def comments(self, comments):\n self._comments = comments", "def _clean_up(mech_str, remove_comments=True):\n mech_str = _convert_comment_lines(mech_str)\n if remove_comments:\n mech_str = remove_comment_lines(\n mech_str, delim_pattern=app.escape('!'))\n mech_str = remove_whitespace(mech_str)\n return mech_str" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the certificates specific to the United States.
def parse_certificates(soup):
    # removes the first item because it is not needed
    rating_tags = soup.find_all('a')[1:]
    rating_codes = [code.string for code in rating_tags]
    mpaa = []
    if rating_codes:
        for rating in rating_codes:
            # sorry international folks, only interested in the US ratings
            if rating.startswith('United States'):
                mpaa.append(rating)
    return mpaa
[ "def populate_sagepay_country_code(self):\n try:\n r = requests.get('http://www.iso.org/iso/home/standards/country_codes/country_names_and_code_elements_txt.htm')\n except requests.RequestException:\n return False\n countries = r.text.split('\\r\\n')\n for country in countries[1:-2]:\n country = country.split(';')\n try:\n c = CountryCode(name=country[0].capitalize(), code=country[1])\n c.save()\n except: #todo don't catch all the exceptions, this is just temporary\n return False\n return True", "def test_get_country_states(self):\n pass", "def get_senate_offices_from_contactcongressdb(ccdump):\n d = {}\n for line in ccdump.strip().split('\\n'):\n if len(line.strip()) > 1:\n parts = line.split('\\t')\n dist = parts[1]\n state = dist[:2]\n email_form = parts[-2]\n #(fullname, dist, name, party, dc_office, dc_voice, district_voice, email_form, web) = line.split('\\t')\n if dist[2:]==\"JR\" or dist[2:]==\"SR\":\n d.setdefault(str(state), []).append(str(email_form))\n return d", "def check_country_names_being_valid(self):\n\n errors = []\n\n LOGGER.debug(\"Validating country codes ...\")\n attribute_name = f\"country_{INDEX_LIST_SUFFIX}\"\n\n merged_country_codes = {c.upper() for c in\n getattr(self,\n attribute_name)}\n\n for country in merged_country_codes:\n if pycountry.countries.get(alpha_2=country) is None:\n suggestion = (pycountry.countries.lookup(country)).alpha_2\n LOGGER.debug(f\"Invalid country code detected: '{country}', \"\n f\"try '{suggestion}'\")\n errors.append(InvalidCountryCodeError(\n f\"Country '{country}'' is invalid, \"\n f\"did you mean '{suggestion}'?\")\n )\n\n LOGGER.debug(\"Country codes validated.\")\n\n err_len = len(errors)\n\n if err_len == 0:\n LOGGER.debug(\"Country codes validated.\")\n return None\n elif err_len > 0:\n LOGGER.error(f\"Country codes validated with {err_len} error(s).\")\n return errors", "def findCountryCode(self):\n RecordsWithCountry = []\n for state in pycountry.subdivisions:\n #print(state.name)\n for record in self.Records: \n if state.name == record.state:\n #print(state.country, record.state)\n r = RecordCountry(date=record.date,\n country=state.country.alpha_3,\n impressions=record.impressions,\n CTR=record.CTR)\n self.Records.remove(record)\n RecordsWithCountry.append(r)\n for record in self.Records: \n r = RecordCountry(date=record.date,\n country=\"XXX\",\n impressions=record.impressions,\n CTR=record.CTR)\n RecordsWithCountry.append(r)\n self.Records = RecordsWithCountry", "def read_and_load_email_domains():\n\twith open(\"world_universities_and_domains.json\") as json_file:\n\t\traw_json_text = json_file.read()\n\n\traw_universities_json = json.loads(raw_json_text)\n\tuniversity_lookup = {}\n\tfor university in raw_universities_json:\n\t\t# print(university)\n\t\t# input()\n\t\tfor domain in university.get(\"domains\"):\n\n\t\t\tuniversity_summary = {}\n\n\t\t\tif university.get(\"name\"):\n\t\t\t\tuniversity_summary[\"name\"] = university[\"name\"]\n\t\t\tif university.get(\"country\"):\n\t\t\t\tuniversity_summary[\"country\"] = university[\"country\"]\n\t\t\tif university.get(\"alpha_two_code\"):\n\t\t\t\tuniversity_summary[\"alpha_two_code\"] = university[\"alpha_two_code\"]\n\t\t\tif university.get(\"state-province\"):\n\t\t\t\tuniversity_summary[\"state-province\"] = university[\"state-province\"]\n\n\t\t\tuniversity_lookup[domain] = university_summary\n\n\treturn(university_lookup)", "def parse_counties(county_args):\n c = []\n for county_state in county_args:\n try:\n county, state = county_state.split(\",\")\n 
c.append([county, state])\n except ValueError:\n print(f\"Couldn't parse {county_state} as a City,State pair. Please specify counties as a county,state pair.\")\n exit(1)\n return c", "def parsePemList(self, s):\r\n x509List = []\r\n bList = dePemList(s, \"CERTIFICATE\")\r\n for b in bList:\r\n x509 = X509()\r\n x509.parseBinary(b)\r\n x509List.append(x509)\r\n self.x509List = x509List", "def seperate_City_Data(data, us_state_abbrev):\n assert data is not None\n dictionary = dict(data)\n keys = dictionary.keys()\n tmp = list(keys)\n values = dictionary.values()\n res = []\n for elem in keys:\n state = elem[1].strip()\n city = elem[0].strip()\n# print(city)\n if state in us_state_abbrev:\n res.append(city)\n return res, list(values)", "def test_tlsaCert(self):\n fakeGetdns = FakeGetdns(\n generalResult=createResults(status=getdns.GETDNS_RESPSTATUS_GOOD,\n selector=_dane.SELECTOR.CERT.value,\n certificate_association_data=b'FOOBAR'))\n _, (res,) = _dane.lookup_tlsa_records(\n 'example.com', 443, 'tcp', getdns=fakeGetdns)\n self.assertEqual(\n (_dane.SELECTOR.CERT, b'FOOBAR'),\n (res.selector, res.payload)\n )", "def get_FIPS_lookup():\n\n\tcounty_file = open('state-county-fips-codes.txt').readlines()\n\tcounties = [l.strip('\\n').strip().split(' ', 1) for l in county_file]\n\n\tcounty_lookup = {}\n\n\tfor county in counties:\n\t\tfips_code = county[0]\n\t\tcounty_name = county[1].strip()\n\n\t\tcounty_lookup[fips_code] = county_name\n\n\treturn county_lookup", "def _parse_domains(self, raw_config):\n def add_domain(domain, cert_info):\n \"\"\"Add domain data to configuration.\"\"\"\n if domain in self.domains:\n raise Exception('Domain identifier \"{0}\" appears more than once!'\n .format(domain))\n if not os.path.isfile(cert_info.cert()):\n raise Exception('Cannot find certificate chain \"{0}\" for domain \"{1}\"!'\n .format(cert_info.cert(), domain))\n if not os.path.isfile(cert_info.chain()):\n raise Exception('Cannot find certificate chain file \"{0}\" for domain \"{1}\"!'\n .format(cert_info.chain(), domain))\n if not os.path.isfile(cert_info.rootchain()):\n raise Exception('Cannot find certificate root chain file \"{0}\" for domain \"{1}\"!'\n .format(cert_info.rootchain(), domain))\n if not os.path.isfile(cert_info.ocsp_response()) and os.path.exists(cert_info.ocsp_response()):\n raise Exception('OCSP response file \"{0}\" for domain \"{1}\" exists, but is not a file!'\n .format(cert_info.ocsp_response(), domain))\n # Insert\n self.domains[domain] = cert_info\n\n # Scan for certificates\n for scan_key_data in raw_config.get('scan_keys', []):\n scan_key_folder = scan_key_data.get('folder', '')\n scan_key_recursive = scan_key_data.get('recursive', True)\n scan_key_cert_mask = scan_key_data.get('cert_mask', '{domain}.pem')\n scan_key_chain_mask = scan_key_data.get('chain_mask', '{domain}-chain.pem')\n scan_key_rootchain_mask = scan_key_data.get('rootchain_mask', '{domain}-rootchain.pem')\n scan_key_ocsp_mask = scan_key_data.get('ocsp_mask', '{domain}.ocsp-resp')\n for domain, cert_info in self._scan_certs(scan_key_folder, scan_key_cert_mask, scan_key_chain_mask,\n scan_key_rootchain_mask, scan_key_ocsp_mask,\n recursive=scan_key_recursive).items():\n add_domain(domain, cert_info)\n\n # Explicitly listed certificates\n for domain, data in raw_config.get('domains', {}).items():\n if 'cert' not in data:\n raise Exception('Explicit domain \"{0}\" does not contain \"cert\" record!'.format(domain))\n if 'chain' not in data:\n raise Exception('Explicit domain \"{0}\" does not contain \"chain\" 
record!'.format(domain))\n if 'rootchain' not in data:\n raise Exception('Explicit domain \"{0}\" does not contain \"rootchain\" record!'.format(domain))\n if 'ocsp' not in data:\n raise Exception('Explicit domain \"{0}\" does not contain \"ocsp\" record!'.format(domain))\n ocsp_responder_uri = None\n if 'ocsp_responder_uri' in data:\n ocsp_responder_uri = data['ocsp_responder_uri']\n if ocsp_responder_uri == 'certificate':\n ocsp_responder_uri = None\n add_domain(domain, CertInfo(data['cert'], data['chain'], data['rootchain'],\n os.path.join(self.ocsp_folder, data['ocsp']),\n ocsp_responder_uri=ocsp_responder_uri))", "def test_x509_multi_cert(self):\n self.assertEqual(jc.parsers.x509_cert.parse(self.x509_multi_cert, quiet=True), self.x509_multi_cert_json)", "def test_x509_cert_nodata(self):\n self.assertEqual(jc.parsers.x509_cert.parse('', quiet=True), [])", "def _get_countries():\n print('-c, -C [country]\\\n \\n [country]=\\\n \\n AR\\t: Argentina\\\n \\n AT\\t: Austria\\\n \\n BR\\t: Brazil\\\n \\n BY\\t: Belarus\\\n \\n CA\\t: Canda\\\n \\n DE\\t: Germany\\\n \\n FR\\t: France\\\n \\n GB\\t: Great Britain\\\n \\n GH\\t: Ghana\\\n \\n HU\\t: Hungary\\\n \\n ID\\t: Indonesia\\\n \\n IL\\t: Israel\\\n \\n JP\\t: Japan\\\n \\n KR\\t: Korea\\\n \\n MA\\t: Morocco\\\n \\n MY\\t: Malaysia\\\n \\n NL\\t: Netherlands\\\n \\n NO\\t: Norway\\\n \\n OM\\t: Oman\\\n \\n PK\\t: Pakistan\\\n \\n RU\\t: Russia\\\n \\n SA\\t: Saudi Arabia\\\n \\n TH\\t: Thailand\\\n \\n TW\\t: Taiwan\\\n \\n UA\\t: Ukraine\\\n \\n US\\t: United States\\\n \\n UY\\t: Uruguay\\\n \\n VE\\t: Venezuela\\\n \\n VN\\t: Vietnam\\\n \\n .....\\n common usage: opengate -c JP')", "def parse_locations(person):\n if person.get('location').get('state'):\n state = person['location']['state'].upper()\n elif person.get('locations'):\n v = {i['state'].upper() for i in person['locations'] if i['state']}\n if len(v) != 1:\n return\n state = v.pop()\n else:\n return\n\n if state not in states:\n return # Prevent SQL injections via malformed state responses\n return state", "def test_valid_country_format(self, cred, country):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'country': country})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def parse_citystate(self):\n \n index = self.index\n \n if self.words[index]['tag'] != Vocabulary.NAME:\n return None, None, 0, 0\n \n if self.words[index]['word'] == 'mt':\n city = \"mountain\"\n else:\n city = self.words[index]['word']\n start = index\n \n index += 1\n if index == self.length:\n return None, None, 0, 0\n \n if self.words[index]['word'] == ',':\n index += 1\n if index == self.length:\n return None, None, 0, 0\n elif self.words[index]['tag'] == Vocabulary.NAME: \n # Hack\n state, n = self.state_hack(index)\n if n > 0:\n index += n\n return city, state, index - start + 1, index\n \n #if self.words[index]['word'] == 'medical doctor':\n #return city, \"ISO3166-2:US-MD\", index - start + 1, index\n try:\n state = self._state_dict[self.words[index]['word']]\n return city, state, index - start + 1, index\n except:\n city += ' ' + self.words[index]['word']\n index += 1\n if index == self.length:\n return None, None, 0, 0\n \n if self.words[index]['word'] == ',':\n index += 
1\n if index == self.length:\n return None, None, 0, 0\n\n # Hack\n state, n = self.state_hack(index)\n if n > 0:\n index += n\n if index == self.length: index -= 1 # Hack\n return city, state, index - start + 1, index\n \n if self.words[index]['tag'] not in [Vocabulary.NAME, Vocabulary.ACRONYM]:\n return None, None, 0, 0\n \n try:\n state = self._state_dict[self.words[index]['word']]\n return city, state, index - start + 1, index\n except: \n return None, None, 0, 0", "def countries_from_summary(summary):\n return list({d[\"ct\"] for d in summary})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the given section.
def parse_section(soup):
    section_tag = soup.find_all('a', {'class': 'advisory-severity-vote__message'})
    section_scale = [code.string for code in section_tag]
    section = section_scale[0] if section_scale else None
    section_comment_tags = soup.find_all('li', {'class': 'ipl-zebra-list__item'})
    section_comment_list = [comment.text.strip() for comment in section_comment_tags]
    comments = cleanup_comments(section_comment_list)
    return section, comments
[ "def _parse_section(self, template, start_index, section_key):\n parsed_section, content_end_index, end_index = (\n self.parse(template=template,\n start_index=start_index,\n section_key=section_key)\n )\n\n return (parsed_section, template[start_index:content_end_index],\n end_index)", "def do_section(parser, token, template='parts/section.html', end='endsection'):\n bits = token.split_contents()[1:]\n if len(bits) is 0:\n title, attrs = '', {}\n elif len(bits) is 1:\n title, attrs = bits[0], {}\n elif len(bits) % 2 is 0:\n raise template.TemplateSyntaxError(\"Your attributes don't match up: %s\" % ', '.join(bits[1:]))\n else:\n title = bits[0]\n attrs = dict(zip(bits[1::2], bits[2::2]))\n nodelist = parser.parse((end,))\n parser.delete_first_token()\n return SectionNode(template, title, attrs, nodelist)", "def _parse_tag_section(self, line, dictionary) :\n# print '------>>>>>NOW IN parse_tag_section<<<<<<<<<----------', line\n\n\n if not line.startswith(\"; \") :\n raise ValueError(\"Expected line starting '; '\")\n while line.startswith(\"; \") :\n tag, value = line[2:].strip().split(\": \",1)\n #fasta34 and fasta35 will reuse the pg_name and pg_ver tags\n #for the program executable and name, and the program version\n #and the algorithm version, respectively. This may be a bug.\n #if tag in dictionary :\n # raise ValueError(\"Repeated tag '%s' in section\" % tag)\n dictionary[tag] = value\n# print line.strip(), tag, value\n line = self.handle.readline()\n return line", "def _read_section(file, section_ids, pix_container):\n data_type = ''\n props = []\n data = []\n data_index = 0\n section_type = ''\n while data_type != 'SE_E':\n data_type, line = next_line(file)\n if data_type in ('EOF', 'ERR'):\n break\n # print('%s =-> \"%s\"' % (type, line))\n if data_type == 'Prop':\n # print(' -++- line: %s' % line)\n prop = _get_prop(line)\n # print('prop: %s' % prop)\n if prop is not None:\n props.append(prop)\n elif data_type == 'data':\n # print('line: \"%s\"' % line)\n dat_index, dat = _get_data(file, line)\n if dat_index == data_index:\n # print('dat: %s' % dat)\n data_index += 1\n if dat != []:\n data.append(dat)\n else:\n print('WARNING - Inconsistent data indexing in line: \"%s\"! Skipping...' % line)\n elif data_type == 'empty_line':\n props.append((\"\", \"\"))\n elif data_type == 'line_C':\n comment = line.strip()\n props.append((\"#\", comment[2:]))\n elif data_type == 'SE_C':\n # comment_section = data_structures.section_data(\"#comment\")\n print('comment section: \"%s\"' % line)\n elif data_type == 'SE_S':\n # section_type = re.split(r'[ ]+', line)[0]\n type_line = re.split(r'[ ]+', line)\n for rec in type_line:\n if rec != '':\n try:\n section_type = re.split(r'[ ]+', line)[1]\n except:\n section_type = ''\n print('WARNING - Unknown data in line: \"%s\"! Skipping...' 
% line)\n break\n new_section_ids = _SectionData(section_type)\n new_section, pix_container = _read_section(file, new_section_ids, pix_container)\n section_ids.sections.append(new_section)\n # pix_container.append(new_section)\n if data_type != 'SE_E':\n section_ids.props = props\n section_ids.data = data\n return section_ids, pix_container", "def parse_create_section(xml_course):\n\n attrs = [\n \"section\",\n 'crn',\n \"start-time\",\n \"end-time\",\n \"meeting-days\",\n \"location\",\n \"section-number\",\n \"instructor\"\n ]\n\n section = pull_attributes_from_xml(xml_course, attrs)\n\n section[\"places\"] = []\n\n # Create Place attribute pointer based on location string\n # Get places from Parse\n places = get_places()[\"results\"]\n # Get location info from section (of form [\"BRK 101\", \"TBA\"])\n all_locations = section[\"location\"].split(\", \")\n # Filter out TBA\n # TODO Maybe do something else with them\n locations = [location for location in all_locations if location != \"TBA\"]\n\n for location in locations:\n building_code = location.split(\" \")[0]\n for place in places:\n if place.get(\"symbol\") and place[\"symbol\"] == building_code:\n section[\"places\"].append(place[\"objectId\"])\n break;\n\n\n return section", "def read_section(self, text):\r\n if text.lstrip(' ').startswith('$'):\r\n index = text.index('$')\r\n begin_indent, text2 = text[:index], text[index+1:]\r\n ahead = self.python_lookahead(text2)\r\n \r\n if ahead == 'var':\r\n return self.read_var(text2)\r\n elif ahead in STATEMENT_NODES:\r\n return self.read_block_section(text2, begin_indent)\r\n elif ahead in KEYWORDS:\r\n return self.read_keyword(text2)\r\n elif ahead.strip() == '':\r\n # assignments starts with a space after $\r\n # ex: $ a = b + 2\r\n return self.read_assignment(text2)\r\n return self.readline(text)", "def parse_section(section, env):\n\n target = fetch_instance(section, nodes.target)\n if target is None:\n return (None, None)\n\n refid = target.get('refid')\n\n # Fetch Metadata\n abstract = fetch_instance(section, abstract_node)\n parent = find_parent(section, nodes.document)\n\n # Fetch Docname\n if parent is None:\n return (None, None)\n path = parent.get('source')\n docname = env.path2doc(path)\n\n # Determine Output\n if abstract is not None:\n body = abstract.astext()\n title = fetch_instance(abstract, nodes.paragraph)[0]\n\n if isinstance(title, nodes.literal):\n title = \"<code>%s</code>\" % title.astext()\n else:\n title = title.astext()\n\n else:\n title = fetch_instance(section, nodes.title).astext()\n body = title\n\n return (refid, {\"title\": title, \"abstract\": body, \"docname\": docname})", "def process_section(self):\n section_name = [\"part\", \"chapter\", \"section\", \"subsection\", \"subsubsection\",\n \"paragraph\", \"subparagraph\"][self.nesting_level+nesting_level_offset]\n emit(r\"\\%s{\" % section_name)\n self.children[0].process()\n emit(\"}\\n\\n\")\n for child in self.children[1:]:\n child.process()", "def parse_text(self, text: str) -> SectionDict:", "def parse_section(Config, section):\n dict1 = {}\n options = Config.options(section)\n for option in options:\n try:\n dict1[option] = Config.get(section, option)\n if dict1[option] == -1:\n DebugPrint(\"skip: %s\" % option)\n except:\n print((\"exception on %s!\" % option))\n dict1[option] = None\n return dict1", "def _parse_psf_section(psf):\n conv = OplsPsfFile._convert\n line = psf.readline()\n while not line.strip():\n if not line:\n raise CharmmPsfEOF('Unexpected EOF in PSF file')\n else:\n line = 
psf.readline()\n if '!' in line:\n words = line[:line.index('!')].split()\n title = line[line.index('!')+1:].strip().upper()\n # Strip out description\n if ':' in title:\n title = title[:title.index(':')]\n else:\n raise CharmmPSFError('Could not determine section title')\n if len(words) == 1:\n pointers = conv(words[0], int, 'pointer')\n else:\n pointers = tuple([conv(w, int, 'pointer') for w in words])\n line = psf.readline().strip()\n if not line and title.startswith('NNB'):\n # This will correctly handle the NNB section (which has a spurious\n # blank line) as well as any sections that have 0 members.\n line = psf.readline().strip()\n data = []\n if title == 'NATOM' or title == 'NTITLE' or title == 'NUMLP NUMLPH' or title == 'NUMANISO':\n # Store these four sections as strings (ATOM section we will parse\n # later). The rest of the sections are integer pointers\n while line:\n data.append(line)\n line = psf.readline().strip()\n else:\n while line:\n words = line.split()\n data.extend([conv(w, int, 'PSF data') for w in words])\n line = psf.readline().strip()\n return title, pointers, data", "def parse_section_string(s):\n return Section(*s.split(\"-\"))", "def parse_section_header(section_header, title, required=False):\n print(\"parse_section_header\")\n\n parsed_data = [{\n \"type\": \"sectionHeader\",\n \"title\": title,\n \"helpText\": \"\"\n }]\n\n return parsed_data", "def __init__(self, _parser, _section):\n self.parser = _parser\n _parser.add_section(_section)\n self.section = _section", "def parse_get_section(xml_course):\n parse_section = parse_create_section(xml_course)\n query_constraints = {\n \"crn\": parse_section[\"crn\"]\n }\n params = urllib.urlencode({\"where\": json.dumps(query_constraints)})\n connection = httplib.HTTPSConnection(PARSE_API_URL, PARSE_API_PORT)\n connection.connect()\n connection.request(\n \"GET\",\n \"%s?%s\" % (SECTIONS_ENDPOINT, params),\n '',\n {\"X-Parse-Application-Id\": app_id, \"X-Parse-REST-API-Key\": rest_api_key}\n )\n response = json.loads(connection.getresponse().read())\n if response.get(\"results\"):\n return response[\"results\"][0]\n else:\n return None", "def find_section(amdpar_xml):\n siblings = [s for s in amdpar_xml.itersiblings()]\n\n if len(siblings) == 0:\n return find_lost_section(amdpar_xml)\n\n for sibling in siblings:\n if sibling.tag == 'SECTION':\n return sibling\n\n paragraphs = [s for s in siblings if s.tag == 'P']\n if len(paragraphs) > 0:\n return fix_section_node(paragraphs, amdpar_xml)", "def parse(self, section_dict):\n self.dict = section_dict\n for option in section_dict:\n if option not in self.optionnames:\n print(\"Warning: Unknown option: {:s} in section {:s}\".format(\n option, self.name), file=sys.stderr\n )\n for option, name in zip(self.options, self.optionnames):\n self.dict[name] = option.parse(self)\n return self.dict", "def _read_section(self, pointer, nr_of_leads):\n if pointer.id == 1:\n return self._section1(pointer)\n if pointer.id == 2:\n return self._section2(pointer)\n elif pointer.id == 3:\n return self._section3(pointer)\n elif pointer.id == 4:\n return self._section4(pointer)\n elif pointer.id == 5:\n return self._section5(pointer, nr_of_leads)\n elif pointer.id == 6:\n return self._section6(pointer, nr_of_leads)\n elif pointer.id == 7:\n return self._section7(pointer)\n elif pointer.id == 8:\n return self._section8(pointer)\n elif pointer.id == 9:\n return self._section9(pointer)\n elif pointer.id == 10:\n return self._section10(pointer)\n elif pointer.id == 11:\n return 
self._section11(pointer)\n elif pointer.id == 12:\n return self._section12(pointer)\n elif pointer.id > 12:\n print(\"WARN: Section Id %s is not implemented\" % str(pointer.id))\n return None", "def parse_color_section(conf, sname):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if ``value`` is a health check.
def is_healthcheck(self, value):
    return is_healthcheck(value)
[ "def is_true(value):\n \n return (value is True)", "def isTrue(value):\n if type(value) == str:\n return value.lower() == \"on\"\n elif value:\n return True\n return False", "def _check_value_type(self, value):\n if value is not None and self.value_type is not None:\n valid = isinstance(value, self.value_type)\n if not valid:\n return False\n return True", "def is_type(value, value_type):\n if isinstance(value, value_type):\n return True\n else: \n return False", "def isPass(value: Any) -> bool: # pragma: no cover\n if isinstance(value, bool):\n return True\n return PASS in value", "def check(self, value: ATTRIBUTE_TYPES) -> bool:\n if self.type == ConstraintTypes.EQUAL:\n return self.value == value\n if self.type == ConstraintTypes.NOT_EQUAL:\n return self.value != value\n if self.type == ConstraintTypes.LESS_THAN:\n return self.value < value\n if self.type == ConstraintTypes.LESS_THAN_EQ:\n return self.value <= value\n if self.type == ConstraintTypes.GREATER_THAN:\n return self.value > value\n if self.type == ConstraintTypes.GREATER_THAN_EQ:\n return self.value >= value\n if self.type == ConstraintTypes.WITHIN:\n low = self.value[0]\n high = self.value[1]\n return low <= value <= high\n if self.type == ConstraintTypes.IN:\n return value in self.value\n if self.type == ConstraintTypes.NOT_IN:\n return value not in self.value\n if self.type == ConstraintTypes.DISTANCE:\n if not isinstance(value, Location): # pragma: nocover\n raise ValueError(\"Value must be of type Location.\")\n location = cast(Location, self.value[0])\n distance = self.value[1]\n return location.distance(value) <= distance\n raise ValueError(\"Constraint type not recognized.\") # pragma: nocover", "def is_valid(self, value):\n return True", "def has_value(value):\n return IsDictContainingValue(wrap_matcher(value))", "def is_valid(self, value: Any) -> bool:\n return self.__call__(value)", "def check_type(value, type_def):\n if type_def == 'integer':\n try:\n # We accept string with integer ex: '123'\n int(value)\n return True\n except ValueError:\n return isinstance(value, six.integer_types) and not isinstance(value, bool)\n elif type_def == 'number':\n return isinstance(value, (six.integer_types, float)) and not isinstance(value, bool)\n elif type_def == 'string':\n return isinstance(value, (six.text_type, six.string_types, datetime.datetime))\n elif type_def == 'boolean':\n return isinstance(value, bool) or (isinstance(value, (six.text_type, six.string_types,)) and value.lower() in ['true', 'false'])\n else:\n return False", "def is_true(value: str) -> bool:\n if isinstance(value, bytes):\n value = value.decode()\n\n if not isinstance(value, str):\n logging.warning(\n f\"A non-string value ({str(value)}) was passed to dmod.evaluation_service.utilities.common.is_true\"\n )\n return bool(value)\n\n return str(value).lower() in (\"yes\", \"y\", \"1\", 'true', 'on')", "def accepts(cls, value: Any) -> bool:\n try:\n cls.convert(value)\n return True\n except ValueError:\n return False", "def get_status_health(self) -> bool:\n try:\n url = f\"http://{self.ip}:{self.port}{STATUS_HEALTH_ENDPOINT}\"\n response = requests.get(url)\n return response.status_code == 200\n except ConnectionError:\n return False", "def is_valid_value(self, value: Any) -> bool:\n return self.type_registry.is_valid_nested(value)", "def is_valid_confidence(self, value: List) -> bool:\n\n if self._confidence_values is None or value is None:\n return True\n else:\n return value in self._confidence_values", "def check(validator, value, *args, **kwargs):\n 
try:\n validator(*args, **kwargs)(None, value)\n except ValidationError:\n return False\n return True", "def is_value_legit(self, value):\n return value in self.domain and value in self.possible_domain", "def test(self, value):\n if isinstance(value, self.result_type):\n return True\n try:\n self.cast(value)\n return True\n except:\n return False", "def is_true(value):\n return value.lower() == 'true'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return copy of TestSuite where only health checks remain.
def filter_suite(self, suite):
    if isinstance(suite, unittest.TestSuite):
        suite_copy = self.suiteClass()
        for sub in suite:
            if isinstance(sub, unittest.TestSuite):
                suite_copy.addTest(self.filter_suite(sub))
            else:
                if self.is_healthcheck(sub):
                    suite_copy.addTest(sub)
    elif self.is_healthcheck(suite):
        suite_copy = suite.copy()
    return suite_copy
[ "def filter_tests(suite):\r\n result = unittest.TestSuite()\r\n for test in suite:\r\n if isinstance(test, unittest.TestSuite):\r\n result.addTest(filter_tests(test))\r\n elif not test.__class__.__name__.startswith(\"_\"):\r\n result.addTest(test)\r\n return result", "def filter_suite(self, suite, pred):\n newtests = []\n for test in suite._tests:\n if test.__class__.__name__.endswith('TestSuite'):\n self.filter_suite(test, pred)\n newtests.append(test)\n else:\n if pred(test):\n newtests.append(test)\n suite._tests = newtests", "def loadTestsFromTestCase(self, testCaseClass):\n suite = super(HealthCheckLoader, self).loadTestsFromTestCase(\n testCaseClass)\n return self.filter_suite(suite)", "def test_clone_included_suite(self):\r\n rs = self.F.RunSuiteFactory.create()\r\n\r\n new = rs.run.clone()\r\n\r\n self.assertNotEqual(new.runsuites.get(), rs)", "def remove_empty_suites(self):\n self.visit(EmptySuiteRemover())", "def loadTestsFromName(self, name, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromName(name, module)\n return self.filter_suite(suite)", "def suite():\n suite_obj = unittest.TestSuite()\n suite_obj.addTest(TestEssentials())\n return suite_obj", "def GetTestSuite(self, test_type=None):\n if test_type is None:\n test_type = self.GetTestType()\n test_suite = unittest.TestSuite()\n if self._test_cases:\n for suite_type, test_cases in self._test_cases.iteritems():\n if suite_type in test_type:\n test_suite.addTests(unittest.TestSuite([self._loadTestsFromTestCase(tc)\n for tc in test_cases]))\n\n if self._child_test_cases:\n for child in self._child_test_cases:\n test_suite.addTests(child.TEST_SUITE_MANAGER.GetTestSuite(test_type))\n\n return test_suite", "def loadTestsFromModule(self, module, *args, **kwargs):\n suite = super(HealthCheckLoader, self).loadTestsFromModule(\n module, *args, **kwargs)\n return self.filter_suite(suite)", "def make_suite():\n suite = unittest.TestSuite()\n return suite", "def suite():\n\tts = unittest.TestSuite()\n\tfor test_module in __all__:\n\t\tm = importlib.import_module(\"pyroclast.test.\" + test_module)\n\t\tfor n in dir(m):\n\t\t\tc = getattr(m, n)\n\t\t\tif is_test_case(c):\n\t\t\t\ts = unittest.TestLoader().loadTestsFromTestCase(c)\n\t\t\t\tts.addTests(s)\n\treturn ts", "def _getTestSuite(self, category):\n test_suite = self.test_suites.get(category)\n if not test_suite:\n test_suite = unittest.TestSuite()\n self.test_suites[category] = test_suite\n return test_suite", "def suite():\r\n return unittest.TestLoader().loadTestsFromTestCase(ClassCTestCase)", "def _flattenTestSuite(self, testSuite):\n l = []\n try:\n for test_suite in testSuite._tests:\n l = l + self._flattenTestSuite(test_suite)\n except AttributeError:\n l.append(testSuite)\n return l", "def filter_tests(self, status):\n filtered_tests = {}\n for test in self.tests:\n if self.tests[test][\"status\"] == status:\n filtered_tests[test] = self.tests[test]\n\n return filtered_tests", "def suite():\n # patch it to work here\n package_def = 'app.test'\n\n suite = unittest.TestSuite()\n\n for other_suite in iter_suites(package_def):\n suite.addTest(other_suite)\n return suite", "def keep_stats(self):\n filtered = copy.deepcopy(self)\n filtered.benchmark = []\n for b in self.benchmarks:\n if any(b[\"name\"].endswith(suffix)\n for suffix in (\"_mean\", \"_median\", \"_stddev\")):\n filtered.benchmarks += [b]\n return filtered", "def get_tests(self):\n subtests = itertools.chain(*(s.get_tests() for s in self.suites.values()))\n tt = [t for t in 
itertools.chain(self.tests,subtests)]\n return tt", "def suite():\n loader = unittest.TestLoader()\n testsuite = loader.loadTestsFromModule(sys.modules[__name__])\n return testsuite" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load healthchecks from TestCase.
def loadTestsFromTestCase(self, testCaseClass):
    suite = super(HealthCheckLoader, self).loadTestsFromTestCase(testCaseClass)
    return self.filter_suite(suite)
[ "def test_health_check_get(self):\n pass", "def test_health_get(self):\n pass", "def test_v1_check_health(self):\n pass", "def loadTestsFromModule(self, module, *args, **kwargs):\n suite = super(HealthCheckLoader, self).loadTestsFromModule(\n module, *args, **kwargs)\n return self.filter_suite(suite)", "def test_load_testcase(self):\n tests = self.loader.load(\"tests.sampletest.hellotest.HelloTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest.hellotest import HelloTest\n\n self.assertEqual(type(tests[0]), HelloTest)", "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def test_fake_health_get(self):\n pass", "def test_v1alpha3_check_health(self):\n pass", "def test_list_service_health_checks(self):\n with self.rbac_utils.override_role(self):\n self.service_client.list_service_health_checks()", "def _loadChecks(self,withIdentityProvider=None):\n loaded_checks = []\n\n if withIdentityProvider:\n # Useful if doing grouping async requests with a shared identityprovider\n # and then spawning async call\n for checkdata in self._loadTestSetList():\n if checkdata['data']['identity_provider'].lower() == withIdentityProvider.lower():\n #loaded_checks.append({'data': checkdata['data']})\n loaded_checks.append(checkdata)\n\n else:\n loaded_checks = self._loadTestSetList()\n\n return loaded_checks", "def loadTestsFromName(self, name, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromName(name, module)\n return self.filter_suite(suite)", "def loadTestsFromNames(self, names, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromNames(names,\n module)\n return self.filter_suite(suite)", "def test_load(self, clean_level, inst_dict):\n\n test_inst, date = initialize_test_inst_and_date(inst_dict)\n if len(test_inst.files.files) > 0:\n # Set Clean Level\n test_inst.clean_level = clean_level\n target = 'Fake Data to be cleared'\n test_inst.data = [target]\n try:\n test_inst.load(date=date, use_header=True)\n except ValueError as verr:\n # Check if instrument is failing due to strict time flag\n if str(verr).find('Loaded data') > 0:\n test_inst.strict_time_flag = False\n with warnings.catch_warnings(record=True) as war:\n test_inst.load(date=date, use_header=True)\n assert len(war) >= 1\n categories = [war[j].category for j in range(0, len(war))]\n assert UserWarning in categories\n else:\n # If error message does not match, raise error anyway\n raise(verr)\n\n # Make sure fake data is cleared\n assert target not in test_inst.data\n\n # If cleaning not used, something should be in the file\n # Not used for clean levels since cleaning may remove all data\n if clean_level == \"none\":\n assert not test_inst.empty\n else:\n pytest.skip(\"Download data not available\")\n\n return", "def deserialize(data):\n healthchecks = []\n if data is None:\n return []\n for k, v in data.iteritems():\n hc = HealthCheck()\n hc._HealthCheck__data = v\n hc.name = k\n hc.script = v.get(\"Script\", \"\")\n hc.interval = v.get(\"Interval\", 0)\n hc.timeout = v.get(\"Timeout\", 0)\n hc.kill_count_limit = v.get(\"KillCountLimit\", default[\"KillCountLimit\"])\n hc.kill_exit_codes = v.get(\"KillExitCodes\", default[\"KillExitCodes\"])\n healthchecks.append(hc)\n return healthchecks", "def test_customize_test_loads(self):\n 
self.create_user_with_role(\n self.user.name, self.user.email, self.user.password, Role.tester)\n self.create_forktest(\"own-fork-commit\", TestPlatform.linux, regression_tests=[2])\n self.create_completed_regression_t_entries(3, [2])\n response = self.app.test_client().get('/test/3')\n self.assertEqual(response.status_code, 200)\n self.assert_template_used('test/by_id.html')\n regression_tests = RegressionTest.query.all()\n self.assertIn(regression_tests[1].command, str(response.data))\n self.assertNotIn(regression_tests[0].command, str(response.data))", "def haproxy_healthcheck(self):\n charm_instance = self.charm_instance or {}\n return getattr(charm_instance, 'healthcheck', {})", "def test_sites_site_id_health_levels_get(self):\n pass", "def __load_test(self):\n print(\"loading testing data...\")\n with open('test/test.json') as test_file:\n return json.load(test_file)", "def test_load_data(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load healthchecks from module.
def loadTestsFromModule(self, module, *args, **kwargs):
    suite = super(HealthCheckLoader, self).loadTestsFromModule(module, *args, **kwargs)
    return self.filter_suite(suite)
[ "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def loadTestsFromName(self, name, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromName(name, module)\n return self.filter_suite(suite)", "async def load(self, module):", "def loadTestsFromNames(self, names, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromNames(names,\n module)\n return self.filter_suite(suite)", "async def load_module(self, module: \"Module\"):", "def test_v1_check_health(self):\n pass", "def haproxy_healthcheck(self):\n charm_instance = self.charm_instance or {}\n return getattr(charm_instance, 'healthcheck', {})", "def _load_checks(configs):\n checks = []\n\n # active checks\n if configs['actives']:\n logger.debug(\"Loading active checks.\")\n actives = utility.get_package_classes('actives', configs['actives'])\n checks += [clazz() for clazz in actives]\n\n # blind checks\n if configs['blinds']:\n logger.debug(\"Loading blind checks.\")\n listeners = configs['blinds']\n blinds = utility.get_package_classes('blinds', list(listeners))\n checks += [clazz(listeners[clazz.__module__.split('.')[-1]]) for clazz in blinds]\n\n # passive checks\n if configs['passives']:\n logger.debug(\"Loading passive checks.\")\n passives = utility.get_package_classes('passives', configs['passives'])\n checks += [clazz() for clazz in passives]\n\n return checks", "def load():\n ultilib.createServerVar(\"uam_viphealth\", VIPHealth, True, \"VIP Health\")\n ultilib.createServerVar(\"uam_guardhealth\", GuardHealth, True, \"Guard Health\")\n ultilib.createServerVar(\"uam_assassinhealth\", AssassinHealth, True, \"Assassin Health\")\n ultilib.createServerVar(\"uam_assassinteam\", AssassinTeam, True, \"Assassin Team\")\n ultilib.createServerVar(\"uam_vipteam\", VIPTeam, True, \"VIP Team\")\n es.ServerVar(\"ultiassassin_version\", info.version, \"UltiAssassin Version\").makepublic()\n es.addons.registerTickListener(ticklistener)", "def test_health_check_get(self):\n pass", "def test_v1alpha3_check_health(self):\n pass", "def test_health_get(self):\n pass", "def load_checks(self):\n self.checks = []\n limiters.get_limiter('api').get_token()\n for c in self.driver.list_checks(self.driver_entity):\n self.checks.append(Check(self.driver, c, self.id))", "async def _load_hsm_status(self) -> None:\n hsm: Dict[str, str] = await self._api_request(\"hsm\")\n _LOGGER.debug(\"Loaded hsm status\")\n self._hsm_status = hsm[\"hsm\"]", "def load_hooks(self):\n self.manager.load(hooks_path=self.hooks_directory,\n globals_dict=self.globals_dict)", "def _loadChecks(self,withIdentityProvider=None):\n loaded_checks = []\n\n if withIdentityProvider:\n # Useful if doing grouping async requests with a shared identityprovider\n # and then spawning async call\n for checkdata in self._loadTestSetList():\n if checkdata['data']['identity_provider'].lower() == withIdentityProvider.lower():\n #loaded_checks.append({'data': checkdata['data']})\n loaded_checks.append(checkdata)\n\n else:\n loaded_checks = self._loadTestSetList()\n\n return loaded_checks", "def _on_modules_load(self):", "def deserialize(data):\n healthchecks = []\n if data is None:\n return []\n for k, v in data.iteritems():\n hc = HealthCheck()\n hc._HealthCheck__data = v\n hc.name = k\n hc.script = 
v.get(\"Script\", \"\")\n hc.interval = v.get(\"Interval\", 0)\n hc.timeout = v.get(\"Timeout\", 0)\n hc.kill_count_limit = v.get(\"KillCountLimit\", default[\"KillCountLimit\"])\n hc.kill_exit_codes = v.get(\"KillExitCodes\", default[\"KillExitCodes\"])\n healthchecks.append(hc)\n return healthchecks", "def module_load(module_name):\n\n #print (sys.path, file=sys.stderr)\n #print (\"module name: %s\" % module_name, file=sys.stderr)\n full_name = 'modules.%s.module' % module_name\n\n # Create Global Environment Vars\n global ENV_VARS\n if ENV_VARS is None:\n ENV_VARS = {\n 'working_dir': os.getcwd()\n }\n\n # User Module Table\n user_module = {\n 'env' : ENV_VARS,\n 'name' : module_name,\n 'fullname' : full_name,\n 'module_obj' : None,\n 'config' : None,\n 'config_name:': '',\n 'entries' : {}\n }\n\n # 1. Load user module\n umodule = None\n try:\n #print (\"Loading user module: %s\" % full_name, file=sys.stderr)\n\n umodule = importlib.import_module(full_name)\n except Exception as ex:\n Logger.fatal('module_load', 'Cannot load user module {}: {}'.format( \\\n module_name, str(ex)), 'please check the module whether exist')\n raise\n\n user_module['module_obj'] = umodule\n\n # 2. Loader user module entries into user_module\n # 2.1 Load module_init\n if _load_user_entry(module_name, MODULE_INIT_FUNCNAME, umodule, user_module, True) is None:\n return False\n\n # 2.2 Load module_release\n if _load_user_entry(module_name, MODULE_RELEASE_FUNCNAME, umodule, user_module, True) is None:\n return False\n\n # 2.3 Load module_run\n if _load_user_entry(module_name, MODULE_RUN_FUNCNAME, umodule, user_module, True) is None:\n return False\n\n # 2.4 Load module_pack\n _load_user_entry(module_name, MODULE_PACK_FUNCNAME, umodule, user_module, False)\n\n # 2.5 Load module_unpack\n _load_user_entry(module_name, MODULE_UNPACK_FUNCNAME, umodule, user_module, False)\n\n # 3. Assembe user module into Global Module Table\n USER_MODULE_TABLES[module_name] = user_module\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load healthchecks from name.
def loadTestsFromName(self, name, module=None):
    suite = super(HealthCheckLoader, self).loadTestsFromName(name, module)
    return self.filter_suite(suite)
[ "def loadTestsFromNames(self, names, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromNames(names,\n module)\n return self.filter_suite(suite)", "def load_checks(self):\n self.checks = []\n limiters.get_limiter('api').get_token()\n for c in self.driver.list_checks(self.driver_entity):\n self.checks.append(Check(self.driver, c, self.id))", "def deserialize(data):\n healthchecks = []\n if data is None:\n return []\n for k, v in data.iteritems():\n hc = HealthCheck()\n hc._HealthCheck__data = v\n hc.name = k\n hc.script = v.get(\"Script\", \"\")\n hc.interval = v.get(\"Interval\", 0)\n hc.timeout = v.get(\"Timeout\", 0)\n hc.kill_count_limit = v.get(\"KillCountLimit\", default[\"KillCountLimit\"])\n hc.kill_exit_codes = v.get(\"KillExitCodes\", default[\"KillExitCodes\"])\n healthchecks.append(hc)\n return healthchecks", "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def _load_checks(configs):\n checks = []\n\n # active checks\n if configs['actives']:\n logger.debug(\"Loading active checks.\")\n actives = utility.get_package_classes('actives', configs['actives'])\n checks += [clazz() for clazz in actives]\n\n # blind checks\n if configs['blinds']:\n logger.debug(\"Loading blind checks.\")\n listeners = configs['blinds']\n blinds = utility.get_package_classes('blinds', list(listeners))\n checks += [clazz(listeners[clazz.__module__.split('.')[-1]]) for clazz in blinds]\n\n # passive checks\n if configs['passives']:\n logger.debug(\"Loading passive checks.\")\n passives = utility.get_package_classes('passives', configs['passives'])\n checks += [clazz() for clazz in passives]\n\n return checks", "def load(self, name: str):\n result = self.l2.load(name)\n if result is not None:\n logging.debug(f'{name} l2 hit')\n return result\n\n result = self.l3.load(name, self.l2)\n if result is not None:\n logging.debug(f'{name} l3 hit')\n return result\n logging.debug(f'{name} cache miss')\n return None # Cache Miss", "def get_health_check(self, HealthCheckId: str) -> Dict:\n pass", "def load(cfg):\n\n checks = collections.OrderedDict()\n if cfg and cfg.get('checks', None):\n for name, options in cfg['checks'].iteritems():\n check_type = options.get('type', 'command')\n\n if not options:\n check_type = 'override'\n\n if check_type not in _types:\n msg = \"unknown check type '{}'\".format(check_type)\n raise CheckInvalid(msg)\n\n checks[name] = _types[check_type](name=name, **options)\n\n return checks", "def load(self, name):\n self._cache[name] = self._loader(self.path_translator(name))\n return self._cache[name]", "def test_v1_check_health(self):\n pass", "def health_check(self):\n endpoint = self._api_endpoint('healthcheck')\n try:\n response = self._send_request(endpoint, method='get')\n health_data = response.json()\n except Exception as e:\n msg = 'Could not get Znail health data: {reason}'.format(reason=str(e))\n logger.debug(msg, exc_info=True)\n logger.error(msg)\n raise ZnailError(msg)\n\n for name, status in health_data.items():\n if not status:\n raise ZnailError('Health check failed: {name}'.format(name=name))", "def load_data(name):\n with open(f\"tests/data/{name}.json\", \"r\") as json_file:\n return json.load(json_file)", "def haproxy_healthcheck(self):\n charm_instance = self.charm_instance or {}\n 
return getattr(charm_instance, 'healthcheck', {})", "def test_list_service_health_checks(self):\n with self.rbac_utils.override_role(self):\n self.service_client.list_service_health_checks()", "def activate_load_case(self, name):\n self.activate_load_case = name", "def _load_support(name):\n curr = P.dirname(P.abspath(__file__))\n with open(P.join(curr, \"data\", \"%s.yml\" % name)) as fin:\n return yaml.full_load(fin)", "def test_health_check_get(self):\n pass", "def test_v1alpha3_check_health(self):\n pass", "async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load healthchecks from names.
def loadTestsFromNames(self, names, module=None):
    suite = super(HealthCheckLoader, self).loadTestsFromNames(names, module)
    return self.filter_suite(suite)
[ "def loadTestsFromName(self, name, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromName(name, module)\n return self.filter_suite(suite)", "def _load_checks(configs):\n checks = []\n\n # active checks\n if configs['actives']:\n logger.debug(\"Loading active checks.\")\n actives = utility.get_package_classes('actives', configs['actives'])\n checks += [clazz() for clazz in actives]\n\n # blind checks\n if configs['blinds']:\n logger.debug(\"Loading blind checks.\")\n listeners = configs['blinds']\n blinds = utility.get_package_classes('blinds', list(listeners))\n checks += [clazz(listeners[clazz.__module__.split('.')[-1]]) for clazz in blinds]\n\n # passive checks\n if configs['passives']:\n logger.debug(\"Loading passive checks.\")\n passives = utility.get_package_classes('passives', configs['passives'])\n checks += [clazz() for clazz in passives]\n\n return checks", "def deserialize(data):\n healthchecks = []\n if data is None:\n return []\n for k, v in data.iteritems():\n hc = HealthCheck()\n hc._HealthCheck__data = v\n hc.name = k\n hc.script = v.get(\"Script\", \"\")\n hc.interval = v.get(\"Interval\", 0)\n hc.timeout = v.get(\"Timeout\", 0)\n hc.kill_count_limit = v.get(\"KillCountLimit\", default[\"KillCountLimit\"])\n hc.kill_exit_codes = v.get(\"KillExitCodes\", default[\"KillExitCodes\"])\n healthchecks.append(hc)\n return healthchecks", "def load_checks(self):\n self.checks = []\n limiters.get_limiter('api').get_token()\n for c in self.driver.list_checks(self.driver_entity):\n self.checks.append(Check(self.driver, c, self.id))", "def do_health_checks(self, list_of_ips):\n raise NotImplementedError()", "def _loadChecks(self,withIdentityProvider=None):\n loaded_checks = []\n\n if withIdentityProvider:\n # Useful if doing grouping async requests with a shared identityprovider\n # and then spawning async call\n for checkdata in self._loadTestSetList():\n if checkdata['data']['identity_provider'].lower() == withIdentityProvider.lower():\n #loaded_checks.append({'data': checkdata['data']})\n loaded_checks.append(checkdata)\n\n else:\n loaded_checks = self._loadTestSetList()\n\n return loaded_checks", "def loadTestsFromNames(self, names, module=None):\r\n suites = [self.loadTestsFromName(name, module) for name in names]\r\n return self.suiteClass(suites)", "def load(cfg):\n\n checks = collections.OrderedDict()\n if cfg and cfg.get('checks', None):\n for name, options in cfg['checks'].iteritems():\n check_type = options.get('type', 'command')\n\n if not options:\n check_type = 'override'\n\n if check_type not in _types:\n msg = \"unknown check type '{}'\".format(check_type)\n raise CheckInvalid(msg)\n\n checks[name] = _types[check_type](name=name, **options)\n\n return checks", "async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()", "def load_names(args):\n # NAMES is a json document which is just a list of names\n if os.path.isfile(args.names):\n with open(args.names, 'r') as n:\n try:\n names = json.load(n)\n except:\n sys.exit(\"ERROR: {0} is invalid JSON\".format(args.names))\n else:\n sys.exit(\"ERROR {0} file not found.\".format(args.names))\n if len(names) <= 1:\n sys.exit(\"ERROR: {0} needs to have more than 1 name in it\".format(args.names))\n return names", "def test_list_service_health_checks(self):\n with self.rbac_utils.override_role(self):\n self.service_client.list_service_health_checks()", "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as 
rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def load_names(args):\n # NAMES is a json document which is just a list of names\n if os.path.isfile(args.names):\n with open(args.names, 'r') as n:\n try:\n names = json.load(n)\n except:\n sys.exit(\"ERROR: {0} is invalid JSON\".format(args.names))\n else:\n names = []\n return names", "def load_checks_to_execute(\n bulk_checks_metadata: dict,\n bulk_compliance_frameworks: dict,\n checks_file: str,\n check_list: list,\n service_list: list,\n severities: list,\n compliance_frameworks: list,\n categories: set,\n provider: str,\n) -> set:\n checks_to_execute = set()\n\n # Handle if there are checks passed using -c/--checks\n if check_list:\n for check_name in check_list:\n checks_to_execute.add(check_name)\n\n # Handle if there are some severities passed using --severity\n elif severities:\n for check in bulk_checks_metadata:\n # Check check's severity\n if bulk_checks_metadata[check].Severity in severities:\n checks_to_execute.add(check)\n\n # Handle if there are checks passed using -C/--checks-file\n elif checks_file:\n try:\n checks_to_execute = parse_checks_from_file(checks_file, provider)\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n\n # Handle if there are services passed using -s/--services\n elif service_list:\n checks_to_execute = recover_checks_from_service(service_list, provider)\n\n # Handle if there are compliance frameworks passed using --compliance\n elif compliance_frameworks:\n try:\n checks_to_execute = parse_checks_from_compliance_framework(\n compliance_frameworks, bulk_compliance_frameworks\n )\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n\n # Handle if there are categories passed using --categories\n elif categories:\n for cat in categories:\n for check in bulk_checks_metadata:\n # Check check's categories\n if cat in bulk_checks_metadata[check].Categories:\n checks_to_execute.add(check)\n\n # If there are no checks passed as argument\n else:\n try:\n # Get all check modules to run with the specific provider\n checks = recover_checks_from_provider(provider)\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n else:\n for check_info in checks:\n # Recover check name from import path (last part)\n # Format: \"providers.{provider}.services.{service}.{check_name}.{check_name}\"\n check_name = check_info[0]\n checks_to_execute.add(check_name)\n\n return checks_to_execute", "def health_check(self):\n endpoint = self._api_endpoint('healthcheck')\n try:\n response = self._send_request(endpoint, method='get')\n health_data = response.json()\n except Exception as e:\n msg = 'Could not get Znail health data: {reason}'.format(reason=str(e))\n logger.debug(msg, exc_info=True)\n logger.error(msg)\n raise ZnailError(msg)\n\n for name, status in health_data.items():\n if not status:\n raise ZnailError('Health check failed: {name}'.format(name=name))", "def load_all_extensions(self, names):\n loaded = True\n for name in names:\n if not self.load(name):\n loaded = False\n return loaded", "def load_plugins(names=()):\n for name in names:\n modname = f'{PLUGIN_NAMESPACE}.{name}'\n try:\n try:\n namespace = __import__(modname, None, None)\n except ImportError as exc:\n # Again, this is hacky:\n if exc.args[0].endswith(' ' + 
name):\n log.warning('** plugin {0} not found', name)\n else:\n raise\n else:\n for obj in getattr(namespace, name).__dict__.values():\n if isinstance(obj, type) and issubclass(obj, BeetsPlugin) \\\n and obj != BeetsPlugin and obj not in _classes:\n _classes.add(obj)\n\n except Exception:\n log.warning(\n '** error loading plugin {}:\\n{}',\n name,\n traceback.format_exc(),\n )", "def _load_label_names():\n return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']", "def startup(file_names):\n metric_choice, metric_is_percent = get_user_analysis_choice()\n sheets = load_files(metric_choice, file_names)\n hpo_name_col = generate_hpo_id_col(file_names)\n\n return metric_choice, metric_is_percent, \\\n sheets, hpo_name_col" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate that the public key belongs to the given EC curve and format it as an uncompressed byte string. Afterwards the function creates a hash value of the uncompressed public key.
def get_public_key_fingerprint(curve: object, temp_public_key: object) -> object:
    vk = VerifyingKey.from_string(bytes.fromhex(temp_public_key), curve=curve)
    uncompressed_pub_key = vk.to_string('uncompressed')
    pub_key_hash_fingerprint = hashlib.sha256(uncompressed_pub_key)
    return pub_key_hash_fingerprint.hexdigest()
[ "def verify(self, pkey, e, sig):\n # First we define modular exponent, which is\n # used to calculate the y from a compressed\n # public key.\n # This only works for curves with an integer\n # order n that is congruent to 3 mod 4.\n def pow_mod(x, y, z):\n n = 1\n while y:\n if y & 1:\n n = n * x % z\n y >>= 1\n x = x * x % z\n return n\n # Now unmarshall the public key\n P = Point(None, None)\n if pkey[:2] == '04':\n P = Point(int(pkey[2:66], 16), int(pkey[66:]))\n else:\n y_parity = int(pkey[:2]) - 2\n x = int(pkey[2:], 16)\n a = (pow_mod(x, 3, self.p) + self.b) % self.p\n y = pow_mod(a, (self.p + 1) // 4, self.p)\n if y % 2 != y_parity:\n y = -y % self.p\n P = Point(x, y)\n # P must not be point at infinity\n assert P != EllipticCurve.inf\n # P must lie on the curve\n y = P.y * P.y\n x = P.x * P.x * P.x + self.a * P.x + self.b\n assert y % self.p == x % self.p\n # Now unmarshall the signature\n assert sig[:2] == '30' # DER SEQUENCE byte\n mBn = int(sig[2:4], 16) # bytes in message\n assert sig[4:6] == '02' # DER INTEGER byte\n rBn = int(sig[6:8], 16) # bytes in r\n r = int(sig[8:8 + rBn * 2], 16) # r value\n assert sig[8 + rBn * 2:8 + rBn * 2 + 2] == '02' # DER INTEGER byte\n sBn = int(sig[8 + rBn * 2 + 2:8 + rBn * 2 + 4], 16) # bytes in s\n assert sBn == len(sig[8 + rBn * 2 + 4:4 + mBn * 2]) // 2\n s = int(sig[8 + rBn * 2 + 4:4 + mBn * 2], 16) # s value\n # Now we have (r,s) and can verify\n z = self.shrink_message(e)\n w = invmod(s, self.n)\n U1 = self.point_multiply(z * w % self.n, self.G)\n U2 = self.point_multiply(r * w % self.n, P)\n R = self.point_add(U1, U2)\n assert r == R.x\n return True", "def get_public_compressed_curve_point(private_key):\n encoded_point = private_key.public_key().public_numbers().encode_point()\n return base64.b64encode(encoded_point)", "def DecodePublic(curve, bb):\n pk = curve.Decode(bb)\n if pk.is_neutral():\n raise Exception('Invalid public key (neutral point)')\n return pk", "def public_key(self):\n pkn = self._public_key_native[\"public_key\"]\n if self._is_ec:\n return hexify(pkn)\n else:\n return hexify(pkn[\"modulus\"])", "def convert_public_key_to_ecdsa(self, public_key):\n return PublicKey.fromPem('\\n-----BEGIN PUBLIC KEY-----\\n'+public_key+'\\n-----END PUBLIC KEY-----\\n')", "def PublicKey(self) -> _n_9_t_1:", "def public(self) -> PublicKey:", "def test_public_key_ec(self):\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBiTCCAS+gAwIBAgIJAINtiwRC4eBJMAoGCCqGSM49BAMCMCExDzANBgNVBAMM\nBkVDIDI1NjEOMAwGA1UECgwFV2ViQ0EwHhcNMTgwNTI3MTAyNTIyWhcNMTgwNjI2\nMTAyNTIyWjAhMQ8wDQYDVQQDDAZFQyAyNTYxDjAMBgNVBAoMBVdlYkNBMFkwEwYH\nKoZIzj0CAQYIKoZIzj0DAQcDQgAEIg6eBOPv5M2z4ANtsJukbimKWX04lanEdALs\nbu2xNCDBXJ0IJ4Sd3u4G1qvrKX0mBHd7yUPGui+7bvp084mNaqNQME4wHQYDVR0O\nBBYEFEmE51rEUz4TuD8oEAw2lvMfvi6LMB8GA1UdIwQYMBaAFEmE51rEUz4TuD8o\nEAw2lvMfvi6LMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgfiKDoHB3\nWzRO1juSMyVBuBw2p1o0ab+3fBNDvff8PXcCIQCUKIyzTnM7Wz6TkABfqOcmx7n4\nsbRvdOg3CepLGW3Ytw==\n-----END CERTIFICATE-----\"\"\"\n x509 = crypto.load_certificate(PEM, cert)\n self.assertEqual(utils.public_key_type(x509), c.KEY_EC)", "def generate_hash(self):\n if not self.public_key:\n raise ValueError('Requires a public publicKey')\n return self.public_key.encode(encoding='bytes')", "def public_key(sk):\n return encodepoint(scalarmult_base(sk))", "def get_pubkey(self):\n assert(self.valid)\n ret = ECPubKey()\n p = SECP256K1.mul([(SECP256K1_G, self.secret)])\n ret.p = p\n ret.valid = True\n ret.compressed = self.compressed\n return ret", "def import_public_key(self, hex_bytes: str) 
-> str:\n return self.context.post(\n \"/dsum/public_key\", {\"key\": hex_bytes}, None, \"DSum: failed importing a Curve 25519 public key\")['uid']", "def public_key_to_address(public_key):\n public_key = normalize_public_key(public_key)\n public_key_hash = keccak(decode_hex(public_key)[1:])\n address = to_checksum_address(public_key_hash[12:])\n return address", "def _ecdsa_key(self,private_key):\n numbers = private_key.private_numbers()\n content = WriteMessage()\n\n public_key = private_key.public_key()\n serialized = public_key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n\n # The SSH agent format somehow combines the elliptic curve's\n # `x` and `y` values (in `numbers.public_numbers`) into a single\n # `Q` value. I couldn't figure the specifics out exactly, but\n # the format is used exactly the same way int the OpenSSH\n # public key format, so we'll just reuse that one instead.\n\n pk_data = b64decode(serialized.split(None,2)[1])\n content.data.extend(pk_data)\n\n # nist = self._ecdsa_nists[private_key.curve.name]\n # content.write_string('ecdsa-sha2-{}'.format(nist))\n # content.write_string(nist)\n #\n # buffer = bytearray()\n # buffer.extend(b'0x04')\n #\n # x = numbers.public_numbers.x\n # y = numbers.public_numbers.y\n # for number in [x,y]:\n # tmp = WriteMessage()\n # tmp.write_mpint(number)\n # buffer.extend(tmp.data[4:])\n\n content.write_mpint(numbers.private_value)\n return content.data", "def q_hashpubkey(abe, page, chain):\n pubkey = wsgiref.util.shift_path_info(page['env'])\n if pubkey is None:\n return \\\n \"Returns the 160-bit hash of PUBKEY.\\n\" \\\n \"For example, the Bitcoin genesis block's output public key,\" \\\n \" seen in its transaction output scriptPubKey, starts with\\n\" \\\n \"04678afdb0fe..., and its hash is\" \\\n \" 62E907B15CBF27D5425399EBF6F0FB50EBB88F18, corresponding\" \\\n \" to address 1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa.\\n\" \\\n \"/q/hashpubkey/PUBKEY\\n\"\n try:\n pubkey = pubkey.decode('hex')\n except Exception:\n return 'ERROR: invalid hexadecimal byte string.'\n return util.pubkey_to_hash(pubkey).encode('hex').upper()", "def validate_handshake_public_key(cls, public_key: bytes) -> None:\n ...", "def encode_public_key(value: PublicKey) -> bytes:\n return bytes([value.algo.value]) + value.pbk", "def export_public_key(self):\r\n return self.public_key.encode('hex')", "def ECDH(sk, pk, peer_pk):\n curve = pk.curve\n enc_peer_pk = bytes(peer_pk)\n peer_pk_good = True\n if isinstance(peer_pk, Jq255Curve.Point):\n if not(pk.curve is peer_pk.curve):\n raise Exception('Curve mismatch in ECDH')\n if pk.is_neutral():\n raise Exception('Peek public key is invalid (neutral element)')\n else:\n # We are going to decode the public key bytes. In that mode,\n # failures should trigger the alternate key derivation feature,\n # instead of being reported as exceptions. 
This implementation\n # is not constant-time, and the exception-catching process below\n # may leak to outsider through timing-based side channels that\n # the received bytes were not a valid public key; in a\n # production-level secure implementation, this side channel\n # should be avoided as well.\n try:\n peer_pk = pk.curve.Decode(enc_peer_pk)\n if peer_pk.is_neutral():\n raise Exception('key is neutral')\n except Exception:\n peer_pk_good = False\n peer_pk = curve.G\n\n # The ECDH core: multiply the peer point by our private key.\n # The shared secret is the _square_ of the w coordinate of the result\n # (a square is used to make ECDH implementable with a ladder\n # algorithm that avoids full decoding of the input point).\n P = peer_pk * sk\n\n # For key generation, we want to use the digest over the concatenation of:\n # - the two public keys;\n # - a byte of value 0x53 (on success) or 0x46 (on failure, because the\n # provided peer key bytes are not the valid encoding of a valid\n # public key);\n # - the shared secret (our own private key on failure).\n # We order the public keys by interpreting them as integers\n # (big-endian convention) so that both parties use the same order\n # (equivalently, the two keys are ordered lexicographically).\n pk1 = bytes(pk)\n ipk1 = int.from_bytes(pk1, byteorder='big')\n pk2 = enc_peer_pk\n ipk2 = int.from_bytes(pk2, byteorder='big')\n if ipk1 > ipk2:\n (pk1, pk2) = (pk2, pk1)\n\n sh = hashlib.blake2s()\n sh.update(pk1)\n sh.update(pk2)\n if peer_pk_good:\n sh.update(b'\\x53')\n sh.update(bytes(P))\n else:\n sh.update(b'\\x46')\n sh.update(bytes(sk))\n return (sh.digest(), peer_pk_good)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save model hyperparameters/metadata to output directory. model_options is an argparse Namespace, and is converted to a dictionary and pickled.
def save_model_options(output_dir, model_options, predictor='classify'):
    if not isinstance(model_options.training_data, str):
        training_data = '.'.join(model_options.training_data)
    else:
        training_data = model_options.training_data
    output_file = construct_filename(output_dir,
                                     'model_options',
                                     '.pkl',
                                     training_data,
                                     predictor,
                                     model_options.model,
                                     s=model_options.seed)
    with open(output_file, 'wb') as f:
        pkl.dump(vars(model_options), f)
[ "def export_model(self, model):\n os.mkdir(model)\n classifier_path = os.path.join(model, 'classifier')\n parameters_path = os.path.join(model, 'parameters.pkl')\n parameters = {'label_mapping': self.label_mapping, 'word_regex': self.word_regex, 'tokenize': self.tokenize,\n 'ignored_regexes': self.ignored_regexes, 'stem': self.stem,\n 'tfidf_vectorizer': self.tfidf_vectorizer}\n with open(classifier_path, 'wb') as f:\n dill.dump(self.clf, f)\n with open(parameters_path, 'wb') as f:\n dill.dump(parameters, f)", "def save_model(self):\n joblib.dump(self.pipeline, \"model.joblib\")", "def save_model(self):\n joblib.dump(self.pipeline, 'model.joblib')", "def save_model(self):\n self.filename = f\"{self.model_type}.joblib\"\n joblib.dump(self.pipeline, self.filename)", "def save_model(self):\n joblib.dump(self.pipeline, 'trainer_model.joblib')", "def save_model(self, path_dir, charset=None):\n\n model_json = self.model_train.to_json()\n with open(path_dir + \"/model_train.json\", \"w\") as json_file:\n json_file.write(model_json)\n\n model_json = self.model_pred.to_json()\n with open(path_dir + \"/model_pred.json\", \"w\") as json_file:\n json_file.write(model_json)\n\n model_json = self.model_eval.to_json()\n with open(path_dir + \"/model_eval.json\", \"w\") as json_file:\n json_file.write(model_json)\n\n model_json = self.model_init.to_json()\n with open(path_dir + \"/model_init.json\", \"w\") as json_file:\n json_file.write(model_json)\n\n param = {'greedy': self.greedy, 'beam_width': self.beam_width, 'top_paths': self.top_paths, 'charset': self.charset}\n\n output = open(path_dir + \"/model_param.pkl\", 'wb')\n p = pickle.Pickler(output)\n p.dump(param)\n output.close()", "def _dump(\n self,\n model,\n features,\n targets,\n split_generator,\n fit_kwargs=None,\n predict_kwargs=None,\n dump_time_format=\"datetime\",\n ):\n\n if fit_kwargs is None:\n fit_kwargs = {}\n if predict_kwargs is None:\n predict_kwargs = {}\n\n # Detect model type.\n if isinstance(model, Model):\n self.model_type = \"keras\"\n elif isinstance(model, BaseEstimator):\n self.model_type = \"sklearn\"\n else:\n LOGGER.warning(\"Model type not recognised\")\n self.model_type = \"unrecognised\"\n\n # Create subdirectory using UTC time.\n if dump_time_format == \"datetime\":\n self.dump_time = utc_datetime_now()\n elif dump_time_format == \"timestamp\":\n self.dump_time = utc_timestamp_now()\n else:\n raise ValueError(\n \"`dump_time_format` must be either 'datetime' or 'timestamp'\"\n )\n LOGGER.info(\"Creating run directory \" + self.sub_dir.format(self.dump_time))\n os.mkdir(self.sub_dir.format(self.dump_time))\n self.dump_time_format = dump_time_format\n\n LOGGER.info(\"Writing model to disk\")\n if self.model_type == \"keras\":\n # Save Keras model.\n self.model_base = \"model.h5\"\n model.save(\n os.path.join(self.sub_dir.format(self.dump_time), self.model_base)\n )\n else:\n # Save model of other type.\n self.model_base = \"model.pkl\"\n joblib.dump(\n model,\n os.path.join(self.sub_dir.format(self.dump_time), self.model_base),\n )\n\n # Convert arrays to lists for storage in JSON.\n LOGGER.info(\"Writing features and targets to disk\")\n if not isinstance(features, list):\n raise TypeError(\"`features` must be a list of Numpy arrays.\")\n if not isinstance(targets, list):\n raise TypeError(\"`targets` must be a list of Numpy arrays.\")\n features = [x.tolist() for x in features]\n targets = [y.tolist() for y in targets]\n\n # Save features and targets.\n with open(\n 
os.path.join(self.sub_dir.format(self.dump_time), self.features_base), \"w\"\n ) as f:\n json.dump(features, f)\n with open(\n os.path.join(self.sub_dir.format(self.dump_time), self.targets_base), \"w\"\n ) as f:\n json.dump(targets, f)\n\n # Save instructions for the individual runs.\n for split_id, (i_train, i_test) in enumerate(split_generator):\n\n # Put together run instructions.\n LOGGER.info(\"Writing run instructions to disk (split {})\".format(split_id))\n instructions = {\n \"split_id\": split_id,\n \"model_path\": os.path.join(\n self.sub_dir.format(self.dump_time), self.model_base\n ),\n \"model_type\": self.model_type,\n \"features_path\": os.path.join(\n self.sub_dir.format(self.dump_time), self.features_base\n ),\n \"targets_path\": os.path.join(\n self.sub_dir.format(self.dump_time), self.targets_base\n ),\n \"training_indices\": i_train.tolist(),\n \"test_indices\": i_test.tolist(),\n \"fit_kwargs\": fit_kwargs,\n \"predict_kwargs\": predict_kwargs,\n }\n\n # Create sub-sub-directory using split ID.\n _dirname = self.subsub_dir.format(self.dump_time, split_id)\n os.mkdir(_dirname)\n\n # Save run instructions.\n with open(os.path.join(_dirname, self.in_base), \"w\") as f:\n json.dump(instructions, f)\n self.split_ids.append(split_id)\n\n # Save class in json format.\n with open(\n os.path.join(self.sub_dir.format(self.dump_time), self.validator_base),\n \"w\",\n ) as f:\n json.dump(self, f, cls=JobsCrossValidatorEncoder)", "def save_model(self):\n joblib.dump(self.pipeline, 'model.joblib')\n print(colored('model.joblib saved locally', 'green'))", "def store_model(model, filepath):\n\n joblib.dump(model, filepath)", "def save_model(self, fileout):\n\n # we'll make a json blob to save\n model_json = {}\n model_json['params'] = self.net.params[:].tolist()\n model_json['args'] = self._args\n model_json['kwargs'] = self._kwargs\n model_json['training_params'] = self._training_params\n\n # save to the file\n if isinstance(fileout, basestring):\n with open(fileout, 'w') as f:\n json.dump(model_json, f)\n else:\n json.dump(model_json, fileout)", "def save_model(self):\n\n self.check_model()\n\n with open(self.filename, 'wb') as file:\n pickle.dump({'model': self.model, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)", "def save_model_params(self):\n\n assert self.model\n assert self.model_params\n\n # If it's a Keras model, we save not only model_params but the actual\n # architecture of the model, since the code that constructs models from model_params\n # might change over time.\n if self.model_library == 'keras':\n model_JSON = self.model_params\n model_JSON['_keras_model_params'] = json.loads(self.model.to_json())\n model_JSON_str = json.dumps(model_JSON, cls=DatasetEncoder)\n \n timeStr = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S%f\")\n self.model_stamp = \"%s-%s\" % (self.model_params['model_type'], timeStr)\n self.model_path = os.path.join(MODELS_ROOT, \"%s.json\" % self.model_stamp)\n\n assert os.path.isfile(self.model_path) == False\n\n with open(self.model_path, 'w') as model_file:\n model_file.write(model_JSON_str)\n\n return None", "def save_model(self, output_path):\n joblib.dump(self.dtr, output_path)", "def _save_model(pipeline: Pipeline) -> None:\n joblib.dump(pipeline, MODEL_FILE_NAME)", "def save_model(self, suffix: str = '', unwrap_parallel: bool = True) -> None:\n # TODO: Logging\n model = self.model\n # We do this awkard check because there are too many different\n # parallel wrappers in PyTorch and some 
of them have changed names\n # in different releases (DataParallel, DistributedDataParallel{,CPU}).\n is_wrapped = (\n hasattr(model, 'module') and\n 'parallel' in str(type(model)).lower() and\n isinstance(model.module, torch.nn.Module)\n )\n if is_wrapped and unwrap_parallel:\n # If a parallel wrapper was used, the only thing we should save\n # is the model.module, which contains the actual model and params.\n # If we saved the wrapped module directly, deserialization would\n # get unnecessarily difficult.\n model = model.module\n\n state_dict_path = os.path.join(self.save_path, f'state_dict{suffix}.pth')\n model_path = os.path.join(self.save_path, f'model{suffix}.pt')\n\n torch.save(model.state_dict(), state_dict_path)\n torch.save(model, model_path)", "def save_model(model):\n model.to_disk(\"../model/custom_ner_model\")", "def save_model_params(self):\n\n\t\tsave_path = os.path.join(self.config[\"trainer\"][\"save_dir\"],\n\t\t\tself.config[\"experiment_name\"])\n\t\tif not os.path.exists(save_path):\n\t\t\tos.makedirs(save_path)\n\t\ttorch.save(self.model.state_dict(), save_path + \"/\" + self.config[\"trainer\"][\"save_trained_name\"])\n\t\tself.logger.info(\"Model parameters saved\")", "def store_sklearn_model(file_path, model):\n joblib.dump(model, file_path, compress=9)", "def save_model(model, file_path=\"myModel.model\"):\n save_data(model, file_path)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Charge given price to the card, assuming sufficient card limit. Return True if charge was processed; False if charge was denied
def charge(self, price):
    if price + self._balance > self._limit:
        return False
    else:
        self._balance += price
        return True
[ "def charge(self, price):\n if price + self._balance > self._limit: #if charge would exceed limit\n return False #can not accept charge\n else:\n self._balance += price\n return True", "def charge(self, price):\n\n if price + self._balance > self._limit:\n return False\n else:\n self._balance += price\n return True", "def charge(self, price):\n if not isinstance(price, (int, float)):\n raise TypeError('Price must be numeric')\n if price + self._balance > self._limit: # if charge would exceed limit\n return False # cannot accept charge\n self._balance += price\n return True", "def charge(self, price):\n if not isinstance(price, (int, float)):\n raise TypeError()\n \n if self._balance + price <= self._limit:\n self._balance += price\n return True\n else: return False", "def price_check(cash, price, shares):\n affordable = (cash - (price * shares)) > 0\n\n if affordable:\n return affordable\n\n else:\n return False", "def buy(self, card):\n if self.buying_power < card.cost or self.num_buys < 1:\n raise InsufficientFundsError(card)\n # try:\n self.game.sell(card)\n # except Exception as e:\n # return False\n self.buying_power -= card.cost\n self.num_buys -= 1\n self.discard.append(card)\n return True", "def __can_recharge(self):\r\n if self.current_service:\r\n if self.account_expiring and self.current_service.is_active:\r\n return True\r\n return False", "def charge_card(card, amount, description='', live_mode=False):\n _initialize_stripe(live_mode=live_mode)\n charge = safe_stripe_call(\n stripe.Charge.create,\n **{\n 'amount' : amount, # amount in cents\n 'currency' : 'usd',\n 'card' : card,\n 'description' : '',\n }\n )\n return charge", "def buy(self, price, amount):\n if (self.balance.asset1 >= price * amount):\n self.balance.asset1 -= price * amount * (1 + self.fees.taker)\n self.balance.asset2 += amount\n return True\n else:\n print(tc.colorize_s(tc.red, \"ERROR: Insuficient Asset1\"))\n return False", "def check_limit(self):\n self.ensure_one()\n partner = self.partner_id\n moveline_obj = self.env['account.move.line']\n movelines = moveline_obj.\\\n search([('partner_id', '=', partner.id),\n ('account_id.user_type_id.type', 'in',\n ['receivable', 'payable']),\n ('full_reconcile_id', '=', False)])\n\n debit, credit = 0.0, 0.0\n today_dt = datetime.strftime(datetime.now().date(), DF)\n for line in movelines:\n if line.date_maturity < today_dt:\n credit += line.debit\n debit += line.credit\n\n if (credit - debit + self.amount_total) > partner.credit_limit:\n # Consider partners who are under a company.\n if partner.over_credit or (partner.parent_id and partner.parent_id.over_credit):\n partner.write({\n 'credit_limit': credit - debit + self.amount_total})\n return True\n else:\n msg = '%s Can not confirm Sale Order,Total mature due Amount ' \\\n '%s as on %s !\\nCheck Partner Accounts or Credit ' \\\n 'Limits !' 
% (partner.over_credit,credit - debit, today_dt)\n raise UserError(_('Credit Over Limits !\\n' + msg))\n else:\n return True", "def __float_item_recharge(self):\r\n if not self.current_service:\r\n return False\r\n if not self.current_service.is_active:\r\n return False\r\n if not self.current_service.is_float:\r\n return False\r\n if self.account_expired:\r\n return False\r\n return True", "def is_sufficient(money_received, price):\n if price <= money_received:\n change = round(money_received - price, 2)\n print(f\"Here is your {option}.Enjoy!\\nHere us £{change} in change\")\n global profit\n profit += price\n return True\n else:\n print(f\"Sorry not enough money\")\n return False", "def check_price(self, price_diff):\n chance = exp(price_diff / self.T)\n\n if price_diff < 0 and not chance > random():\n return True\n \n return False", "def check_price(self):\n if self.price < 0:\n self.raise_user_error(\"negative_amount\")", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def is_charge(self):\n return self._is_charge", "def changeCharge(self, change):\n if((self.charge + change) > 100):\n #Cannot exceed 100%.\n self.charge = 100\n print(\"Car\", self.getID(), \"is now at full charge.\")\n self.state = 1\n else:\n self.charge = self.charge + change;\n print(\"Car\", self.getID(), \"is now at \"+str(self.charge)+\"%\", \"charge.\")\n return self.charge", "def charge_customer(customer, amount):\n stripe.api_key = Config.STRIPE_SECRET_KEY\n\n if not customer.cards:\n return False # This situation is impossible, but anyway\n try:\n charge = stripe.Charge.create(\n amount=int(amount * 100),\n currency='AUD',\n customer=customer.stripe_customer_id,\n source=customer.cards[-1].stripe_card_id,\n description='Payment for donations.'\n )\n except Exception as e:\n print(e.args[0])\n return False\n\n if charge.status == 'succeeded':\n return True\n return False", "def charge(self):\n\n self.charge_level = 100\n print(\"The vehicule is fully charge.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process customer payment that reduces balance
def make_payment(self, amount):
    self._balance -= amount
[ "def make_payment(self, amount):\n self._balance -= amount #UGH NO RETURN STATEMENT WAS NEEDED", "def make_payment(self, payment):\n self._balance -= payment", "def monthlyProcess(self):\n n=0\n if self.numWithdrawals>4:\n n=self.numWithdrawals-4\n self.balance-=self.carrega\n self.calcMonthlyInterest()\n self.balance-=n\n self.__canviarstatus()\n self.__reinicialitzar()\n self.__guardarcompte()", "def ProcessPayment(self,\n customer,\n merchants,\n num_coins,\n details,\n msg_fn,\n can_overdraft=False,\n merchant_weights=None):\n if num_coins < 0:\n logging.error('ProcessPayment called with negative value: %s, %s -> %s',\n num_coins, customer, merchants)\n return False\n if isinstance(merchants, user_pb2.User):\n merchants = [merchants]\n if merchant_weights is None:\n merchant_weights = [1] * len(merchants)\n total_weight = sum(merchant_weights)\n merchant_weights = [w / total_weight for w in merchant_weights]\n\n amount_paid = 0\n success = True\n for i, (merchant, weight) in enumerate(zip(merchants, merchant_weights)):\n # Ensure we don't overpay due to rounding.\n merchant_amount = min(\n int(round(num_coins * weight)), num_coins - amount_paid)\n # Give the last person the extra coin to compensate for them losing a coin\n # sometimes.\n if i == len(merchants) - 1:\n merchant_amount = num_coins - amount_paid\n if merchant_amount > 0:\n withdrawl_entry = bank_pb2.LedgerEntry(\n details=details, counterparty=merchant)\n withdrawl_entry.create_time.GetCurrentTime()\n deposit_entry = bank_pb2.LedgerEntry(\n details=details,\n counterparty=customer,\n create_time=withdrawl_entry.create_time)\n if (self._Withdraw(customer, merchant_amount, withdrawl_entry, msg_fn,\n can_overdraft) and\n self._Deposit(merchant, merchant_amount, deposit_entry, msg_fn)):\n amount_paid += merchant_amount\n else:\n success = False\n return success", "def make_payment(self, amount):\n self.balance = self.balance + amount", "def withdraw(self, amount):\n self.__balance -= amount\n \n if self.__balance < 0:\n self.__balance -= 5\n self.__fees_paid += 5", "def process_month(self):\n self._balance = self._balance * ((1 + self._apr) ** (1/12))\n if self._count > 10:\n temp = self._count - 10\n self._balance += PredatoryCreditCard.SURCHARGE * temp\n self._count = 0", "def payment(self, request, *args, **kwargs):\n amount = self.get_amount(request, *args, **kwargs)\n return self.decrement_amount(amount)", "def deposit(amt) :\r\n\tglobal bal\r\n\tbal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\tbal = bal + amt\r\n\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t# (bal == (bal_old + amt))\r\n\t# (amt >= 0)\r\n\t# (bal_old >= 0)\r\n\t# (bal_old == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r", "def minPayment(balance, annualInterestRate, monthlyPaymentRate):\n months = 1\n while months < 13:\n balance -= balance*monthlyPaymentRate\n balance += balance*(annualInterestRate/12)\n months += 1\n print(\"Remaining balance: \" + str(round(balance, 2)))", "def takeCash(self, amount):\r\n self.cash -= amount", "def deposit(self, amount):\n self.balance += amount", "def OperateAccount(self, user_id, amount_money):\n user_data = self.db_manager.GetData(user_id)\n user_data = self._parsetouserDTO(user_data)\n old_balance = user_data.GetAmountMoney()\n new_balance = int(old_balance) + int(amount_money)\n if new_balance >= 0:\n user_data.SetAmountMoney(new_balance)\n self.db_manager.UpdateData(user_id, user_data.GetAmountMoney())\n return 
JsonSerializer.SerializeObject(user_data)\n else:\n return \"{\\\"ERROR\\\":\\\"Operation denied insufficient money\\\"}\"", "def calc_balance(balance, monthlyPayment):\n\n for i in range(months):\n unpaidBalance = balance - monthlyPayment\n interestAmount = unpaidBalance * monIntRate\n endingBalance = unpaidBalance + interestAmount\n balance = endingBalance\n return (endingBalance)", "def pay_using_transaction(self, payment_transaction):\n Date = Pool().get('ir.date')\n AccountMove = Pool().get('account.move')\n AccountMoveLine = Pool().get('account.move.line')\n AccountConfiguration = Pool().get('account.configuration')\n\n if Pool.test:\n # Test has rollbacks and in those cases configuration id\n # increases.\n config, = AccountConfiguration.search([])\n else:\n config = AccountConfiguration(1)\n for line in payment_transaction.move.lines:\n if line.reconciliation:\n continue\n\n if line.account == self.account:\n amount_to_pay = abs(self.amount_to_pay)\n if (\n line.credit and\n line.credit > amount_to_pay and\n not line.amount_second_currency\n ):\n # Split the payment line so remaining can be applied\n # to other invoices as well.\n total_payment = line.credit\n move = line.move\n move.state = 'draft'\n move.save()\n line.credit = amount_to_pay\n line.save()\n AccountMoveLine.copy([line], {\n 'move': move,\n 'credit': (total_payment - amount_to_pay)\n })\n AccountMove.post([move])\n self.write(\n [self], {'payment_lines': [('add', [line.id])]}\n )\n if abs(self.amount_to_pay) <= config.write_off_threshold:\n # Reconcile lines to pay and payment lines from transaction.\n # Write-off journal is required to write-off remaining\n # balance.\n amount = Decimal('0.0')\n for line_ in self.lines_to_pay + self.payment_lines:\n amount += line_.debit - line_.credit\n if amount == Decimal('0.0'):\n AccountMoveLine.reconcile(\n self.lines_to_pay + self.payment_lines,\n date=Date.today()\n )\n elif config.write_off_journal:\n AccountMoveLine.reconcile(\n self.lines_to_pay + self.payment_lines,\n journal=config.write_off_journal,\n date=Date.today()\n )\n return line\n raise Exception('Missing account')", "def _compute_payment_difference(self):\n if len(self.voucher_ids) == 0:\n return super(account_payment, self)._compute_payment_difference()\n else:\n if self.voucher_ids[0].voucher_type in ['purchase']:\n self.payment_difference = self.amount - self._compute_total_voucher_amount()\n else:\n self.payment_difference = self._compute_total_voucher_amount() - self.amount", "def new_balance(curr_balance,transaction_type, transaction_amount):\n\n if transaction_type.lower() == 'd':\n curr_balance += transaction_amount\n else:\n curr_balance -= transaction_amount\n\n return curr_balance", "def deposit(self, amount):\n\t\tself.balance += amount", "def withdrawMoney(self, withdraw_amount):\r\n self.balance_amt = self.balance_amt - withdraw_amount" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate random bytes to use as csrf secret
def gen_csrf_secret():
    return Random.new().read(csrf_secret_len)
[ "def generate_csrf_token():\n return binascii.b2a_hex(os.urandom(32))", "def gen_secret():\n return urlsafe_b64encode(bytes([random.getrandbits(8) for _ in range(18)])).decode('ascii')", "def gen_csrf_token():\n\n return \"\".join(random.choice(\n string.ascii_uppercase + string.digits) for x in xrange(32))", "def generate_password():\n return urlsafe_b64encode(urandom(32)).decode('utf-8')", "def generate_token():\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n rand = random.SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(40))\n return hmac.new(\n config.SECRET_KEY,\n random_string,\n hashlib.sha256\n ).hexdigest()", "def generate_csrf_token():\n if '_csrf_token' not in session:\n session['_csrf_token'] = binascii.hexlify(os.urandom(32)).decode()\n return session['_csrf_token']", "def _generateSecretKey():\n return ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(20))", "def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\"", "def random_password():\n return sha(urandom(40)).hexdigest()", "def _random_bytes(self):\n return random.bytes(128)", "def generate_csrf_token() -> int:\r\n ...", "def _create_shared_secret():\n\n randint = random.SystemRandom().randint\n bits = load_config(\"instavpn.json\")[\"shared_secret_bits\"]\n return urlsafe_b64encode(\"\".join(chr(randint(0, 255)) for _ in xrange(bits/8)))", "def new_secret():\n return \"\".join(\n SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(64)\n )", "def generate_forgotten_token():\n return uuid.uuid4().hex", "def get_random_secret_key():\n chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n return get_random_string(50, chars)", "def salt_shaker() -> bytes:\n return randbits(16 * 8).to_bytes(16, \"big\")", "def make_verification_token():\n return get_random_string(length=40)", "def create_temporary_secret():\n return uuid.uuid4().hex", "def generate_token():\n return uuid4()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read csrf secret from session if it exists; otherwise generate it and store in session
def get_csrf_secret():
    sess = managers.request_manager.get_request().session()
    secret = sess.get(csrf_secret_sess_var_name, None)
    if not secret:
        secret = gen_csrf_secret()
        sess[csrf_secret_sess_var_name] = secret
    return secret
[ "def gen_csrf_secret():\n\treturn Random.new().read(csrf_secret_len)", "def generate_csrf_token():\n if '_csrf_token' not in session:\n session['_csrf_token'] = binascii.hexlify(os.urandom(32)).decode()\n return session['_csrf_token']", "def generate_csrf_token():\n if '_csrf_token' not in login_session:\n login_session['_csrf_token'] = b64encode(urandom(64)).decode() # Cryptographically secure random key\n print(\"_csrf_token:\" + login_session['_csrf_token'])\n return login_session['_csrf_token']", "def inject_csrf_token():\n return {'csrf_token': generate_csrf_token}", "def get_csrf(session):\n login = session.get(KONFUZIO_HOST)\n csrf_token = login.cookies['csrftoken']\n return csrf_token", "def checkcsrf(func):\n @functools.wraps(func)\n @sessiondecorator\n def wrapper(*args, session = None, **kw):\n if \"X-CSRF-TOKEN\" not in session.cookies:\n getcsrf(session)\n return func(*args,session = session, **kw)\n return wrapper", "def get_session_secret():\n singleton = Secrets._get_or_make_singleton()\n return singleton.session_secret", "def generate_csrf_token() -> int:\r\n ...", "def generate_csrf_token():\n return binascii.b2a_hex(os.urandom(32))", "def on_GET_request_setup_csrf_cookie(ev) -> None:\n request = ev.request\n if request.method != \"GET\":\n # Skip if not GET. If could detect static requests, would skip too\n return\n token = request.session.get_csrf_token()\n # print(request.session.session_id, token)\n if request.cookies.get(\"XSRF-TOKEN\") != token:\n # Set the Secure flag on the cookie only when serving on https.\n secure: bool = request.registry.settings.get(\n \"scheme_domain_port\", \"\"\n ).startswith(\"https\")\n ev.response.set_cookie(\n COOKIE_NAME,\n token,\n overwrite=True,\n secure=secure,\n httponly=False, # The client reads the cookie to send header\n samesite=\"strict\",\n )", "def get_xsrf_secret():\n singleton = Secrets._get_or_make_singleton()\n return singleton.xsrf_secret", "def get_csrf_token():\n\tresponse = session.get('https://www.udemy.com/join/login-popup')\n\tmatch = re.search(\"name=\\'csrfmiddlewaretoken\\' value=\\'(.*)\\'\", response.text)\n\treturn match.group(1)", "def get_csrf_token():\n response = session.get('https://www.udemy.com/join/login-popup')\n match = re.search(\"name=\\'csrfmiddlewaretoken\\' value=\\'(.*)\\'\", response.text)\n return match.group(1)", "def _get_csrf_cookie(self):\n url = reverse('courseenrollment', kwargs={\n 'course_id': str(self.course.id)\n })\n resp = self.client.get(url, HTTP_REFERER=self.REFERER)\n assert resp.status_code == 200\n assert 'prod-edx-csrftoken' in resp.cookies\n return resp.cookies['prod-edx-csrftoken'].value", "def extract_csrf(self, url):\r\n\r\n with requests.Session() as client:\r\n client.get(url) \r\n csrf = client.cookies['csrftoken']\r\n return csrf", "def get_token(request):\n return request.app.settings['tangled.app.csrf.token']", "def get_csrf_token(self, request):\n if request.scheme == \"https\":\n # only consult the secure cookie\n bound_cookies = self.cookie_profile_secure.bind(request)\n else:\n # the only accessible cookie is http insecure\n bound_cookies = self.cookie_profile_http.bind(request)\n token = bound_cookies.get_value()\n if not token:\n token = self.new_csrf_token(request)\n return token", "def csrf_token(self):\n return generate_csrf_token()", "def add_csrf_validation(event):\n if event.request.method == 'POST':\n token = event.request.POST.get('_csrf')\n if token is None or token != event.request.session.get_csrf_token():\n headers = forget(event.request) # 
force a log out\n raise HTTPForbidden('CSRF token is missing or invalid',\n headers=headers)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate csrf token based on existing/new csrf secret and provided/new salt
def create_csrf_token(salt=''):
    if not salt:
        salt = Random.new().read(csrf_salt_len).encode('hex')
    h = SHA256.new()
    h.update(get_csrf_secret() + salt)
    return h.hexdigest() + salt
[ "def gen_csrf_secret():\n\treturn Random.new().read(csrf_secret_len)", "def generate_csrf_token():\n return binascii.b2a_hex(os.urandom(32))", "def generate_csrf_token():\n if '_csrf_token' not in session:\n session['_csrf_token'] = binascii.hexlify(os.urandom(32)).decode()\n return session['_csrf_token']", "def generate_csrf_token() -> int:\r\n ...", "def gen_csrf_token():\n\n return \"\".join(random.choice(\n string.ascii_uppercase + string.digits) for x in xrange(32))", "def generate_csrf_token():\n if '_csrf_token' not in login_session:\n login_session['_csrf_token'] = b64encode(urandom(64)).decode() # Cryptographically secure random key\n print(\"_csrf_token:\" + login_session['_csrf_token'])\n return login_session['_csrf_token']", "def inject_csrf_token():\n return {'csrf_token': generate_csrf_token}", "def get_csrf_secret():\n\tsess = managers.request_manager.get_request().session()\n\tsecret = sess.get(csrf_secret_sess_var_name, None)\n\tif not secret:\n\t\tsecret = gen_csrf_secret()\n\t\tsess[csrf_secret_sess_var_name] = secret\n\treturn secret", "def get_csrf_token(self):\n h = hashlib.new('sha256')\n h.update(self.__current_authentication_token())\n return h.hexdigest()", "def generate_token():\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n rand = random.SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(40))\n return hmac.new(\n config.SECRET_KEY,\n random_string,\n hashlib.sha256\n ).hexdigest()", "def csrf_token(self):\n return generate_csrf_token()", "def _generate_token_value():\n return secrets.token_urlsafe()", "def generar_token(self):\n\n salt = get_random_string(length=64)\n unique_salt = salt + str(self.id)\n encoded = hashlib.md5(unique_salt.encode('utf-8')).hexdigest()\n\n self.token = encoded", "def create_token(self):\n ts_datetime = self.logged_at or self.created_at\n ts = int(mktime(ts_datetime.timetuple()))\n key = base64.encodestring(self.email)\n base = \"{}{}\".format(key, ts)\n salt, hsh = self.password.split('$')\n return \"{}$${}\".format(key, get_hexdigest(salt, base))", "def generate_forgotten_token():\n return uuid.uuid4().hex", "def make_verification_token():\n return get_random_string(length=40)", "def test_gen_and_verify_good_token(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n token = tool.generate_token(12345, 'test_action')\n self.assertTrue(tool.verify_token(token, 12345, 'test_action'))", "def get_xsrf_secret():\n singleton = Secrets._get_or_make_singleton()\n return singleton.xsrf_secret", "def GenerateCSRFToken(user_id, time):\n precondition.AssertType(user_id, Text)\n precondition.AssertOptionalType(time, int)\n\n time = time or rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()\n\n secret = config.CONFIG.Get(\"AdminUI.csrf_secret_key\", None)\n if secret is None:\n raise ValueError(\"CSRF secret not available.\")\n digester = hmac.new(secret.encode(\"ascii\"), digestmod=hashlib.sha256)\n digester.update(user_id.encode(\"ascii\"))\n digester.update(CSRF_DELIMITER)\n digester.update(str(time).encode(\"ascii\"))\n digest = digester.digest()\n\n token = base64.urlsafe_b64encode(b\"%s%s%d\" % (digest, CSRF_DELIMITER, time))\n return token.rstrip(b\"=\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify csrf token against csrf secret from the session; if token is not provided it's read from request arguments
def verify_csrf_token(token=''):
    if not token:
        token = managers.request_manager.get_request().arguments().arguments().get(csrf_token_arg_name, "")
        if token:
            token = token[0]
    if len(token) != 2 * digest_size + 2 * csrf_salt_len:
        debug('Incorrect csrf token length')
        raise VDOM_csrf_exception()
    salt = token[2*digest_size:]
    if token != create_csrf_token(salt):
        debug('Incorrect csrf token value')
        raise VDOM_csrf_exception()
[ "def csrf_protect():\n if request.method == \"POST\" and request.path[0:5] != \"/api/\":\n token = login_session.pop('_csrf_token', None)\n request_token = request.form.get('_csrf_token')\n print(\"Comparing server token [\" + token + \"]\")\n print(\"with client token [\" + request_token + \"]\")\n if not token or token != request_token:\n print(\"Tokens do not match! Aborting..\")\n abort(403)\n print(\"Tokens match - accepted\")", "def validate_csrf_token(event):\n request = event.request\n if request.is_xhr or request.method.upper() in ('POST', 'PUT', 'DELETE'):\n pyramid.session.check_csrf_token(request, token='XSRF_TOKEN',\n header='X-XSRF-TOKEN', raises=True)", "def csrf_protect():\n if request.method == 'POST':\n token = session.pop('_csrf_token', None)\n if not token or token != request.form.get('_csrf_token'):\n abort(403)", "def checkcsrf(func):\n @functools.wraps(func)\n @sessiondecorator\n def wrapper(*args, session = None, **kw):\n if \"X-CSRF-TOKEN\" not in session.cookies:\n getcsrf(session)\n return func(*args,session = session, **kw)\n return wrapper", "def test_csrf_token_process(self):\n request = self.request\n self.assertEqual(request.method, 'POST')\n self.assertEqual(request.site.csrf_validation, True) # ensure default is to be enabled\n self.assertIsNotNone(getattr(request.session, 'csrf_token', None))", "def add_csrf_validation(event):\n if event.request.method == 'POST':\n token = event.request.POST.get('_csrf')\n if token is None or token != event.request.session.get_csrf_token():\n headers = forget(event.request) # force a log out\n raise HTTPForbidden('CSRF token is missing or invalid',\n headers=headers)", "def check_form_token():\n if request.method == 'POST':\n token = session.pop('_form_token', None)\n if not token or token != request.form.get('_form_token'):\n redirect(request.url)", "def check_csrf(csrf_token, form_csrf_token):\n if not csrf_token or csrf_token != form_csrf_token:\n abort(403)\n else:\n return True", "def get_token(request):\n return request.app.settings['tangled.app.csrf.token']", "def validate_token(*args, **kwargs):\n auth_token = app.config[\"AUTH_TOKEN\"]\n auth_header_name = app.config[\"AUTH_TOKEN_HEADER_NAME\"]\n # Check HTTP Header\n headers = [unicode(i) for i in request.headers.keys()]\n if (auth_header_name in headers\n and request.headers.get(auth_header_name) == auth_token):\n return func(*args, **kwargs)\n else:\n return \"no access\"", "def _request_csrf_token(self, params):\n if params.get(\"action\") == \"query\":\n if params.get(\"meta\"):\n if \"tokens\" not in params[\"meta\"].split(\"|\"):\n params[\"meta\"] += \"|tokens\"\n else:\n params[\"meta\"] = \"tokens\"\n if params.get(\"type\"):\n if \"csrf\" not in params[\"type\"].split(\"|\"):\n params[\"type\"] += \"|csrf\"", "def trusted(req):\n # Get the CRSF token from the user session.\n session = req.environ.get('rex.session', {})\n session_csrf_token = session.get('_csrf_token')\n # Get the token value from the request.\n request_csrf_token = req.environ.get('HTTP_X_CSRF_TOKEN') or \\\n req.params.get('_csrf_token')\n # Check if the values coincide.\n if not session_csrf_token or not request_csrf_token:\n return False\n is_equal = True\n for ch1, ch2 in itertools.zip_longest(session_csrf_token,\n request_csrf_token):\n is_equal &= (ch1 == ch2)\n return is_equal", "def get_csrf(session):\n login = session.get(KONFUZIO_HOST)\n csrf_token = login.cookies['csrftoken']\n return csrf_token", "def csrftoken(me):\n return me.s.cookies.get('csrftoken')", "def 
_assert_csrf_token(form):\n assert \"csrf_token\" in form\n assert form.csrf_token", "def check_xsrf_cookie(self):\r\n token = (self.get_argument(\"_xsrf\", None) or\r\n self.request.headers.get(\"X-Xsrftoken\") or\r\n self.request.headers.get(\"X-Csrftoken\"))\r\n if not token:\r\n raise HTTPError(403, \"'_xsrf' argument missing from POST\")\r\n if self.xsrf_token != token:\r\n raise HTTPError(403, \"XSRF cookie does not match POST argument\")", "def inject_csrf_token():\n return {'csrf_token': generate_csrf_token}", "def get_csrf_token():\n\tresponse = session.get('https://www.udemy.com/join/login-popup')\n\tmatch = re.search(\"name=\\'csrfmiddlewaretoken\\' value=\\'(.*)\\'\", response.text)\n\treturn match.group(1)", "def get_csrf_token(self, request):\n if request.scheme == \"https\":\n # only consult the secure cookie\n bound_cookies = self.cookie_profile_secure.bind(request)\n else:\n # the only accessible cookie is http insecure\n bound_cookies = self.cookie_profile_http.bind(request)\n token = bound_cookies.get_value()\n if not token:\n token = self.new_csrf_token(request)\n return token" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
list starter arguments that must be applied conditionally based on version
def get_version_specific_arguments(self, version: str):
    result = []
    semversion = semver.VersionInfo.parse(version)
    # Extended database names were introduced in 3.9.0
    if self.supports_extended_names:
        result += ["--args.all.database.extended-names-databases=true"]
    # Telemetry was introduced in 3.11.0
    if (semversion.major == 3 and semversion.minor >= 11) or (semversion.major > 3):
        result += ["--all.server.telemetrics-api=false"]
    # Column cache
    if (
        self.cfg.enterprise
        and semver.compare(version, "3.9.5") >= 0
        and semver.compare(version, "3.10.0") != 0
        and semver.compare(version, "3.10.1") != 0
    ):
        result += ["--args.all.arangosearch.columns-cache-limit=10000"]
    return result
[ "def test_prelim_opts_args(application):\n opts, args = application.parse_preliminary_options(\n ['--foo', '--verbose', 'src', 'setup.py', '--statistics', '--version'])\n\n assert opts.verbose\n assert args == ['--foo', 'src', 'setup.py', '--statistics', '--version']", "def get_requirement_strings(self) -> Tuple[str, ...]:\n return (self.options.version, *self.options.pytest_plugins)", "def _get_optional_args(self, args):\n optional = []\n if args.dry_run:\n optional.append('--dry-run')\n if not args.quiet:\n optional.append('--verbose')\n return optional", "def _get_installation_args(self, install_optional, production_only, force, frozen_lockfile):\n raise NotImplementedError", "def _create_flow_args(names, versions):\n flow_arguments = []\n\n for name in names:\n version = versions.get(name)\n\n if not version:\n _logger.warning(\"No version information found for package '%s'\", name)\n continue\n\n flow_arguments.append({\n 'ecosystem': 'maven',\n 'name': name,\n 'recursive_limit': 0,\n 'force': True,\n 'version': version\n })\n\n return flow_arguments", "def __dist_info_args(self, config_settings: _ConfigSettings) -> Iterator[str]:\n cfg = config_settings or {}\n if \"tag-date\" in cfg:\n val = strtobool(str(cfg[\"tag-date\"] or \"false\"))\n yield (\"--tag-date\" if val else \"--no-date\")\n if \"tag-build\" in cfg:\n yield from [\"--tag-build\", str(cfg[\"tag-build\"])]", "def _repair_options(version, ks='', cf=None, sequential=True):\n opts = []\n\n # since version 2.2, default is parallel, otherwise it's sequential\n if sequential:\n if version >= '2.2':\n opts += ['-seq']\n else:\n if version < '2.2':\n opts += ['-par']\n\n # test with full repair\n if version >= '2.2':\n opts += ['-full']\n if ks:\n opts += [ks]\n if cf:\n opts += [cf]\n return opts", "def all_args():\n results = []\n\n for name, value in pwndbg.arguments.arguments():\n results.append(\"%4s = %s\" % (name, pwndbg.chain.format(value)))\n\n return results", "def feedstock_args(self):\n build_args = [\"--working_directory\", self.repository]\n\n if self.channels:\n for channel in self.channels:\n build_args += [\"--channels\", channel]\n\n if self.python:\n build_args += [\"--python_versions\", self.python]\n if self.build_type:\n build_args += [\"--build_types\", self.build_type]\n if self.mpi_type:\n build_args += [\"--mpi_types\", self.mpi_type]\n if self.cudatoolkit:\n build_args += [\"--cuda_versions\", self.cudatoolkit]\n\n\n if self.recipe:\n build_args += [\"--recipes\", self.recipe]\n\n return build_args", "def format_run_args(self) -> t.List[str]:\n # args launcher uses\n args = []\n restricted = [\"chdir\", \"h\", \"stdio_stdout\", \"o\", \"stdio_stderr\", \"k\"]\n if self.mpmd or \"erf_input\" in self.run_args.keys():\n restricted.extend(\n [\n \"tasks_per_rs\",\n \"a\",\n \"np\",\n \"p\",\n \"cpu_per_rs\",\n \"c\",\n \"gpu_per_rs\",\n \"g\",\n \"latency_priority\",\n \"l\",\n \"memory_per_rs\",\n \"m\",\n \"nrs\",\n \"n\",\n \"rs_per_host\",\n \"r\",\n \"rs_per_socket\",\n \"K\",\n \"appfile\",\n \"f\",\n \"allocate_only\",\n \"A\",\n \"launch_node_task\",\n \"H\",\n \"use_reservation\",\n \"J\",\n \"use_resources\",\n \"bind\",\n \"b\",\n \"launch_distribution\",\n \"d\",\n ]\n )\n\n for opt, value in self.run_args.items():\n if opt not in restricted:\n short_arg = bool(len(str(opt)) == 1)\n prefix = \"-\" if short_arg else \"--\"\n if not value:\n args += [prefix + opt]\n else:\n if short_arg:\n args += [prefix + opt, str(value)]\n else:\n args += [\"=\".join((prefix + opt, str(value)))]\n return 
args", "def get_flagged_args():\n expected = ['os_type', 'os_version']\n arguments = {}\n try:\n opts, adds = \\\n getopt.getopt(sys.argv, '', map(lambda x: x + \"=\", expected))\n except getopt.GetoptError as Error:\n print(str(Error))\n print(\"Defaulting to standard run...\")\n return arguments\n for o, a in opts:\n opt = re.sub('^-+', '', o)\n if opt in expected:\n arguments[opt] = a\n if arguments:\n if 'os_type' not in arguments:\n print(\"Unsupported means of operation!\")\n print(\"You can either specify both os_type and os_version \" +\n \"or just os_type\")\n arguments = {}\n return arguments", "def _test_argv(self, verbose, extra_argv):\r\n #self.package_path = os.path.abspath(self.package_path)\r\n argv = [__file__, self.package_path]\r\n argv += ['--verbosity', str(verbose)]\r\n if extra_argv:\r\n argv += extra_argv\r\n return argv", "def GetMissingArguments(self):\n return []", "def get_additional_args(self):\n additional = \"\"\n if not self.workflow.cleanup_scripts:\n additional += \" --skip-script-cleanup \"\n if self.workflow.shadow_prefix:\n additional += \" --shadow-prefix {} \".format(self.workflow.shadow_prefix)\n if self.workflow.use_conda:\n additional += \" --use-conda \"\n if self.workflow.conda_prefix:\n additional += \" --conda-prefix {} \".format(self.workflow.conda_prefix)\n if self.workflow.use_singularity:\n additional += \" --use-singularity \"\n if self.workflow.singularity_prefix:\n additional += \" --singularity-prefix {} \".format(\n self.workflow.singularity_prefix\n )\n if self.workflow.singularity_args:\n additional += ' --singularity-args \"{}\"'.format(\n self.workflow.singularity_args\n )\n\n if self.workflow.use_env_modules:\n additional += \" --use-envmodules\"\n\n return additional", "def extract_args(config_file):", "def test_format_args():\n assert format_args({}) == []\n assert format_args({'foo': 'bar'}) == ['--foo=bar']\n assert format_args({'check': True}) == ['--check']\n assert format_args({'no-check': True}) == ['--no-check']\n assert format_args({'no_check': True}) == ['--no-check']\n assert format_args({'list': ['foo', 'bar', 123]}) ==\\\n ['--list=foo,bar,123']", "def _get_arguments(self, rargs):\r\n\r\n args = []\r\n i = 0\r\n count = len(rargs)\r\n while i < count and not self._is_opt(rargs[i]):\r\n args.append(rargs[i])\r\n i += 1\r\n\r\n return args", "def get_starting_revision_argument() -> Union[str, Tuple[str, ...], None]:", "def args(pytestconfig: \"Config\") -> List[str]:\n args: list = pytestconfig.getoption(\"--args\")\n if args:\n return [\"--\" + \"=\".join(arg_list) for arg_list in args]\n else:\n return list()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the list of dbservers managed by this starter
def get_dbservers(self):
    ret = []
    for i in self.all_instances:
        if i.is_dbserver():
            ret.append(i)
    return ret
[ "def get_servers_list(self):\n return self.nova_client.servers.list()", "def get_servers(self):\n\t\treturn self.__servers", "def get_all_servers(self) -> List[Server]:", "def get_all_servers(self) -> List[Server]:\n pass", "def servers(self):\n return self._servers", "def bootstrap_servers(self):\n return self._bootstrap_servers", "def databases(self):\n return self._databases", "def list_databases():\n config = load_config()\n\n databases = [x for x in config.keys() if \"schemas\" in config[x]]\n return databases", "def discover_servers():\n servers = set()\n for p in glob.glob1(SPDK_SERVER_APP_DIR, \"*\"):\n m = SERVERS_PATTERN.match(p)\n if m:\n servers.add(m.group())\n return list(servers)", "def get_databases ():\n return _dbobjects[:]", "def list_databases(self):\n return self.get(endpoint(self.url, '/_all_dbs')).json()", "def test_list_servers(self):\n pass", "def getAPIServerList(self):\n apiservers = []\n for host in conf.config['api_host_options'].keys():\n apiurl = \"%s://%s\" % (conf.config['scheme'], host)\n return apiservers", "def get_managed_servers():\n for wls in WeblogicServer.objects.all().iterator():\n proxy = wls.environment.api_proxy\n host = wls.connect_to()\n port = wls.adminserver_port\n user = wls.adminserver_user\n pwd = wls.adminserver_pass\n headers = {'Accept': 'application/json'}\n\n if wls.adminserver_ssl:\n scheme = 'https'\n else:\n scheme = 'http'\n\n ckey = 'weblogic:%s:managedservers' % host\n\n uri = '%s/weblogic/%s/%s/%d/management/tenant-monitoring/servers' % (proxy, scheme, host, port)\n\n # Getting info and updating the cache can be an asynchronous operation\n get_weblogic_api.apply_async(args=(ckey, user, pwd, headers, uri))", "def databases():\n return execute_and_fetch(_SHOW_DATABASES)", "def get_databases(instance):\r\n databases = []\r\n steplog.info('Getting list of databases.')\r\n cmd = '. 
~%s/sqllib/db2profile ; cd / ; db2 list database directory | grep \"Database name\"' % instance\r\n out, err, retCode = db2tools.run_instance_command(instance, cmd)\r\n steplog.debug(out)\r\n if out:\r\n for line in out.splitlines():\r\n if line.strip().startswith('Database name'):\r\n database_name = line.split('=')[1].strip()\r\n databases.append(database_name)\r\n return databases", "def listServers():\n servers = AdminControl.queryNames( 'type=Perf,*').split(\"\\n\")\n for i in range(0, len(servers)):\n srvName = servers[i].split(\",\")[1].split(\"=\")[1]\n if srvName == \"nodeagent\":\n continue\n print \"Server: \" + srvName\n perfStr = AdminControl.queryNames( 'type=Perf,process=' + srvName +',*')\n perfObj = AdminControl.makeObjectName( perfStr)\n srvrStr = AdminControl.queryNames( 'type=Server,process=' + srvName +',*')\n srvrObj = AdminControl.makeObjectName( srvrStr)\n stats = AdminControl.invoke_jmx( perfObj, 'getStatsObject', [ srvrObj, java.lang.Boolean('true')], ['javax.management.ObjectName', 'java.lang.Boolean'])\n for driver in stats.getStats('connectionPoolModule').subCollections():\n print \"\\tDriver Name: \" + driver.getName()\n for datasource in stats.getStats('connectionPoolModule').getStats(driver.getName()).subCollections():\n print \"\\t\\tDatasource: \" + datasource.getName()\n\t\t\t\tprint quetal(srvName, driver.getName(), datasource.getName())", "def authoritative_servers(self):\n return list(self.masters.all()) + list(self.slaves.all())", "def get_server_list(environment):\n vault_client = get_vault_client()\n proxy_databag = vault_client.read(\"secret/databags/nmdproxy/upstream\")['data']\n if environment=='production':\n server_list = proxy_databag[environment]['webcluster01']['servers']\n elif environment==\"staging\":\n server_list = proxy_databag[environment]['web01']['servers']\n else:\n raise Exception(\"Unrecognized environment of '{environment}. Available options are 'production' and 'development'\".format(environment=environment))\n return None\n return server_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the list of agents managed by this starter
def get_agents(self):
    ret = []
    for i in self.all_instances:
        if i.instance_type == InstanceType.AGENT:
            ret.append(i)
    return ret
[ "def get_agents(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.AgentList(self._results, runtime=self._runtime)", "def items(self):\n return self.agents.items()", "def list(self):\n response = self._client.get('scanners/1/agents')\n return AgentList.from_json(response.text)", "def agents(self):\n return AgentManager(session=self._session)", "def test_list_agents(self):\n admin_resource_id = self.agent['id']\n with (self.override_role_and_validate_list(\n admin_resource_id=admin_resource_id)) as ctx:\n ctx.resources = self.agents_client.list_agents(\n id=admin_resource_id)[\"agents\"]", "def support_agent_list(self):\n return self._support_agent_list", "def compute_agents(self):\n path = '/os-agents'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack compute agents: %s' % truncate(res))\n return res[0]['agents']", "def oc(self, stimulusID):\r\n global stimulusAPI\r\n try:\r\n pageList = stimulusAPI.getStimulusScope(stimulusID)\r\n agentSet = set([])\r\n for page in pageList:\r\n localAgentList = stimulusAPI.getAllAgentsWithViewOfSpecifiedPage(page)\r\n localAgentSet = set(localAgentList)\r\n agentSet.update(localAgentSet)\r\n agentList = list(agentSet)\r\n return agentList\r\n except Exceptions.InvalidStimulusProcessingType as e:\r\n raise e\r\n except Exceptions.ScriptError as e:\r\n raise e\r\n #self.execute(stimulusID)\r\n except Exception as e:\r\n raise Exceptions.ScriptError(e)", "def agent_id_list(self):\n return self._agent_id_list", "def list_agents(self, platform_uuid):\n return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')", "def Observe(self):\n res = []\n for a in self.agents:\n res.append(a.Observe())\n\n return res", "def test_get_agents_names(self):\n pass", "def get_agents_from_environment(env):\n result = []\n baseResources = json.loads(udcli(\"getEnvironmentBaseResources -application '%s' -environment '%s'\" % (myapplication, env)))\n for resource in baseResources:\n res = json.loads(udcli(\"getResource -resource '%s'\" % resource['id']))\n result = list(set(result + get_agents_from_resource(res)))\n return result", "def create_agents() -> List[InsuranceAgent]:\n agents = []\n for consumer in range(AGENTS_COUNT):\n insurance_agent = InsuranceAgent(\n personal_info={\n AGE: FAKE.random_int(min=0, max=120),\n STATE: FAKE.state(),\n KIDS_COUNT: FAKE.random_int(min=0, max=12),\n CARS_COUNT: FAKE.random_int(min=0, max=10),\n INSURANCE_OPERATION: random.choice((RENT, BUY)),\n INCOME: FAKE.random_int(min=0, max=1000000),\n PHONE_NUMBER: FAKE.phone_number(),\n AVAILABLE: True,\n },\n call_acceptance_criteria=[\n {\n \"person_attribute\": AGE,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=120,\n ),\n },\n {\n \"person_attribute\": INCOME,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=1000000,\n ),\n },\n {\n \"person_attribute\": KIDS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": CARS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": INSURANCE_OPERATION,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": random.choice((RENT, BUY)),\n },\n ],\n )\n 
agents.append(insurance_agent)\n return agents", "def agent_arns(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"agent_arns\")", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_active_agents(self):\n return [not self.killed[i] for i in range(self.num_agents)]", "def get_all_l3_agents(self, plugin, context):\n with context.session.begin(subtransactions=True):\n query = context.session.query(agents_db.Agent)\n query = query.filter(\n agents_db.Agent.topic == 'l3_agent')\n query = (query.filter_by(admin_state_up=True))\n\n return [l3_agent\n for l3_agent in query\n if (agentschedulers_db.AgentSchedulerDbMixin.\n is_eligible_agent(True, l3_agent))]", "def list_agents(ctx, mac):\n url = \"%s/agents?mac=%s\" % (ctx[\"api_host\"], mac)\n headers = _get_request_headers(ctx)\n try:\n resp = requests.get(url, headers=headers, timeout=API_TIMEOUT)\n resp.raise_for_status()\n return resp.json()\n except Exception as exception:\n logger.error(\"request failed: %s\" % url)\n raise exception" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the first frontend host of this starter
def get_frontend(self):
    servers = self.get_frontends()
    assert servers, "starter: don't have instances!"
    return servers[0]
[ "def getFrontend(self):\n return self.header['FRONTEND']", "def GetHost():\r\n assert _server_environment is not None\r\n return _server_environment._staging_host if _server_environment._is_staging else _server_environment._prod_host", "def syshost():\n hostA = platform.node().split('.')\n idx = 1 \n if (len(hostA) < 2):\n idx = 0\n return hostA[idx]", "def nscaweb_host(self):\n return self.__get_option('nscaweb_host')", "def get_host(self):\n if not self.host_id:\n return None\n return self.system.hosts.get_by_id_lazy(self.host_id)", "def get_homepage(resource):\n return resource.playlist.consumer_site.domain", "def webpack_dev_server_host(request):\n return settings.WEBPACK_DEV_SERVER_HOST or request.get_host().split(\":\")[0]", "def get_host(request):\n return request.META[\"HTTP_HOST\"].split(\":\")[0]", "def fallback_host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"fallback_host\")", "def get_local_hypervisor(self):\n # Look up hypervisors available filtered by my hostname\n host = self.get_my_hostname()\n hyp = self.get_all_hypervisor_ids(filter_by_host=host)\n if hyp:\n return hyp[0]", "def getHostHead(self):\n return self.host_head", "def home(environ, start_response):\n http_host, host_url = determine_host(environ)\n if http_host == host_url:\n http_host = 'frontpage.' + http_host\n return serve_space(environ, start_response, http_host)", "def getWavefrontHost(self):\n if not self.config:\n self._getConfig()\n return self.config.get(BaseCommand.wavefrontHostKey)", "def GetRedirectHost():\r\n assert _server_environment is not None\r\n return _server_environment._prod_host if _server_environment._is_staging else _server_environment._staging_host", "def _host(self):\n assert len(self) == 1, AssertionError('Found multiple hosts in HostContainer')\n return self._all[0]", "def master_host(self) -> str:\n raise NotImplementedError", "def get_host(req):\n return req.META[\"HTTP_HOST\"].split(\":\")[0]", "def wagtail_site():\n return Site.objects.get(is_default_site=True)", "def get_backend():\n global _ACTIVE_BACKEND\n if not _ACTIVE_BACKEND:\n _ACTIVE_BACKEND = locate(settings.SITE_BACKEND)()\n return _ACTIVE_BACKEND" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the first dbserver of this starter
def get_dbserver(self):
    servers = self.get_dbservers()
    assert servers, "starter: don't have instances!"
    return servers[0]
[ "def get_stored_primary_server_name(db):\n if \"last_primary_server\" in db.collection_names():\n stored_primary_server = db.last_primary_server.find_one()[\"server\"]\n else:\n stored_primary_server = None\n\n return stored_primary_server", "def find_dbserver(self):\n if self.dbserver_id is not None:\n return ItopapiPrototype.get_itop_class('DBServer').find(self.dbserver_id)\n return None", "def get_sync_master(self):\n servers = self.get_sync_masters()\n assert servers, \"starter: don't have instances!\"\n return servers[0]", "def get_server(self) -> str:\n return self.server_selector.get_master_server()", "def current_server():\n if not _current_server:\n create_server()\n return _current_server", "def random_server(self):\n if not self.servers:\n return None\n return random.choice(self.servers)", "def get_server(index=-1):\n #returns a random server\n keys=__servers.keys()\n if index<0 or index>len(keys):key=random.choice(keys)\n else:key=keys[index]\n return (key,__servers[key])", "def find_server(message, db):\n db_list = sql.database_list()\n if db in db_list:\n server = db_list[db]\n message.reply(Strings['DATABASE_SERVER'].format(db, server))\n else:\n message.reply(Strings['DATABASE_UNKNOWN'].format(db))", "def get_primary_db(force_new=False):\n defaults = get_defaults()\n if 'primary' in defaults.keys():\n primary_host = defaults['primary']\n else:\n raise IndraDatabaseError(\"No primary host available in defaults file.\")\n\n global __PRIMARY_DB\n if __PRIMARY_DB is None or force_new:\n __PRIMARY_DB = DatabaseManager(primary_host, label='primary')\n __PRIMARY_DB.grab_session()\n return __PRIMARY_DB", "def get_server(self, server):\n return self._get(_server.Server, server)", "def get_db_server_name(self):\n if self.db_config_file.key_exists(\"server_name\"):\n return self.db_config_file_value(\"server_name\").strip('\"')\n return self.get_system_id()", "def get_db():\n client = pymongo.MongoClient('localhost', 27017)\n return client.haploqa", "def get_frontend(self):\n servers = self.get_frontends()\n assert servers, \"starter: don't have instances!\"\n return servers[0]", "def select_db(cli, dbname):\n db = cli[dbname]\n return db", "def get_server(self, name):\r\n\r\n if name not in self.servers_and_loggers:\r\n self._new_server_and_logger(name)\r\n return self.servers_and_loggers[name][0]", "def server(self):\n\t\treturn self._server", "def get_server(self, id):\n\n\t\tfor server in self.__servers:\n\t\t\tif id == server.get_id():\n\t\t\t\treturn server\n\t\t\n\t\traise ServerNotDefinedError(id)", "def get_server(self, name):\n servers_list = self.get_servers_list()\n server_exists = False\n for s in servers_list:\n if s.name == name:\n logger.debug(\"Instance '%s' exists\", name)\n server_exists = True\n break\n if not server_exists:\n return None\n else:\n return s", "def get_server(self, id):\n\t\treturn self.__servers.get_server(id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the first agent of this starter
def get_agent(self):
    servers = self.get_agents()
    assert servers, "starter: have no instances!"
    return servers[0]
[ "def agent(self):\n value = self._data.get('agent', None)\n if value is not None:\n data = {\n 'response': value,\n }\n return self.__class_agent__(self._api, data)", "def getAgent(self):\n return Agent(name = self.__str__(), strategy = (lambda i,s : self.getAction(i, s)))", "def agent(self) -> Entity:\n return self.__agent", "def getfirstbot(self):\n\n return self.bots[0]", "def get_agent(self, name):\n self._lock.acquire()\n try:\n agent = self._agent_map.get(name)\n finally:\n self._lock.release()\n return agent", "def _get_agent(self, agent_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"agents\", \"agent_id\", agent_id)", "def test_get_agent_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.agent_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"Agent0\\n\"", "def get_effective_agent(self):\n raise Unimplemented()", "def _get_solver_agent(self):\n # Determine selectable agent(s)\n sctx = self.context.solver\n\n alist = sctx.agent\n if alist is None:\n # Return empty solver agent\n return CpoSolverAgent(self, sctx.params, sctx)\n elif not (is_string(alist) or is_array(alist)):\n raise CpoException(\"Agent identifier in config.context.solver.agent should be a string or a list of strings.\")\n\n # Create agent\n if is_string(alist):\n aname = alist\n agent = self._create_solver_agent(alist)\n else:\n # Search first available agent in the list\n agent = None\n aname = None\n errors = []\n for aname in alist:\n try:\n agent = self._create_solver_agent(aname)\n break\n except Exception as e:\n errors.append((aname, str(e)))\n # Agent not found\n errstr = ', '.join(a + \": \" + str(e) for (a, e) in errors)\n raise CpoException(\"Agent creation error: \" + errstr)\n\n # Log solver agent\n sctx.log(1, \"Solve model '\", self.model.get_name(), \"' with agent '\", aname, \"'\")\n agent.process_infos[CpoProcessInfos.SOLVER_AGENT] = aname\n return agent", "def agent():\n return DummyAgent()", "def get_control_agent():", "def choose(self):\n # pick agent A\n keys = list(self._agents.keys())\n keyA = random.choice(keys)\n agentA = self.model.schedule.agents[keyA]\n\n # pick pick agent B\n keyB = random.choice(agentA.neighbors)\n agentB = self.model.schedule.agents[keyB]\n\n return agentA, agentB", "def get_effective_agent(self):\n if self._proxy is not None and self._proxy.has_authentication():\n return self._proxy.get_authentication().get_agent()\n elif self._proxy is not None and self._proxy.has_effective_agent():\n return Agent(identifier=self._proxy.get_effective_agent_id().get_identifier(),\n namespace=self._proxy.get_effective_agent_id().get_namespace(),\n authority=self._proxy.get_effective_agent_id().get_authority())\n else:\n return Agent(identifier='MC3GUE$T@MIT.EDU',\n namespace='osid.agent.Agent',\n authority='MIT-OEIT')", "def _deads_step_first(self) -> AgentID:\n _deads_order = [\n agent\n for agent in self.agents\n if (self.terminations[agent] or self.truncations[agent])\n ]\n if _deads_order:\n self._skip_agent_selection = self.agent_selection\n self.agent_selection = _deads_order[0]\n return self.agent_selection", "def agent_class(self):\r\n return self._agent_class", "def getFirstWorker(self):\n return self.entries[0]", "def get_agent(bot: Text) -> Agent:\n if not AgentProcessor.cache_provider.is_exists(bot):\n AgentProcessor.reload(bot)\n return AgentProcessor.cache_provider.get(bot)", "def getEarliest():\n return _bySequence[0]", "def 
get_first_incident_node(self):\n return self.first_incident_node # return the first incident node" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }