Columns:
  query      string   (length 9 to 9.05k)
  document   string   (length 10 to 222k)
  negatives  list     (19 to 20 items)
  metadata   dict
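
Each row below pairs a natural-language API description (query) with the matching SDK method source (document) and a list of near-miss code snippets (negatives); the metadata field marks the triplet training objective. A minimal sketch of consuming rows with this layout is shown next; only the column names come from the schema above, and the dataset path "example-org/vod-api-triplets" is a hypothetical placeholder, not the real identifier.

# Minimal consumption sketch; the dataset path is a placeholder assumption.
from datasets import load_dataset

ds = load_dataset("example-org/vod-api-triplets", split="train")

for row in ds:
    query = row["query"]          # natural-language API description
    positive = row["document"]    # matching SDK method source code
    negatives = row["negatives"]  # 19 to 20 non-matching code snippets
    # Expand into (anchor, positive, negative) triplets for contrastive training.
    triplets = [(query, positive, neg) for neg in negatives]
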
This API is used to query the list of image sprite generating templates and supports paged queries by filters.
def DescribeImageSpriteTemplates(self, request):
    try:
        params = request._serialize()
        headers = request.headers
        body = self.call("DescribeImageSpriteTemplates", params, headers=headers)
        response = json.loads(body)
        model = models.DescribeImageSpriteTemplatesResponse()
        model._deserialize(response["Response"])
        return model
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            raise TencentCloudSDKException(type(e).__name__, str(e))
[ "def get_trainingimages(self, request, template='trainingimages.html'):\n query_id = request.GET.get('qsid', None)\n if query_id == None:\n raise Http404(\"Query ID not specified. Query does not exist\")\n\n # get query definition dict from query_ses_id\n query = self.visor_controller.query_key_cache.get_query_details(query_id)\n\n # check that the query is still valid (not expired)\n if query == None:\n message = 'This query has expired. Please enter your query again in the home page.'\n redirect_to = settings.SITE_PREFIX\n return render_to_response(\"alert_and_redirect.html\", context={'REDIRECT_TO': redirect_to, 'MESSAGE': message})\n\n viewmode = request.GET.get('view', None)\n page = request.GET.get('page', None)\n if page:\n page = int(page)\n\n # get list of training images for the query\n trainimgs = self.visor_controller.interface.get_training_images(query, user_ses_id=request.session.session_key)\n\n # dsetname and page variables used to allow the user to return\n # to calling results page\n if query['qtype'] == opts.Qtypes.dsetimage:\n sa_thumbs = 'thumbnails/%s/' % query['dsetname']\n elif query['qtype'] == opts.Qtypes.refine:\n sa_thumbs = 'postrainimgs/'\n elif query['qtype'] == opts.Qtypes.text:\n sa_thumbs = 'postrainimgs/'\n elif query['qtype'] == opts.Qtypes.image:\n sa_thumbs = 'uploadedimgs/'\n\n # No matter the query type, check if it is a curated query\n if query['qdef'][0] == '#':\n sa_thumbs = 'curatedtrainimgs/'\n\n # compute home location taking into account any possible redirections\n home_location = settings.SITE_PREFIX + '/'\n if 'HTTP_X_FORWARDED_HOST' in request.META:\n home_location = 'http://' + request.META['HTTP_X_FORWARDED_HOST'] + home_location\n\n # prepare the details of every image in the list, for rendering the page\n if trainimgs:\n for image in trainimgs:\n image_path = image['image']\n anno = str(image['anno'])\n if anno == '1' or anno == '+1':\n anno_style = 'roi_box_positive'\n anno = '1'\n elif anno == '-1':\n anno_style = 'roi_box_negative'\n else:\n anno_style = 'roi_box_skip'\n image['anno'] = anno\n image['anno_style'] = anno_style\n\n # if case of these two types of search, be sure to add the\n # thumbs suffix, so that a thumbnail image is correctly downloaded\n # if a search is launched from this page\n image_path = image_path.replace('#', '%23') # html-encode curated search character\n if (query['qtype'] == opts.Qtypes.refine or\n query['qtype'] == opts.Qtypes.text):\n img_id = sa_thumbs + image_path\n else:\n img_id = image_path\n\n if 'roi' in image:\n img_id = img_id + ',roi:'\n idx = 0\n roi = image['roi']\n for coord in roi:\n if idx == 0:\n img_id = img_id + '%0.2f' % (coord)\n else:\n img_id = img_id + '_%0.2f' % (coord)\n idx = idx + 1\n image['img_id'] = img_id\n image['image'] = urllib.parse.quote(image['image'])\n\n\n # set up rendering context and render the page\n context = {\n 'QUERY_ID': query_id,\n 'DATASET_NAME': query['dsetname'],\n 'QUERY_TYPE': query['qtype'],\n 'TRAINIMGS' : trainimgs,\n 'PAGE': page,\n 'SA_THUMBS' : sa_thumbs,\n 'ENGINE': request.session['engine']\n }\n return render_to_response(template, context)", "def create_sprite_image(images):\n if isinstance(images, list):\n images = np.array(images)\n img_h = images.shape[1]\n img_w = images.shape[2]\n n_plots = int(np.ceil(np.sqrt(images.shape[0]))) \n if len(images.shape) > 3:\n spriteimage = np.ones(\n (img_h * n_plots, img_w * n_plots, images.shape[3]))\n else:\n spriteimage = np.ones((img_h * n_plots, img_w * n_plots))\n four_dims = 
len(spriteimage.shape) == 4\n for i in range(n_plots):\n for j in range(n_plots):\n this_filter = i * n_plots + j\n if this_filter < images.shape[0]:\n this_img = images[this_filter]\n if four_dims:\n spriteimage[i * img_h:(i + 1) * img_h,\n j * img_w:(j + 1) * img_w, :] = this_img\n else:\n spriteimage[i * img_h:(i + 1) * img_h,\n j * img_w:(j + 1) * img_w] = this_img\n return spriteimage", "def create_sprite_image(images):\n if isinstance(images, list):\n images = np.array(images)\n img_h = images.shape[1]\n img_w = images.shape[2]\n # sprite图像可以理解成是小图片平成的大正方形矩阵,大正方形矩阵中的每一个元素就是原来的小图片。于是这个正方形的边长就是sqrt(n),其中n为小图片的数量。\n n_plots = int(np.ceil(np.sqrt(images.shape[0])))\n\n # 使用全1来初始化最终的大图片。\n spriteimage = np.ones((img_h*n_plots, img_w*n_plots))\n\n for i in range(n_plots):\n for j in range(n_plots):\n # 计算当前图片的编号\n this_filter = i*n_plots + j\n if this_filter < images.shape[0]:\n # 将当前小图片的内容复制到最终的sprite图像\n this_img = images[this_filter]\n spriteimage[i*img_h:(i + 1)*img_h,\n j*img_w:(j + 1)*img_w] = this_img\n\n return spriteimage", "def generate_images_pred(self, inputs, outputs):\n for scale in self.opt.scales:\n disp = outputs[(\"disp\", scale)]\n if self.opt.v1_multiscale:\n source_scale = scale\n else:\n # without interpolate\n if self.opt.using_v not in [3,4]:\n disp = F.interpolate(\n disp, [self.opt.height, self.opt.width], mode=\"bilinear\", align_corners=False)\n source_scale = 0\n\n _, depth = disp_to_depth(disp, self.opt.min_depth, self.opt.max_depth)#disp_to_depth function is in layers.py\n\n outputs[(\"depth\", 0, scale)] = depth\n\n for i, frame_id in enumerate(self.opt.frame_ids[1:]):\n\n if frame_id == \"s\":\n T = inputs[\"stereo_T\"]\n else:\n T = outputs[(\"cam_T_cam\", 0, frame_id)]\n\n # from the authors of https://arxiv.org/abs/1712.00175\n if self.opt.pose_model_type == \"posecnn\":\n\n axisangle = outputs[(\"axisangle\", 0, frame_id)]\n translation = outputs[(\"translation\", 0, frame_id)]\n\n inv_depth = 1 / depth\n mean_inv_depth = inv_depth.mean(3, True).mean(2, True)\n\n T = transformation_from_parameters(\n axisangle[:, 0], translation[:, 0] * mean_inv_depth[:, 0], frame_id < 0)\n\n cam_points = self.backproject_depth[source_scale](\n depth, inputs[(\"inv_K\", source_scale)])\n pix_coords = self.project_3d[source_scale](\n cam_points, inputs[(\"K\", source_scale)], T)\n\n outputs[(\"sample\", frame_id, scale)] = pix_coords\n\n outputs[(\"color\", frame_id, scale)] = F.grid_sample(\n inputs[(\"color\", frame_id, source_scale)],\n outputs[(\"sample\", frame_id, scale)],\n padding_mode=\"border\")\n\n if not self.opt.disable_automasking:\n outputs[(\"color_identity\", frame_id, scale)] = \\\n inputs[(\"color\", frame_id, source_scale)]", "def generate_images_pred(self, inputs, outputs):\n assert outputs[(\"disp\", 0)].shape[-2:] == (\n self.height, self.width), f'{outputs[(\"disp\", 0)].shape[-2:]} should be {(self.height, self.width)} '\n for scale in self.scales:\n disp = outputs[(\"disp\", scale)]\n disp = F.interpolate(\n disp, [self.height, self.width], mode=\"bilinear\", align_corners=False)\n source_scale = 0\n\n _, depth = disp_to_depth(disp, self.min_depth, self.max_depth)\n\n outputs[(\"depth\", 0, scale)] = depth\n\n for i, frame_id in enumerate(self.frame_ids[1:]):\n\n if frame_id == \"s\":\n T = inputs[\"stereo_T\"]\n else:\n T = outputs[(\"cam_T_cam\", 0, frame_id)]\n\n cam_points = self.backproject_depth[source_scale](\n depth, inputs[(\"inv_K\", source_scale)])\n pix_coords = self.project_3d[source_scale](\n cam_points, inputs[(\"K\", 
source_scale)], T)\n\n outputs[(\"sample\", frame_id, scale)] = pix_coords\n\n outputs[(\"color\", frame_id, scale)] = F.grid_sample(\n inputs[(\"color\", frame_id, source_scale)],\n outputs[(\"sample\", frame_id, scale)],\n padding_mode=\"border\",\n align_corners=True)\n\n if not self.disable_automasking:\n outputs[(\"color_identity\", frame_id, scale)] = \\\n inputs[(\"color\", frame_id, source_scale)]", "def index():\n cursor = get_db().cursor(dictionary=True)\n\n cursor.execute(\n 'SELECT p.id as id, name, created, user_id, username'\n ' FROM images p JOIN users u ON p.user_id = u.id'\n ' WHERE p.user_id = %s '\n ' ORDER BY created DESC', (g.user['id'],)\n )\n\n images = cursor.fetchall()\n\n return render_template('image/index.html', images=images)", "def get_sprites(imag, ctrs, debug=False):\n\n # We make sure that we work on a local copy of the image\n imag = imag.copy()\n\n # We loop through the sprites \n sprts = []\n\n for contour in ctrs:\n\n # We compute the projective transform\n\n destination_points = np.array(\n [\n [28, 28],\n [0, 28],\n [0, 0],\n [28, 0]\n ]\n )\n\n tform = tf.estimate_transform('similarity', contour, destination_points)\n\n\n # We transform the image\n\n warped = tf.warp(imag, inverse_map=tform.inverse)[:28, :28]\n\n if debug:\n _, axis = plt.subplots(nrows=2, figsize=(8, 3))\n axis[0].imshow(imag)\n axis[0].plot(destination_points[:, 0], destination_points[:, 1], '.r')\n axis[1].imshow(warped)\n plt.show()\n\n sprts.append(warped)\n\n return sprts", "def iter_templates(self):\n for page in self.iter_templates_pages():\n results = page.json()['results']\n for item in results:\n yield item", "def generate_images(self, n, *args):\n pass", "def display_images(cls):\n cls.objects.all()", "def get_many(\n self,\n offset: int = 0,\n limit: int = 10,\n image_origin: Optional[ResourceOrigin] = None,\n categories: Optional[list[ImageCategory]] = None,\n is_intermediate: Optional[bool] = None,\n board_id: Optional[str] = None,\n ) -> OffsetPaginatedResults[ImageDTO]:\n pass", "def pictures():\n\n return render_template(\"pictures.html\")", "def par_template_match(self, list_image_file, template_file, draw_images=False, image_num=None,\n normalised_coords=True, threshold=0.99):\n\n import _thread as thread\n\n def unwrap_fun(image_file, template_file):\n obj = YoloTools()\n return obj.template_match(image_file, template_file, draw_images=draw_images, image_num=None,\n normalised_coords=normalised_coords, threshold=threshold)\n\n t = 0\n for image_file in list_image_file:\n print('Thread :: {}'.format(t))\n x, y, w, h = thread.start_new_thread(unwrap_fun, (image_file, template_file,))\n t += 1\n\n return x, y, w, h", "def textures(self, water=None, radii=[5], offsets=[1],\n kinds=['simple', 'advanced', 'higher']):\n polarisations = ['vv']\n if 'vh' in self.available:\n polarisations.append('vh')\n\n for polar in polarisations:\n\n src_array = getattr(self, polar)\n src_array = histogram_cutting(src_array, percent=2, mask=~water)\n src_array = rescale_to_uint8(src_array)\n\n src_profile = self.profile.copy()\n src_profile.update(dtype=src_array.dtype.name)\n\n with TemporaryDirectory() as tmp_dir:\n src_raster = os.path.join(tmp_dir, 'src_raster.tif')\n with rasterio.open(src_raster, 'w', **src_profile) as dst:\n dst.write(src_array, 1)\n\n for radius, offset, kind in product(radii, offsets, kinds):\n fname = '{polar}_{kind}_{size}x{size}_{offset}.tif'.format(\n polar=polar, kind=kind, size=radius*2+1, offset=offset)\n dst_raster = os.path.join(self.dir, fname)\n 
compute_textures(\n src_raster, dst_raster, kind, radius, offset)\n\n return True", "def getimages(ra,dec,size=1200,filters=\"grizy\"):\n \n service = \"https://ps1images.stsci.edu/cgi-bin/ps1filenames.py\"\n url = (\"{service}?ra={ra}&dec={dec}&size={size}&format=fits\"\n \"&filters={filters}\").format(**locals())\n table = Table.read(url, format='ascii')\n return table", "def make_image_list(self):\n return [\n tools.get_image(48, 0, 16, 16, self.sprite_sheet),\n tools.get_image(0, 0, 22, 16, setup.GFX['sword2'])\n ]", "def get_png_pages(self, stylesheets=None, resolution=None,\n _with_pages=False):\n document = self._get_document(stylesheets, enable_hinting=True)\n return document.get_png_pages(resolution, _with_pages)", "def xmlrpc_get_images(self, info, owner_userid, glob, limit, offset):\n\t\treturn self.get_images(owner_userid, info['userid'], glob, limit, offset)", "def get_images(self):\n pass", "def preprocess_sprites(sprts, debug=False):\n\n out_sprites = []\n\n for imag in sprts:\n\n # We make a local copy\n imag = imag.copy()\n\n # We rescale, inverse and normalize.\n\n imag = 1.0 - imag\n imag = imag - imag.mean()\n imag = imag/imag.std()\n\n if debug:\n plt.imshow(imag)\n plt.title(\"Pre-processed sprites\")\n plt.colorbar()\n plt.show()\n\n out_sprites.append(imag)\n\n return out_sprites" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to query the list of VOD domain names.
def DescribeVodDomains(self, request):
    try:
        params = request._serialize()
        headers = request.headers
        body = self.call("DescribeVodDomains", params, headers=headers)
        response = json.loads(body)
        model = models.DescribeVodDomainsResponse()
        model._deserialize(response["Response"])
        return model
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            raise TencentCloudSDKException(type(e).__name__, str(e))
[ "def list_domains(self):\n r = self.make_call('execute/DomainInfo/list_domains')\n if r is None:\n return None\n return r['data']", "def get_nameservers(cli):\n for domain in cli.args.domain:\n attrs = {\n 'domain': domain,\n 'type': 'nameservers',\n }\n response = cli.opensrs.post(\"get\", \"domain\", attrs)\n try:\n nameservers_dict_list = response['attributes']['nameserver_list']\n nameserver_list = map(\n lambda d: d['name'],\n nameservers_dict_list\n )\n print '%s:\\n%s' % (domain, '\\n'.join(nameserver_list))\n except KeyError:\n print '%s: %s' % (domain, response)\n print", "def list_domains(self):\n spacer = \" \" * 34\n print ('domain%stype' % spacer)\n print ('-' * 47)\n self._query('SELECT * FROM domains ORDER BY name,type')\n for row in self.cursor.fetchall():\n print ('%s%s') % (row[1].ljust(40), row[4])\n print ('')", "def _find_domain_info(self, domain_name):\n j = self.dc.post(\"/Domain.List\").json()\n self._check_response(j)\n for domain_obj in j[\"domains\"]:\n if domain_name.endswith(domain_obj[\"name\"]):\n return DomainInfo(domain_obj[\"id\"], domain_obj[\"name\"])", "def listDomainsID(self):\n ret = libvirtmod.virConnectListDomainsID(self._o)\n if ret is None: raise libvirtError ('virConnectListDomainsID() failed', conn=self)\n return ret", "def get_dn_list(self, context):\n if context.upper() == \"DOMAIN\":\n search_base = self.con.base_dn\n elif context.upper() == \"CONFIGURATION\":\n search_base = self.con.config_dn\n elif context.upper() == \"SCHEMA\":\n search_base = self.con.schema_dn\n elif context.upper() == \"DNSDOMAIN\":\n search_base = \"DC=DomainDnsZones,%s\" % self.con.base_dn\n elif context.upper() == \"DNSFOREST\":\n search_base = \"DC=ForestDnsZones,%s\" % self.con.root_dn\n\n dn_list = []\n if not self.search_base:\n self.search_base = search_base\n self.search_scope = self.search_scope.upper()\n if self.search_scope == \"SUB\":\n self.search_scope = SCOPE_SUBTREE\n elif self.search_scope == \"BASE\":\n self.search_scope = SCOPE_BASE\n elif self.search_scope == \"ONE\":\n self.search_scope = SCOPE_ONELEVEL\n else:\n raise StandardError(\"Wrong 'scope' given. 
Choose from: SUB, ONE, BASE\")\n try:\n res = self.con.ldb.search(base=self.search_base, scope=self.search_scope, attrs=[\"dn\"])\n except LdbError, (enum, estr):\n self.outf.write(\"Failed search of base=%s\\n\" % self.search_base)\n raise\n for x in res:\n dn_list.append(x[\"dn\"].get_linearized())\n #\n global summary\n #\n return dn_list", "def dns_names():\n return (\n s.lists(dns_labels(), min_size=1, max_size=10)\n .map(u'.'.join))", "def get_domains_command(client: Client, args: Dict) -> CommandResults:\n domain_id = arg_to_number(args.get('domain_id', None))\n limit = arg_to_number(args.get('limit', DEFAULT_LIMIT)) or DEFAULT_LIMIT\n page = arg_to_number(args.get('page'))\n page_size = arg_to_number(args.get('page_size'))\n if (page and not page_size) or (not page and page_size):\n raise Exception('Please provide both page and page_size arguments.')\n\n response = client.get_domains_request(domain_id)\n results = response.get('DomainDescriptor', {})\n contents = []\n if domain_id is not None:\n title = f'Domain no.{domain_id}'\n results = {\n 'ID': results.get('id'),\n 'Name': results.get('name'),\n 'childdomains': results.get('childdomains')\n }\n contents = [{\n 'ID': results.get('ID'),\n 'Name': results.get('Name')\n }]\n else:\n title = 'List of Domains'\n children = [results]\n h_r_get_domains(children, contents)\n contents = pagination(contents, limit, page, page_size)\n readable_outputs = tableToMarkdown(\n name=title,\n t=contents,\n removeNull=True\n )\n return CommandResults(\n readable_output=readable_outputs,\n outputs_prefix='NSM.Domains',\n outputs=results,\n raw_response=results,\n outputs_key_field='ID'\n )", "def show_dns_entries(domain_service, domain_name):\n try:\n dns_entries = domain_service.get_info(domain_name).dnsEntries\n except WebFault as err:\n print(err)\n sys.exit(1)\n print(dns_entries)", "def domains(ctx: click.Context):\n manifest = ctx.obj[\"manifest\"]\n logger.info(\"Listing domains from manifest at %s\", manifest)\n for domain in mobilesync.iter_domains(manifest):\n print(domain)", "def get_lns_names(self, domain=None, inv=False) -> List[Info]:\n if domain is None:\n domain = self.v_by_addr if not inv else set()\n ret = []\n seen = set()\n with self.lock:\n if inv:\n domain = set(self.v_by_addr) - set(domain)\n for addr in domain:\n infos = self.v_by_addr.get(addr, set())\n for info in infos:\n if info and info not in seen:\n seen.add(info)\n ret.append(info)\n\n return ret", "def __query_all_pd_names(self):\n cerebro_client = CerebroInterfaceTool()\n pd_names = set()\n arg = ListProtectionDomainsArg()\n ret = cerebro_client.list_protection_domains(arg)\n for pd_name in ret.protection_domain_name:\n pd_names.add(PD(pd_name, False))\n\n # Get the name of the System PD\n system_pd_arg = ListProtectionDomainsArg()\n system_pd_arg.list_system_pd = True\n ret = cerebro_client.list_protection_domains(system_pd_arg)\n for pd_name in ret.protection_domain_name:\n pd_names.add(PD(pd_name, True))\n\n return pd_names", "def dga_domain_status(client: Client, args: dict) -> CommandResults:\n domains = argToList(str(args.get('domains')))\n\n response = client.domain_request(domains)\n\n domains_data = response.get('data', {})\n outputs = []\n for domain in domains:\n output = {\n 'domain': domain,\n 'malware_family': domains_data.get(domain, {}).get('malware_family'),\n 'probability': domains_data.get(domain, {}).get('probability')\n }\n outputs.append(output)\n return CommandResults(\n outputs_prefix='AnomaliEnterprise.DGA',\n outputs_key_field='domain',\n 
outputs=outputs,\n readable_output=tableToMarkdown(name=\"Domains DGA:\", t=outputs, removeNull=True),\n raw_response=response\n )", "def domainIndex(self, name):\n return _cantera.sim1D_domainIndex(self._hndl, name)", "def get_domain_name(DomainName=None):\n pass", "def get_domain_detail(self, domain_name):\n params = {'DomainName': domain_name, }\n return self.make_request(action='GetDomainDetail',\n body=json.dumps(params))", "def domain_name(self):\n ret = self._get_attr(\"domainName\")\n return ret", "async def get_categories_by_name_domain(domain_name: str, db: Session = Depends(get_db)):\n\tdomain_categories = crud.get_categories_by_domain_name(db, domain_name)\n\tcategory_names = [category[0] for category in domain_categories]\n\treturn category_names", "def list_mail_domains(self):\n r = self.make_call('execute/Email/list_mail_domains')\n if r is None:\n return None\n domains = []\n for d in r['data']:\n domains.append(d['domain'])\n return domains" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to modify an adaptive bitrate streaming template.
def ModifyAdaptiveDynamicStreamingTemplate(self, request):
    try:
        params = request._serialize()
        headers = request.headers
        body = self.call("ModifyAdaptiveDynamicStreamingTemplate", params, headers=headers)
        response = json.loads(body)
        model = models.ModifyAdaptiveDynamicStreamingTemplateResponse()
        model._deserialize(response["Response"])
        return model
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            raise TencentCloudSDKException(type(e).__name__, str(e))
[ "def setBitrate(self, bitrate):\n try:\n # bypassed by request from Ivan\n if (pu.pxpconfig.IgnoreVideoSettings()):\n dbg.prn(dbg.TDK,\"td -- SetBitrate BYBASSED\")\n return\n \n url = \"http://\"+self.ip+\"/cgi-bin/api.cgi\"\n if(not self.tdSession):\n self.login()\n #end if not tdSession\n dbg.prn(dbg.TDK,\"logged in: \", self.tdSession)\n url +=\"?session=\"+self.tdSession\n # bitrate for teradek should be in bps not in kbps:\n bitrate = bitrate * 1000\n dbg.prn(dbg.TDK,\"NEW BITRATE:.....................................\", bitrate)\n setcmd = \"&VideoEncoder.Settings.1.bitrate=\"+str(bitrate)\n savecmd = \"&q=VideoEncoder.Settings.1.bitrate\"\n\n dbg.prn(dbg.TDK,\"setting...\")\n iter1=5\n answer = False\n while(not answer and iter1>=0):\n answer = pu.io.url(url+\"&command=set\"+setcmd,timeout=10)\n iter1 -= 1\n dbg.prn(dbg.TDK,answer)\n # apply settings\n dbg.prn(dbg.TDK,\"applying...\")\n iter1=5\n answer = False\n while(not answer and iter1>=0):\n answer = pu.io.url(url+\"&command=apply\"+savecmd,timeout=10)\n iter1 -= 1\n dbg.prn(dbg.TDK,answer)\n # save the settings\n dbg.prn(dbg.TDK,\"saving...\")\n iter1=5\n answer = False\n while(not answer and iter1>=0):\n answer = pu.io.url(url+\"&command=save\"+savecmd,timeout=10)\n iter1 -= 1\n dbg.prn(dbg.TDK,answer)\n self.updatedb() \n dbg.prn(dbg.TDK,\"td_cube.setbitrate:\", bitrate, self.ip) \n except Exception as e:\n dbg.prn(dbg.TDK|dbg.ERR,\"[---]encTeradek.setBitrate:\", e, sys.exc_info()[-1].tb_lineno)", "def set_source_template(template):", "def mpeg_bit_rate_test(self, mpeg_bit_rate_test):\n\n self._mpeg_bit_rate_test = mpeg_bit_rate_test", "def _update_template(self, content):\r\n t, created = Template.objects.get_or_create(resource=self.resource)\r\n t.content = content\r\n t.save()", "def UpdateTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def custom_template(self, custom_template):\n\n self._custom_template = custom_template", "def UpdateTagTemplateField(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def __init__(self,\n active: bool,\n primary_cak: 'GatewayMacsecConfigTemplatePrimaryCak',\n *,\n fallback_cak: 'GatewayMacsecConfigTemplateFallbackCak' = None,\n window_size: int = None) -> None:\n self.active = active\n self.fallback_cak = fallback_cak\n self.primary_cak = primary_cak\n self.window_size = window_size", "def update_cache_settings(self):\n block_size = self.cache_block_size if self.cache_block_size else self.get_current_configuration()[\"cache_settings\"][\"cache_block_size\"]\n threshold = self.cache_flush_threshold if self.cache_flush_threshold else self.get_current_configuration()[\"cache_settings\"][\"cache_flush_threshold\"]\n try:\n rc, cache_settings = self.request(\"storage-systems/%s/symbol/setSACacheParams?verboseErrorResponse=true\" % self.ssid, method=\"POST\",\n data={\"cacheBlkSize\": block_size, \"demandFlushAmount\": threshold, \"demandFlushThreshold\": threshold})\n except Exception as error:\n self.module.fail_json(msg=\"Failed to set cache settings. Array [%s]. Error [%s].\" % (self.ssid, to_native(error)))", "def update_attributes_with_scales(self):\n # transfer buffer info from v. 
Useful for writing files.\n\n # purge the frame scales # probably develop a better way to handle this.\n for p in [att for att in self.data[\"attributes\"] if att.find(\"FrameScale\") == 0]:\n self.data[\"attributes\"].pop(p)\n\n for p in [att for att in self.data[\"attributes\"] if att.find(\"_SCALE_\") == 0]:\n self.data[\"attributes\"].pop(p)\n\n for a in self.data[\"buffer\"]:\n if a.find(\"scale\") == 0:\n sc = self.data[\"buffer\"][a]\n\n # parse the info from the buffer in suitable format\n\n # only work for frame0 at present\n\n _key = \"_SCALE_%s\" % a[-1].upper()\n _val = \"{0:0.6f} {1:0.6f}\\n{2}\\n\\n\".format(\n sc[\"factor\"], sc[\"offset\"], sc[\"unit\"].strip(\"\\n\").strip(\"\\x00\")\n )\n self.data[\"attributes\"][_key] = _val\n\n val = \"{0:0.6g}\\n{1:0.6g}\\n{2}\\n\".format(\n sc[\"factor\"], sc[\"offset\"], sc[\"unit\"].strip(\"\\n\").strip(\"\\x00\")\n )\n key = \"FrameScale%s0\" % a[-1].upper()\n self.data[\"attributes\"][key] = val", "def modify_packet(self, src_if, packet_size, pkt):\n dst_if_idx = int(packet_size / 10 % 2)\n dst_if = self.flows[src_if][dst_if_idx]\n info = self.create_packet_info(src_if, dst_if)\n payload = self.info_to_payload(info)\n p = pkt / Raw(payload)\n p[IPv6].dst = dst_if.remote_ip6\n info.data = p.copy()\n if isinstance(src_if, VppSubInterface):\n p = src_if.add_dot1_layer(p)\n self.extend_packet(p, packet_size)\n\n return p", "def _t_update_b(self):\n network = self.project.network\n phase = self.project.phases()[self.settings['phase']]\n Vi = network['pore.volume']\n dt = self.settings['t_step']\n s = self.settings['t_scheme']\n if (s == 'implicit'):\n f1, f2, f3 = 1, 1, 0\n elif (s == 'cranknicolson'):\n f1, f2, f3 = 0.5, 1, 0\n elif (s == 'steady'):\n f1, f2, f3 = 1, 0, 1\n x_old = self[self.settings['quantity']]\n b = (f2 * (1-f1) * (-self._A_steady) * x_old\n + f2 * (Vi/dt) * x_old\n + f3 * np.zeros(shape=(self.Np,), dtype=float))\n self._update_iterative_props()\n for item in self.settings['sources']:\n Ps = self.pores(item)\n # Update b\n b[Ps] = b[Ps] - f2 * (1-f1) * (phase[item + '.' 
+ 'rate'][Ps])\n self._b = b\n return b", "def parameter_template(self, value: Template):\n self.__parameter_template = value", "def template_flag(self, template_flag):\n\n self._template_flag = template_flag", "def transform(self, usage_info, block_structure):\n raise NotImplementedError", "async def jsonrpc_stream_update(\n self, claim_id, bid=None, file_path=None,\n channel_id=None, channel_name=None, channel_account_id=None, clear_channel=False,\n account_id=None, wallet_id=None, claim_address=None, funding_account_ids=None,\n preview=False, blocking=False, replace=False, validate_file=False, optimize_file=False, **kwargs):\n wallet = self.wallet_manager.get_wallet_or_default(wallet_id)\n assert not wallet.is_locked, \"Cannot spend funds with locked wallet, unlock first.\"\n funding_accounts = wallet.get_accounts_or_all(funding_account_ids)\n if account_id:\n account = wallet.get_account_or_error(account_id)\n accounts = [account]\n else:\n account = wallet.default_account\n accounts = wallet.accounts\n\n existing_claims = await self.ledger.get_claims(\n wallet=wallet, accounts=accounts, claim_id=claim_id\n )\n if len(existing_claims) != 1:\n account_ids = ', '.join(f\"'{account.id}'\" for account in accounts)\n raise InputValueError(\n f\"Can't find the stream '{claim_id}' in account(s) {account_ids}.\"\n )\n\n old_txo = existing_claims[0]\n if not old_txo.claim.is_stream and not old_txo.claim.is_repost:\n # in principle it should work with any type of claim, but its safer to\n # limit it to ones we know won't be broken. in the future we can expand\n # this if we have a test case for e.g. channel or support claims\n raise InputValueError(\n f\"A claim with id '{claim_id}' was found but it is not a stream or repost claim.\"\n )\n\n if bid is not None:\n amount = self.get_dewies_or_error('bid', bid, positive_value=True)\n else:\n amount = old_txo.amount\n\n if claim_address is not None:\n self.valid_address_or_error(claim_address)\n else:\n claim_address = old_txo.get_address(account.ledger)\n\n channel = None\n if not clear_channel and (channel_id or channel_name):\n channel = await self.get_channel_or_error(\n wallet, channel_account_id, channel_id, channel_name, for_signing=True)\n elif old_txo.claim.is_signed and not clear_channel and not replace:\n channel = old_txo.channel\n\n fee_address = self.get_fee_address(kwargs, claim_address)\n if fee_address:\n kwargs['fee_address'] = fee_address\n\n file_path, spec = await self._video_file_analyzer.verify_or_repair(\n validate_file, optimize_file, file_path, ignore_non_video=True\n )\n kwargs.update(spec)\n\n if replace:\n claim = Claim()\n if old_txo.claim.is_stream:\n if old_txo.claim.stream.has_source:\n claim.stream.message.source.CopyFrom(\n old_txo.claim.stream.message.source\n )\n stream_type = old_txo.claim.stream.stream_type\n if stream_type:\n old_stream_type = getattr(old_txo.claim.stream.message, stream_type)\n new_stream_type = getattr(claim.stream.message, stream_type)\n new_stream_type.CopyFrom(old_stream_type)\n else:\n claim = Claim.from_bytes(old_txo.claim.to_bytes())\n\n if old_txo.claim.is_stream:\n claim.stream.update(file_path=file_path, **kwargs)\n elif old_txo.claim.is_repost:\n claim.repost.update(**kwargs)\n\n if clear_channel:\n claim.clear_signature()\n tx = await Transaction.claim_update(\n old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0],\n channel if not clear_channel else None\n )\n\n new_txo = tx.outputs[0]\n stream_hash = None\n if not preview and old_txo.claim.is_stream:\n 
old_stream = self.file_manager.get_filtered(sd_hash=old_txo.claim.stream.source.sd_hash)\n old_stream = old_stream[0] if old_stream else None\n if file_path is not None:\n if old_stream:\n await self.file_manager.delete(old_stream, delete_file=False)\n file_stream = await self.file_manager.create_stream(file_path)\n new_txo.claim.stream.source.sd_hash = file_stream.sd_hash\n new_txo.script.generate()\n stream_hash = file_stream.stream_hash\n elif old_stream:\n stream_hash = old_stream.stream_hash\n\n if channel:\n new_txo.sign(channel)\n await tx.sign(funding_accounts)\n\n if not preview:\n await self.broadcast_or_release(tx, blocking)\n\n async def save_claims():\n await self.storage.save_claims([self._old_get_temp_claim_info(\n tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name\n )])\n if stream_hash:\n await self.storage.save_content_claim(stream_hash, new_txo.id)\n\n self.component_manager.loop.create_task(save_claims())\n self.component_manager.loop.create_task(self.analytics_manager.send_claim_action('publish'))\n else:\n await account.ledger.release_tx(tx)\n\n return tx", "def _updateTs(self, newTs):\n self.longPollPayload.update({'ts': newTs})", "def test_12_pblocksize_setting(request):\n depends(request, [\"pool_04\", \"iscsi_cmd_00\"], scope=\"session\")\n iqn = f'{basename}:{target_name}'\n with configured_target_to_file_extent(target_name, pool_name, dataset_name, file_name) as iscsi_config:\n extent_config = iscsi_config['extent']\n with iscsi_scsi_connection(ip, iqn) as s:\n TUR(s)\n data = s.readcapacity16().result\n # By default 512 << 3 == 4096\n assert data['lbppbe'] == 3, data\n\n # First let's just change the blocksize to 2K\n payload = {'blocksize': 2048}\n results = PUT(f\"/iscsi/extent/id/{extent_config['id']}\", payload)\n assert results.status_code == 200, results.text\n\n TUR(s)\n data = s.readcapacity16().result\n assert data['block_length'] == 2048, data\n assert data['lbppbe'] == 1, data\n\n # Now let's change it back to 512, but also set pblocksize\n payload = {'blocksize': 512, 'pblocksize': True}\n results = PUT(f\"/iscsi/extent/id/{extent_config['id']}\", payload)\n assert results.status_code == 200, results.text\n\n TUR(s)\n data = s.readcapacity16().result\n assert data['block_length'] == 512, data\n assert data['lbppbe'] == 0, data\n\n with configured_target_to_zvol_extent(target_name, zvol) as iscsi_config:\n extent_config = iscsi_config['extent']\n with iscsi_scsi_connection(ip, iqn) as s:\n TUR(s)\n data = s.readcapacity16().result\n # We created a vol with volblocksize == 16K (512 << 5)\n assert data['lbppbe'] == 5, data\n\n # First let's just change the blocksize to 4K\n payload = {'blocksize': 4096}\n results = PUT(f\"/iscsi/extent/id/{extent_config['id']}\", payload)\n assert results.status_code == 200, results.text\n\n TUR(s)\n data = s.readcapacity16().result\n assert data['block_length'] == 4096, data\n assert data['lbppbe'] == 2, data\n\n # Now let's also set pblocksize\n payload = {'pblocksize': True}\n results = PUT(f\"/iscsi/extent/id/{extent_config['id']}\", payload)\n assert results.status_code == 200, results.text\n\n TUR(s)\n data = s.readcapacity16().result\n assert data['block_length'] == 4096, data\n assert data['lbppbe'] == 0, data", "def template_data(self, template_data):\n\n self._template_data = template_data", "def set_if_bandwidth(instrument, if_bandwidth, window_num=1, channel_num=1):\n command = ':SENSe%s:BANDwidth:RESolution %G HZ' % (window_num, if_bandwidth)\n instrument.write(command)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to modify a custom animated image generating template.
def ModifyAnimatedGraphicsTemplate(self, request):
    try:
        params = request._serialize()
        headers = request.headers
        body = self.call("ModifyAnimatedGraphicsTemplate", params, headers=headers)
        response = json.loads(body)
        model = models.ModifyAnimatedGraphicsTemplateResponse()
        model._deserialize(response["Response"])
        return model
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            raise TencentCloudSDKException(type(e).__name__, str(e))
[ "def generate_image(self) -> None:", "def animSetCustom():\n return \"TODO\"", "def create_image(self, obj):\r\n randomize = random.randint(0,3)\r\n if randomize == 0:\r\n image_id = self.canvas.create_image(25, 50, image=obj)\r\n elif randomize == 1:\r\n image_id = self.canvas.create_image(25, 125, image=obj)\r\n elif randomize == 2:\r\n image_id = self.canvas.create_image(25, 200, image=obj)\r\n else:\r\n image_id = self.canvas.create_image(25, 275, image=obj)\r\n self.animation(image_id)", "def setup_animation(self, img, num_rows, num_columns):\r\n base_image = pyglet.image.load(img)\r\n animation_grid = pyglet.image.ImageGrid(base_image,\r\n num_rows,\r\n num_columns)\r\n image_frames = []\r\n\r\n for i in range(num_rows*num_columns, 0, -1):\r\n frame = animation_grid[i-1]\r\n animation_frame = pyglet.image.AnimationFrame(frame, 0.2)\r\n image_frames.append(animation_frame)\r\n\r\n animation = pyglet.image.Animation(image_frames)\r\n return animation", "def update_image(self):\n image_dict = {} # should be a dict in the form x heading, appropriate animation index basic idea is that it will find the the key with the least difference from the current x heading, and make that value self.image. Will complete when i get the sprite", "def create_output_image(img, instances):\n pass", "def _generateAnimation(self):\n self.animation = FuncAnimation(self._figure, self._animate, interval=self._interval, frames = self._frames)\n\n return self.animation", "def _make_animated_png(self):\n if self._info['comp_out']:\n logger.info('CREATING ANIMATED PNG:: %s:%s' % (self.name, self.locale))\n input_, output = self._get_input_output('animated')\n cmd = self._make_animated_png_cmd(input_, output)\n return_code, stdout_value, stderr_value = self.run_cmd(cmd)\n if return_code != 0:\n raise exceptions.MakeAnimatedPNGError(cmd, return_code, stdout_value, stderr_value)\n return return_code, stdout_value, stderr_value\n else:\n print 'No composite for %s' % self.name", "def custom_template(self, custom_template):\n\n self._custom_template = custom_template", "def create_anim_gif(source, anim, path):\n\n plist = readPlist(os.path.join(path, source+'.plist'))\n \n key = '{0}_{1}'.format(source, anim)\n key_splited = key.split('_')[0:3]\n\n frames = {}\n for frame in plist.frames.keys():\n if frame.split('_')[0:3] == key_splited:\n f = frame.replace('.png', '').replace(key+'_', '')\n frames[int(f)] = plist.frames[frame]\n\n pil_frames = []\n\n img = Image.open(os.path.join(path, source+'.png'))\n img.putalpha(20)\n string = \"DO NOT MEME !!\"\n cmpt = 0\n colors = ['black', 'green', 'blue', 'red']\n \n for f in frames:\n cmpt+=1\n coords = [int(c) for c in frames[f].frame.replace('{', '').replace('}', '').split(',')]\n coords[2] += coords[0]\n coords[3] += coords[1]\n cropped_img = img.crop((coords))\n \n dynamic_string = string[0:int((cmpt*len(string))/len(frames))]\n #draw_text(cropped_img, dynamic_string, color_rectangle=colors[cmpt%4])\n \n pil_frames.append(cropped_img)\n \n writeGif(os.path.join(path,'{0}_{1}.gif'.format(source, anim)), pil_frames, subRectangles=False)", "def image(self, image_path, text=\"\"):\r\n pass", "def picture(string, docTag=\"string\", height=int, defineTemplate=\"string\", parent=\"string\", image=\"string\", tile=bool, useTemplate=\"string\", width=int, highlightColor=float, popupMenuArray=bool, annotation=\"string\", enable=bool, dropCallback=\"string\", exists=bool, enableBackground=bool, numberOfPopupMenus=bool, visibleChangeCommand=\"string\", visible=bool, 
fullPathName=bool, preventOverride=bool, dragCallback=\"string\", noBackground=bool, backgroundColor=float, manage=bool, isObscured=bool):\n pass", "def setNext(self, img_time, img):", "def drawTemplate(self, target_shape, M):\n\n #\n # Prepare the template canvas\n #\n img = Image.new(\n mode='RGBA', size=target_shape, color=(255, 255, 255, 0))\n ctx = aggdraw.Draw(img)\n brush = aggdraw.Brush(self._color, 255)\n\n # Set the transform\n # Note:\n # aggdraw supports only affine transforms, so we use only the first 6\n # parameters of the projection transform.\n C = np.array(((1 / self._size_ratio, 0, 0), (0, 1 / self._size_ratio,\n 0), (0, 0, 1)))\n M = np.dot(M, C)\n M = M / M[2, 2]\n ctx.settransform(M.ravel()[:6])\n\n # Draw the form of the target\n self._drawForm(ctx, brush)\n\n # Add letter.\n if self._letter is not None:\n # The font is half the size of the form\n C = np.array(((self._size / self._font_size / 2, 0,\n self._size / 2),\n (0, self._size / self._font_size / 2,\n self._size * self._text_offset_ratio), (0, 0, 1)))\n M = np.dot(M, C)\n ctx.settransform(M.ravel()[:6])\n\n self._drawLetter(ctx)\n\n # Flush to apply drawing.\n ctx.flush()\n\n img = np.array(img)\n self._templateImg = img[..., :3]\n self._templateAlpha = img[..., 3].astype(np.float32) / 255", "def _make_animation_copies(self):\n if self._info['animated']:\n self._composite_animation_frames()\n self._make_animated_png()\n self._make_animated_gif()\n self._make_mp4()\n self._make_webm()\n self._rm_temp_files()", "def set_source_template(template):", "def matchTemplate(image, templ, method, result=..., mask=...) -> result:\n ...", "def create_new_image(self):\n logging.info('Starting image \\'' + self.name + '\\' creation')", "def transform(self):\n if self.form == \"human\":\n self.form = \"slime\"\n self._human_frames = self.frames\n self.frames = self._slime_frames\n self.movespeed = 20\n elif self.form == \"slime\":\n self.form = \"human\"\n self.frames = self._human_frames\n self.movespeed = 15\n self.image = self.select_image()\n self.width = self.image.width\n self.height = self.image.height", "def save_transform_and_image(transform, fixed_image, moving_image, outputfile_prefix): \n resample = sitk.ResampleImageFilter()\n resample.SetReferenceImage(fixed_image)\n \n # SimpleITK supports several interpolation options, we go with the simplest that gives reasonable results. \n resample.SetInterpolator(sitk.sitkLinear) \n resample.SetTransform(transform)\n sitk.WriteImage(resample.Execute(moving_image), outputfile_prefix+'.mha')\n sitk.WriteTransform(transform, outputfile_prefix+'.tfm')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to set the default storage region. A file will be stored in the default region if no region is specified for file upload.
def ModifyDefaultStorageRegion(self, request):
    try:
        params = request._serialize()
        headers = request.headers
        body = self.call("ModifyDefaultStorageRegion", params, headers=headers)
        response = json.loads(body)
        model = models.ModifyDefaultStorageRegionResponse()
        model._deserialize(response["Response"])
        return model
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            raise TencentCloudSDKException(type(e).__name__, str(e))
[ "def set_default_storage_location(cls, storage_location: str) -> None:\n if storage_location:\n storage_dict = {'storage_location': storage_location}\n cls.__save(storage_dict)", "def get_default_region(self):\r\n return self._default_region", "def set_region():\n\n region = None\n session = boto3.session.Session()\n region = session.region_name\n if region: # already defined in env var or config file\n return\n else:\n try:\n region = requests.get(\"http://169.254.169.254/latest/dynamic/instance-identity/document\").json()['region']\n boto3.setup_default_session(region_name=region)\n print(json.dumps({\"message\": \"set region to {}\".format(region)}))\n except:\n print(json.dumps({\"message\": \"getting region failed from instance metadata failed\"}))\n pass", "def _create_region_from_default(self, region):\n options = self.default_region_options.copy()\n self.regions[region] = options\n self.bcm.regions.update({region: options})\n beaker.cache.cache_regions.update({region: options})", "def set_region(self, region):\n self._region_name = region", "def set_region(self, region):\n self._region = region", "def set_region(self, region: str) -> None:\n if region != self._region:\n self._region = region\n self.clear()", "def SetIORegion(self, _arg: 'itkImageIORegion') -> \"void\":\n return _ITKIOImageBaseBasePython.itkImageIOBase_SetIORegion(self, _arg)", "def assign_region(self, region: Region):\n self.region = region", "def get_default_storage_location(cls) -> str:\n return cls.__open('storage_location')", "def region_name(self) -> Optional[str]:\n aws_region = self._aws_region\n if aws_region:\n return aws_region\n if self._aws_credentials_dir:\n aws_credentials_file = os.path.join(self._aws_credentials_dir, \"credentials\")\n _, _, aws_region = self._read_aws_credentials_from_file(aws_credentials_file)\n if aws_region:\n return aws_region\n aws_config_file = os.path.join(self._aws_credentials_dir, \"config\")\n _, _, aws_region = self._read_aws_credentials_from_file(aws_config_file)\n if aws_region:\n return aws_region\n aws_region = os.environ.get(\"AWS_REGION\", os.environ.get(\"AWS_DEFAULT_REGION\"))\n if aws_region:\n return aws_region\n aws_credentials_file = os.environ.get(\"AWS_SHARED_CREDENTIALS_FILE\", \"~/.aws/credentials\")\n _, _, aws_region = self._read_aws_credentials_from_file(aws_credentials_file)\n if aws_region:\n return aws_region\n aws_config_file = os.environ.get(\"AWS_CONFIG_FILE\", \"~/.aws/config\")\n _, _, aws_region = self._read_aws_credentials_from_file(aws_config_file)\n return aws_region", "def set_UserRegion(self, value):\n super(BatchGetItemInputSet, self)._set_input('UserRegion', value)", "def PromptForRegion():\n if console_io.CanPrompt():\n client = global_methods.GetServerlessClientInstance()\n all_regions = global_methods.ListRegions(client)\n idx = console_io.PromptChoice(\n all_regions,\n message='Please specify a region:\\n',\n cancel_option=True,\n allow_freeform=True,\n )\n region = all_regions[idx]\n log.status.Print(\n 'To make this the default region, run '\n '`gcloud config set run/region {}`.\\n'.format(region)\n )\n return region", "def set_resource(name, region):\n global KEY, SECRET\n return boto3.resource(name, region_name=region, aws_access_key_id=KEY, aws_secret_access_key=SECRET)", "def SetRequestedRegion(self, *args) -> \"void\":\n return _itkImagePython.itkImageBase3_SetRequestedRegion(self, *args)", "def region_specific(self, region_specific):\n\n self._region_specific = region_specific", "def region_option(f):\n\n def callback(ctx, 
param, value):\n state = ctx.ensure_object(Context)\n state.region = value\n return value\n\n return click.option(\n \"--region\",\n expose_value=False,\n help=\"Set the JDCloud Region of the service (e.g. north-1).\",\n callback=callback,\n )(f)", "def SetRequestedRegion(self, *args) -> \"void\":\n return _itkImagePython.itkImageBase2_SetRequestedRegion(self, *args)", "def SetRegions(self, *args) -> \"void\":\n return _itkImagePython.itkImageBase3_SetRegions(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to modify a custom image sprite generating template.
def ModifyImageSpriteTemplate(self, request):
    try:
        params = request._serialize()
        headers = request.headers
        body = self.call("ModifyImageSpriteTemplate", params, headers=headers)
        response = json.loads(body)
        model = models.ModifyImageSpriteTemplateResponse()
        model._deserialize(response["Response"])
        return model
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            raise TencentCloudSDKException(type(e).__name__, str(e))
[ "def create_sprite(self):\n rgb = (84, 170, 232)\n height = 15\n length = 15\n self.sprite = BaseStationSprite(rgb)", "def get_sprite(self):\n pass", "def create_sprite(location, image_name, animal_name):\n image = pygame.image.load(image_name)\n image_rect = Rect(location, (24, 24))\n new_sprite = Sprite(image, image_rect, animal_name)\n return new_sprite", "def generate_image(self) -> None:", "def spawn_sprite(location, image_name, animal_name, should_be_pollinated=False):\n image = pygame.image.load(image_name)\n image_rect = Rect(location, (24, 24))\n if should_be_pollinated:\n new_sprite = PlantSprite(image_rect, True)\n else:\n if animal_name == \"wolf\":\n new_sprite = WolfSprite(image_rect)\n elif animal_name == \"deer\":\n new_sprite = DeerSprite(image_rect)\n elif animal_name == \"plant\":\n new_sprite = PlantSprite(image_rect)\n elif animal_name == \"bees\":\n new_sprite = BeeSprite(image_rect)\n else:\n new_sprite = Sprite(image, image_rect, animal_name)\n new_sprite.blit()", "def sprite_path(self) -> str:\n return \"area/{}/tiles/tile{}_{}_0001.png\".format(\n self.sprite_set.name.lower(),\n self.sprite_tile,\n self.sprite_palette + 1,\n )", "def update_image(self):\n image_dict = {} # should be a dict in the form x heading, appropriate animation index basic idea is that it will find the the key with the least difference from the current x heading, and make that value self.image. Will complete when i get the sprite", "def change_img(obj: pygame.sprite.Sprite, img):\r\n obj.image = img\r\n obj.image.set_colorkey(service.colors[\"BLACK\"])", "def create_sprite_sheet(name_to_image_path_dict):\n images = {name: Image.open(os.path.join(settings.PAINTINGS_DIR, os.path.basename(file_path.replace(\"\\\\\", \"/\"))))\n for name, file_path in name_to_image_path_dict.items()}\n image_to_location = {}\n\n name = \"-\".join(name_to_image_path_dict.keys())\n output_file = os.path.join(settings.SPRITE_SHEET_DIR, \"%s.%s\" % (name, settings.SPRITE_SHEET_FILETYPE))\n image_exists = os.path.isfile(output_file)\n\n master_height = max([i.size[1] for i in images.values()]) # Make it as high as the highest image\n master_width = sum([i.size[0] for i in images.values()]) # and as wide as all of them together\n\n if not image_exists:\n master = Image.new(\n mode='RGBA',\n size=(master_width, master_height),\n color=(0, 0, 0, 0)) # fully transparent\n\n cur_width = 0\n for count, name in enumerate(images.keys()):\n image = images[name]\n if not image_exists:\n master.paste(image, (cur_width, 0))\n \n image_to_location[name] = (image.size[0], image.size[1], cur_width, 0)\n cur_width += image.size[0]\n\n if not image_exists:\n if \"gif\" == settings.SPRITE_SHEET_FILETYPE:\n master.save(output_file, transparency=0)\n else:\n master.save(output_file)\n\n return output_file, image_to_location", "def sprite(spr):\r\n\r\n if spr[:4] == [90, 83, 80, 82]:\r\n # stolen from VT's code\r\n gfx_offset = spr[12] << 24 | spr[11] << 16 | spr[10] << 8 | spr[9]\r\n palette_offset = spr[18] << 24 | spr[17] << 16 | spr[16] << 8 | spr[15]\r\n patch = [\r\n {'524288': spr[gfx_offset:gfx_offset + 28671]},\r\n {'905992': spr[palette_offset:palette_offset + 120]},\r\n {'912885': spr[palette_offset + 120:palette_offset + 120 + 3]}\r\n ]\r\n # Else treat it like a SPR file instead\r\n else:\r\n patch = [\r\n {'524288': spr[0:28671]},\r\n {'905992': spr[28672:28791]},\r\n {\r\n '912885': [\r\n spr[28726],\r\n spr[28727],\r\n spr[28756],\r\n spr[28757],\r\n ]\r\n }\r\n ]\r\n return patch", "def render(self, sprites=(), 
global_state=None):", "def build_sprite(self, side):\n img = Image.new(\"RGBA\", (24,24), self.bgcolor)\n\n side = self.transform_image_side(side)\n otherside = side.transpose(Image.FLIP_LEFT_RIGHT)\n\n alpha_over(img, side, (6,3), side)\n alpha_over(img, otherside, (6,3), otherside)\n return img", "def render_image(self, name, pos):\n\n if \":\" in name:\n # If tileset not loaded, load each image of the tileset into the cache\n if name not in self.image_cache:\n base_name = name[:name.index(\":\")]\n tileset = pygame.image.load(\"res/gfx/\" + base_name + \".png\")\n tileset_rect = tileset.get_rect()\n tileset_width = int(tileset_rect.w / 64)\n tileset_height = int(tileset_rect.h / 64)\n for x in range(0, tileset_width):\n for y in range(0, tileset_height):\n index = x + (y * tileset_width)\n if index in self.level.map.alphas:\n self.image_cache[base_name + \":\" + str(index)] = tileset.subsurface(pygame.Rect(x * 64, y * 64, 64, 64))\n else:\n self.image_cache[base_name + \":\" + str(index)] = tileset.subsurface(pygame.Rect(x * 64, y * 64, 64, 64)).convert()\n\n # If the image object for the passed string isn't in the cache, add it to the cache\n if name not in self.image_cache:\n self.image_cache[name] = pygame.image.load(\"res/gfx/\" + name + \".png\")\n\n # Reset the timeout for these variables since we've just used them\n if self.enable_cache_timeout:\n self.image_timeout[name] = 0\n\n draw_x = 0\n draw_y = 0\n\n if pos[0] == \"CENTERED\":\n draw_x = (self.SCREEN_WIDTH / 2) - (self.image_cache[name].get_rect().w / 2)\n else:\n draw_x = pos[0]\n if pos[1] == \"CENTERED\":\n draw_y = (self.SCREEN_HEIGHT / 2) - (self.image_cache[name].get_rect().h / 2)\n else:\n draw_y = pos[1]\n\n self.screen.blit(self.image_cache[name], (draw_x, draw_y))", "def set_texture(self, value):\n self._rect.texture = value", "def setTextureFunction(state: 'SoState', value: 'SbBool const') -> \"void\":\n return _coin.SoShapeStyleElement_setTextureFunction(state, value)", "def custom_template(self, custom_template):\n\n self._custom_template = custom_template", "def __init__(self, file_name):\n self.sprite_sheet = pygame.image.load(file_name).convert()", "def generateImage(self, **kwargs):\n\n start_x = kwargs.get('start_x', None)\n start_y = kwargs.get('start_y', None)\n tile_width = kwargs.get('tile_width', 5)\n tile_height = kwargs.get('tile_height', 5)\n\n # Check that we have x and y tile coordinates\n if start_x == None or start_y == None :\n start_x, start_y = self.getXY()\n\n # Determine the size of the image\n width, height = 256 * tile_width, 256 * tile_height\n\n #Create a new image of the size require\n map_img = Image.new('RGB', (width,height))\n sat_img = Image.new('RGB', (width,height))\n\n for x in range(0, tile_width):\n for y in range(0, tile_height) :\n if True:\n if args.label:\n # Store the image with labels\n url = 'https://mt0.google.com/vt/lyrs=y&?x=' + str(start_x + x) + '&y=' + str(start_y + y) + '&z=' + str( self._zoom)\n if args.debug: print(url)\n else:\n url = 'https://mt0.google.com/vt/lyrs=s&?x=' + str(start_x + x) + '&y=' + str(start_y + y) + '&z=' + str( self._zoom)\n if args.debug: print(url)\n current_tile = str(x)+'-'+str(y)\n urllib.request.urlretrieve(url, current_tile)\n\n im = Image.open(current_tile)\n sat_img.paste(im, (x*256, y*256))\n\n os.remove(current_tile)\n\n\n if True:\n if args.label:\n url = 'https://mt0.google.com/vt?x='+str(start_x+x)+'&y='+str(start_y+y)+'&z='+str(self._zoom)\n if args.debug: print(url)\n else:\n url = 
'https://mt0.google.com/vt?x='+str(start_x+x)+'&y='+str(start_y+y)+'&z='+str(self._zoom) # work needs to be done\n if args.debug: print(url)\n\n current_tile = str(x)+'-'+str(y)\n urllib.request.urlretrieve(url, current_tile)\n\n im = Image.open(current_tile)\n map_img.paste(im, (x*256, y*256))\n\n os.remove(current_tile)\n\n return map_img, sat_img", "def texturePlacementContext(labelMapping=bool, image1=\"string\", history=bool, exists=bool, image2=\"string\", name=\"string\", image3=\"string\"):\n pass", "def SoShapeStyleElement_setTextureFunction(state: 'SoState', value: 'SbBool const') -> \"void\":\n return _coin.SoShapeStyleElement_setTextureFunction(state, value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to modify the storage class of media files.
def ModifyMediaStorageClass(self, request):
    try:
        params = request._serialize()
        headers = request.headers
        body = self.call("ModifyMediaStorageClass", params, headers=headers)
        response = json.loads(body)
        model = models.ModifyMediaStorageClassResponse()
        model._deserialize(response["Response"])
        return model
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            raise TencentCloudSDKException(type(e).__name__, str(e))
[ "def change_storage_class(self, new_storage_class, dst_bucket=None,\n validate_dst_bucket=True):\n bucket_name = dst_bucket or self.bucket.name\n if new_storage_class == 'STANDARD':\n return self.copy(bucket_name, self.name,\n reduced_redundancy=False, preserve_acl=True,\n validate_dst_bucket=validate_dst_bucket)\n elif new_storage_class == 'REDUCED_REDUNDANCY':\n return self.copy(bucket_name, self.name,\n reduced_redundancy=True, preserve_acl=True,\n validate_dst_bucket=validate_dst_bucket)\n else:\n raise BotoClientError('Invalid storage class: %s' %\n new_storage_class)", "def _object_storage_class(self, filename) -> S3StorageClass:\n attribute_block = self._object_attribute_block(filename)\n return attribute_block.storage_class", "def storage_class_name(self, value: str):\n self._properties[\"storageClassName\"] = value", "def _set_object_storage_class_for_testing(self, s3_filename, value: S3StorageClass):\n attribute_block = self._object_attribute_block(s3_filename)\n attribute_block.initialize_storage_class(value)", "def storage(self, **kwargs):\n return current_files_rest.storage_factory(fileinstance=self, **kwargs)", "def update_media(self):\r\n\r\n self.update_media_flag=False\r\n\r\n #Check converted media \r\n keys=MEDIA_EXTENSIONS.keys()\r\n media_files=dict(zip(keys,[{} for _ in xrange(len(keys))]))\r\n for folder in keys: \r\n folder_path=join(self.media_path,folder,'Converted')\r\n for obj in listdir(folder_path):\r\n obj_path=join(folder_path,obj)\r\n if isfile(obj_path):\r\n fileName, fileExtension = splitext(obj)\r\n if fileExtension in MEDIA_EXTENSIONS[folder]:\r\n timeout=self.check_timeout(fileName)\r\n media_files[folder][fileName]=(timeout,obj_path)\r\n elif isdir(obj_path):\r\n for file in listdir(obj_path):\r\n file_path=join(obj_path,file)\r\n if isfile(file_path):\r\n fileName, fileExtension = splitext(file)\r\n if fileExtension in MEDIA_EXTENSIONS[folder]:\r\n timeout=self.check_timeout(fileName)\r\n media_files[folder][fileName]=(timeout,file_path)\r\n break \r\n\r\n #Updates the database \r\n self.media_files=media_files\r\n \r\n #Convert PPT 2 MP4\r\n self.ppt_sniffer({'teasers':'ppSaveAsMP4','daily_specials':'ppSaveAsMP4','deec':'ppSaveAsJPG','quiz':'ppSaveAsJPG','AoW':'ppSaveAsJPG','CoW':'ppSaveAsJPG'})\r\n self.movie2mp4(['teasers','video','daily_specials']) \r\n\r\n self.update_media_flag=True", "def media_content_type(self):\n _LOGGER.debug(\"media_content_type()\")\n return MEDIA_TYPE_MUSIC", "def mime_class(self, value):\n self.logger.warn(\n \"Setting values on mime_class will NOT update the remote Canvas instance.\"\n )\n self._mime_class = value", "def media_type(self):\n # TODO: Support parameter", "def save(self, storage, name, meta):\n method = self._method\n # Calculate sizes.\n display_size = meta[\"size\"]\n image_data, original_size = self._image_data_and_size\n data_size = method.get_data_size(display_size, display_size.intersect(original_size))\n # Check whether we need to make a thumbnail.\n if data_size == original_size:\n super(ThumbnailAsset, self).save(storage, name, meta)\n else:\n # Use efficient image loading.\n image_data.draft(None, data_size)\n # Resize the image data.\n try:\n image_data = method.do_resize(image_data, original_size, display_size, data_size)\n except Exception as ex: # HACK: PIL raises all sorts of Exceptions :(\n raise ThumbnailError(str(ex))\n # Parse the image format.\n _, extension = os.path.splitext(name)\n format = extension.lstrip(\".\").upper().replace(\"JPG\", \"JPEG\") or \"PNG\"\n # If we're saving 
to PNG, make sure we're not in CMYK.\n if image_data.mode == \"CMYK\" and format == \"PNG\":\n image_data = image_data.convert(\"RGB\")\n # If the storage has a path, then save it efficiently.\n try:\n thumbnail_path = storage.path(name)\n except NotImplementedError:\n # No path for the storage, so save it in a memory buffer.\n buffer = StringIO()\n try:\n image_data.save(buffer, format)\n except Exception as ex: # HACK: PIL raises all sorts of Exceptions :(\n raise ThumbnailError(str(ex))\n # Write the file.\n buffer.seek(0, os.SEEK_END)\n buffer_length = buffer.tell()\n buffer.seek(0)\n file = File(buffer)\n file.size = buffer_length\n storage.save(name, file)\n else:\n # We can do an efficient streaming save.\n try:\n os.makedirs(os.path.dirname(thumbnail_path))\n except OSError:\n pass\n try:\n image_data.save(thumbnail_path, format)\n except Exception as ex: # HACK: PIL raises all sorts of Exceptions :(\n try:\n raise ThumbnailError(str(ex))\n finally:\n # Remove an incomplete file, if present.\n try:\n os.unlink(thumbnail_path)\n except:\n pass", "def _getFileClass(self, path):\r\n return self.FileClass", "def addMedia(self, m):", "def create_cephfs_storageclass(request):\n class_instance = request.node.cls\n\n def finalizer():\n \"\"\"\n Delete the CephFS storage class\n \"\"\"\n if class_instance.sc_obj.get():\n class_instance.sc_obj.delete()\n class_instance.sc_obj.ocp.wait_for_delete(class_instance.sc_obj.name)\n\n request.addfinalizer(finalizer)\n\n class_instance.sc_obj = helpers.create_storage_class(\n interface_type=constants.CEPHFILESYSTEM,\n interface_name=helpers.get_cephfs_data_pool_name(),\n secret_name=class_instance.cephfs_secret_obj.name,\n )\n assert class_instance.sc_obj, \"Failed to create storage class\"", "def update_mimetypes_mapping(cls) -> None:\n for mimetypes_mapping in cls.get_mimetypes_mapping():\n # INFO - G.M - 2019-11-22 - mimetype are added as strict to force override of default\n # system/mimetype lib value, which is needed for type like .obj where system type can be\n # \"text/plain\" or \"application/octet-stream\"\n mimetypes_storage.add_type( # type: ignore\n type=mimetypes_mapping.mimetype, ext=mimetypes_mapping.file_extension, strict=True\n )", "def storage_mode(self, storage_mode):\n\n self._storage_mode = storage_mode", "def register_filesystem(self, filesystem_class):\n self.filesystems.append(filesystem_class)", "def uploaded_mime_type(self, uploaded_mime_type):\n\n self._uploaded_mime_type = uploaded_mime_type", "def __init__(self, *args, **kwargs):\n super(MediafileSerializer, self).__init__(*args, **kwargs)\n self.serializer_field_mapping[dbmodels.FileField] = AngularCompatibleFileField\n\n # Make some fields read-oinly for updates (not creation)\n if self.instance is not None:\n self.fields[\"mediafile\"].read_only = True", "def updateSupportedFileTypes(self) -> None:\n supported_file_types = CuraApplication.getInstance().getMeshFileHandler().getSupportedFileTypesRead()\n self._supported_file_types = list(supported_file_types.keys())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to modify a custom moderation template.
def ModifyReviewTemplate(self, request):
    try:
        params = request._serialize()
        headers = request.headers
        body = self.call("ModifyReviewTemplate", params, headers=headers)
        response = json.loads(body)
        model = models.ModifyReviewTemplateResponse()
        model._deserialize(response["Response"])
        return model
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            raise TencentCloudSDKException(type(e).__name__, str(e))
[ "def edit_template(self, data: dict) -> None:\n self.add_operation({\n 'op': 'editTemplate',\n 'data': data,\n })", "def tweak_template_permission(self, tweak_template_permission):\n\n self._tweak_template_permission = tweak_template_permission", "def _update_template(self, content):\r\n t, created = Template.objects.get_or_create(resource=self.resource)\r\n t.content = content\r\n t.save()", "def modify(username, template, summary, owner, email=None):\n if owner:\n email = lookup_email_addr(owner)\n update_meta(template,\n username=username,\n owner=owner,\n email=email,\n summary=summary)", "def custom_template(self, custom_template):\n\n self._custom_template = custom_template", "def set_template(self, name: str, template: str) -> None:\n self.custom_templates[name] = template", "def tweak_template_permission(self):\n return self._tweak_template_permission", "def ModifyServiceTemplateAttribute(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyServiceTemplateAttribute\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyServiceTemplateAttributeResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def UpdateTagTemplateField(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def create_or_update_template(inps_dict):\n\n inps = inps_dict\n\n print('\\n*************** Template Options ****************')\n # write default template\n\n print (\"Custom Template File: \", inps.customTemplateFile)\n\n inps.project_name = get_project_name(inps.customTemplateFile)\n print (\"Project Name: \", inps.project_name)\n\n inps.work_dir = get_work_directory(None, inps.project_name)\n print(\"Work Dir: \", inps.work_dir)\n\n # Creates default Template\n inps = create_default_template(inps)\n\n\n return inps", "def test_update_entitlement_template(self):\n pass", "def ModifyServiceTemplateGroupAttribute(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyServiceTemplateGroupAttribute\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyServiceTemplateGroupAttributeResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def update_cluster_template(self, cluster_template_id, values):", "def UpdateTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def _draw_modifier_template(self, modifier):\n layout = self.layout.box()\n\n # This is the main title row. It mimics the Blender template_modifier, which (unfortunately)\n # requires valid Blender Modifier data. It would be nice if the Blender UI code were consistently\n # C or Python and not a frankenstein mix. 
I would probably prefer working in C, just because\n # the compiler saves my neck 99.9% of the time...</rant>\n row = layout.row(align=True)\n exicon = \"TRIA_DOWN\" if modifier.show_expanded else \"TRIA_RIGHT\"\n row.prop(modifier, \"show_expanded\", text=\"\", icon=exicon, emboss=False)\n row.label(text=modifier.bl_label, icon=getattr(modifier, \"bl_icon\", \"NONE\"))\n\n row.operator(\"object.plasma_modifier_move_up\", text=\"\", icon=\"TRIA_UP\").active_modifier = modifier.display_order\n row.operator(\"object.plasma_modifier_move_down\", text=\"\", icon=\"TRIA_DOWN\").active_modifier = modifier.display_order\n row.operator(\"object.plasma_modifier_copy\", text=\"\", icon=\"COPYDOWN\").active_modifier = modifier.display_order\n row.operator(\"object.plasma_modifier_reset\", text=\"\", icon=\"FILE_REFRESH\").active_modifier = modifier.display_order\n row.operator(\"object.plasma_modifier_remove\", text=\"\", icon=\"X\").active_modifier = modifier.display_order\n\n # Now we return the modifier box, which is populated with the modifier specific properties\n # by whatever insanity is in the modifier module. modifier modifier modifier...\n # MODDDDDDDDIFIIIIEEEERRRRRRRR!!!\n return layout", "def test_template_permission_sets_id_replace_post(self):\n pass", "def put(self, request, vnf_id):\n if request.META['CONTENT_TYPE'] != 'application/json':\n return HttpResponse(status=415)\n try:\n if 'functional-capability' not in request.data.keys():\n return HttpResponse(\"Missing functional-capability field\", status=400)\n capability = request.data['functional-capability']\n ValidateTemplate().validate(request.data)\n template = json.dumps(request.data)\n except:\n return HttpResponse(status=400)\n res = API.updateVNFTemplate(vnf_id, template, capability)\n if not res:\n return HttpResponse(status=404)\n return HttpResponse(status=200)", "def template_permission(self, template_permission):\n\n self._template_permission = template_permission", "def update_policy_template(self, policy):\n baseURL = self.baseURL + \"policy-templates/{}\".format(policy['policy_id'])\n\n policy['checksum'] = \"\"\n policy['policy_utctimestamp'] = \"\"\n\n data = {}\n data['modelDetails'] = policy\n data['name'] = policy['name']\n data['partnerId'] = self.app_id\n data['policyTemplateId'] = policy['policy_id']\n\n return self._make_request(\"put\",baseURL, data=data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to modify subapplication information. Primary application information cannot be modified.
def ModifySubAppIdInfo(self, request):
    try:
        params = request._serialize()
        headers = request.headers
        body = self.call("ModifySubAppIdInfo", params, headers=headers)
        response = json.loads(body)
        model = models.ModifySubAppIdInfoResponse()
        model._deserialize(response["Response"])
        return model
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            raise TencentCloudSDKException(type(e).__name__, str(e))
[ "def change_subspace(self, cluster_subspace):\n self._subspace = cluster_subspace\n self.update_features()", "def attach_subarray(self, subarray: \"ctapipe.instrument.SubarrayDescription\"):\n self._subarray = subarray\n self._lookup.attach_subarray(subarray)", "def update_from_app_info(self, app_info):\n if self._changeset is None:\n self._changeset = app_info.get('application_changeset')\n if self._repo_url is None:\n self._repo_url = app_info.get('application_repository')", "def CreateSubAppId(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateSubAppId\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateSubAppIdResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def edit_application_metada(self, description, application_id, app_metadata_id):\n params = {'description' : description}\n return self._request('POST', 'rest/applications/' + str(application_id) + '/metadata/' + str(app_metadata_id) + '/update')", "def on_put(self, req, resp, appid):\n mapper = self.meta.get('mapper')\n app = mapper.application.Application.get_by_uid(appid)\n if app is None:\n raise falcon.HTTPInvalidParam('Application not found', 'appid')\n \n body = req.context['body']\n # look for changes to name, description, status, parameters, and data\n if 'name' in body:\n app.set_name(body['name'].strip())\n if 'description' in body:\n app.set_description(body['description'].strip())\n if 'status' in body:\n app.set_status(body['status'].strip())\n if 'jwt_secret' in body:\n app.set_jwt_secret(body['jwt_secret'].strip())\n if 'custom_data' in body and isinstance(body['custom_data'], dict):\n app.set_custom_data(body['custom_data'])\n if 'data' in body and isinstance(body['data'], list):\n # body['data'] = [{'key': 'spam', 'value': 'eggs'}, ...]\n app.set_data(body['data'])\n if 'parameters' in body and isinstance(body['parameters'], list):\n # body['parameters'] = [{'key': 'spam', 'datatype': 'and', 'default': 'eggs', 'description': 'spam and eggs'}, ...]\n app.set_paramameters(body['params'])\n \n app = mapper.application.Application.update_from_object(app)\n resp.body = {\"application\": app.to_dict()}\n return True", "def submsg(self, submsg):\n\n self._submsg = submsg", "def updateApp():\n newConfig = json.loads(request.data)\n logger.info('Method called with: {0}'.format(newConfig))\n\n system = ServiceOrchestrator(CONFIG_FILE)\n ok = system.updateApps(newConfig)\n\n return 'ok'", "def app_data(self, value):\n self._app_data = value", "def test_partial_update_app(self):\n url = reverse(\"apps-detail\", kwargs={\"pk\": self.test_app.id})\n data = {\n \"name\": \"Test App PATCH\",\n }\n response = self.client.patch(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"id\"], self.test_app.id)\n self.assertEqual(response.data[\"name\"], data[\"name\"])", "def sub_family(self, sub_family):\n\n self._sub_family = sub_family", "def test_post_modify_application(self):\n c = Client()\n request = c.post('/GradMaze/accounts/apps/modify/',{'row_id': 'app-'+str(self.application.id),'status':'Accepted'} ,follow=True)\n self.assertNotEqual(self.application,Application.objects.filter(id=self.application.id))", "def test_patch_remove_submodel(self):\r\n # Creating the row that will be updated\r\n data = 
{\r\n 'name': u'Lincoln', 'age': 23,\r\n 'computers': [\r\n {'name': u'lixeiro', 'vendor': u'Lemote'},\r\n {'name': u'pidinti', 'vendor': u'HP'},\r\n ],\r\n }\r\n self.app.post('/api/person', data=dumps(data))\r\n\r\n # Data for the update\r\n update_data = {\r\n 'computers': {\r\n 'remove': [{'name': u'pidinti'}],\r\n }\r\n }\r\n resp = self.app.patch('/api/person/1', data=dumps(update_data))\r\n assert resp.status_code == 200\r\n assert loads(resp.data)['id'] == 1\r\n\r\n # Let's check it out\r\n response = self.app.get('/api/person/1')\r\n loaded = loads(response.data)\r\n assert len(loaded['computers']) == 1", "def _new_subconfig_status(self, change):\n self.check_parameters()", "def set_app(self, item):\n item.project.is_app = item.checkState() == Qt.Checked\n item.project.save(item.path)\n self.sig_apps_updated.emit()", "async def set_submod_role(self, ctx, *, role_name):\n config = hf.database_toggle(ctx, self.bot.db['submod_role'])\n if 'enable' in config:\n del (config['enable'])\n submod_role = discord.utils.find(lambda role: role.name == role_name, ctx.guild.roles)\n if not submod_role:\n await ctx.send(\"The role with that name was not found\")\n return None\n config['id'] = submod_role.id\n await ctx.send(f\"Set the submod role to {submod_role.name} ({submod_role.id})\")\n await hf.dump_json()", "def sub_account_type(self, sub_account_type):\n\n self._sub_account_type = sub_account_type", "def put(self, orgname, client_id):\n permission = AdministerOrganizationPermission(orgname)\n if permission.can() or allow_if_superuser():\n try:\n org = model.organization.get_organization(orgname)\n except model.InvalidOrganizationException:\n raise NotFound()\n\n application = model.oauth.lookup_application(org, client_id)\n if not application:\n raise NotFound()\n\n app_data = request.get_json()\n application.name = app_data[\"name\"]\n application.application_uri = app_data[\"application_uri\"]\n application.redirect_uri = app_data[\"redirect_uri\"]\n application.description = app_data.get(\"description\", \"\")\n application.avatar_email = app_data.get(\"avatar_email\", None)\n application.save()\n\n app_data.update(\n {\"application_name\": application.name, \"client_id\": application.client_id}\n )\n\n log_action(\"update_application\", orgname, app_data)\n\n return app_view(application)\n raise Unauthorized()", "def edit_array(self,\n control_host_id,\n master_config_id,\n value,\n config_update_level,\n level_id,\n array_access_node,\n mode):\n\n copy_level_id = app_level_id = client_level_id = 0\n request_json_service = self.storage_arrays + '/{0}'.format(control_host_id)\n flag, request_json = self._commcell_object._cvpysdk_object.make_request(\n 'GET', request_json_service\n )\n\n if config_update_level == \"array\":\n config_update_level = 3\n elif config_update_level == \"copy\":\n config_update_level = 6\n copy_level_id = level_id\n elif config_update_level == \"subclient\":\n config_update_level = 9\n app_level_id = level_id\n elif config_update_level == \"client\":\n config_update_level = 8\n client_level_id = level_id\n else:\n config_update_level = 3\n\n request_json = request_json.json()\n\n update_dict = {\n \"add\": False,\n \"forceAdd\": False,\n \"assocType\": config_update_level,\n \"copyId\": copy_level_id,\n \"appId\": app_level_id,\n \"clientId\": client_level_id\n }\n request_json.update(update_dict)\n\n if master_config_id is not None:\n for config in request_json['configList']['configList']:\n if config['masterConfigId'] == int(master_config_id):\n 
config['value'] = str(value)\n if config_update_level != \"array\":\n config['isOverridden'] = True\n\n if array_access_node is not None and mode == \"add\":\n client_id = int(self._commcell_object.clients.get(array_access_node).client_id)\n if \"selectedMAs\" in request_json:\n update_dict = {\n \"arrayControllerId\": 0,\n \"mediaAgent\": {\n \"name\": array_access_node,\n \"id\": client_id\n },\n \"arrCtrlOptions\": [\n {\n \"isEnabled\": True,\n \"arrCtrlOption\": {\n \"name\": \"Pruning\",\n \"id\": 262144\n }\n }\n ]\n }\n request_json['selectedMAs'].append(update_dict)\n else:\n update_dict = {\n \"selectedMAs\": [\n {\n \"arrayControllerId\": 0,\n \"mediaAgent\": {\n \"name\": array_access_node,\n \"id\": client_id\n },\n \"arrCtrlOptions\": [\n {\n \"isEnabled\": True,\n \"arrCtrlOption\": {\n \"name\": \"Pruning\",\n \"id\": 262144\n }\n }\n ]\n }\n ]}\n request_json.update(update_dict)\n\n elif array_access_node is not None and mode == \"remove\":\n client_id = int(self._commcell_object.clients.get(array_access_node).client_id)\n if \"selectedMAs\" in request_json:\n for controller in range(len(request_json['selectedMAs'])):\n if request_json['selectedMAs'][controller]['mediaAgent']['id'] == int(client_id):\n del request_json['selectedMAs'][controller]\n break\n\n request_json['configs'] = request_json.pop('configList')\n\n flag, response = self._commcell_object._cvpysdk_object.make_request(\n 'PUT', self.storage_arrays, request_json\n )\n\n if response.json() and 'errorCode' in response.json():\n error_code = response.json()['errorCode']\n\n if error_code != 0:\n if error_code == 1:\n raise SDKException('StorageArray', '101')\n\n error_message = response.json().get('errorMessage', '')\n o_str = 'Failed to update Snap Configs\\nError: \"{0}\"'.format(error_message)\n raise SDKException('StorageArray', '103', o_str)\n else:\n raise SDKException('StorageArray', '103')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to modify a custom transcoding template.
def ModifyTranscodeTemplate(self, request):
    try:
        params = request._serialize()
        headers = request.headers
        body = self.call("ModifyTranscodeTemplate", params, headers=headers)
        response = json.loads(body)
        model = models.ModifyTranscodeTemplateResponse()
        model._deserialize(response["Response"])
        return model
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            raise TencentCloudSDKException(type(e).__name__, str(e))
[ "def custom_template(self, custom_template):\n\n self._custom_template = custom_template", "def set_source_template(template):", "def _update_template(self, content):\r\n t, created = Template.objects.get_or_create(resource=self.resource)\r\n t.content = content\r\n t.save()", "def set_template(self, name: str, template: str) -> None:\n self.custom_templates[name] = template", "def edit_template(self, data: dict) -> None:\n self.add_operation({\n 'op': 'editTemplate',\n 'data': data,\n })", "def UpdateTagTemplateField(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def UpdateTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def template_loaded(self, template):\n self.template = template", "def template_i18n(self, template_i18n):\n self._template_i18n = template_i18n", "def create_or_update_template(inps_dict):\n\n inps = inps_dict\n\n print('\\n*************** Template Options ****************')\n # write default template\n\n print (\"Custom Template File: \", inps.customTemplateFile)\n\n inps.project_name = get_project_name(inps.customTemplateFile)\n print (\"Project Name: \", inps.project_name)\n\n inps.work_dir = get_work_directory(None, inps.project_name)\n print(\"Work Dir: \", inps.work_dir)\n\n # Creates default Template\n inps = create_default_template(inps)\n\n\n return inps", "def RenameTagTemplateField(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def rewrite_template(self, template_name: str):\n operand_to_type_mapping, code = parser.preprocess(template_name)\n annotations: List[Annotations] = parser.parse(operand_to_type_mapping, code)\n rewritten_program: str = self.compiler.apply_substitution(annotations, self.debug)\n return '\\n'.join([f' {line}' for line in rewritten_program.split('\\n')])", "def sub_template(template,template_tag,substitution):\n\n template = template.replace(template_tag,substitution)\n return template", "def override_template(*args, **kwargs):\n utils_override_template(*args, **kwargs)", "def addtemplate(self, name, text):\n\t\tself.context[name] = self.parser.parsetext(name, text)", "def addSyntheticTemplate(self, templates, class_id) -> retval:\n ...", "def apply_template(template, subst):\n\t\n\tif not template:\n\t\tprint 'xx> No template given'\n\t\treturn None\n\t\n\t# apply the known items\n\tapplied = template\n\tfor k, v in subst.iteritems():\n\t\tapplied = re.sub('\\{\\{\\s*' + k + '\\s*\\}\\}', v if v else '', applied)\n\t\n\t# remove unknown items\n\tmatches = re.findall('\\{\\{[^\\}]+\\}\\}', applied)\n\tif matches is not None:\n\t\tfor match in matches:\n\t\t\tapplied = applied.replace(match, '')\n\t\n\treturn applied", "def add_text_template(self, template, mime_subtype='plain'):\n self.template_cache[mime_subtype] = self.env.get_template(template)", "def test_replace_template(self):\n template_sample = (r'a {{templatename '\n r' | accessdate={{Fecha|1993}} '\n r' |atitle=The [[real title]] }}')\n self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',\n ['template'], site=self.site),\n 'X' + template_sample[1:])\n\n template_sample = (r'a {{templatename '\n r' | 1={{a}}2{{a}} '\n r' | 2={{a}}1{{a}} }}')\n 
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',\n ['template'], site=self.site),\n 'X' + template_sample[1:])\n\n template_sample = (r'a {{templatename '\n r' | 1={{{a}}}2{{{a}}} '\n r' | 2={{{a}}}1{{{a}}} }}')\n self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',\n ['template'], site=self.site),\n 'X' + template_sample[1:])\n\n # sf.net bug 1575: unclosed template\n template_sample = template_sample[:-2]\n self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',\n ['template'], site=self.site),\n 'X' + template_sample[1:])", "def __set_template(self):\n cmd.load(self.template, object=self.templateN)\n space = {'residues': list()}\n cmd.iterate(self.templateN, 'residues.append(resi)', space=space)\n residues = sorted([int(r) for r in set(space['residues'])])\n selection = 'resi {}-{} and {} and name CA+C+O+N'\n cmd.select(self.template_start_strand,\n selection.format(residues[0], residues[2], self.templateN))\n cmd.select(self.template_end_strand,\n selection.format(residues[-3], residues[-1],\n self.templateN))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to modify the acceleration region of a domain name on VOD. Note that the acceleration region can only be modified for domain names whose status is `Online`.
def ModifyVodDomainAccelerateConfig(self, request):
    try:
        params = request._serialize()
        headers = request.headers
        body = self.call("ModifyVodDomainAccelerateConfig", params, headers=headers)
        response = json.loads(body)
        model = models.ModifyVodDomainAccelerateConfigResponse()
        model._deserialize(response["Response"])
        return model
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            raise TencentCloudSDKException(type(e).__name__, str(e))
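Because the wrapper above converts every failure into a TencentCloudSDKException, callers typically guard the call as sketched below. The `Domain`, `Area`, and `Status` field names are assumptions for illustration only.

from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.vod.v20180717 import models

def enable_global_acceleration(client, domain):
    req = models.ModifyVodDomainAccelerateConfigRequest()
    # Assumed field names; check the official API reference for the real schema.
    req.Domain = domain
    req.Area = "Global"
    req.Status = "Enabled"
    try:
        return client.ModifyVodDomainAccelerateConfig(req)
    except TencentCloudSDKException as err:
        # The exception carries the service-side error code and message.
        print(f"ModifyVodDomainAccelerateConfig failed: {err}")
        return None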
[ "def update_region_id(self, region_id, **kwargs):\n return self.netbox_con.patch('/dcim/regions/', region_id, **kwargs)", "async def update(self, ctx, region: str):\n try:\n riotapi.set_region(region)\n except ValueError:\n embed = discord.Embed(\n title=\"Error!\",\n description=\"{0} is not a valid region!\".format(region),\n colour=0xCA0147)\n utilities.footer(ctx, embed)\n await ctx.send(\"\", embed=embed)\n return\n db = database.Database('guilds.db')\n try:\n db.find_entry(ctx.guild.id)\n db.update_entry(ctx.guild.id, region)\n db.close_connection()\n embed = discord.Embed(\n title='Success!',\n description=\"Set {0} as {1}'s default region!\".format(region, ctx.guild.name),\n colour=0x1AFFA7)\n utilities.footer(ctx, embed)\n await ctx.send(\"\", embed=embed)\n except TypeError:\n db.close_connection()\n embed = discord.Embed(\n title=\"Error!\",\n description=\"A default region for this server has not been set!\",\n colour=0xCA0147)\n utilities.footer(ctx, embed)\n await ctx.send(\"\", embed=embed)", "def update_protection_zone(new_zone):\n rospy.wait_for_service('UpdateProtectionZone')\n\n try:\n function_UpdateProtectionZone = rospy.ServiceProxy('UpdateProtectionZone', UpdateProtectionZone)\n function_UpdateProtectionZone(new_zone)\n except rospy.ServiceException as e:\n print \"Service call failed : %s\" % e", "def set_region(self, region):\n self._region_name = region", "def update_from_master(self, domain_name):\n domain = Domain.query.filter(Domain.name == domain_name).first()\n if domain:\n headers = {}\n headers['X-API-Key'] = PDNS_API_KEY\n try:\n jdata = utils.fetch_json(urlparse.urljoin(PDNS_STATS_URL, API_EXTENDED_URL + '/servers/localhost/zones/%s/axfr-retrieve' % domain), headers=headers, method='PUT')\n return {'status': 'ok', 'msg': 'Update from Master successfully'}\n except:\n return {'status': 'error', 'msg': 'There was something wrong, please contact administrator'}\n else:\n return {'status': 'error', 'msg': 'This domain doesnot exist'}", "def update_cloud_account_name(self, body: AwsCloudAccountUpdateName) -> Dict:\n\t\troute = f'{AwsCloudAccountConsts.CLOUD_ACCOUNTS.value}/{AwsCloudAccountConsts.NAME.value}'\n\t\treturn self._put(route=route, body=body)", "def endRegion( self, name ):\n assert isinstance( name, str )\n\n assert isinstance( self._str, str )\n assert isinstance( self._nextName, int )\n assert isinstance( self._map, dict )\n\n theCurrentPos = len(self._str)\n\n self._map[ name ].regionIndecies( )[ 1 ] = theCurrentPos", "def test_zone_update_function():\n response = zone.update(domain_name='example.com')\n assert response.success\n\n payload = response.payload\n assert payload['url'] == 'https://api.cloudns.net/dns/update-zone.json'\n assert payload['params']['domain-name'] == 'example.com'", "def replaceResourceQuota(self, **kwargs):\n\n allParams = ['name', 'namespaces', 'body']\n\n params = locals()\n for (key, val) in params['kwargs'].iteritems():\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method replaceResourceQuota\" % key)\n params[key] = val\n del params['kwargs']\n\n resourcePath = '/api/v1beta3/namespaces/{namespaces}/resourcequotas/{name}/status'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'PUT'\n\n queryParams = {}\n headerParams = {}\n formParams = {}\n files = {}\n bodyParam = None\n\n headerParams['Accept'] = 'application/json'\n headerParams['Content-Type'] = '*/*,'\n\n \n\n \n\n \n if ('name' in params):\n replacement = 
str(self.apiClient.toPathValue(params['name']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'name' + '}',\n replacement)\n \n if ('namespaces' in params):\n replacement = str(self.apiClient.toPathValue(params['namespaces']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'namespaces' + '}',\n replacement)\n \n\n \n\n \n if ('body' in params):\n bodyParam = params['body']\n \n\n postData = (formParams if formParams else bodyParam)\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, headerParams, files=files)", "def set_draw_airspaces(self, value, airspaces=[], range_km=None, reload=True):\n if (reload or not value or len(airspaces) == 0) and self.airspaces:\n if OPENAIP_NOTICE in self.crs_text.get_text():\n self.crs_text.set_text(self.crs_text.get_text().replace(f\"{OPENAIP_NOTICE}\\n\", \"\"))\n self.airspaces.remove()\n self.airspacetext.remove()\n self.airspaces = None\n self.airspacetext = None\n self.ax.figure.canvas.mpl_disconnect(self.airspace_event)\n if value and len(airspaces) > 0:\n country_codes = [airspace.split(\" \")[-1] for airspace in airspaces]\n self.draw_airspaces(country_codes, range_km)", "def update_name(self):\n try:\n rc, result = self.request(\"storage-systems/%s/configuration\" % self.ssid, method=\"POST\", data={\"name\": self.name})\n except Exception as err:\n self.module.fail_json(msg=\"Failed to set the storage array name! Array Id [%s]. Error [%s].\" % (self.ssid, to_native(err)))", "def update_region_config(self, body: CloudAccountRegionConfigurationViewModel) -> Dict:\n\t\troute = f'{AwsCloudAccountConsts.CLOUD_ACCOUNTS.value}/{AwsCloudAccountConsts.REGION_CONFIG.value}'\n\t\treturn self._put(route=route, body=body)", "def set_dev_region(self, dev_id, region):\n if any(dev_id == d['id'] for d in self._remove_devs):\n raise ValueError(\"Can not set region of dev_id %s because it \"\n \"is marked for removal\" % (dev_id,))\n self.devs[dev_id]['region'] = region\n self.devs_changed = True\n self.version += 1", "def set_acceleration(self, function):\n self.acceleration_function = function", "def create_region(self, name, slug, **kwargs):\n required_fields = {\"name\": name, \"slug\": slug}\n return self.netbox_con.post('/dcim/regions/', required_fields, **kwargs)", "def setViewportRegion(self, reg: 'SbViewportRegion') -> \"void\":\n return _coin.SoDragger_setViewportRegion(self, reg)", "def edit_region(self, index):\n self.regions[index].edit_lines()", "def update_zone(self, zone, domain, type='master', ttl=None, extra=None):\r\n raise NotImplementedError(\r\n 'update_zone not implemented for this driver')", "def set_domain (self, domain, status):\n if domain not in self.__statuses:\n raise RuntimeError(\"Updated domain: %s is not registered!\" % domain)\n self.__statuses[domain] = status\n if status in (self.OK, self.FAILED, self.RESET):\n stats.add_measurement_end_entry(type=stats.TYPE_DEPLOY_DOMAIN,\n info=\"%s-->%s\" % (domain, status))\n return self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to modify a custom watermarking template. The watermark type cannot be modified.
def ModifyWatermarkTemplate(self, request):
    try:
        params = request._serialize()
        headers = request.headers
        body = self.call("ModifyWatermarkTemplate", params, headers=headers)
        response = json.loads(body)
        model = models.ModifyWatermarkTemplateResponse()
        model._deserialize(response["Response"])
        return model
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            raise TencentCloudSDKException(type(e).__name__, str(e))
[ "def custom_template(self, custom_template):\n\n self._custom_template = custom_template", "def WaterMark(waterMark, markRadius=1, markPosition=[250, 10], markSize=1.0):\n txt=a3DText()\n txt.Text = waterMark\n rep=Show();rep.Visibility=0\n RenameSource('WaterMark',txt)\n Transform2=Transform()\n Transform2.Transform=\"Transform\"\n Transform2.Transform.Scale =[2.0*markSize,2.0*markSize, 1.0*markSize]\n Transform2.Transform.Translate =[markPosition[0], markPosition[1], markRadius]\n rep=Show(Transform2);rep.Visibility=0\n Mark2Sphere = Cart2Spherical(0,Transform2)\n Text_disp=Show()\n Text_disp.DiffuseColor = [0.0, 0.0, 0.0]\n Text_disp.Opacity=0.1", "def tweak_template_permission(self, tweak_template_permission):\n\n self._tweak_template_permission = tweak_template_permission", "def output_watermark_paras(self, output_watermark_paras):\n self._output_watermark_paras = output_watermark_paras", "def set_watermark(ctx, comp, data):\n\tfrom PIL import Image as ImagePIL\n\n\tcache_dir = \".cache\" # store generated images here for reuse\n\tfile_ext = \".png\" # set icon file extension\n\n\tcard = data[\"card\"]\n\ttheme_dir = data[\"dir\"]\n\thas_race = card.race != Race.INVALID\n\tis_premium = data[\"premium\"]\n\tcard_type = data[\"cardtype\"]\n\trace_offset = comp.custom[\"raceOffset\"] # in respect to y coordinate only\n\tset_name = card.card_set.name.lower()\n\n\tif not os.path.isdir(cache_dir):\n\t\tos.makedir(cache_dir)\n\n\t# set the name for the generated image\n\tname = [card_type]\n\tif is_premium:\n\t\tname.append(\"_premium\")\n\tif has_race:\n\t\tname.append(\"_race\")\n\tname.append(\"_\")\n\tname.append(set_name)\n\timage_name = \"\".join(name)\n\timage_path = os.path.join(cache_dir, \"{}{}\".format(image_name, file_ext))\n\n\t# load the data\n\tbase_image = Image(comp.custom[\"image\"])\n\tset_region = Region(\n\t\tcomp.custom[\"region\"][\"x\"],\n\t\tcomp.custom[\"region\"][\"y\"],\n\t\tcomp.custom[\"region\"][\"width\"],\n\t\tcomp.custom[\"region\"][\"height\"])\n\n\t# no icon for core set, but need description plate\n\tif card.card_set == CardSet.CORE:\n\t\tdraw_png_at(\n\t\t\tctx, os.path.join(theme_dir, base_image.assets[\"base\"]),\n\t\t\tbase_image.x, base_image.y, base_image.width, base_image.height)\n\t\treturn\n\n\t# if there is a cached version of the image use it\n\tif os.path.isfile(image_path):\n\t\tdraw_png_at(\n\t\t\tctx, image_path, base_image.x, base_image.y, base_image.width,\n\t\t\tbase_image.height)\n\t\treturn\n\n\t# check the set icon exists\n\tset_icon_path = os.path.join(theme_dir,\n\t\tcomp.custom[\"setIcons\"], \"{}{}\".format(set_name, file_ext))\n\tif not os.path.isfile(set_icon_path):\n\t\tprint(\"Warning: set icon missing for '{}'\".format(set_name))\n\t\treturn\n\n\t# calc set offset within base\n\toffset = {\n\t\t\"x\": set_region.x - base_image.x,\n\t\t\"y\": set_region.y - base_image.y\n\t}\n\t# if a minion has a race, need offset watermark\n\tif has_race:\n\t\toffset[\"y\"] += race_offset\n\n\t# resize the set icon to the correct size\n\tset_org = ImagePIL.open(set_icon_path)\n\tset_resize = set_org.resize((set_region.width, set_region.height), ImagePIL.BILINEAR)\n\tset_img = ImagePIL.new(\"RGBA\",\n\t\t(base_image.width, base_image.height),\n\t\t(0, 0, 0, 0))\n\tset_img.paste(set_resize, (offset[\"x\"], offset[\"y\"]))\n\tset_org.close()\n\tset_resize.close()\n\n\t# open the base image\n\tdescp_img = ImagePIL.open(os.path.join(theme_dir, base_image.assets[\"base\"]))\n\n\t# get the blending attributes\n\tintensity = 
comp.custom[\"blendIntensity\"]\n\ttint = comp.custom[\"tint\"][\"premium\" if is_premium else card_type]\n\ttint = Vector4(tint[\"r\"], tint[\"g\"], tint[\"b\"], tint[\"a\"])\n\tr0_data = set_img.getdata()\n\tr1_data = descp_img.getdata()\n\n\t# check nothing strange happened\n\tassert len(r0_data) == descp_img.width * descp_img.height, \"data size mismatch\"\n\n\tout_data = []\n\t# run the blending algorithm on each pixel pair\n\tfor i in range(len(r0_data)):\n\t\tr0 = rgb_from_bytes(r0_data[i])\n\t\tr1 = rgb_from_bytes(r1_data[i])\n\t\t# speed up by ignoring fully transparent pixels on the set icon\n\t\tif r0.a == 0:\n\t\t\tout_data.append(rgb_to_bytes(r1))\n\t\t\tcontinue\n\t\tr0 = r0 * tint * intensity\n\t\tr2 = r1 * r0 - r1\n\t\tr0 = r2 * r0.a + r1\n\t\tr0.a = 1\n\t\tout_data.append(rgb_to_bytes(r0))\n\n\tout = ImagePIL.new(\"RGBA\", (descp_img.width, descp_img.height))\n\tout.putdata(out_data)\n\tout.save(image_path)\n\n\tdraw_png_at(\n\t\tctx, image_path, base_image.x, base_image.y, base_image.width,\n\t\tbase_image.height)\n\n\tout.close()\n\tdescp_img.close()\n\tset_img.close()", "def prepareWatermark(conn, commandArgs, sizeX, sizeY):\n\n wm_orig_file = commandArgs[\"Watermark\"]\n # get Original File as Image\n wm_file = conn.getObject(\"OriginalFile\", wm_orig_file.id.val)\n wm_data = \"\".join(wm_file.getFileInChunks())\n i = StringIO(wm_data)\n wm = Image.open(i)\n wm_w, wm_h = wm.size\n # only resize watermark if too big\n if wm_w > sizeX or wm_h > sizeY:\n wm = reshape_to_fit(wm, sizeX, sizeY)\n # wm = wm.convert(\"L\")\n return wm", "def set_template(self, name: str, template: str) -> None:\n self.custom_templates[name] = template", "def get_watermark():\n\n oErr = ErrHandle()\n watermark_template = \"seeker/passim_watermark.html\"\n watermark = \"\"\n try:\n # create a watermark with the right datestamp\n context_wm = dict(datestamp=get_crpp_date(get_current_datetime(), True))\n watermark = render_to_string(watermark_template, context_wm)\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"get_watermark\")\n # Return the result\n return watermark", "def UpdateTagTemplateField(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def template_flag(self, template_flag):\n\n self._template_flag = template_flag", "def _update_template(self, content):\r\n t, created = Template.objects.get_or_create(resource=self.resource)\r\n t.content = content\r\n t.save()", "def watermark(image, destination, watermark, scale = 0.1, margin = 0.1, opacity = 1):\n image_name = os.path.basename(image)\n destination_path = os.path.join(destination, image_name)\n im = Image.open(image)\n mark = Image.open(watermark)\n if opacity < 1:\n mark = reduce_opacity(mark, opacity)\n if im.mode != 'RGBA':\n im = im.convert('RGBA')\n # create a transparent layer the size of the image and draw the\n # watermark in that layer.\n layer = Image.new('RGBA', im.size, (0,0,0,0))\n\n # Preserve the aspect ratio\n ratio = min(float(im.size[0]) / mark.size[0], float(im.size[1]) / mark.size[1])\n logo_w = int(mark.size[0] * ratio * scale)\n logo_h = int(mark.size[1] * ratio * scale)\n mark = mark.resize((logo_w, logo_h))\n logo_position = (int((im.size[0] * (1 - margin) - logo_w)), int((im.size[1] * (1 - margin) - logo_h)))\n layer.paste(mark, logo_position)\n\n # composite the watermark with the layer\n Image.composite(layer, im, layer).save(destination_path)", "def 
edit_template(self, data: dict) -> None:\n self.add_operation({\n 'op': 'editTemplate',\n 'data': data,\n })", "def UpdateTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def register_blackbox_template(self, name, lib_template = None, techmap_template = None, parameters = None,\n premap_commands = None):\n if name in self._blackbox_entries:\n raise PRGAInternalError(\"Blackbox template entry '{}' already registered\".format(name))\n return self._blackbox_entries.setdefault(name, YosysTemplateEntry(lib_template, techmap_template, parameters,\n premap_commands))", "def addTemplate(\n self, sources, class_id, object_mask\n ) -> Tuple[retval, bounding_box]:\n ...", "def insert_watermark(imagename, wmname, newimagename):\n if not os.path.exists(imagename):\n return (\"Error\", \"File {0} not found\".format(imagename))\n\n if not os.path.exists(wmname):\n return (\"Error\", \"File {0} not found\".format(wmname))\n\n wm = _watermark2bin(wmname)\n wm_len = len(wm)\n\n image = Image.open(imagename)\n pixels = image.load()\n x, y = image.size\n availible_size = x * y / 8\n\n if wm_len > availible_size:\n return (\"Error\", \"It's needed {0} bits but only {1} present.\".format(\n wm_len, availible_size))\n\n indexes = product(range(x), range(y))\n mse = 0.0\n snr = 0.0\n\n for m in wm:\n i = next(indexes)\n blue = pixels[i][2]\n lbit = bin(blue)[-1]\n\n if \"0\" == m:\n if \"1\" == lbit:\n blue -= 1\n elif \"1\" == m:\n if \"0\" == lbit:\n blue += 1\n\n # metrics here\n _mse = (int(lbit) - int(bin(blue)[-1])) ** 2\n mse += _mse\n try:\n snr += int(lbit) ** 2 / _mse\n except ZeroDivisionError:\n snr += int(lbit) ** 2\n\n pixels[i] = (pixels[i][0], pixels[i][1], blue)\n\n image.save(newimagename)\n\n return (round(mse / availible_size, 4), round(10 * log10(snr), 4))", "def set_low_watermark(self, low_watermark):\n\n self.send_qwctl_command('set low_watermark %d' % low_watermark,\n ['low_watermark must be between'])", "def set_source_template(template):", "def setTemplateAttribute(self, value, name):\n try:\n self.setBasicAttribute(value, name)\n except AttributeError:\n raise NotImplementedError(\"cannot set '%s' attribute\"%name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to remove watermarks from a video.
def RemoveWatermark(self, request):
    try:
        params = request._serialize()
        headers = request.headers
        body = self.call("RemoveWatermark", params, headers=headers)
        response = json.loads(body)
        model = models.RemoveWatermarkResponse()
        model._deserialize(response["Response"])
        return model
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            raise TencentCloudSDKException(type(e).__name__, str(e))
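Watermark removal is typically a long-running job on the VOD side rather than a synchronous edit. The sketch below shows one way a caller might start the job and poll for completion; the `FileId` parameter, the `TaskId` response field, the `DescribeTaskDetail` call, and the status values are all assumptions about the service's async-task pattern, not guaranteed by the snippet above.

import time

from tencentcloud.vod.v20180717 import models

def remove_watermark_and_wait(client, file_id, timeout_s=600, poll_s=5):
    req = models.RemoveWatermarkRequest()
    req.FileId = file_id  # assumed parameter name
    task_id = client.RemoveWatermark(req).TaskId  # assumed response field

    deadline = time.time() + timeout_s
    while time.time() < deadline:
        detail_req = models.DescribeTaskDetailRequest()
        detail_req.TaskId = task_id
        detail = client.DescribeTaskDetail(detail_req)
        if detail.Status in ("FINISH", "ABORTED"):  # assumed terminal states
            return detail
        time.sleep(poll_s)
    raise TimeoutError(f"watermark removal task {task_id} did not finish in {timeout_s}s")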
[ "def remove_marking(markable, marking):\n if not is_marked(markable):\n return\n\n markable.__datamarkings__.remove(marking)", "def removeVerticalMarker(self, id):\n del self.verticalmarkers[id]", "def remove_offsets(self, robust=None):\n self.remove_drifts(target_frame_resolution=self.size, robust=robust)", "def RemoveOverlayTags(self):\n\n def RemoveCallback(dataset, data_element):\n \"\"\"Internal method to use as callback to walk() method.\"\"\"\n if (data_element.tag.group & 0xFF00 == 0x6000):\n # if (data_element.tag.group & 0x00FF == 0x0020): #for test\n # can't del self[tag] - won't be right dataset on recursion\n del dataset[data_element.tag]\n\n self.modified_dataset.walk(RemoveCallback)", "def remove_markings(markable, markings):\n for marking in markings:\n remove_marking(markable, marking)", "def remove_rain(settings, screen, raindrops):\n for raindrop in raindrops:\n if raindrop.rect.y >= settings.screen_height:\n raindrops.remove(raindrop)", "def removePointMarker(self, id):\n del self.pointmarkers[id]", "def _erase_marks(self, view):\r\n\r\n erase_lint_marks(view)", "def delete_ext_clip_ply(self, ext_ply):\n self._delete_ext_clip_ply(ext_ply)", "def remove_mask(context):\n\n anim_offset = context.scene.animaide.anim_offset\n blends_action = bpy.data.actions.get('animaide')\n blends_curves = getattr(blends_action, 'fcurves', None)\n\n anim_offset.mask_in_use = False\n if blends_curves is not None and len(blends_curves) > 0:\n blends_curves.remove(blends_curves[0])\n # reset_timeline_mask(context)\n\n return", "def remove_faces(self):\n exif_json.save(self.path, None)", "def clear_annotations(self):\n self.annotations.clear()\n self.annotated_img_ids.clear()", "def removeLineMarker(self, id):\n del self.linemarkers[id]", "def _draw_watermarks(self):\n for wm in self.watermarks:\n wm.draw(self.figure)", "def remove_blacklist(self,blacklist,eos):\n for t in self.instances:\n removed_features = []\n for i,feature in enumerate(t.features[0]):\n parts = feature.split(\"_\")\n for term in blacklist:\n match = False\n for p in parts:\n if re.search(term,p,re.IGNORECASE):\n match = True\n if match:\n removed_features.append(i)\n break\n if eos:\n offset = 0\n for index in removed_features:\n index -= offset\n if re.search(\"_<s>\",t.features[0][index]):\n pre_last_feature = t.features[0][index-1]\n parts = pre_last_feature.split(\"_\")\n if len(parts) == 2:\n new_feature = parts[-1]\n else: \n new_feature = \"_\".join(parts[-2:])\n new_feature = new_feature + \"_<s>\"\n t.features[0][index] = new_feature\n else:\n del(t.features[0][index])\n offset += 1\n else:\n for offset,index in enumerate(removed_features):\n index -= offset\n del(t.features[0][index])", "def annotate_video(self):\n self.frame_count = 0\n video = VideoFileClip(self.video_file)\n annotated_video = video.fl_image(self.detect)\n annotated_video.write_videofile(self.output_file, audio=False)", "def SoMarkerSet_removeMarker(idx: 'int') -> \"SbBool\":\n return _coin.SoMarkerSet_removeMarker(idx)", "def delete_thumbnails(self, fieldname='image'):\n an = IAnnotations(self.context)\n an_key = \"%s.%s\" % (self.annotation_prefix, fieldname)\n an[an_key] = None", "def clear_markings(markable):\n if is_marked(markable):\n markable.__datamarkings__ = set()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructor for a SafeEval instance with an optional mapping of function names to callables
def __init__(self, allowedCallables: typing.Union[None, typing.Dict[str, typing.Any]] = None):
    if allowedCallables is not None:
        self.allowedCallables = allowedCallables
    else:
        self.allowedCallables = dict()
    self.nodes: Dict[ast.AST, Callable[[ast.AST, Dict[str, Any]], Any]] = {
        ast.Call: self.callNode,
        ast.Compare: self.compareNode,
        ast.Name: lambda node, names: names[node.id],
        ast.Constant: lambda node, _: node.n,
        ast.Num: lambda node, _: node.n,
        ast.Str: lambda node, _: node.s,
        ast.JoinedStr: lambda node, names: [self.execute(x, names) for x in node.values],
        ast.Subscript: lambda node, names: self.execute(node.value, names)[
            self.execute(node.slice, names)],
        ast.Index: lambda node, names: self.execute(node.value, names),
        ast.BoolOp: lambda node, names: (all if isinstance(node.op, ast.And) else any)(
            [self.execute(x, names) for x in node.values]),
        ast.UnaryOp: lambda node, names: self.unaryOpMap[type(node.op)](
            self.execute(node.operand, names)),
        ast.BinOp: lambda node, names: self.dualOpMap[type(node.op)](
            self.execute(node.left, names), self.execute(node.right, names)),
        ast.IfExp: lambda node, names: self.execute(node.body, names)
            if self.execute(node.test, names)
            else self.execute(node.orelse, names),
    }
    self.unaryOpMap: Dict[ast.AST, Callable[[Any], Any]] = {
        ast.Not: lambda x: not x,
        ast.USub: lambda x: -x,
        ast.UAdd: lambda x: +x,
    }
    self.dualOpMap: Dict[ast.AST, Callable[[Any, Any], Any]] = {
        ast.Eq: lambda x, y: x == y,
        ast.NotEq: lambda x, y: x != y,
        ast.Gt: lambda x, y: x > y,
        ast.GtE: lambda x, y: x >= y,
        ast.Lt: lambda x, y: x < y,
        ast.LtE: lambda x, y: x <= y,
        ast.In: lambda x, y: x in y,
        ast.NotIn: lambda x, y: x not in y,
        ast.Sub: lambda x, y: x - y,
        ast.Add: lambda x, y: x + y,
        ast.Mult: lambda x, y: x * y,
        ast.Div: lambda x, y: x / y,
    }
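A short usage sketch for the constructor above. It assumes the class is named SafeEval, that the safeEval entry point shown in the next row is available, and that callNode resolves function names through allowedCallables; that last detail is not visible in this snippet.

# Hypothetical whitelist of callables exposed to the expression language.
evaluator = SafeEval(allowedCallables={"len": len, "min": min, "abs": abs})

names = {"values": [3, -1, 4]}
# Call, Compare and BoolOp nodes are all covered by the dispatch table above.
print(evaluator.safeEval("len(values) >= 2 and min(values) < 0", names))  # True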
[ "def __init__(self, func: Callable[[Any], None]):\n\n if not callable(func):\n raise ValueError('func must be callable')\n\n self._callable = func", "def __init__(self, columns, func):\n # Ensure that columns is a list.\n self.columns = as_list(columns)\n # Ensure that the function is a value function.\n if not isinstance(func, ValueFunction):\n # Instantiate the function if a class object is given\n if isinstance(func, type):\n func = func()\n func = CallableWrapper(func=func)\n self.func = func", "def __init__(self, expr, f, sub_evaluators=None):\n super(TrivialEvaluator, self).__init__(expr, sub_evaluators=sub_evaluators)\n if callable(f):\n f = (f,)\n self._f = f[0]\n self._derivs = f", "def __init__(self, expr, factory, sub_evaluators=None):\n super(EvaluatorFactory, self).__init__(expr, sub_evaluators=sub_evaluators)\n if not callable(factory):\n raise TypeError(\"`factory` argument must be callable.\")\n ## The given factory function.\n self._factory = factory\n ## Cached callables created by the factory.\n self._funcs = []\n self.function()", "def __init__(self, compute_fun):\n assert callable(compute_fun)\n self.__comput_fun = compute_fun\n self.__values = {}", "def __init__(self, func, *args, **kwargs):\n\n super(FunctionCondition, self).__init__()\n self._func = func\n self._args = args\n self._kwargs = kwargs", "def set_eval_functions(self):\n def undefined_catcher(func, x, y):\n try:\n return func(x, y)\n except Exception:\n return np.nan\n\n # Functions that take probs as input\n self.summary_metrics = {\n 'AUPRC': lambda x, y: undefined_catcher(sk_metrics.average_precision_score, x, y),\n 'AUROC': lambda x, y: undefined_catcher(sk_metrics.roc_auc_score, x, y),\n 'log_loss': lambda x, y: undefined_catcher(sk_metrics.log_loss, x, y),\n }\n\n # Functions that take binary values as input\n self.point_metrics = {\n 'accuracy': lambda x, y: undefined_catcher(sk_metrics.accuracy_score, x, y),\n 'precision': lambda x, y: undefined_catcher(sk_metrics.precision_score, x, y),\n 'recall': lambda x, y: undefined_catcher(sk_metrics.recall_score, x, y),\n }\n\n self.curve_metrics = {\n 'PRC': lambda x, y: undefined_catcher(sk_metrics.precision_recall_curve, x, y),\n 'ROC': lambda x, y: undefined_catcher(sk_metrics.roc_curve, x, y),\n }", "def safe_to_eval(func):\n _type_map[_get_prototype(func)] = (\n lambda f, *args, **kwargs: f(*args, **kwargs))\n return func", "def __init__(self, dispatcher, func: callable, config:dict, specs:dict):\n self._func = func\n self._dispatcher = dispatcher\n self.setup(config)\n for param_name, spec in specs.items():\n self._add_from_decorator(param_name, spec)\n self._dispatcher.validate()", "def __init__(self, callable_object, arguments = (), keywords = None):\n\n if not is_callable (callable_object):\n raise TypeError (\"'callable_object' must be callable\")\n\n # This raises `TypeError' if `arguments' or `keywords' type is inappropriate.\n arguments = tuple (arguments)\n if keywords is None:\n keywords = frozendict.EMPTY\n # Note: not isinstance, subclasses might become modifiable again.\n elif type (keywords) is not frozendict:\n keywords = frozendict (keywords)\n\n super (Binding, self).__init__()\n\n if isinstance (callable_object, BindingCompatibleTypes):\n if _PY3K:\n self._object = callable_object.__self__\n self._function = callable_object.__func__\n self._class = type (self._object)\n else:\n self._object = callable_object.im_self\n self._function = callable_object.im_func\n self._class = callable_object.im_class\n else:\n self._object = None\n 
self._function = callable_object\n self._class = None\n\n self._arguments = arguments\n self._keywords = keywords", "def __init__(self, predicate: EvalFunction):\n self.predicate = predicate", "def safeEval(self, expr: str, names: Dict[str, Any]) -> Any:\n\t\treturn self.execute(self.compile(expr), names)", "def evaluator(*, requires):\n def wrapper(function):\n return EvaluatorMeta(function.__name__, (Evaluator,), {'evaluate': staticmethod(function), 'requires': requires})\n return wrapper", "def __init__(self, stack, fn_name, args):\r\n super(Function, self).__init__()\r\n self.stack = stack\r\n self.fn_name = fn_name\r\n self.args = args", "def __init__(self, func):\n self.func = func", "def __init__(self, *args, **kwargs):\n assert not kwargs.get('key_name'), (\n 'Setting key_name manually not supported')\n evaluator = kwargs.get('evaluator')\n submission_key = kwargs.get('submission_key')\n assert evaluator, 'Missing required evaluator property'\n assert submission_key, 'Missing required submission_key property'\n kwargs['key_name'] = self.key_name(submission_key, evaluator)\n super(ManualEvaluationStep, self).__init__(*args, **kwargs)", "def __init__(self, function, cache=None):\n if cache:\n self.cache = cache\n else:\n self.cache = LRUCacheDict()\n self.function = function\n self.__name__ = self.function.__name__", "def __new__(cls, function, parameter_names, deepness, parent):\n command, parameter_converters = get_application_command_parameter_auto_completer_converters(function)\n \n name_pairs = frozenset((name, raw_name_to_display(name)) for name in set(parameter_names))\n \n if parent is None:\n parent_reference = None\n else:\n parent_reference = parent._get_self_reference()\n \n self = object.__new__(cls)\n \n self._command_function = command\n self._parameter_converters = parameter_converters\n self.name_pairs = name_pairs\n self.deepness = deepness\n self._parent_reference = parent_reference\n self._exception_handlers = None\n \n return self", "def __call__(self, *params):\n return Function(name=self.name, params=params)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Safely evaluate an expression. If you want to evaluate the same expression multiple times with different variables, use compile to generate the AST once and call execute for each set of variables.
def safeEval(self, expr: str, names: Dict[str, Any]) -> Any:
    return self.execute(self.compile(expr), names)
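The description above suggests compiling once when the same expression is evaluated against many variable sets; a minimal sketch of that pattern, assuming compile returns the parsed AST that execute accepts:

evaluator = SafeEval()
tree = evaluator.compile("price * qty if qty > 0 else 0")

rows = [{"price": 2.5, "qty": 4}, {"price": 9.0, "qty": 0}]
totals = [evaluator.execute(tree, row) for row in rows]
print(totals)  # [10.0, 0]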
[ "def evaluate(expr, locals):", "def safe_eval_custom(expr, globals_dict=None, locals_dict=None, mode=\"eval\", nocopy=False, locals_builtins=False):\n if type(expr) is CodeType:\n raise TypeError(\"safe_eval does not allow direct evaluation of code objects.\")\n\n # prevent altering the globals/locals from within the sandbox\n # by taking a copy.\n if not nocopy:\n # isinstance() does not work below, we want *exactly* the dict class\n if (globals_dict is not None and type(globals_dict) is not dict) \\\n or (locals_dict is not None and type(locals_dict) is not dict):\n _logger.warning(\n \"Looks like you are trying to pass a dynamic environment, \"\n \"you should probably pass nocopy=True to safe_eval().\")\n if globals_dict is not None:\n globals_dict = dict(globals_dict)\n if locals_dict is not None:\n locals_dict = dict(locals_dict)\n\n if globals_dict is None:\n globals_dict = {}\n\n globals_dict['__builtins__'] = _BUILTINS\n if locals_builtins:\n if locals_dict is None:\n locals_dict = {}\n locals_dict.update(_BUILTINS)\n c = test_expr(expr, _SAFE_OPCODES, mode=mode)\n try:\n return unsafe_eval(c, globals_dict, locals_dict)\n except odoo.exceptions.except_orm:\n raise\n except odoo.exceptions.Warning:\n raise\n except odoo.exceptions.RedirectWarning:\n raise\n except odoo.exceptions.AccessDenied:\n raise\n except odoo.exceptions.AccessError:\n raise\n except werkzeug.exceptions.HTTPException:\n raise\n except odoo.http.AuthenticationError:\n raise\n except OperationalError:\n # Do not hide PostgreSQL low-level exceptions, to let the auto-replay\n # of serialized transactions work its magic\n raise\n except odoo.exceptions.MissingError:\n raise\n except NameError:\n raise\n except Exception as e:\n exc_info = sys.exc_info()\n pycompat.reraise(ValueError, ValueError('%s: \"%s\" while evaluating\\n%r' % (ustr(type(e)), ustr(e), expr)), exc_info[2])", "def do_eval(expr, context):\n return eval(expr, context.vals)", "def evaluate(expr,**bindings):\n expr = expr.replace(\" \", \"\")\n paren, lst, lst_op = 0, -1, None\n #finds the last operator to be evaluated.\n for i in range(len(expr)):\n if expr[i] == \"(\":\n paren = paren + 1\n elif expr[i] == ')':\n paren = paren - 1\n else:\n s = op_by_symbol(expr[i])\n if s is None or paren != 0:\n continue\n elif lst == -1:\n lst = i\n lst_op = s\n elif s < lst_op:\n lst = i\n lst_op = s\n\n if lst_op is None:\n #if there were no operators found, make sure the expr was not wrapped in ()\n if expr[0] == '(' and expr[len(expr) - 1] == \")\":\n return evaluate(expr[1: len(expr) - 1], **bindings)\n else:#if not in (), this must be a variable\n return bindings[expr]\n elif lst_op == Operators.NOT:#otherwise, evaluate the operator.\n return lst_op(evaluate(expr[lst + 1:], **bindings))\n else:\n return lst_op([evaluate(expr[:lst], **bindings),evaluate(expr[lst + 1:], **bindings)])", "def eval_variables(exprs, df, locals_d=None):\n if locals_d is None:\n locals_d = {}\n locals_d.update(locals())\n\n def to_series(x):\n if np.isscalar(x):\n return pd.Series([x] * len(df), index=df.index)\n return x\n\n l = []\n # need to be able to identify which variables causes an error, which keeps\n # this from being expressed more parsimoniously\n for e in exprs:\n try:\n l.append((e, to_series(eval(e[1:], globals(), locals_d))\n if e.startswith('@') else df.eval(e)))\n except Exception as err:\n logger.exception(\"Variable evaluation failed for: %s\" % str(e))\n raise err\n\n return pd.DataFrame.from_items(l)", "def safe_eval_ast(cg, node, name, lineno, 
local_names):\n if has_list_comp(node):\n expr_cg = CodeGenerator()\n expr_cg.filename = cg.filename\n expr_cg.name = name\n expr_cg.firstlineno = lineno\n expr_cg.set_lineno(lineno)\n expr_cg.insert_python_expr(node, trim=False)\n call_args = expr_cg.rewrite_to_fast_locals(local_names)\n expr_code = expr_cg.to_code()\n cg.load_const(expr_code)\n cg.make_function()\n for arg in call_args:\n if arg in local_names:\n cg.load_fast(arg)\n else:\n cg.load_global(arg)\n cg.call_function(len(call_args))\n else:\n expr_cg = CodeGenerator()\n expr_cg.insert_python_expr(node)\n expr_cg.rewrite_to_fast_locals(local_names)\n cg.code_ops.extend(expr_cg.code_ops)", "def eval(self, expr):\n self.log_screen()\n self.logger.info(\"eval «%s»\", expr)\n return self.nvim.eval(expr)", "def evaluate(expr: Union[sympy.Basic, int, float],\n symbols: Dict[Union[symbol, str], Union[int, float]]) -> \\\n Union[int, float, numpy.number]:\n if isinstance(expr, list):\n return [evaluate(e, symbols) for e in expr]\n if isinstance(expr, tuple):\n return tuple(evaluate(e, symbols) for e in expr)\n if isinstance(expr, SymExpr):\n return evaluate(expr.expr, symbols)\n if issymbolic(expr, set(map(str, symbols.keys()))):\n raise TypeError(f'Symbolic expression \"{expr}\" cannot be evaluated to a constant')\n if isinstance(expr, (int, float, numpy.number)):\n return expr\n\n # Evaluate all symbols\n syms = {(sname if isinstance(sname, sympy.Symbol) else symbol(sname)):\n sval.get() if isinstance(sval, symbol) else sval\n for sname, sval in symbols.items()}\n\n # Filter out `None` values, callables, and iterables but not strings (for SymPy 1.12)\n syms = {\n k: v\n for k, v in syms.items() if not (v is None or isinstance(v, (Callable, Iterable))) or isinstance(v, str)\n }\n # Convert strings to SymPy symbols (for SymPy 1.12)\n syms = {k: sympy.Symbol(v) if isinstance(v, str) else v for k, v in syms.items()}\n\n return expr.subs(syms)", "def _evalExpression(self):\n value = self.expressionVar.get().strip()\n if value:\n for point in self.data:\n if point.eval(value):\n point.setState(Point.DISCARDED)", "def evaluate(\n expression: str, runtime_context: Optional[Dict[str, Any]] = None\n ) -> EvaluatedValue:\n\n value = eval(expression, runtime_context)\n value_str = repr(value)\n type_str = type(value).__name__\n\n to_be_expanded = list()\n\n if ExpressionEvaluator._has_attributes(value):\n to_be_expanded += ExpressionEvaluator._extract_attributes(value)\n\n if ExpressionEvaluator._is_iterable(value):\n if ExpressionEvaluator._is_generator(value):\n to_be_expanded += ExpressionEvaluator._extract_generator_locals(value)\n elif ExpressionEvaluator._is_iterator(value):\n pass\n else:\n to_be_expanded += ExpressionEvaluator._extract_container_items(value)\n\n return EvaluatedValue(\n value=TypedValue(\n expression=expression,\n value_ref=expression,\n value_str=value_str,\n value_type=type_str,\n expandable=ExpressionEvaluator._is_expandable(value),\n ),\n attributes=to_be_expanded,\n )", "def evaluate(tree, env=None):\n #base case\n if tree == []:\n raise EvaluationError\n #default env\n if env == None:\n env = {}\n #copy global env\n for op in carlae_builtins:\n if op not in env:\n env[op] = carlae_builtins[op]\n #primitives\n if type(tree) == float or type(tree) == int:\n return tree\n #evaluate-able expression\n elif type(tree) == list:\n #define -> evaluate and store in env\n if tree[0] == 'define':\n if type(tree[1]) == list:\n func = evaluate(['define', tree[1][0], ['lambda', tree[1][1:], tree[2]]], env)\n return func\n 
env[tree[1]] = evaluate(tree[2], env)\n return env[tree[1]]\n #lambda -> create function and return function object\n elif tree[0] == 'lambda':\n def fn(args): \n #create function environment, inherits superenvironment\n fn_env = {}\n for op in env:\n fn_env[op] = env[op]\n #define variables as parameters\n for ind, param in enumerate(tree[1]):\n fn_env[param] = args[ind]\n return evaluate(tree[2], fn_env)\n #fn_env[fn] = evaluate(fn, args?) ??????\n return fn\n #if -> ternary statement\n elif tree[0] == 'if':\n if evaluate(tree[1], env) == '#t':\n return evaluate(tree[2], env)\n return evaluate(tree[3], env)\n #separate definitions for on-the-fly evaluation for short-circuiting purposes\n #cannot be built-ins because function calling preemptively evaluates all parameters\n elif tree[0] == 'and':\n return bools[custom_reduc(lambda x, y: evaluate(x, env) == '#t', tree[1:]) and evaluate(tree[1:][-1], env) == '#t']\n elif tree[0] == 'or':\n return bools[not (custom_reduc(lambda x, y: evaluate(x, env) == '#f', tree[1:])) or evaluate(tree[1:][-1], env) == '#t']\n #call function\n elif type(evaluate(tree[0], env)) == type(lambda x: x) or type(evaluate(tree[0], env)) == type(sum):\n func = evaluate(tree[0], env)\n params = list(map(lambda x: evaluate(x, env), tree[1:]))\n return evaluate(func(params), env)\n raise EvaluationError\n #function object 'primitive'\n elif type(tree) == type(lambda x: x):\n return tree\n #variable lookup\n elif tree in env:\n return env[tree]\n #boolean primitive\n if tree == '#f' or tree == '#t':\n return tree\n #undefined evaluation\n raise EvaluationError", "def evaluate_compiled(self, env):\n try:\n value = self.real_evaluate_compiled(env)\n except NotImplementedError:\n self.evaluate_compiled = lambda env: self.evaluate(env).value\n value = self.evaluate_compiled(env)\n else:\n self.evaluate_compiled = self.real_evaluate_compiled\n return value", "def evaluate_strings_with_given_variables(_strs_to_execute, _variable_dict=None):\n if _variable_dict is None:\n _variable_dict = {}\n if not isinstance(_strs_to_execute, list):\n _got_list_of_constraints = False\n _strs_to_execute = [_strs_to_execute]\n else:\n _got_list_of_constraints = True\n for _key, _value in _variable_dict.items():\n locals()[_key] = _value\n _ret = [eval(_elem) for _elem in _strs_to_execute]\n if _got_list_of_constraints:\n return _ret\n else:\n return _ret[0]", "def evaluate(self):\n self.arithmeticInorder()\n return eval(self._expression)", "def eval(self,\n expr,\n with_modules=None,\n preimport_stmts=[],\n python_args=None):\n statements = []\n\n if preimport_stmts:\n assert isinstance(preimport_stmts, (list, tuple))\n statements.extend(preimport_stmts)\n \n if with_modules:\n assert isinstance(with_modules, (list, tuple))\n statements.extend(\n ['import %s' % mod for mod in with_modules]\n )\n \n if python_args is None:\n python_args = ['-B'] # no bytecode generation\n assert isinstance(python_args, (tuple, list))\n \n if not isinstance(expr, (list, tuple)):\n expr = [expr]\n for e in expr:\n statements.append('print({0})'.format(e))\n \n cmd = [self.python_exe] + python_args + [\n '-s', '-c', ';'.join(statements)\n ]\n\n return sh.run(cmd)[0].decode('utf8')", "def eval_all_poss(expr):\n var = list(get_vars(expr))\n r = []\n for i in all_t_f(len(var)):\n binds = {var[j] : i[j] for j in range(len(i))}\n r += [(binds, evaluate(expr, **binds))]\n return r", "def evaluate(self,state,args,context=None):\n loc = {'_state_':state,\n '_args_':args}\n for s in self.state_deps:\n if s not in state:\n 
state[s] = None\n try:\n return eval(self.parsed_expr,context.__dict__,loc)\n except KeyError as e:\n return None\n except Exception as e:\n print \"Exception thrown when evaluating expression:\",e\n print type(e).__name__\n print self.source_string\n raise", "def evaluate(tree, environment = None):\n if environment is None:\n \n environment = Environments(parent = Snek)\n \n if type(tree) == float or type(tree) == int:\n \n return tree\n \n if type(tree) != list and type(tree) == str:\n \n value = environment[tree]\n \n return value\n \n if tree[0] == 'lambda':\n \n function = Functions(tree[1], tree[2], environment)\n \n return function\n \n elif tree[0] == 'define':\n \n if len(tree) != 3:\n \n raise SnekEvaluationError\n \n if type(tree[1]) != list:\n \n environment[tree[1]] = evaluate(tree[2], environment)\n \n return environment[tree[1]]\n \n else:\n \n function = Functions(tree[1][1:], tree[2], environment)\n \n environment[tree[1][0]] = function\n \n return environment[tree[1][0]]\n \n else:\n \n try:\n \n final_tree = []\n \n variables = tree[1:]\n \n for index, item in enumerate(variables):\n \n final_tree.append(evaluate(variables[index], environment))\n \n function = evaluate(tree[0], environment)\n \n if not callable(function): \n \n raise SnekEvaluationError\n \n return function(final_tree)\n \n except NameError:\n \n raise SnekNameError", "def eval(self, expr):\n\n local_variables = {m.__name__: m for m in EXPORTED_MODULES}\n\n value = yield\n\n for idx in range(len(self.view.sel())):\n local_variables.update({\n 'i': idx,\n 'l': len(value),\n 'v': value,\n })\n\n value = yield str(expr).format(**local_variables)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Warns the user that a forbidden name has been found.
def warn_forbidden_name(forname, inname=None, rename=None): msg = "found forbidden name {0!r}".format(forname) if inname is not None: msg += " in {0!r}".format(inname) if rename is not None: msg += ", renaming to {0!r}".format(rename) warn(msg, RuntimeWarning)
[ "async def _delbadname(self, ctx: commands.Context, badname: str):\n async with self.config.guild(ctx.guild).badnames() as badnames:\n if badname in badnames:\n badnames.remove(badname)\n await ctx.send(f\"{badname} has been removed from the blacklist.\")\n else:\n await ctx.send(\"That name doesnt exist\")", "def disableIncorrectNameWarning():\n pass", "def ignore(self,name):\n self._send_to_all( Connection.ignore, name)", "def thb_remove_restrict(self, chat_id, member_id, member_name):\n\n bot = self.bot\n logging.info('User is human')\n bot.restrict_chat_member(\n chat_id, member_id,\n can_send_messages=True,\n can_send_media_messages=True,\n can_send_other_messages=True,\n can_add_web_page_previews=True,\n )\n bot.send_message(\n chat_id,\n BOT_MSGS['allowed'].format(member_name),\n parse_mode=ParseMode.HTML,\n )", "def fallback_403(request):\n return django.http.HttpResponseForbidden(\n \"\"\"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">\n<html><head>\n<title>403 %(title)s</title>\n</head><body>\n<h1>%(title)s</h1>\n<p>%(text)s</p>\n<hr>\n</body></html>\"\"\" % {'title': _(u\"Forbidden\"), 'text': _(u\"You don't have permission to access %s on this server.\") % request.path})", "def denied(self):\r\n popup = tk.messagebox.showerror(\"Access Denied!\", \"Wrong Password\")", "def name_taken(self, username: str) -> bool:\n\t\treturn username in self.__user", "def test_UserFormViewPermissionForUser(self):\n response = self.app.get(\n reverse('employees:UserFormView', args=[\"aaron.snow\"]),\n headers={'X_AUTH_USER': 'regular.user@gsa.gov'},\n status=403)\n self.assertEqual(response.status_code, 403)", "def reject_anonymous(obj, request):\n if api.user.is_anonymous():\n portal = api.portal.get()\n portal_path = portal.getPhysicalPath()\n physical_path = request.physicalPathFromURL(request['URL'])\n url = physical_path[len(portal_path):]\n if url[-1] == 'index_html':\n url.pop()\n item_id = url[0]\n if not item_id.startswith(ANON_WHITE_LISTED):\n raise Unauthorized('Anonymous rejected')", "def name_sanity_check(self, name, nametype):\n reason = None\n if not name.isalnum():\n reason = \"Please choose a {} name that consists of alphanumeric characters.\\n\".format(nametype)\n if len(name) > Chat.MAXLEN_NAME:\n reason = \"The {} name can't be longer than {} characters.\\n\".format(nametype, Chat.MAXLEN_NAME)\n return reason", "def testLoneUserAccessDenied(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n\n response = self.get(self.url)\n self.assertResponseForbidden(response)", "def check_forbidden(self, packet):\n # Check if any forbidden words appear in the packet payload\n for keyword in self.forbidden:\n if keyword in self.get_payload(packet):\n self.logger.debug(\"Packet triggered censor: \" + layers.packet.Packet._str_packet(packet))\n return True\n return False", "def _mock_disallow(func_name: str):\n\n raise Exception(\"The use of function {} is not allowed.\".format(func_name))", "def NameChangeWarning(self, oldName, newName):\n dlg = wx.MessageDialog(None,\n \"The name %s already existed\" %(oldName) + \\\n \" in the list.\\n\" + \\\n \"Your entry was given the\" + \\\n \" name %s instead.\" %(newName),\n \"NOTE: Name Changed\",\n wx.OK)\n dlg.ShowModal()\n dlg.Destroy()", "def tellMeWhyNot(self):\n return \"You can't reach through the glass box.\"", "def checkNormalUsernameForAdd(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"normal_username\",\"current_username\")\n 
request.getAuthNameObj().canChangeNormalAttrs(None)\n usernames=self.__filterCurrentUsernames(request)\n bad_usernames=filter(lambda username: not _checkNormalUsernameChars(username),usernames)\n exist_usernames=normalUsernameExists(usernames)\n return self.__createCheckAddReturnDic(bad_usernames,exist_usernames)", "def send_deny_notification(self):\n if self.uploader.email:\n link = \"\".join([\"http://\", Site.objects.get_current().domain, self.get_absolute_url()])\n message = render_to_string('email/video_denied.txt', {\n 'video': self,\n 'link': link,\n 'user': self.uploader\n })\n subject = \"Video denied\"\n self.uploader.email_user(subject, message)", "async def bother(self, ctx):\n bother_role = \"But Nobody Bother Buko\"\n role = discord.utils.get(ctx.message.author.guild.roles, name=bother_role)\n if (discord.utils.get(ctx.message.author.roles, name=bother_role) == None):\n try:\n await ctx.message.author.add_roles(role)\n await ctx.send(\"Don't bother \" + ctx.message.author.name + \".\")\n except discord.Forbidden:\n await ctx.send(\"I don't have permission to mess with roles!\")\n else:\n try:\n await ctx.message.author.remove_roles(role)\n await ctx.send(\"Everyone bother \" + ctx.message.author.name + \".\")\n except discord.Forbidden:\n await ctx.send(\"I don't have permission to mess with roles!\")", "def perm_to_403(self, path=None):\n try:\n yield\n except PermissionDenied as e:\n path = path or e.path or \"unknown file\"\n raise web.HTTPError(403, \"Permission denied: %r\" % path) from e", "def access_warn(request, msg=None):\n ai = AccessInfo(request)\n ai.log(logging.WARN, msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the rc with values from another mapping. If a key is present in self, other, and self._updaters, then the corresponding updater is called to perform the update. This function should return a copy to be safe and not update in place.
def _update(self, other): if hasattr(other, '_dict'): other = other._dict elif not hasattr(other, 'items'): other = dict(other) for k, v in other.items(): if v is NotSpecified: pass elif k in self._updaters and k in self: v = self._updaters[k](getattr(self, k), v) setattr(self, k, v)
[ "def update(self, other):\n self._map.update(other._map)", "def update(self, other):\n try:\n it = other.items()\n except AttributeError:\n it = iter(other)\n super().update(map(self._validate_entry, it))", "def join(self, other):\n self.cache = {**self.cache, **other.cache}", "def merge(self, other: KeyIndexedCollection) -> KeyIndexedCollection:\n new_items = []\n for other_item in other.items():\n our_item = self.get(other_item.key())\n if our_item is None:\n new_items.append(other_item)\n elif our_item != other_item:\n raise ValueError('Error during merge for key {!r}: values are different'.format(other_item.key()))\n return self.clone(new_items + self.items())", "def __add__(self, other: 'MapResult') -> 'MapResult':\n if self.key_names != other.key_names:\n raise KeyError('Key names must be identical')\n if self.value_names != other.value_names:\n raise ValueError('Value names must be identical')\n new_data = OrderedDict()\n for key in self.keys():\n if key in other.keys():\n new_data[key] = self[key] + other[key]\n else:\n new_data[key] = self[key]\n for key in other.keys():\n if key not in self.keys():\n new_data[key] = other[key]\n return MapResult(\n data=new_data,\n key_names=self.key_names,\n value_names=self.value_names\n )", "def _auth_update(old_dict, new_dict_source):\n new_dict = copy.deepcopy(new_dict_source)\n for k, v in new_dict.items():\n if k == 'auth':\n if k in old_dict:\n old_dict[k].update(v)\n else:\n old_dict[k] = v.copy()\n else:\n old_dict[k] = v\n return old_dict", "def __sub__(self, other: 'MapResult') -> 'MapResult':\n if self.key_names != other.key_names:\n raise KeyError('Key names must be identical')\n if self.value_names != other.value_names:\n raise ValueError('Value names must be identical')\n new_data = OrderedDict()\n for key in self.keys():\n if key in other.keys():\n new_data[key] = self[key] - other[key]\n else:\n new_data[key] = self[key]\n for key in other.keys():\n if key not in self.keys():\n new_data[key] = -other[key]\n return MapResult(\n data=new_data,\n key_names=self.key_names,\n value_names=self.value_names\n )", "def update(self, other):\n if isinstance(other, self.__class__):\n for x, n in other.iteritems():\n self[x] += n\n else:\n for x in other:\n self[x] += 1", "def merge(self, other) -> None:\n if other.new:\n raise ValueError(\"This patch should not have a .new set.\")\n if not other.old:\n raise ValueError(\"No data in .old\")\n self.old = other.old + self.old\n self.old_hash = get_sha256(self.old)", "def _merge_dicts(self, x, y):\n z = x.copy() # start with x's keys and values\n z.update(y) # modifies z with y's keys and values & returns None\n\n return z", "def __merge_dictionary(a, b):\n if a is None:\n return b\n\n merged = a.copy()\n merged.update(b)\n return merged", "def __add__(self, other):\n if not isinstance(other, keymap):\n raise TypeError(\"can't concatenate '%s' and '%s' objects\" % (self.__class__.__name__, other.__class__.__name__))\n k = copy(other)\n #k.__chain__ = __chain__(self, k)\n k.__inner__ = copy(self) #XXX: or just... self ?\n k.__outer__ = copy(other) #XXX: or just... 
other ?\n return k", "def _additive_dict_update(d1, d2):\n for key in d2:\n val = d1.get(key, [])\n val.extend(d2[key])\n d1[key] = val", "def update(self, other: \"ParameterSet\"):\n param_dict = {type(x): x for x in self.parameters}\n param_dict.update({type(x): x for x in other})\n self.parameters = list(param_dict.values())", "def __ior__(self, other):\r\n self.update(other)\r\n return self", "def __mul__(self, other: 'MapResult') -> 'MapResult':\n if self.key_names != other.key_names:\n raise KeyError('Key names must be identical')\n if self.value_names != other.value_names:\n raise ValueError('Value names must be identical')\n new_data = OrderedDict()\n for key in self.keys():\n if key in other.keys():\n if (\n not isinstance(self[key], Iterable) and\n not isinstance(other[key], Iterable)\n ):\n new_data[key] = self[key] * other[key]\n else:\n raise TypeError(\n 'Cannot multiple iterable value by non-iterable value'\n )\n return MapResult(\n data=new_data,\n key_names=self.key_names,\n value_names=self.value_names\n )", "def MergeCommonKeysFrom(self, other: ParamsT) -> ParamsT:\n return CopyFieldsTo(other, self, ignore_unknown_keys=True)", "def update_dict(a, b, copy=True):\n if copy:\n b = deepcopy(b)\n for k in b.keys():\n if isinstance(b[k], Mapping) and k in a and isinstance(a[k], MutableMapping):\n # update existing key\n update_dict(a[k], b[k])\n else:\n # assign new key\n a[k] = b[k]\n return a", "def update(d1, d2):\n\t# type: (dict, dict) -> None\n\n\tfor k, v in d2.items():\n\t\tif v is not None:\n\t\t\td1[k] = v", "def merge(self, other_analyzer_context):\n self.metric_map.update(other_analyzer_context.metric_map)\n return self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validates and possibly converts a value based on its key and the current validators.
def _validate(self, key, value): validators = self._validators if key in validators: validator, convertor = validators[key] else: for vld in validators: if isinstance(vld, str): continue m = vld.match(key) if m is not None: validator, convertor = validators[vld] else: validator, convertor = always_true, noop return value if validator(value) else convertor(value)
[ "def process(self, value):\n if self.value_modifier is not None:\n validator = self.value_modifier\n if inspect.isclass(self.value_modifier):\n validator = validator()\n value = validator.process(value)\n if value not in self.option_keys:\n return _NoValue\n if self.default_op and value == -1:\n return _NoValue\n return value", "def validate_converter(value):\n if value in converters_map or value is None:\n return value\n else:\n raise Invalid(\"Unknown converter function: '{}' type: '{}'\".format(value, type(value)))", "def run_validators(self, value):\n if isinstance(value, dict):\n to_validate = self._read_only_defaults()\n to_validate.update(value)\n else:\n to_validate = value\n super().run_validators(to_validate)", "def clean(self, value):\n try:\n self.run_validators(value)\n return self.to_python(value)\n except ValueError:\n raise exceptions.ValidationError(\n code=exceptions.VALIDATION_INVALID_VALUE,\n message=\"Value {value} is invalid\",\n value=value\n )", "def setup_validator(self):\n # make an educated guess about what type the unicode values sent in on\n # a set() operation should be converted to\n if self.value_modifier == 'auto' or self.value_modifier is None:\n if self.value_modifier and len(self.option_keys) == 0:\n raise ValueError(_('value_modifier argument set to \"auto\", but '\n 'the options set is empty and the type can therefore not '\n 'be determined for {name}', name=self.__class__.__name__))\n first_key = self.option_keys[0]\n if isinstance(first_key, six.string_types) or self.value_modifier is None:\n self.value_modifier = validators.StringValidator()\n elif isinstance(first_key, int):\n self.value_modifier = validators.IntValidator()\n elif isinstance(first_key, float):\n self.value_modifier = validators.FloatValidator()\n elif isinstance(first_key, D):\n self.value_modifier = validators.DecimalValidator()\n else:\n raise TypeError(\n _(\"can't use value_modifier='auto' when option keys are {key_type}\",\n key_type=type(first_key))\n )\n else:\n # if its not the string 'auto' and its not a webgrid validator, assume\n # its a callable and wrap with a webgrid validator\n if not hasattr(self.value_modifier, 'process'):\n if not hasattr(self.value_modifier, '__call__'):\n raise TypeError(\n _('value_modifier must be the string \"auto\", have a \"process\" attribute, '\n 'or be a callable')\n )\n self.value_modifier = validators.CustomValidator(processor=self.value_modifier)", "def Validate(self, value, key='???'):\n if isinstance(value, datetime):\n return value\n\n for date_format in DATE_FORMATS:\n try:\n return datetime.strptime(value, date_format)\n except ValueError:\n pass\n\n raise validation.ValidationError('Datetime validation error - \\\n must follow one of the formats {}'.format(DATE_FORMATS))", "def run_validators(self, value):\n try:\n self.model_field.run_validators(value)\n except ModelValidationError as err:\n raise ValidationError(err.messages)\n except TypeError as err:\n raise ValidationError(err)\n super(DjongoField, self).run_validators(value)", "def make_value_from_form(self, value):\n if not value:\n return None\n if isinstance(value, unicode):\n try:\n return db.Key(value)\n except datastore_errors.BadKeyError:\n raise forms.ValidationError(\n 'Supplied unicode representation of db.Key is not valid. '\n 'Found: %s' % value)\n elif not isinstance(value, db.Model) and not isinstance(value, db.Key):\n raise forms.ValidationError(\n u'Value for reference property must be either an instance of '\n 'db.Model or db.Key. 
Found: %s' % type(value))\n else:\n return value", "def validate_value(self, value):\n pass", "def is_valid(self, field_name: str, value, kg: dict) -> Optional[dict]:\n # property\n uri = self.__is_valid_uri_resolve(field_name, kg.get(\"@context\"))\n property_ = self.get_entity(uri)\n if not isinstance(property_, OntologyProperty):\n logging.warning(\"Property is not OntologyProperty, ignoring it: %s\", uri)\n return None\n if not self.__is_valid_domain(property_, kg):\n logging.warning(\"Property does not have valid domain, ignoring it: %s\", uri)\n return None\n # check if is valid range\n # first determine the input value type\n if isinstance(property_, OntologyDatatypeProperty):\n types = self.__is_valid_determine_value_type(value)\n else:\n if isinstance(value, dict):\n try:\n types = map(self.get_entity, value['@type'])\n except KeyError:\n return None # input entity without type\n elif self.__is_schema_org_datatype(property_):\n if self.expanded_jsonld:\n return {'@value': self.__serialize_type(value)}\n else:\n return value\n else:\n return {'@id': self.__serialize_type(value)}\n # check if is a valid range\n if any(property_.is_legal_object(type_) for type_ in types):\n if isinstance(property_, OntologyObjectProperty):\n return value\n elif self.expanded_jsonld:\n return {'@value': self.__serialize_type(value)}\n else:\n return self.__serialize_type(value)\n return None", "def dictValidate(self, decoded):\n \n log.debug(\"Validating against provided validators\")\n\n #Initialize valid_args and errors for use in iteration over the validators\n \n valid_args = {}\n errors = {}\n \n for field, validator in self.validators.iteritems():\n try:\n # XXX: Is this necessary to call twice?\n #validator.to_python(params.get(field), state)\n valid_args[field] = validator.to_python(decoded.get(field),\n self.state)\n # catch individual validation errors into the errors dictionary\n except formencode.Invalid, inv:\n errors[field] = inv\n \n # If there are errors, create a compound validation error based on\n # the errors dictionary, and raise it as an exception\n if errors:\n raise formencode.Invalid(\n formencode.schema.format_compound_error(errors),\n decoded , None, error_dict=errors)\n \n return valid_args", "def try_value(self, value):\r\n if value is None:\r\n # V32243 - None passed in here is \"special\". We need to\r\n # preserve the meaning that the setting was not\r\n # specified. 
The None object should never be used as a\r\n # true setting value.\r\n return None\r\n\r\n parsed_value = value\r\n\r\n if self.parser:\r\n parsed_value = self.parser(value)\r\n\r\n if not isinstance(parsed_value, self.type):\r\n try:\r\n parsed_value = self.type(value)\r\n except:\r\n raise AttributeError, \\\r\n \"'%s' cannot be instantiated as '%s'\" % \\\r\n (value, self.type.__name__)\r\n\r\n try:\r\n verified = self.verify_function(parsed_value)\r\n except Exception, e:\r\n msg = \"'%s' fails verification function: %s\" % \\\r\n (value, str(e))\r\n raise AttributeError, msg\r\n else:\r\n if type(verified) == bool and not verified:\r\n msg = \"'%s' verification function returns false\" % \\\r\n (value)\r\n raise AttributeError, msg\r\n\r\n\r\n return parsed_value", "def _check_and_coerce_cfg_value_type(replacement, original, key, full_key):\n original_type = type(original)\n replacement_type = type(replacement)\n\n # The types must match (with some exceptions)\n if replacement_type == original_type:\n return replacement\n\n # If either of them is None, allow type conversion to one of the valid types\n if (replacement_type == type(None) and original_type in _VALID_TYPES) or (\n original_type == type(None) and replacement_type in _VALID_TYPES):\n return replacement\n\n # Cast replacement from from_type to to_type if the replacement and original\n # types match from_type and to_type\n def conditional_cast(from_type, to_type):\n if replacement_type == from_type and original_type == to_type:\n return True, to_type(replacement)\n else:\n return False, None\n\n # Conditionally casts\n # list <-> tuple\n casts = [(tuple, list), (list, tuple)]\n # For py2: allow converting from str (bytes) to a unicode string\n try:\n casts.append((str, unicode)) # noqa: F821\n except Exception:\n pass\n\n for (from_type, to_type) in casts:\n converted, converted_value = conditional_cast(from_type, to_type)\n if converted:\n return converted_value\n\n raise ValueError(\n \"Type mismatch ({} vs. {}) with values ({} vs. 
{}) for config \"\n \"key: {}\".format(original_type, replacement_type, original,\n replacement, full_key))", "def _cast_val(self, value, field, value_identifier, suppress_invalid=False):\n try:\n return (field.cast_value(value, constraints=True), True)\n except exceptions.CastError as e:\n return self._report_invalid_value(\n value_identifier,\n self._parse_cast_error(e, value, field),\n suppress_invalid,\n )", "def _validate_key(key):\n key_error_message = (\n \"The key must either be an ObservableType or a \"\n \"string representation of an ObservableType\"\n )\n\n if isinstance(key, str):\n\n try:\n key = ObservableType(key)\n except ValueError:\n raise KeyError(key_error_message)\n\n elif not isinstance(key, ObservableType):\n raise KeyError(key_error_message)\n\n return key", "def _validate_entry(self, kv: Tuple):\n key, value = kv\n try:\n kt, vt = self._types\n except AttributeError:\n pass\n else:\n if not isinstance(key, kt):\n raise TypeError(\n f\"Expected key type {kt.__name__}; got {type(key).__name__}\"\n )\n elif not isinstance(value, vt):\n raise TypeError(\n f\"Expected value type {vt.__name__}; got {type(value).__name__}\"\n )\n\n return key, value", "def validator(dictionary):\n def callback(value):\n \"\"\"Accept a value to validate, return validation results\"\"\"\n results = []\n for k, vldtr in dictionary.items():\n val = value[k] if k in value else None\n okay, result = vldtr(val)\n if not okay:\n results.append(result(k))\n if results:\n return False, os.linesep.join(results)\n return True, None\n return callback", "def _validate_dict(self, val):\n if val is None:\n if self._allow_null:\n return\n else:\n raise ValidatorException(\n f'field {self._schema_name} may not be null')\n if not isinstance(val, self._firestore_type):\n raise ValidatorException(\n f'expected instance of type {self._firestore_type.__name__} but got {type(val).__name__}.\\nValue {val}'\n )", "def __getitem__(self, key):\n #retrieve the value\n curValue = self._d[key.lower().strip()]\n \n #check if the value is a bool\n if curValue.strip().lower() in ['yes','true']:\n return True\n if curValue.strip().lower() in ['no','false']:\n return False\n \n #check if value is a int\n if curValue.strip().isdigit():\n return int(curValue)\n \n #try to convert it to a float\n try:\n curValue = float(curValue)\n return curValue\n except ValueError:\n pass\n \n #return it as a string\n return curValue", "def _required_value(converter: typing.Callable) -> typing.Callable:\n @functools.wraps(converter)\n def main(value: typing.Any) -> typing.Any:\n if value is not None:\n return converter(value)\n raise utils.RequestError(3101)\n return main" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Circumradius of the polygon
def circumradius(self): return self._circumradius
[ "def get_radius(self):\n return np.degrees(self.bounding_circle[2])", "def inradius(vertices):\n a = area(vertices)\n s = perimeter(vertices) / 2\n return a / s", "def circumradius(vertices):\n el = edge_lengths(vertices)\n a = el[0]\n b = el[1]\n c = el[2]\n r = a * b * c / numpy.sqrt((a + b + c) * (b + c - a) * (c + a - b) * (a + b - c))\n return r", "def radius_of_curvature(self):\n _a, _c, _lat = (\n self._a.to_value(u.m),\n self._c.to_value(u.m),\n self._lat.to_value(u.rad),\n )\n return (\n radius_of_curvature_fast(_a, _c, _lat) * u.m\n ) # Need to convert units to u.rad and then take value because numpy expects angles in radians if unit is not given.", "def _get_radius(self) -> \"double\" :\n return _core.Cone__get_radius(self)", "def polygon(n):\n return circle(360./n)", "def getPolygonInnerCircle(areaPoly):\n\t#first, find the point. we have a finite set of points.\n\tC=np.array([0,0]) #will add stuff to this one..\n\tfor corner in areaPoly: \n\t\tC+=np.array(corner)\n\tC/=(len(areaPoly))\n\tC=list(C)\n\tminDist=1e10\n\tfor corner in areaPoly:\n\t\tprint corner, C\n\t\td=getDistance(corner, C)\n\t\tif d<minDist:\n\t\t\tminDist=d\n\treturn C, minDist #middle, radius", "def _get_radius(self) -> \"double\" :\n return _core.Arc2D__get_radius(self)", "def circumference_area(radius):\n return (round(math.pi * radius**2, 3))", "def circumference_of_circle(radius: float) -> float:\n return 2 * pi * radius", "def circle_area(radius):\n return numpy.pi*radius**2", "def _get_radius(self) -> \"double\" :\n return _core.Cylinder__get_radius(self)", "def radius(self):\n return self.get_planet_radius(unit='Rjup')", "def cone_radius(self) -> float:\n return self.GetConeRadius()", "def perimeter(self):\n return float(2*math.pi*self.radius)", "def cylinder_radius(self) -> float:\n return self.GetCylinderRadius()", "def circle_perimeter(r):\n return 2.0*pi*r", "def circumference(self):\n\t\treturn 2 * PI * self.r", "def area_of_a_circle(radius):\n return np.pi * radius ** 2", "def epsf_radius(self):\n return self.__epsf_radius" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Length of an individual edge of the polygon
def edge_length(self): return (2 * self._circumradius * math.sin(math.pi/self._n_edges))
[ "def edge_lengths(self):\n points = list(self.base_piece.polygon.points())\n NUM = 4\n assert len(points) == NUM\n return [(points[i] - points[(i+1) % NUM]).norm() for i in range(NUM)]", "def calc_edge_length(edge, layout):\n\n Ax, Ay, Bx, By = edge_to_cartesian(edge,layout)\n\n edge_length = math.sqrt( (Bx - Ax)*(Bx - Ax) + (By - Ay)*(By - Ay) )\n\n #print edge, Ax, Ay, Bx, By\n\n return edge_length", "def edge_length(self, edge_id):\n raise NotImplementedError", "def test_Polygon_edgeLength_6():\n a = Polygon(3, 3)\n assert a.edgeLength == (2 * 3 * math.sin(math.pi / 3)), \"wrongly calculates edgeLength\"", "def get_edge_list_len(self):\n return self.edge_list_len", "def get_edge_lengths(vertices, edge_points):\n N, D = vertices.shape\n E = edge_points.shape[0]\n # E,2,D (OK to do this kind of indexing on the first dimension)\n edge_vertices = vertices[edge_points[:,:2]]\n\n edges = (edge_vertices[:,0,:]-edge_vertices[:,1,:])\n edges_sqrlen = torch.sum(edges * edges, dim=-1)\n return edges_sqrlen", "def edge_length(resolution, unit='km'):\n # todo: `mean_edge_length` in 4.0\n return _cy.mean_edge_length(resolution, unit)", "def poly_bounds(polyedge):\n \n width = max(polyedge[0]) - min(polyedge[0]) + 0.1\n height = max(polyedge[1]) - min(polyedge[1]) + 0.1\n\n return width, height", "def lengthOfCurve(pts: list):\n\tif len(pts) < 2: return 0\n\tl = 0\n\tfor i in range(len(pts) - 1):\n\t\tl += vertexDistance(pts[i], pts[i + 1])\n\treturn l", "def ST_Length(geos):\n return arctern.ST_Length(geos)", "def getNumEdges(self): \n return self.__E", "def __len__(self) -> int:\n # Grab some values from *simple_polygon* (i.e. *self*):\n simple_polygon: SimplePolygon = self\n points: List[P2D] = simple_polygon.points\n size: int = len(points)\n return size", "def edge_width(self):\n if not self._edge_width:\n self.edge_width = self.default_edgewidth\n return self._edge_width", "def _get_length(self) -> \"double\" :\n return _core.OrientedBoundingBox3D__get_length(self)", "def path_length(self):\n return np.sum([path.path_length for path in self.paths])", "def path_length(graph, path):\n if len(path) == 1:\n return 0\n else:\n edge_widths = []\n for x in range(len(path) - 1):\n edge_widths.append(graph.get_edge(path[x], path[x + 1]).length)\n return sum(edge_widths)", "def edge_lengths_dimension(self, eqs_lambdas):\n return ideal(eqs_lambdas + [self.aux_var]).dimension()", "def out_edge_count(self):", "def calc_length_of_polyline(polyline: np.ndarray) -> float:\n\n dist = 0\n for i in range(0, len(polyline) - 1):\n dist += SearchBaseClass.distance(polyline[i], polyline[i + 1])\n return dist", "def edge_sum(self):\n return len(self.edges)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Value of each interior angle of the polygon
def interior_angle(self): return (self._n_edges - 2) * (180/self._n_edges)
[ "def angle_vector(self):\n from math import atan2, pi\n return (atan2(self.y, self.x)) / pi * 180", "def angle(vertices, i):\n v = edges(vertices)\n u0 = -v[(i + 1) % 3]\n u1 = v[(i + 2) % 3]\n return compute_angle(u0, u1)", "def _angles_of_a_polygon(num_edges):\n assert num_edges > 2\n # first see if we have this answer cached already\n if num_edges in _angles_of_a_polygon.cache:\n return _angles_of_a_polygon.cache[num_edges]\n step = 360. / num_edges\n angles_list = [0]\n next_angle = step\n while next_angle < 360:\n angles_list.append(next_angle)\n next_angle += step\n # turn the list of angles to a tuple for immutability, since we'll be caching it and re-using\n angles = tuple(angles_list)\n\n # add to cache and return\n _angles_of_a_polygon.cache[num_edges] = angles\n return angles", "def solid_angle(angle):\n return (pi/4)*angle**2", "def angle(self, sides:int) -> float:\n if sides < 3:\n raise ValueError('cannot calculate angles of polygon with less than 3 sides')\n elif type(sides) != int:\n raise ValueError('sides must be an int')\n angle_sum = 180 * (sides - 2)\n return angle_sum / sides", "def angles( self ): \n return (self.__angleA,self.__angleA,self.__angleA)", "def angle(self):\n cos_the = branch_angles(\n self.direction, np.array([[0, 1]]), np.ones(1))[0]\n return 180 / np.pi * np.arccos(cos_the)", "def angle(self):\n return np.degrees(np.arctan2(self.u_vector[1], self.u_vector[0]))", "def angle(p):\n return math.atan2(p[1], p[0])", "def elementary_angle(self) -> Angle:\n a, b = self.angle.numerator, self.angle.denominator\n if a % 2 == 0:\n p = a // 2\n q = b - p\n else:\n p = a\n q = 2 * b - a\n\n if (p == 1) or (q == 1):\n p, q = 2 * p, 2 * q\n\n return Angle(2, p + q)", "def compute_angles(self):\n edges = self.edges().reshape(-1, 3, 2)\n vecs = np.diff(self.vertices[edges], axis=2)[:, :, 0]\n vecs = util.normalize(vecs)\n angles = np.arccos(-util.dot(vecs[:, [1, 2, 0]], vecs[:, [2, 0, 1]]))\n assert np.allclose(angles.sum(axis=1), np.pi, rtol=1e-3)\n return angles", "def inside_polygon(point, polygon):\n ang = 0;\n vector = []\n for poly in polygon:\n vector += [(poly[0]-point[0], poly[1]-point[1])]\n\n for i in range(len(vector)-1):\n ang += angle(vector[i],vector[i+1]) \n\n ang += angle(vector[0],vector[-1])\n\n return abs(ang - 2*math.pi) < 0.001", "def ND_angle(self):\n ND_angle = np.degrees(np.arctan(np.average(self.ND_params[0,:])))\n return ND_angle", "def enlargen_polygon(polygon, ratio):\n centre = get_polygon_centre(polygon)\n polygon = polygon.astype(np.int)\n\n enlargened_poly = []\n for corner in polygon:\n diff = corner - centre\n enlargened_poly.append((diff * ratio) + centre)\n return np.array(enlargened_poly).astype(np.int32)", "def test_Polygon_interiorAngle_5():\n a = Polygon(10, 10)\n assert a.interiorAngle == 144, \"wrongly calculates interiorAngle\"", "def view_angle(self):\n view_i = -self.Ri[2,:].T\n view_j = -self.Rj[2,:].T\n return np.arccos(np.dot(view_i.T, view_j))", "def polygon(n):\n return circle(360./n)", "def get_angle_from_ellipse(ellipse):\n return (get_angle_from_rotated_rect(ellipse) + 90) % 180 - 90", "def simple_polygon(points):\n \n # Firstly swap the bottommost (and if necessary leftmost) point to the\n # 0th position in the list. 
The first line finds the bottommost point,\n # and the next line finds its index, so it can be swapped to the front.\n bottommost = min(points, key=lambda p: (p.y, p.x))\n index = points.index(bottommost)\n points[0], points[index] = points[index], points[0]\n \n # Now just sort the rest by angle from points[0]\n rest = points[1:]\n # **** FIXME by inserting the missing line of code here ****\n # print('in func:')\n \n \n rest = sorted(rest, key=lambda x: points[0] - x)\n \n \n #print(rest)\n \n \n \n return [points[0]] + rest", "def _another_angle(theta1,df):\r\n n=1.85\r\n f = c/790.\r\n theta1 = theta1/180.*np.pi\r\n ans = -np.arcsin(np.sqrt(np.square(n) - np.square(n*f)/np.square(df - (f*n)/np.sqrt((n - np.sin(theta1))*(n + np.sin(theta1))))))\r\n return ans/np.pi*180." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Model implementation of the heat model
def heat_model(tijdstappen, N2O5_0, N2O4_0, T_0, Ar, Ea, Q, V, rho, Cp, U, A, delta_rH, N2O5_in, N2O4_in, Tin, Tw, returnDataFrame=True): modeloutput = odeint(model_afgeleiden, [N2O5_0, N2O4_0, T_0], tijdstappen, args=(Ar, Ea, Q, V, rho, Cp, U, A, delta_rH, N2O5_in, N2O4_in, Tin, Tw)) modeloutput = pd.DataFrame(modeloutput, columns=['N2O5','N2O4','T'], index=tijdstappen) modeloutput.plot() if returnDataFrame: return modeloutput
[ "def heat(self):\r\n return self.m_act * (self.outlet.h - self.cond.h)", "def heat_func(self):\n return self.Q.val + self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI)", "def visualize_matrice_mi(self):\n ax_heatmap = plt.axes()\n sns.heatmap(self.matrice_mi, ax=ax_heatmap)\n ax_heatmap.set_title(\\\n \"Heatmap of the compute mutual information for the different positions\")\n plt.show()", "def view_temperature( self ):\n\t\theat = np.zeros((self.width*self.height),np.uint32)\n\t\tif not LiFlame.view_temperature(heat):\n\t\t\traise Grid.TypeError(\"Failed to view the temperature\")\n\t\treturn heat.reshape((self.width,self.height))", "def __init__(self, temp, eflux, spec_heat):\n self.spec_heat = spec_heat\n self.eflux = eflux\n self.temp = temp", "def model_background_state(self):\n if self.surface_rho == None:\n self.make_stack()\n # load spectra\n vegetation_rho, ash_rho = generate_spectra()\n # select some bands...\n vegetation_rho = vegetation_rho[::25]\n ash_rho = ash_rho[::25]\n # keep these for later...\n self.ash_rho = ash_rho\n self.vegetation_rho = vegetation_rho\n\n # first fill the dataset with just normal leaf reflectance across all timesteps\n self.surface_rho = (self.surface_rho[:].T*vegetation_rho[None, :].T).T\n\n # now make a mean state...\n mean_state = make_noises(1, self.xSize)\n mean_state *= 0.3\n mean_state = np.tile(mean_state, (self.bands,self.timesteps,1,1),)\n mean_state = np.swapaxes(mean_state, 0,1)\n\n # alternative with more spatial features -- less like a cloud\n land_cover = make_landscape(self.xSize)\n\n # for each catergory in the land_cover assign a mean variation on\n # the spectra\n lc_spe_multiple = np.linspace(0.85, 1.15, len(np.unique(land_cover)))\n # associate these values with land_cover\n for i,j in enumerate(np.unique(land_cover)):\n land_cover[np.where(land_cover==j)] = lc_spe_multiple[i]\n\n #import pdb; pdb.set_trace()\n # multiply this with spectra\n lc_effect = np.tile(land_cover, (self.timesteps,self.bands,1,1),)\n #mean_state = np.swapaxes(mean_state, 0,1)\n self.surface_rho *= lc_effect\n\n return None\n # now generate temporal variability\n temporal = make_noises(self.timesteps, self.xSize)\n temporal *= 0.2\n\n # now add to the surface reflectance\n # need same dimensions\n temporal = np.tile(temporal, (self.bands,1,1,1),)\n # switch dimensions\n temporal = np.swapaxes(temporal, 0,1)\n self.surface_rho += temporal", "def calculate_change_mesh(self):", "def heatSinkModel( fileName, tunnelDims, boardDims,\n chipDims,\n sinkBaseDims = None, #--- Heat Sink G2 ; NP ; 05/09/10 for type = None\n sinkDims = None, #--- \"\n ccXFinWidth = None,\n ccXFinGap = None,\n ccYFinWidth = None,\n ccYFinGap = None,\n exYFinWidth = None,\n exYFinGap = None, \n dcFinWidth = None,\n dcFinGap = None,\n pfPinDiameter = None,\n sinkType = 'cross cut'\n ):\n \n model1 = pk.PK_ENTITY_null\n model2 = pk.PK_ENTITY_null\n model3 = pk.PK_ENTITY_null\n model4 = pk.PK_ENTITY_null\n\n GO_init( )\n\n #--------------------------------------------------------------------\n # Check file name extension\n #--------------------------------------------------------------------\n \n absPath\t = os.path.abspath(\t str( fileName )\t)\n baseName = os.path.basename(\t\t absPath\t\t)\n fileInfo = os.path.splitext(\t\t baseName\t)\n fileExt\t = fileInfo[1]\n\t\n if fileExt not in [ \".x_t\", \".x_b\" ]:\n raise HeatSinkCADError, \"unknown file format to write\"\n\n if sinkType == \"none\": sinkType = None\n if sinkType:\n sinkType = sinkType.lower( )\n \n 
#--------------------------------------------------------------------\n # Set the model parts\n #--------------------------------------------------------------------\n\n while True:\n\n #----------------------------------------------------------------\n # Error checking\n #----------------------------------------------------------------\n\n for i in xrange( 3 ):\n if boardDims[i] > tunnelDims[i]:\n raise HeatSinkCADError, \"The Board dimensions are invalid. \" \\\n \"They could not be more than Tunnel dimensions.\"\n\n for i in xrange( 2 ):\n if chipDims[i] > boardDims[i]:\n raise HeatSinkCADError, \"The Chip length and width dimensions \"\\\n \"are invalid. They could not be more than Board dimensions.\"\n \n if sinkType:\n for i in xrange( 2 ) : \n if sinkDims[i] > boardDims[i]:\n raise HeatSinkCADError, \"The Heat Sink length and width dimensions \"\\\n \"are invalid. They could not be more than Board dimensions.\"\n \n partsHeight = boardDims[2] /2 + chipDims[2] + sinkDims[2]\n \n if partsHeight > tunnelDims[2] / 2:\n raise HeatSinkCADError, \"The model height dimensions are invalid.\"\n \n #----------------------------------------------------------------\n # Set the Tunnel model\n #----------------------------------------------------------------\n x0 = -tunnelDims[0] / 2\n y0 = -tunnelDims[1] / 2\n z0 = -tunnelDims[2] / 2\n x1 = tunnelDims[0] / 2\n y1 = tunnelDims[1] / 2\n z1 = tunnelDims[2] / 2\n \n\t(rc, tunnel)= GO_BODY_create_box_distances( x0, x1, y0,\n y1, z0, z1 ) \n \n\t\t\n\tif tunnel == pk.PK_ENTITY_null:\n raise HeatSinkCADError, \"There is not an entity for tunnel.\"\n \n\tmodel1 = tunnel\n\n #----------------------------------------------------------------\n # Set the Board model\n #----------------------------------------------------------------\n\n\tx0 = -boardDims[0] / 2\n y0 = -boardDims[1] / 2\n z0 = -boardDims[2] / 2\n x1 = boardDims[0] / 2\n y1 = boardDims[1] / 2\n z1 = boardDims[2] / 2\n \n (rc, board)= GO_BODY_create_box_distances( x0, x1, y0,\n y1, z0, z1 ) \n \n \n\tif board == pk.PK_ENTITY_null:\n raise HeatSinkCADError,\"There is not an entity for board.\"\n \n\tmodel2 = board\n\n #----------------------------------------------------------------\n # Set the Chip model\n #----------------------------------------------------------------\n\n\tx0 = -chipDims[0] / 2\n y0 = -chipDims[1] / 2\n z0 = z1\n x1 = chipDims[0] / 2\n y1 = chipDims[1] / 2\n z1 = z0 + chipDims[2]\n \n ( rc, chip )= GO_BODY_create_box_distances( x0, x1, y0, \n y1, z0, z1 )\n\n\tif chip == pk.PK_ENTITY_null: \n raise HeatSinkCADError,\"There is not an entity for chip.\"\n \n model3 = chip\n \n if sinkType:\n #----------------------------------------------------------------\n # Set the HeatSink base model\n #----------------------------------------------------------------\n\t\t\n x0 = -sinkBaseDims[0] / 2\n y0 = -sinkBaseDims[1] / 2\n z0 = z1\n x1 = sinkBaseDims[0] / 2\n y1 = sinkBaseDims[1] / 2\n z1 = z0 + sinkBaseDims[2]\n\n \n (rc,baseSink)= GO_BODY_create_box_distances(x0, x1, y0, \n y1, z0, z1 )\n\n if baseSink == pk.PK_ENTITY_null: \n raise HeatSinkCADError, \\\n \"There is not an entity for heat sink base.\"\n \n model4 = baseSink\n\n if sinkType in ['cross cut','extrusion'] :\n \n if sinkType == 'extrusion': \n ccYFinGap = exYFinGap\n ccYFinWidth = exYFinWidth\n\n setCrossExtFins(sinkDims, ccXFinGap, ccXFinWidth,\n ccYFinGap, ccYFinWidth,model4,\n z1 ) \n \n if sinkType == 'diamond cut':\n setDiamFin( sinkDims, dcFinWidth, dcFinGap,\n model4, z1 )\n \n if sinkType =='pin fin': \n 
setPinFin( sinkDims, pfPinDiameter,\n model4, z1 )\n \t\t\n #---------------------------------------------------------------\n # Create part names\n #--------------------------------------------------------------- \n\trc = GO_PART_set_name( model1, \"tunnel\" )\n\trc = GO_PART_set_name( model2, \"board\" )\n\trc = GO_PART_set_name( model3, \"chip\" )\n\n\tif sinkType:\n rc = GO_PART_set_name( model4, \"heat sink\" )\n\t\t\n break\n\n #--------------------------------------------------------------------\n # Writes the model on a file\n #--------------------------------------------------------------------\n if sinkType: \n if model1 != pk.PK_ENTITY_null and model2 != pk.PK_ENTITY_null and \\\n model3 != pk.PK_ENTITY_null and model4 != pk.PK_ENTITY_null:\n \n if fileExt == \".x_t\" :\n rc = GO_PART_write_text( [model1,model2,\n model3,model4],\n fileName )\n elif fileExt == \".x_b\" :\n rc = GO_PART_write_binary([model1,model2,\n model3,model4],\n fileName )\n else:\n if model1 != pk.PK_ENTITY_null and model2 != pk.PK_ENTITY_null and \\\n model3 != pk.PK_ENTITY_null:\n\n if fileExt == \".x_t\" :\n rc = GO_PART_write_text( [model1,model2,\n model3],\n fileName )\n elif fileExt == \".x_b\" :\n rc = GO_PART_write_binary([model1,model2,\n model3],\n fileName )\n \n #--------------------------------------------------------------------\n # Deletes the entities\n #--------------------------------------------------------------------\n\n GO_ENTITY_delete_all(\t\t\t\t\t\t)\n\n #--------------------------------------------------------------------\n # Close the session\n #--------------------------------------------------------------------", "def Model3():\n M2 = Model()\n tan = M2.GetGroupByName(\"TAN\")\n sn = M2.GetGroupByName(\"SN\")\n sp = M2.GetGroupByName(\"SP\")\n da = M2.GetGroupByName(\"Da\")\n context = M2.GetGroupByName(\"Context\")\n snr = M2.GetGroupByName(\"SNr/GPi\")\n\n \n \n c2tan = M2.GetProjectionsBetweenGroups(context, tan)[0]\n tan2sn = M2.GetProjectionsBetweenGroups(tan, sn)[0]\n tan2sp = M2.GetProjectionsBetweenGroups(tan, sp)[0]\n da2tan = M2.GetProjectionsBetweenGroups(da, tan)[0]\n\n sn.SetActivationFunction(neu.Tanh_plus)\n sp.SetActivationFunction(neu.Tanh_plus)\n snr.SetActivationFunction(neu.Tanh_plus)\n\n W = np.zeros((6, 3))\n W[0:2,0] = W[2:4,1] = W[4:6,2] = 1.0\n tan2sn.mask = np.copy(W)\n tan2sp.mask = np.copy(W)\n tan2sn.weights = W*-1\n tan2sp.weights = W*-1\n\n sn2tan = sn.ConnectTo(tan)\n sp2tan = sp.ConnectTo(tan)\n sn2tan.weights = W.T/-10\n sp2tan.weights = W.T/-10\n da2tan.weights = np.ones(da2tan.weights.shape)*-0.25\n \n tan.SetActivationFunction(np.vectorize(lambda x: SSigmoid(x, tgain)))\n tan.thresholds=0.5*np.ones(tan.inputs.shape)\n hb = np.average(sn.thresholds)/-tan.size\n HB = np.ones(tan.inputs.shape)*hb\n sn.thresholds = 0.1*np.ones(sn.activations.shape)\n sp.thresholds = 0.1*np.ones(sp.activations.shape)\n #sn.thresholds = -1*tan.GetActivationFunction()(np.ones(sn.inputs.shape)-1)\n #sp.thresholds = -1*tan.GetActivationFunction()(np.ones(sp.inputs.shape)-1)\n #sn.thresholds = -0.1*tan.GetActivationFunction()(np.zeros(sn.inputs.shape))\n #sp.thresholds = -0.1*tan.GetActivationFunction()(np.zeros(sp.inputs.shape))\n #c2tan.weights = np.random.random(c2tan.weights.shape)\n c2tan.weights = np.ones(c2tan.weights.shape)*1.5\n c2tan.mask = np.dot(np.ones(tan.inputs.shape),\n np.array([[1,1,1,0,0,0,0,0,0]]))\n c2tan.learningEnabled = True\n c2tan.learningFunction = TAN_LearningRule\n\n M2.SetParameter(\"TRACE_TAN\", True)\n M2.SetParameter(\"HB\", HB)\n 
return M2", "def computeEpistasis (model,objective_func = \"default\",heatmap = False, labels = False, export_matrix = False):\n #set model objective function\n if objective_func != \"default\":\n model.objective = [objective_func]\n \n #compute wild type growth rate\n solution = model.optimize()\n wt_grow = solution.objective_value\n \n ## knock-outs loops ##\n # initialize empty matricies\n rxns = len(model.reactions)\n single_ko = np.zeros((rxns))\n v1v2_grow = np.zeros((rxns,rxns))\n\n ## Single knockouts ##\n for i in range(rxns):\n #buffer\n upper_i = model.reactions[i].upper_bound\n lower_i = model.reactions[i].lower_bound\n # set upper and lower bounds to zero\n model.reactions[i].upper_bound = 0\n model.reactions[i].lower_bound = 0\n # solve model and record growth rate\n solution = model.optimize()\n single_ko[i] = solution.objective_value\n # return bounds to their previous state\n model.reactions[i].upper_bound = upper_i\n model.reactions[i].lower_bound = lower_i\n \n ## Combo knockout ##\n for i in range(rxns):\n for j in range(rxns):\n if j > i:\n #buffer\n upper_i = model.reactions[i].upper_bound\n lower_i = model.reactions[i].lower_bound\n upper_j = model.reactions[j].upper_bound\n lower_j = model.reactions[j].lower_bound\n # Set bounds on rxns to zero, now both are zero\n model.reactions[i].upper_bound = 0\n model.reactions[i].lower_bound = 0\n model.reactions[j].upper_bound = 0\n model.reactions[j].lower_bound = 0\n # Solve\n solution = model.optimize()\n v1v2_grow[i,j] = solution.objective_value\n # return them to what they were\n model.reactions[i].upper_bound = upper_i\n model.reactions[i].lower_bound = lower_i\n model.reactions[j].upper_bound = upper_j\n model.reactions[j].lower_bound = lower_j\n\n #adjusting matricies \n v1_grow = single_ko + np.zeros((rxns,rxns))\n v2_grow = np.transpose(v1_grow)\n np.fill_diagonal(v1v2_grow,single_ko)\n \n # distribution of epistatic interactions\n epistasis = (v1v2_grow/wt_grow) - ((v1_grow/wt_grow) * (v2_grow/wt_grow))\n ep_dist = epistasis[np.triu_indices(rxns,1)]\n epistasis_full = np.triu(epistasis)+np.rot90(np.fliplr(np.triu(epistasis,1)))\n \n # heatmap\n if heatmap:\n plt.figure(figsize = (12,9))\n if labels:\n reactions = []\n for x in model.reactions:\n reactions.append(x.id)\n sns.heatmap(epistasis_full,xticklabels = reactions, yticklabels = reactions, cmap='mako', linecolor = 'dimgrey', linewidth = 0.005)\n else:\n sns.heatmap(epistasis_full, cmap='mako', linecolor = 'dimgrey', linewidth = 0.005)\n plt.show\n \n #export distribution and matrix\n if export_matrix:\n return(ep_dist,epistasis_full)\n \n return(ep_dist)", "def FCBlock(model):\n model.add(Dense(4096, activation='relu'))\n model.add(Dropout(0.5))", "def heat_loss(self):\n return self._heat_loss", "def define(ensemble_model, k_neighbors, classes=2, freeze=False):\n #Neighbor input \n #shape is equal to the concat shape of the ensemble model\n if freeze:\n for x in ensemble_model.layers:\n x.trainable=False\n \n input_shape = (k_neighbors, classes)\n neighbor_inputs = tf.keras.layers.Input(shape=input_shape, name=\"neighbor_input\")\n \n neighbor_distances = tf.keras.layers.Input(shape=(k_neighbors), name=\"neighbor_distance_input\")\n \n #original featuers from target tree\n original_features = ensemble_model.get_layer(\"ensemble_learn\").output\n\n attention_features = tf.keras.layers.Attention(use_scale=True)([original_features, neighbor_inputs])\n \n ##Squueze 1st dim for addition with original features\n scaled_context = 
tf.keras.layers.GlobalAveragePooling1D()(attention_features)\n \n #Add as residual to original matrix normalized\n context_residual = WeightedSum(name=\"ensemble_add_bias\")([scaled_context,original_features]) \n context_residual = tf.keras.layers.Dense(classes)(context_residual)\n output = tf.keras.layers.Softmax(name=\"neighbor_softmax\")(context_residual)\n\n return ensemble_model.inputs, neighbor_inputs, neighbor_distances, output", "def produce_heatmaps(model, device, parameters):\n # Load exam info\n exam_list = pickling.unpickle_from_file(parameters['data_file'])[1:] \n\n # Create heatmaps\n making_heatmap_with_large_minibatch_potential(parameters, model, exam_list, device)", "def __init__(self, model):\n self.model = model\n self.model.eval()\n self.model.zero_grad()\n\n # Create class variable to store the gradients.\n self.gradients = []\n\n # Create class variable to store the feature maps.\n self.feature_maps = []", "def diffuse(self):\n transmission_coeff = 0.3\n # allow the grid to cool down\n sink_coeff = 0.1\n for idx, cell in self.grid.cells():\n # how much total heat the cell radiates\n emission_loss = cell.heat * transmission_coeff\n neighbors = self.grid.neighbors(idx)\n for nidx,n in neighbors:\n # Only colder cells (positive delta) will absorb the heat.\n # Sum of transmissions cannot be greater that the total emission.\n delta = cell.heat - n.heat\n n.heat += emission_loss / len(neighbors)\n cell.heat -= emission_loss + (cell.heat * sink_coeff)", "def heatmap(self):\n plt.imshow(self.M)\n plt.yticks([])\n plt.xticks(np.arange(self.size[1]))\n plt.show()", "def _init_aks_layers(self):\n self.t_pool, self.l_pool = nn.ModuleList(), nn.ModuleList()\n self.b_pool, self.r_pool = nn.ModuleList(), nn.ModuleList()\n\n self.t_heat, self.l_heat = nn.ModuleList(), nn.ModuleList()\n self.b_heat, self.r_heat = nn.ModuleList(), nn.ModuleList()\n\n self.t_off, self.l_off = nn.ModuleList(), nn.ModuleList()\n self.b_off, self.r_off = nn.ModuleList(), nn.ModuleList()\n\n self.t_feat_adaption = nn.ModuleList()\n self.l_feat_adaption = nn.ModuleList()\n self.b_feat_adaption = nn.ModuleList()\n self.r_feat_adaption = nn.ModuleList()\n\n self.t_dcn_offset = nn.ModuleList()\n self.l_dcn_offset = nn.ModuleList()\n self.b_dcn_offset = nn.ModuleList()\n self.r_dcn_offset = nn.ModuleList()\n\n self.t_guiding_shift = nn.ModuleList()\n self.l_guiding_shift = nn.ModuleList()\n self.b_guiding_shift = nn.ModuleList()\n self.r_guiding_shift = nn.ModuleList()\n\n self.t_centripetal_shift = nn.ModuleList()\n self.l_centripetal_shift = nn.ModuleList()\n self.b_centripetal_shift = nn.ModuleList()\n self.r_centripetal_shift = nn.ModuleList()\n\n\n\n for _ in range(self.num_feat_levels):\n self.t_pool.append(\n BiCornerPool(\n self.in_channels, ['top'],\n out_channels=self.in_channels))\n self.l_pool.append(\n BiCornerPool(\n self.in_channels, ['left'],\n out_channels=self.in_channels))\n self.b_pool.append(\n BiCornerPool(\n self.in_channels, ['bottom'],\n out_channels=self.in_channels))\n self.r_pool.append(\n BiCornerPool(\n self.in_channels, [ 'right'],\n out_channels=self.in_channels))\n\n self.t_heat.append(\n self._make_layers(\n out_channels=self.num_classes,\n in_channels=self.in_channels))\n self.l_heat.append(\n self._make_layers(\n out_channels=self.num_classes,\n in_channels=self.in_channels))\n self.b_heat.append(\n self._make_layers(\n out_channels=self.num_classes,\n in_channels=self.in_channels)) \n self.r_heat.append(\n self._make_layers(\n out_channels=self.num_classes,\n 
in_channels=self.in_channels))\n\n self.tl_off.append(\n self._make_layers(\n out_channels=self.corner_offset_channels,\n in_channels=self.in_channels))\n self.br_off.append(\n self._make_layers(\n out_channels=self.corner_offset_channels,\n in_channels=self.in_channels))\n\n\n self.t_feat_adaption.append(\n DeformConv2d(self.in_channels, self.in_channels,\n self.feat_adaption_conv_kernel, 1, 1))\n self.l_feat_adaption.append(\n DeformConv2d(self.in_channels, self.in_channels,\n self.feat_adaption_conv_kernel, 1, 1))\n self.b_feat_adaption.append(\n DeformConv2d(self.in_channels, self.in_channels,\n self.feat_adaption_conv_kernel, 1, 1))\n self.r_feat_adaption.append(\n DeformConv2d(self.in_channels, self.in_channels,\n self.feat_adaption_conv_kernel, 1, 1))\n\n self.t_guiding_shift.append(\n self._make_layers(\n out_channels=self.guiding_shift_channels,\n in_channels=self.in_channels))\n self.l_guiding_shift.append(\n self._make_layers(\n out_channels=self.guiding_shift_channels,\n in_channels=self.in_channels))\n self.b_guiding_shift.append(\n self._make_layers(\n out_channels=self.guiding_shift_channels,\n in_channels=self.in_channels))\n self.r_guiding_shift.append(\n self._make_layers(\n out_channels=self.guiding_shift_channels,\n in_channels=self.in_channels))\n\n\n self.t_dcn_offset.append(\n ConvModule(\n self.guiding_shift_channels,\n self.feat_adaption_conv_kernel**2 *\n self.guiding_shift_channels,\n 1,\n bias=False,\n act_cfg=None))\n self.l_dcn_offset.append(\n ConvModule(\n self.guiding_shift_channels,\n self.feat_adaption_conv_kernel**2 *\n self.guiding_shift_channels,\n 1,\n bias=False,\n act_cfg=None))\n self.b_dcn_offset.append(\n ConvModule(\n self.guiding_shift_channels,\n self.feat_adaption_conv_kernel**2 *\n self.guiding_shift_channels,\n 1,\n bias=False,\n act_cfg=None))\n self.r_dcn_offset.append(\n ConvModule(\n self.guiding_shift_channels,\n self.feat_adaption_conv_kernel**2 *\n self.guiding_shift_channels,\n 1,\n bias=False,\n act_cfg=None))\n\n self.t_centripetal_shift.append(\n self._make_layers(\n out_channels=self.centripetal_shift_channels,\n in_channels=self.in_channels))\n self.l_centripetal_shift.append(\n self._make_layers(\n out_channels=self.centripetal_shift_channels,\n in_channels=self.in_channels))\n self.b_centripetal_shift.append(\n self._make_layers(\n out_channels=self.centripetal_shift_channels,\n in_channels=self.in_channels))\n self.r_centripetal_shift.append(\n self._make_layers(\n out_channels=self.centripetal_shift_channels,\n in_channels=self.in_channels))", "def updateHeatTemp(self):\r\n currTemp = self.getTemp()\r\n newTemp = currTemp + self.HEATDELTAT\r\n self.setACTemp(newTemp)\r\n self.setHeatTemp(newTemp)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a parser object for Python version 2 or 3, depending on the parameter passed.
def get_python_parser(version, debug_parser): if version < 3.0: import uncompyle6.parsers.parse2 as parse2 p = parse2.Python2Parser(debug_parser) else: import uncompyle6.parsers.parse3 as parse3 p = parse3.Python3Parser(debug_parser) p.version = version return p
[ "def python_parser():\n\n if _has_setup_file():\n parse_fun = _parse_setup_file\n elif _has_pyproject_file():\n raise NotImplementedError(\"Not yet there, but will come!\")\n else:\n return None\n\n # Parse the corresponding file\n software_info = parse_fun()\n software_info.programming_language = ProgrammingLanguage.PYTHON.value\n\n return software_info", "def get_python_parser(\n version, debug_parser=PARSER_DEFAULT_DEBUG, compile_mode='exec',\n is_pypy = False):\n\n # FIXME: there has to be a better way...\n if version < 3.0:\n if version == 1.5:\n import uncompyle6.parsers.parse15 as parse15\n if compile_mode == 'exec':\n p = parse15.Python15Parser(debug_parser)\n else:\n p = parse15.Python15ParserSingle(debug_parser)\n elif version == 2.1:\n import uncompyle6.parsers.parse21 as parse21\n if compile_mode == 'exec':\n p = parse21.Python21Parser(debug_parser)\n else:\n p = parse21.Python21ParserSingle(debug_parser)\n elif version == 2.2:\n import uncompyle6.parsers.parse22 as parse22\n if compile_mode == 'exec':\n p = parse22.Python22Parser(debug_parser)\n else:\n p = parse22.Python22ParserSingle(debug_parser)\n elif version == 2.3:\n import uncompyle6.parsers.parse23 as parse23\n if compile_mode == 'exec':\n p = parse23.Python23Parser(debug_parser)\n else:\n p = parse23.Python23ParserSingle(debug_parser)\n elif version == 2.4:\n import uncompyle6.parsers.parse24 as parse24\n if compile_mode == 'exec':\n p = parse24.Python24Parser(debug_parser)\n else:\n p = parse24.Python24ParserSingle(debug_parser)\n elif version == 2.5:\n import uncompyle6.parsers.parse25 as parse25\n if compile_mode == 'exec':\n p = parse25.Python25Parser(debug_parser)\n else:\n p = parse25.Python25ParserSingle(debug_parser)\n elif version == 2.6:\n import uncompyle6.parsers.parse26 as parse26\n if compile_mode == 'exec':\n p = parse26.Python26Parser(debug_parser)\n else:\n p = parse26.Python26ParserSingle(debug_parser)\n elif version == 2.7:\n import uncompyle6.parsers.parse27 as parse27\n if compile_mode == 'exec':\n p = parse27.Python27Parser(debug_parser)\n else:\n p = parse27.Python27ParserSingle(debug_parser)\n else:\n import uncompyle6.parsers.parse2 as parse2\n if compile_mode == 'exec':\n p = parse2.Python2Parser(debug_parser)\n else:\n p = parse2.Python2ParserSingle(debug_parser)\n pass\n pass\n pass\n else:\n import uncompyle6.parsers.parse3 as parse3\n if version == 3.0:\n import uncompyle6.parsers.parse30 as parse30\n if compile_mode == 'exec':\n p = parse30.Python30Parser(debug_parser)\n else:\n p = parse30.Python30ParserSingle(debug_parser)\n elif version == 3.1:\n import uncompyle6.parsers.parse31 as parse31\n if compile_mode == 'exec':\n p = parse31.Python31Parser(debug_parser)\n else:\n p = parse31.Python31ParserSingle(debug_parser)\n elif version == 3.2:\n import uncompyle6.parsers.parse32 as parse32\n if compile_mode == 'exec':\n p = parse32.Python32Parser(debug_parser)\n else:\n p = parse32.Python32ParserSingle(debug_parser)\n elif version == 3.3:\n import uncompyle6.parsers.parse33 as parse33\n if compile_mode == 'exec':\n p = parse33.Python33Parser(debug_parser)\n else:\n p = parse33.Python33ParserSingle(debug_parser)\n elif version == 3.4:\n import uncompyle6.parsers.parse34 as parse34\n if compile_mode == 'exec':\n p = parse34.Python34Parser(debug_parser)\n else:\n p = parse34.Python34ParserSingle(debug_parser)\n elif version == 3.5:\n import uncompyle6.parsers.parse35 as parse35\n if compile_mode == 'exec':\n p = parse35.Python35Parser(debug_parser)\n else:\n p = 
parse35.Python35ParserSingle(debug_parser)\n elif version == 3.6:\n import uncompyle6.parsers.parse36 as parse36\n if compile_mode == 'exec':\n p = parse36.Python36Parser(debug_parser)\n else:\n p = parse36.Python36ParserSingle(debug_parser)\n else:\n if compile_mode == 'exec':\n p = parse3.Python3Parser(debug_parser)\n else:\n p = parse3.Python3ParserSingle(debug_parser)\n p.version = version\n # p.dumpGrammar() # debug\n return p", "def build_parser(self, **kwargs):\n version = \".\".join([str(v) for v in __version__])\n kwargs.setdefault('version', \"%prog {0}\".format(version))\n return OptionParser(**kwargs)", "def parser(self):\r\n if self._parser is None:\r\n self._parser = cache.load_module(self.path, self.name) \\\r\n or self._load_module()\r\n return self._parser", "def getParser(format):\n if format not in parser_index:\n emsg = \"no parser for '%s' format\" % format\n raise StructureFormatError(emsg)\n pmod = parser_index[format]['module']\n import_cmd = 'from matter.Parsers import %s as pm' % pmod\n exec(import_cmd)\n return pm.getParser()", "def version_parser(self):\n try:\n return self._version_parser\n except AttributeError:\n if not self._pip_is_set_up:\n self.setup_pip()\n from pip._vendor.packaging.version import Version\n self.__class__._version_parser = Version\n return self._version_parser", "def get_parser_version(self):\n return self._version", "def get_parser(self, force=False):\n if not self.__parser or force:\n self.__parser = self._create_parser()\n return self.__parser", "def test_parsing():\n v1 = SemanticVersion(\"1.0.0\")\n v2 = SemanticVersion(\"v1.0.0\")\n v3 = SemanticVersion(\"1.0.0+build\")\n v4 = SemanticVersion(\"v1.0.0+build\")\n v5 = SemanticVersion(\"1.0.0-beta\")\n v6 = SemanticVersion(\"v1.0.0-beta\")", "def _parse_to_version_info(version_str):\n v = parse_version(version_str)\n if v.pre is None and v.post is None and v.dev is None:\n return _VersionInfo(v.major, v.minor, v.micro, 'final', 0)\n elif v.dev is not None:\n return _VersionInfo(v.major, v.minor, v.micro, 'alpha', v.dev)\n elif v.pre is not None:\n releaselevel = {\n 'a': 'alpha',\n 'b': 'beta',\n 'rc': 'candidate'}.get(v.pre[0], 'alpha')\n return _VersionInfo(v.major, v.minor, v.micro, releaselevel, v.pre[1])\n else:\n # fallback for v.post: guess-next-dev scheme from setuptools_scm\n return _VersionInfo(v.major, v.minor, v.micro + 1, 'alpha', v.post)", "def get_parser(data):\n\n # If there are only two lines and the first begins with a '>', the\n # data is in FASTA format. 
Remove the first line to get the\n # sequence.\n if len(data.splitlines()) == 2:\n if data.startswith('>'):\n data = data.splitlines()[1]\n\n # Test for SnpSequence\n pattern = regex.compile(r'\\w|\\[.\\/.\\]')\n matched_chars = ''.join(regex.findall(pattern, data))\n if matched_chars == data:\n return SnpSequence(data)\n\n # Test for TwoAlleles\n lines = data.splitlines()\n if len(lines) == 4 and lines[0].startswith('>') and lines[2].startswith('>'):\n return TwoAlleles(data)\n\n # Test for Single Blast Sequence\n if '|' in data:\n return SingleBlastParser(data)\n\n # Format not recognized.\n raise StarpError(\"SNP Format Not Recognized\")", "def get_parser(self, format):\n try:\n parser = self._parsers[format]\n except KeyError:\n raise ValueError(f\"{format} is not a registered format.\")\n return parser", "def get_parser(typ: Type[P]) -> Callable[[str], P]:\n try:\n return cast(\n Callable[[str], P],\n {\n str: parse_str,\n bool: parse_bool,\n int: parse_int,\n tuple: parse_tuple,\n list: parse_list,\n set: parse_set,\n }[typ],\n )\n except KeyError:\n raise NotImplementedError(\"Unsupported setting type: %r\", typ)", "def create_parser_impl(self, common, handler: ParserHandler) -> BaseParser:", "def build_parser(self, parser: ArgumentParser):", "def parse_version(version):\n # version = <str>\n # vs = <str>\n # return <NoneType>|(*<int>)\n version = str(version).split('.')\n if len(version) != 3:\n return None\n if all(vs.isdigit() for vs in version):\n return tuple(int(vs) for vs in version)\n return None", "def find_correct_parser(repository_path):\n\n # Import parser plugins\n pyvcsshark.utils.find_plugins(os.path.dirname(os.path.realpath(__file__)))\n\n # Trying to find the correct parser by checking if it implements the\n # needed methods and calling the detect method\n correct_parser = None\n for sc in BaseParser.__subclasses__():\n parser = sc()\n if parser.detect(repository_path):\n return parser\n\n # Check if correct parser was found\n if correct_parser is None:\n raise Exception(\"No fitting parser found for repository located at %s\" % repository_path)\n else:\n return correct_parser", "def parse_version(self, version):\n opm_version = version.split('\"')[1].split('v', 1)[1]\n return VersionInfo.parse(version=opm_version)", "def get_parser_type(headers):\n\n server = headers[\"Server\"]\n\n if server is not None:\n if server == \"nginx\":\n return NginxParser" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the header (the first message chunk) for message length. Returns the length and the leftover message chunk.
def parse_header(self, header): header_separator = self.header_separator.encode() length, separator, message_chunk = header.partition(header_separator) try: return int(length), message_chunk except ValueError: return None, None
[ "def _get_chunk(self, header):\n # Read and check header of chunk\n header_chunk = self._file.read(HEADER_LENGTH)\n if header_chunk != header:\n raise RuntimeError(\"The LFP chunk header is invalid.\")\n\n data_pos = None\n sha1 = None\n\n # Read size\n size = struct.unpack(\">i\", self._file.read(SIZE_LENGTH))[0]\n if size > 0:\n # Read sha1\n sha1 = str(self._file.read(SHA1_LENGTH).decode(\"ASCII\"))\n # Skip fixed null chars\n self._file.read(PADDING_LENGTH)\n # Find start of data and skip data\n data_pos = self._file.tell()\n self._file.seek(size, 1)\n # Skip extra null chars\n ch = self._file.read(1)\n while ch == b\"\\0\":\n ch = self._file.read(1)\n self._file.seek(-1, 1)\n\n return data_pos, size, sha1", "def _get_chunk(self, header):\n # Read and check header of chunk\n header_chunk = self._file.read(HEADER_LENGTH)\n if header_chunk != header:\n raise RuntimeError(\"The LFR chunk header is invalid.\")\n\n data_pos = None\n sha1 = None\n\n # Read size\n size = struct.unpack(\">i\", self._file.read(SIZE_LENGTH))[0]\n if size > 0:\n # Read sha1\n sha1 = str(self._file.read(SHA1_LENGTH).decode(\"ASCII\"))\n # Skip fixed null chars\n self._file.read(PADDING_LENGTH)\n # Find start of data and skip data\n data_pos = self._file.tell()\n self._file.seek(size, 1)\n # Skip extra null chars\n ch = self._file.read(1)\n while ch == b\"\\0\":\n ch = self._file.read(1)\n self._file.seek(-1, 1)\n\n return data_pos, size, sha1", "def decode_msg_header(msg_header):\n # decode message header from bytes to string\n msg_header = msg_header.decode('utf-8')\n\n # extract 3 parts of header\n header_id_part = msg_header[0:Message.header_part_length]\n header_type_part = msg_header[Message.header_part_length:Message.header_part_length*2]\n header_length_part = msg_header[Message.header_part_length*2:Message.header_length]\n\n # parse client id from header\n msg_client_id = header_id_part.strip()\n # parse message type from header\n msg_type = header_type_part.strip()\n # parsing as int could fail, so catch error and return 0 msg length on error\n try:\n # parse message length from header\n msg_length = int(header_length_part)\n except ValueError:\n msg_length = 0\n\n # return 3 parts of message header\n return msg_client_id, msg_type, msg_length", "def receive_len_header(sock):\n buf = b''\n while not buf.endswith(b'\\n'):\n temp_buf = sock.recv(1)\n if len(temp_buf) == 0: # client disconnected\n return 0\n buf += temp_buf\n length = int(buf)\n logger.debug(\"message length should be {}\".format(length))\n return length", "def ParseHeader(self, data):\n header = struct.unpack('<BB',data);\n self.hdr_msgID = header[0]\n self.hdr_msgLen = header[1]\n return True", "def headerlen(msg, fixed_header_size=10):\n return int(msg[:fixed_header_size]) + fixed_header_size", "def _parse_headers(self, instr):\n top, rest = hdr_end.split(instr, 1)\n self.input_header_length = len(top)\n header_lines = top.splitlines()\n\n # chop off the top line\n while True: # TODO: limit?\n try:\n top_line = header_lines.pop(0)\n if top_line.strip() != \"\":\n break\n except IndexError: # empty\n return rest\n \n try:\n hdr_tuples, conn_tokens, transfer_codes, content_length \\\n = self._parse_fields(header_lines, True)\n except TypeError: # returned None because there was an error\n if not self.inspecting:\n return \"\" # throw away the rest\n \n # ignore content-length if transfer-encoding is present\n if transfer_codes != [] and content_length != None:\n content_length = None\n\n try:\n allows_body = self.input_start(top_line, 
hdr_tuples,\n conn_tokens, transfer_codes, content_length)\n except ValueError: # parsing error of some kind; abort.\n if not self.inspecting:\n return \"\" # throw away the rest\n allows_body = True\n\n self._input_state = HEADERS_DONE\n if not allows_body:\n self._input_delimit = NOBODY\n elif len(transfer_codes) > 0:\n if transfer_codes[-1] == 'chunked':\n self._input_delimit = CHUNKED\n self._input_body_left = -1 # flag that we don't know\n else:\n self._input_delimit = CLOSE\n elif content_length != None:\n self._input_delimit = COUNTED\n self._input_body_left = content_length\n else:\n self._input_delimit = CLOSE\n return rest", "def parse_header(self):\n\n chunk_id, chunk_len = self.next_chunk()\n instream = self.instream\n\n # check if it is a proper midi file\n if chunk_id != b\"MThd\":\n raise ParseError(\"Invalid MIDI file header. Chunk identifier must be 'MThd'.\")\n\n # Header values are at fixed locations, so no reason to be clever\n self.format = read_bew(instream.read(2))\n self.num_tracks = read_bew(instream.read(2))\n\n if self.format == 0 and self.num_tracks > 1:\n msg = (\n \"Invalid number of tracks (%i). Type 0 midi files may only \"\n \"contain a single track.\" % self.num_tracks\n )\n\n if self.strict:\n raise ParseError(msg)\n else:\n log.warning(msg)\n\n tick_div = instream.read(2)\n fps, resolution = tointseq(tick_div)\n\n if fps & 0x80:\n metrical = False\n else:\n metrical = True\n division = read_bew(tick_div)\n\n # Theoretically a header larger than 6 bytes can exist\n # but no one has seen one in the wild.\n # We will correctly ignore unknown data if present, though.\n if chunk_len > 6:\n log.warning(\"Invalid header size (%i). Skipping trailing header \" \"bytes\", chunk_len)\n instream.seek(chunk_len - 6, 1)\n\n # call the header event handler on the stream\n if metrical:\n self.dispatch(\n \"header\", self.format, self.num_tracks, metrical=True, tick_division=division\n )\n else:\n self.dispatch(\n \"header\",\n self.format,\n self.num_tracks,\n metrical=False,\n fps=fps,\n frame_resolution=resolution,\n )", "def _parse_header(self):\n log.debug('---In dcd.py, parse_header()')\n #process the first header block\n\n header1 = self._fo.read(92)\n header1_format=\\\n \"i---cccci---i---i---i---xxxxxxxxxxxxxxxxxxxxf---i---i---xxxxxxxxxxxxxxxxxxxxxxxxxxxxi---i---\"\n # |1 |5 |10 |15 |20 |25 |30 |35 |40 |45 |50 |55 |60 |65 |70 |75 |80 |85 |90\n #|header size=84 |nframes*tstep |tstep_size |charm_ver\n # |CORD=has coordinates |block_a |header_size=84\n # |nframes |block_b\n # |starting timestep\n # |timestep between coord sets \n header1_format = string.replace(header1_format, \"-\", \"\")\n header1 = struct.unpack(header1_format, header1)\n header1_size1, c1, c2, c3, c4, self._nframes, self._firsttstep, self._dcdfreq, self._ntsteps, self._tstep_size, self._block_a, self._block_b, self._charm_v, header1_size2 = header1 #unpack the tuple header1\n \n \n self._dcdtype = \"\".join((c1,c2,c3,c4)) #get the data-type field. I it should always be cord...\n if header1_size1 != 84 or header1_size2 !=84:\n log.error(\"error-- header size fields not correct (should be 84)\\n\")\n if self._block_a != 0 or self._block_b != 0:\n log.info(\"I've found a signal possibly indicating an extra record block\")\n log.info(\" I'll try to parse it, but it might fail. 
Also, I won't use\")\n log.info(\" any data from them.\")", "def consume_header_bytes(self, data):\n # We're done if there is no content.\n if not data or (len(data) == 0):\n return None\n\n full_header_len = 4\n\n assert len(self.header_contents) < full_header_len\n\n bytes_avail = len(data)\n bytes_needed = full_header_len - len(self.header_contents)\n header_bytes_avail = min(bytes_needed, bytes_avail)\n self.header_contents += data[:header_bytes_avail]\n if len(self.header_contents) == full_header_len:\n import struct\n # End of header.\n self.packet_bytes_remaining = struct.unpack(\n \"!I\", self.header_contents)[0]\n self.header_contents = b\"\"\n self.reading_header = False\n return data[header_bytes_avail:]\n\n # If we made it here, we've exhausted the data and\n # we're still parsing header content.\n return None", "def _parse_header(header):\n names, lengths = [], []\n for line in header:\n if line.startswith(\"@SQ\"):\n for field in line.split(\"\\t\"):\n if field.startswith(\"SN:\"):\n names.append(field[3:])\n elif field.startswith(\"LN:\"):\n lengths.append(int(field[3:]))\n return names, lengths", "def parse_header(header_bytes):\n size = header_bytes[:4]\n size = int.from_bytes(size, byteorder=\"big\")\n\n f_hash = header_bytes[4:20]\n\n f_name = header_bytes[20:275]\n try:\n f_name = f_name.decode()\n except:\n print(f_name)\n raise Exception(\n \"Content could not be retrieved from image. (File name could not be parsed.)\")\n\n return size, f_hash, f_name.strip()", "def extract_header(msg):\n\n raw_header = unpack_from(\">HHHHHH\", msg, 0)\n\n x_id = raw_header[0]\n flags = raw_header[1]\n\n qr = flags >> 15\n opcode = (flags & 0x7800) >> 11\n aa = (flags & 0x0400) >> 10\n tc = (flags & 0x0200) >> 9\n rd = (flags & 0x0100) >> 8\n ra = (flags & 0x0080) >> 7\n rcode = (flags & 0x000f)\n\n qdcount = raw_header[2]\n ancount = raw_header[3]\n nscount = raw_header[4]\n arcount = raw_header[5]\n\n return Header(x_id, qr, opcode, aa, tc, rd, ra, rcode, qdcount, ancount, nscount, arcount)", "def parse_header(self, header):\n # Should be 8 words long\n head_int = np.fromstring(header, dtype=np.uint32) \n\n hdict = self.header_dict\n\n t_ind = hdict['time']\n frame_ind = hdict['frame']\n stat_ind = hdict['station']\n link_ind = hdict['link']\n slot_ind = hdict['slot']\n eud2_ind = hdict['eud2']\n\n station = self.bit_manip(head_int[stat_ind[0]], stat_ind[1], stat_ind[2])\n link = self.bit_manip(head_int[link_ind[0]], link_ind[1], link_ind[2])\n slot = self.bit_manip(head_int[slot_ind[0]], slot_ind[1], slot_ind[2])\n frame = self.bit_manip(head_int[frame_ind[0]], frame_ind[1], frame_ind[2])\n time = self.bit_manip(head_int[t_ind[0]], t_ind[1], t_ind[2])\n count = self.bit_manip(head_int[eud2_ind[0]], eud2_ind[1], eud2_ind[2])\n\n return station, link, slot, frame, time, count", "def header_read(buf, begin=0):\n buf.seek(begin) # starting at the given offset\n stringvar = str(buf.read(56)) # reading header\n listvar = stringvar.split() # spliting header\n listvar.pop(0) # first element of header is \"FCS\" and it's useless\n while len(listvar) > 4: # listvar needs only 4 elements, and elements are removed from\n listvar.pop() # the tail until list is 4 elements long\n # offsets are converted into string\n listvar = [int(x) for x in listvar]\n next_offset = listvar[-1]+1 # next offset is calculated\n text_begin = listvar[0]\n # the difference of BEGIN and END gives size-1\n text_size = listvar[1]-listvar[0]\n data_begin = listvar[2]\n # the difference of BEGIN and END gives size-1\n 
data_size = listvar[3]-listvar[2]\n listvar = [text_begin, text_size, data_begin, data_size]\n return(next_offset, listvar)", "def get_headerlength(self):\n return struct.calcsize(self.header_format)", "def read_ros_handshake_header(sock, b, buff_size):\n header_str = None\n while not header_str:\n d = sock.recv(buff_size)\n if not d:\n raise ROSHandshakeException('connection from sender terminated before handshake header received. %s bytes were received. Please check sender for additional details.' % b.tell())\n b.write(d)\n btell = b.tell()\n if btell > 4:\n # most likely we will get the full header in the first recv, so\n # not worth tiny optimizations possible here\n bval = b.getvalue()\n (size,) = struct.unpack('<I', bval[0:4])\n if btell - 4 >= size:\n header_str = bval\n\n # memmove the remnants of the buffer back to the start\n leftovers = bval[size+4:]\n b.truncate(len(leftovers))\n b.seek(0)\n b.write(leftovers)\n\n # process the header\n return decode_ros_handshake_header(bval)", "def parse_lead_headers(self):\n # parse General Header blocks\n self.header_data[\"General Header\"] = OrderedDict([])\n for header_block in self.schema[\"General Header\"]:\n self.header_data[\"General Header\"][header_block] = self._read_header_block(\n self.schema[\"General Header\"][header_block]\n )\n self.cursor_position += self.schema[\"General Header\"][header_block][\n \"block_length_in_bytes\"\n ]\n # parse Channel Set Descriptor blocks\n self.header_data[\"Channel Set Descriptor\"] = OrderedDict([])\n for n in range(\n self.header_data[\"General Header\"][\"General Header Block #1\"][\n \"channel_sets_per_scan_type\"\n ][\"value\"]\n ):\n self.header_data[\"Channel Set Descriptor\"][\n \"Channel Set Descriptor Block #%d\" % (n + 1)\n ] = self._read_header_block(\n self.schema[\"Channel Set Descriptor\"][\"Main Block\"]\n )\n self.cursor_position += self.schema[\"Channel Set Descriptor\"][\"Main Block\"][\n \"block_length_in_bytes\"\n ]\n # parse the first three Extended Header blocks\n self.header_data[\"Extended Header\"] = OrderedDict([])\n for n in range(3):\n header_block = \"32-byte Extended Header Block #%d\" % (n + 1)\n self.header_data[\"Extended Header\"][header_block] = self._read_header_block(\n self.schema[\"Extended Header\"][header_block]\n )\n self.cursor_position += self.schema[\"Extended Header\"][header_block][\n \"block_length_in_bytes\"\n ]\n self.number_of_trace_blocks = (\n self.header_data[\"Extended Header\"][\"32-byte Extended Header Block #2\"][\n \"number_of_records_in_file\"\n ][\"value\"]\n * self.header_data[\"General Header\"][\"General Header Block #1\"][\n \"channel_sets_per_scan_type\"\n ][\"value\"]\n )\n # parse the next n 32-byte Extended Header blocks as necessary\n for n in range(\n 3,\n self.header_data[\"General Header\"][\"General Header Block #2\"][\n \"extended_header_blocks\"\n ][\"value\"],\n ):\n header_block = \"32-byte Extended Header Auxiliary Block\"\n block_label = \"32-byte Extended Header Block #%d\" % (n + 1)\n self.header_data[\"Extended Header\"][block_label] = self._read_header_block(\n self.schema[\"Extended Header\"][header_block]\n )\n self.cursor_position += self.schema[\"Extended Header\"][header_block][\n \"block_length_in_bytes\"\n ]\n # parse the general External Header Block\n self.header_data[\"External Header\"] = OrderedDict([])\n self.header_data[\"External Header\"][\n \"External Header Block #1\"\n ] = self._read_header_block(\n self.schema[\"External Header\"][\"External Header Block #1\"]\n )\n self.cursor_position += 
self.schema[\"External Header\"][\n \"External Header Block #1\"\n ][\"block_length_in_bytes\"]\n # parse the next n 32-byte External Header blocks\n if (\n self.header_data[\"General Header\"][\"General Header Block #1\"][\n \"number_of_32_byte_external_header_blocks\"\n ][\"value\"]\n == \"ff\"\n ):\n number_of_32_byte_external_header_blocks = self.header_data[\n \"General Header\"\n ][\"General Header Block #2\"][\"external_header_blocks\"][\"value\"]\n else:\n number_of_32_byte_external_header_blocks = self.header_data[\n \"General Header\"\n ][\"General Header Block #1\"][\"number_of_32_byte_external_header_blocks\"][\n \"value\"\n ]\n for n in range(number_of_32_byte_external_header_blocks - 1):\n self.header_data[\"External Header\"][\n \"32-byte External Header Block #%d\" % (n + 1)\n ] = self._read_header_block(\n self.schema[\"External Header\"][\n \"32-byte External Header Auxiliary Block\"\n ]\n )\n self.cursor_position += self.schema[\"External Header\"][\n \"32-byte External Header Auxiliary Block\"\n ][\"block_length_in_bytes\"]", "def _parse_header(self):\n header = int_from_lbytes(self._reader.read(4))\n if header != self._HEADER:\n raise StashFileParseError(f'Invalid header id: 0x{header:08X}')\n self.version = int_from_lbytes(self._reader.read(2))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a socket, binds an address to it, and enables listen mode.
def start(self, *args, **kwargs): self.socket = Socket(*args, **kwargs) self.socket.bind(self.address) self.socket.listen(1)
[ "def __start_listen_socket(self):\n self.__listen_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.__listen_socket.bind((self.host, self.port))", "def create_and_bind_socket(self):\r\n try:\r\n self.serverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n self.serverSocket.bind((self.host, self.serverPort))\r\n except socket.error as e:\r\n print(f\"Error creating socket: {e}\")", "def create(self):\r\n\t\tif self.type == 'i': # Internet socket\r\n\t\t\tself.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\t\tself.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\t\t\tself.sock.bind((self.host, self.port))\r\n\t\t\tutil.logMessage('Bound to TCP socket on port %d ' % self.port)\r\n\t\telse:\r\n\t\t\tif os.path.exists(self.file):\r\n\t\t\t\t# if socket already exists, remove it. This prevents errors when the socket is corrupt after a crash.\r\n\t\t\t\tos.remove(self.file)\r\n\t\t\tself.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\r\n\t\t\tself.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\t\t\tself.sock.bind(self.file) # Bind BEERSOCKET\r\n\t\t\t# set all permissions for socket\r\n\t\t\tos.chmod(self.file, 0777)", "def bind_and_listen(self):\n \n host_socket = create_socket(AF_INET, SOCK_STREAM)\n try:\n host_socket.bind((self.host, self.port))\n host_socket.listen(10)\n self.host_socket = host_socket\n return host_socket\n except:\n raise Exception(\"Port binding and listening cannot be performed\")", "def init_socket(host, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((host, port))\n return sock", "def start_server():\n server_port = 8800\n incoming_addr = \"\"\n address = (incoming_addr, server_port)\n\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.SO_REUSEADDR)\n server_socket.bind(address)\n server_socket.listen(5)\n\n print(\"\\nServer Listening\\n\")\n return server_socket", "def create_socket():\r\n return socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "def socket_create():\r\n try:\r\n global host\r\n global port\r\n global s\r\n host = '10.0.0.19'\r\n port = 9999\r\n s = socket.socket()\r\n except socket.error as msg:\r\n print('Socket creation error: ' + str(msg))", "def _open_socket(port):\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind((\"0.0.0.0\", port))\n s.setblocking(False)\n return s", "def start(self):\n self._sock.bind((SERVER_IP_ADDRESS, SERVER_PORT))\n self._sock.listen(1)\n print(f\"Server has been binded to the IP: {SERVER_IP_ADDRESS} port: {SERVER_PORT}\")\n self.server_main_loop()", "def listen(self):\n\t\ttry:\n\t\t\tself.s_sock.getsockname() # check if socket already bound\n\t\texcept OSError:\n\t\t\tself.s_sock.bind(('', 0))\n\t\tself.s_sock.listen(3)", "def socket_open(self):\n log.info(\"Creating UDP socket %s:%d for communication with the client\",\n self.receiverIP, self.receiverPort)\n\n try:\n self.receiverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.receiverSocket.bind((self.receiverIP, self.receiverPort))\n except Exception as e:\n log.error(\"Could not create UDP socket for communication with the client!\")\n log.debug(e)\n traceback.print_exc()", "def create_socket (bindport): \n server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0) \n server.bind((HOST, bindport))\n \n print \"Listening on localhost, port {0}\".format(bindport)\n \n client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n return server, client", "def start(self):\n 
self.__start_listen_socket()\n self.__wait_for_new_connections()", "def _tcp_listener(address, backlog=50, reuse_addr=None):\n sock = _socket.socket()\n if reuse_addr is not None:\n sock.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, reuse_addr)\n try:\n sock.bind(address)\n except _socket.error, ex:\n strerror = getattr(ex, 'strerror', None)\n if strerror is not None:\n ex.strerror = strerror + ': ' + repr(address)\n raise\n sock.listen(backlog)\n sock.setblocking(0)\n return sock", "def create_server(address, *, family=AF_INET, backlog=None, reuse_port=False,\n dualstack_ipv6=False): # pragma: no cover\n if reuse_port and not hasattr(_socket, \"SO_REUSEPORT\"):\n raise ValueError(\"SO_REUSEPORT not supported on this platform\")\n if dualstack_ipv6:\n if not has_dualstack_ipv6():\n raise ValueError(\"dualstack_ipv6 not supported on this platform\")\n if family != AF_INET6:\n raise ValueError(\"dualstack_ipv6 requires AF_INET6 family\")\n sock = socket(family, SOCK_STREAM)\n try:\n # Note about Windows. We don't set SO_REUSEADDR because:\n # 1) It's unnecessary: bind() will succeed even in case of a\n # previous closed socket on the same address and still in\n # TIME_WAIT state.\n # 2) If set, another socket is free to bind() on the same\n # address, effectively preventing this one from accepting\n # connections. Also, it may set the process in a state where\n # it'll no longer respond to any signals or graceful kills.\n # See: msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx\n if os.name not in ('nt', 'cygwin') and \\\n hasattr(_socket, 'SO_REUSEADDR'):\n try:\n sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n except error:\n # Fail later on bind(), for platforms which may not\n # support this option.\n pass\n if reuse_port:\n sock.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)\n if has_ipv6 and family == AF_INET6:\n if dualstack_ipv6:\n sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0)\n elif hasattr(_socket, \"IPV6_V6ONLY\") and \\\n hasattr(_socket, \"IPPROTO_IPV6\"):\n sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1)\n try:\n sock.bind(address)\n except error as err:\n msg = '%s (while attempting to bind on address %r)' % \\\n (err.strerror, address)\n raise error(err.errno, msg) from None\n if backlog is None:\n sock.listen()\n else:\n sock.listen(backlog)\n return sock\n except error:\n sock.close()\n raise", "def _setup_tcp(self, bind, basedir=None):\n obind = repr(bind(0))\n host, port, flags = bind(u'ip'), bind(u'port'), 0\n port = int(port)\n if not host or host == u'*':\n host, flags = None, _socket.AI_PASSIVE\n elif host.startswith(u'[') and host.endswith(u']'):\n host = host[1:-1].encode('ascii') # IPv6 notation [xxx:xxx:xxx]\n else:\n host = host.encode('idna')\n try:\n adi = _socket.getaddrinfo(host, port,\n _socket.AF_UNSPEC, _socket.SOCK_STREAM, 0, flags)\n for family, stype, proto, _, bind in adi:\n if not _socket.has_ipv6 and family == AF_INET6:\n continue\n\n try:\n socket = _socket.socket(family, stype, proto)\n except _socket.error, e:\n if e[0] == _errno.EAFNOSUPPORT and host is None and \\\n family == AF_INET6:\n # grmpf.\n # There are systems (e.g. linux) which emit\n # IPv6 on ANY, even if they don't support it.\n # Or is it the libc? 
Who cares anyway...\n continue\n raise\n socket.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1)\n self._sockets.append(\n InetSocket(socket, obind, host, family, bind)\n )\n except _socket.error:\n e = _sys.exc_info()\n try:\n raise ConfigurationError, e[1], e[2]\n finally:\n del e", "def listen_tcp_socket(self):\n host = self.host # Listen on localhost\n port = self.port # Listen on port 5000\n\n # Create socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.logger.info('%s Socket created', time_now())\n\n # Connect to socket\n try:\n sock.bind((host, port))\n except socket.error:\n self.logger.error('%s Binding error', time_now())\n self.logger.error(\n '%s Socket in TIME-WAIT state, wait until socket is closed',\n time_now())\n exit(1)\n else:\n self.logger.info('%s Binding successful', time_now())\n\n # Listen to the socket\n try:\n sock.listen(1)\n except socket.error:\n self.logger.error(\n '%s Cannot listen on port %d', time_now(), port)\n else:\n self.logger.info(\n '%s Socket now listening on port %d', time_now(), port)\n return sock", "def start_listening(self, socket_id):\n self._cleanup_socket(socket_id)\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.bind(self._socket_path(socket_id))\n sock.listen(0)\n return sock" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compiles and executes the received code and returns the output.
def code_output(self, code): try: compiled = compile(code, '<inspector-server>', 'single') except (SyntaxError, OverflowError, ValueError): return traceback.format_exc(0) # only first entry in the stack # execute the compiled message and capture the output with self.output() as output: try: exec(compiled, self.namespace, self.namespace) except: return traceback.format_exc() return output.getvalue()
[ "def pythonCode(self):\n return compile(self.pythonString(), \"\", \"exec\")", "def code(self):\n if self._code is None:\n if self.stale:\n pyfile = self.py_file\n if self.strip_dest_dir and \\\n self.py_file.startswith(self.strip_dest_dir):\n pyfile = os.path.normpath(\n self.py_file[len(self.strip_dest_dir):])\n try:\n self._code = py_compile(self.python, pyfile)\n except Exception:\n raise_template_error(filename=self.kid_file,\n encoding=self.encoding)\n else:\n self._code = marshal.load(self._pyc_fp)\n return self._code", "def run(code, input_data = None, do_print = True):\n\t# Set up the scope\n\tglobs = vivarium.scope.global_scope()\n\toutput = vivarium.pipes.Output(scope = globs)\n\tif input_data is not None:\n\t\tvivarium.pipes.Input(input_data, globs)\n\tprogram_scpe = vivarium.scope.Scope(globs)\n\tglobs.lockdown()\n\t# Compile and run\n\tbytecode = vivarium.transform.transform(code)\n\tbytecode.evaluate(program_scpe)\n\t# Return output\n\treturn output.get_data()", "def eval( self ):\n return self.doEval( self.code, self.ctx )", "def execute_code(self, code):\n ans = ''\n logging.debug('running', code)\n s0 = sys.stdout\n se = sys.stderr\n sys.stdout = StringIO.StringIO()\n sys.stderr = StringIO.StringIO()\n try:\n exec code in namespace\n except:\n ans += traceback.format_exc()\n finally:\n sys.stdout.seek(0)\n sys.stderr.seek(0)\n ans += str(sys.stdout.read()) + str(sys.stderr.read())\n sys.stdout = s0\n sys.stderr = se\n logging.debug('output', ans)\n return ans", "def eval( self, code, ctx ):\n return self.doEval( code, ctx )", "def code_input():\n code = ''\n compiled = None\n while not compiled:\n prompt = PROMPT_INIT if not code else PROMPT_MORE\n code += input(prompt) # add a line to the code string\n try:\n # returns None if the code is valid but not finished\n compiled = compile(code, '<inspector-shell>', 'single')\n except (SyntaxError, OverflowError, ValueError):\n traceback.print_exc(0) # only first entry in the stack\n code = ''\n else:\n code += '\\n'\n return code", "def compileBytecode(self, code):\n btc = dis.get_instructions(code)\n \n print(dis.code_info(code))\n dis.dis(code)\n \n level_name = code.co_name\n \n env = Env(code)\n\n # if we are not at the toplevel we setup the function prologue\n if level_name != \"<module>\":\n csts = env.getConsts()\n \n # Emit const strings before function definition\n for i, v in enumerate(csts):\n if v.type == ConstVal.Addr:\n self.emitter.emitString(env.getStringRef(i), v.value)\n\n self.emitter.emitLabel(level_name)\n self.emitter.emitPrologue(code.co_nlocals)\n \n # Copy args into slot\n for i in range(code.co_argcount):\n self.emitter.emitStoreSlot(REGS[i], i)\n\n for ins in btc:\n if ins.opname == \"MAKE_FUNCTION\":\n name = env.popEvent().value\n code = env.popEvent().value\n\n if not isinstance(code, type(self.compileBytecode.__code__)):\n raise Exception(\"MAKE_FUNCTION instruction with no code object\")\n\n self.compileBytecode(code)\n if ins.opname == \"CALL_FUNCTION\":\n arg_count = ins.argval\n\n if arg_count >= len(REGS)-1:\n raise Exception(\"Functions must have at most {} arguments\".format(len(REGS)-1))\n \n # TODO: Emit movs of variables into regs\n env.setupArgs(arg_count, self.emitter)\n\n func = env.popEvent().value\n self.emitter.emitRaw(\"call #{}\".format(func))\n \n env.pushEvent(StackEvent(StackEvent.MAKE_FUNCTION_DUMMY, 0, 0))\n\n if ins.opname == \"LOAD_FAST\":\n env.pushEvent(StackEvent(StackEvent.LOAD_FAST, ins.argval, ins.arg))\n if ins.opname == \"LOAD_CONST\":\n 
env.pushEvent(StackEvent(StackEvent.LOAD_CONST, ins.argval, ins.arg))\n if ins.opname == \"LOAD_GLOBAL\":\n env.pushEvent(StackEvent(StackEvent.LOAD_GLOBAL, ins.argval, ins.arg))\n if ins.opname == \"STORE_FAST\":\n evt = env.popEvent()\n \n # We returned from a function\n if evt.type == StackEvent.MAKE_FUNCTION_DUMMY:\n self.emitter.emitStoreSlot(REGS[0], evt.index)\n if evt.type == StackEvent.LOAD_CONST:\n cstval = env.getConsts()[evt.index]\n\n if cstval.type == ConstVal.Imm:\n self.emitter.emitMovImm(REGS[0], cstval.value)\n if cstval.type == ConstVal.Addr:\n self.emitter.emitMovRef(REGS[0], cstval.value)\n\n self.emitter.emitStoreSlot(REGS[0], ins.arg)\n\n if ins.opname == \"RETURN_VALUE\":\n evt = env.popEvent()\n\n if evt.type == StackEvent.LOAD_FAST:\n self.emitter.emitLoadSlot(REGS[0], evt.index)\n if evt.type == StackEvent.LOAD_CONST:\n cstval = env.getConsts()[evt.index]\n\n if cstval.type == ConstVal.Imm:\n self.emitter.emitMovImm(REGS[0], cstval.value)\n if cstval.type == ConstVal.Addr:\n self.emitter.emitMovAddr(REGS[0], env.getStringRef(evt.index))\n\n if ins.opname.startswith(\"BINARY\") or ins.opname.startswith(\"INPLACE\"):\n env.setupArgs(2, self.emitter)\n\n if ins.opname == \"BINARY_ADD\" or ins.opname == \"INPLACE_ADD\":\n self.emitter.emitRaw(\"add $A $B\")\n if ins.opname == \"BINARY_MULTIPLY\" or ins.opname == \"INPLACE_MULTIPLY\":\n self.emitter.emitRaw(\"mul $A $B\")\n if ins.opname == \"BINARY_SUBSTRACT\" or ins.opname == \"INPLACE_SUBSTRACT\":\n self.emitter.emitRaw(\"sub $A $B\")\n if ins.opname == \"BINARY_LSHIFT\":\n self.emitter.emitRaw(\"shl $A $B\")\n if ins.opname == \"BINARY_RSHIFT\":\n self.emitter.emitRaw(\"shr $A $B\")\n if ins.opname == \"BINARY_AND\":\n self.emitter.emitRaw(\"and $A $B\")\n if ins.opname == \"BINARY_XOR\":\n self.emitter.emitRaw(\"xor $A $B\")\n if ins.opname == \"BINARY_OR\":\n self.emitter.emitRaw(\"or $A $B\")\n\n env.pushEvent(StackEvent(StackEvent.MAKE_FUNCTION_DUMMY, 0, 0))\n if ins.opname == \"SETUP_LOOP\":\n self.emitter.emitLabel(env.addLoop())\n if ins.opname == \"JUMP_ABSOLUTE\":\n self.emitter.emitRaw(\"jmp #{}\".format(env.getLoopTop()))\n if ins.opname == \"POP_BLOCK\":\n self.emitter.emitRaw(env.popLoop())\n\n if ins.opname == \"COMPARE_OP\":\n env.setupArgs(2, self.emitter)\n env.addComparison(ins.argval)\n self.emitter.emitRaw(\"cmp $A $B\")\n env.pushEvent(StackEvent(StackEvent.MAKE_FUNCTION_DUMMY, 0, 0))\n \n if ins.opname == \"POP_JUMP_IF_TRUE\":\n cmp = env.popComparison()\n dest = env.getLoopTop() + \"_end\"\n\n if cmp == '>':\n self.emitter.emitRaw(\"jbe #{}\".format(dest))\n if cmp == '<':\n self.emitter.emitRaw(\"jle #{}\".format(dest))\n if cmp == \"==\":\n self.emitter.emitRaw(\"je #{}\".format(dest))\n if cmp == \"!=\":\n self.emitter.emitRaw(\"jne #{}\".format(dest))\n\n if ins.opname == \"POP_JUMP_IF_FALSE\":\n cmp = env.popComparison()\n dest = env.getLoopTop() + \"_end\"\n\n if cmp == '>':\n self.emitter.emitRaw(\"jle #{}\".format(dest))\n if cmp == '<':\n self.emitter.emitRaw(\"jbe #{}\".format(dest))\n if cmp == \"==\":\n self.emitter.emitRaw(\"jne #{}\".format(dest))\n if cmp == \"!=\":\n self.emitter.emitRaw(\"je #{}\".format(dest))\n\n\n if level_name != \"<module>\":\n self.emitter.emitEpilogue()", "def compile(self):\n\n code = []\n \n self.logger.debug(\"compiling graph...\")\n for block in self._startBlocks:\n code.extend(block.evaluate())\n\n return \"\\n\".join(code)", "def compile(self, ast):\n self.output.file_header()\n constant_mapping = ConstantCompiler(self.output).compile(ast)\n 
CodeCompiler(self.output, constant_mapping).compile(ast)\n self.output.file_footer()", "def exec_(_code_, _globs_=..., _locs_=...):\n ...", "def execute(code, param=None, context=None, filename='<string>'):\n\n status = 'ok'\n\n # Import context\n if context is not None:\n l = locals()\n for k, v in context.items():\n l[k] = v\n\n if code is not None:\n try:\n exec(compile(code, filename, 'exec'))\n except StopScript as ss:\n if ss.message != \"\":\n status = ss.message\n pass\n\n return status, param", "def parse_exec(self, code, lineno=1):\n\n if isinstance(code, unicode):\n code = renpy.python.escape_unicode(code)\n\n try:\n rv = ast.parse(code, 'exec')\n except SyntaxError as e:\n\n raise renpy.parser.ParseError(\n filename,\n lineno + e[1][1] - 1,\n \"Syntax error while parsing python code.\",\n e[1][3],\n e[1][2])\n\n increment_lineno(rv, lineno-1)\n\n return rv.body", "def run_program(self, code, inputs=[]):\n machine = IntcodeVM(code)\n outputs = machine.run(inputs)\n return machine, outputs", "def tree(self):\r\n\r\n try:\r\n code = self.code.encode('utf8') + b'\\n'\r\n return compile(code, self.filename, 'exec', ast.PyCF_ONLY_AST)\r\n except SyntaxError:\r\n return None", "def compile(source, filename='<string>', encoding=None, entity_map=None):\n # XXX all kinds of work to do here catching syntax errors and\n # adjusting line numbers...\n py = kid.codewriter.parse(source, encoding, filename, entity_map)\n return py_compile(py, filename)", "def compile_script(bogscript):\n\n ## Step 1: Convert source script into tokens\n tokens = lexer(bogscript)\n #print tokens\n\n ## Step 2: Convert tokens into Python Source\n pycode = pygen(tokens)\n #print '\\n' + pycode + '\\n'\n\n ## Step 3: Compile Python Source into bytecode\n bytecode = bytegen(pycode)\n\n return bytecode", "def compile_javascript_code(request):\n\n untrustedCode = request.GET.get('untrustedCode')\n\n js = \"exports.func = \" + untrustedCode\n\n try:\n with NodeVM.code(js) as module:\n result = module.call_member(\"func\") # Change to async | does not work in deployment\n \n data = {'output': result}\n except:\n data = {'output': \"Error with the input code. Take another look at your code.\"}\n return JsonResponse(data)", "def generate_code(self):\n return Codegen()(self.codegens.values()).buf" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shuts down the server (closes the server socket) and deletes the namespace.
def shutdown(self): if self.running: self.running = False self.socket.close() del self.namespace status(STATUS_SHUTDOWN)
[ "def shutdown(self):\n if self.is_running:\n self.server.server_close()\n self.server.socket.close()\n self.is_running = False", "def stop(self):\n self.unregister_all_servers()\n self.zeroconf.close()", "def shutdown(self):\n self._server.shutdown()", "def shutdown():\n logging.info('Shutting down server')\n app.stop()\n AsyncIOMainLoop().stop()\n server.stop()\n _ioloop.stop()", "def close_ws_server():\n server_address = askstring(\"Websocket\", \"Which server connection should be closed?\")\n Singleton.getInstance().remove_ws(server_address)", "def cleanupServer(self):\n if self._serverProc is not None:\n self._client(\"killServer\")\n if isinstance(self._serverProc, subprocess.Popen):\n self._serverProc.communicate()\n self._devnull.close()\n self.callCmd(\n [\"rsh\", \"-l\", \"root\", self._remoteHost,\n \"rm -rf /var/tmp/bcpython /var/lib/python2.7\"])\n self._serverProc = None", "def stop(self):\n try:\n self.socket.close()\n self.context.destroy()\n except zmq.ZMQError:\n print('Server.stop!\\n')", "def shutdown(self):\r\n\t\tself.server.server_close()\r\n\t\tprint(\"[+] XMLRPC server stopped\")\r\n\t\tsetattr(self.server, \"shutdown\", True)\r\n\t\treturn 0", "def shutdown(self):\n try:\n Server.socket.shutdown(socket.SHUT_RDWR)\n print(\"Spojeni se serverem bylo ukonceno\\n\")\n except:\n print(\"Nepodarilo se ukoncit spojeni se serverem\\n\")", "def collectd_shutdown(self):\n self.server.stop()", "def cleanup_netns(self):\n self.log_info(\"Cleaning up network namespace for %s\" % self.device_path)\n self.netns_killall()\n self.delete_netns()", "def shutdown(self):\n\t\tbody = dict()\n\t\tbody[\"stop_server\"] = {\n\t\t\t\"stop_type\" : \"soft\",\n \t\t\t\"timeout\" : \"30\"\n\t\t}\n\t\tself.cloud_manager.post_request(\"/server/\" + self.uuid + \"/stop\" , body)\n\t\tobject.__setattr__(self, \"state\", \"maintenance\") # post_request already handles any errors from API", "def shutdown (self):\n # Clear DomainManagers config if needed\n if CONFIG.reset_domains_after_shutdown():\n self.domains.reset_initiated_mgrs()\n elif CONFIG.clear_domains_after_shutdown():\n self.domains.clear_initiated_mgrs()\n # Stop initiated DomainManagers\n self.domains.stop_initiated_mgrs()", "def close_uds(self):\n self.sock.close()", "async def _shutdown(self):\n logger.debug('Shutdown stop server')\n server.stop()\n\n # in real app could be extended to request endpoint for check active requests count\n logger.debug(f'Shutdown waiting {self.SHUTDOWN_WAIT_TIME} seconds to process existing requests')\n await gen.sleep(self.SHUTDOWN_WAIT_TIME)\n ioloop.IOLoop.current().stop()\n\n logger.debug('Shutdown close pool connection')\n self.db.pool.close()", "def terminate(self):\n try:\n sel.unregister(self.socket)\n except Exception:\n pass\n if self.registered():\n self.session = None\n self.auto.join()\n try:\n self.socket.close()\n except socket.error:\n pass\n except Exception:\n self.exception('unhandled exception')", "def tearDownClass(cls):\n cls.http_server.shutdown()", "def at_server_shutdown(self):\r\n pass", "def shutdown(self):\n self.commands[master_setup.subcommand].shutdown()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Opens a socket for communicating with the importer from the shell side. Runs a shell after connection is established.
def inspector_shell(host, port, timeout, passphrase): sock = Socket(timeout=timeout, passphrase=passphrase) try: sock.connect((host, port)) # get the file name that runs the server importer_file = sock.message('code', '__importer_file__')['data'] importer_file = importer_file.strip().strip("'") # display some information about the connection print("<Inspector @ %s:%d (%s)>" % (host, port, importer_file)) # enable shell history and tab completion if readline is available if readline: shell_history() tab_completion(sock) while True: # get input from the user code = code_input() if code.strip() == 'exit': break # send the input and receive the output output = sock.message('code', code) # print if the input has executed if output['data']: sys.stdout.write(str(output['data'])) except (EOFError, KeyboardInterrupt): print('') except (socket.error, socket.timeout) as error: print(error) finally: sock.close()
[ "def __init__(self, mojo_shell_path, shell_args=None):\n self._tempdir = mkdtemp(prefix='background_shell_')\n self._socket_path = os.path.join(self._tempdir, 'socket')\n self._output_file = TemporaryFile()\n\n shell_command = [mojo_shell_path,\n '--enable-external-applications=' + self._socket_path]\n if shell_args:\n shell_command += shell_args\n logging.getLogger().debug(shell_command)\n\n self._shell = subprocess.Popen(shell_command, stdout=self._output_file,\n stderr=subprocess.STDOUT)\n _poll_for_condition(lambda: os.path.exists(self._socket_path),\n desc=\"External app socket creation.\")", "def openShell(self, transport):\n protocol = EchoProtocol()\n # Connect the new protocol to the transport and the transport\n # to the new protocol so they can communicate in both directions.\n protocol.makeConnection(transport)\n transport.makeConnection(session.wrapProtocol(protocol))", "def open(self,host='',port=1314,nostart=False):\n\t\n from subprocess import STDOUT, Popen\n\t\n\t sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\t try:\n\t \tsock.connect((host,port))\n\t except socket.error:\n\t \tif nostart:\n\t \t\traise socket.error\n\t \telse:\n self.festival_pid = Popen([\"festival\", \"--server\"]).pid \n\t\t \tatexit.register(self._kill_server)\n\t\t \tfor t in xrange(20):\n\t\t \t\ttry:\n\t\t \t\t\ttime.sleep(.25)\n\t\t \t\t\tsock.connect((host,port))\n\t\t \t\texcept socket.error:\n\t\t \t\t\tpass\n\t\t \t\telse:\n\t\t \t\t\tbreak\n\t\t \telse:\n\t\t \t\traise socket.error\n\t\t\n\t self.sock = sock\n return sock", "def importer_server():\n # this behaves strangely for me, so I'm checking the whole stack to make it work for everybody\n importer_globals = None\n for frame in inspect.stack():\n if frame[0].f_globals['__name__'] != __name__:\n importer_globals = frame[0].f_globals\n break\n if not importer_globals:\n print('From where are you importing?')\n return\n # save file variable for inspector's shell to display\n importer_file = importer_globals.get('__file__', 'Python shell')\n importer_globals['__importer_file__'] = importer_file\n # server variables\n host = importer_globals.get('INSPECTOR_HOST', HOST)\n port = importer_globals.get('INSPECTOR_PORT', PORT)\n timeout = importer_globals.get('INSPECTOR_TIMEOUT', TIMEOUT_SERVER)\n passphrase = importer_globals.get('INSPECTOR_PASSPHRASE', PASSPHRASE)\n # does it want to be a shell instead?\n if importer_globals.get('INSPECTOR_SHELL'):\n timeout = importer_globals.get('INSPECTOR_TIMEOUT', TIMEOUT_CLIENT)\n inspector_shell(host, port, timeout, passphrase)\n return\n elif importer_globals.get('INSPECTOR_DISABLE'):\n return\n # server initialization\n server = ImporterServer((host, port), importer_globals)\n # server start-up\n server.start(timeout=timeout, passphrase=passphrase)\n server.run()\n # assure server shutdown at exit\n atexit.register(server.shutdown)", "def run_netconsole(self, port=1337, bind=\"0.0.0.0\"):\n old_stdin = sys.stdin\n old_stdout = sys.stdout\n old_stderr = sys.stderr\n\n serversocket = socket.socket()\n serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n serversocket.bind((bind, port))\n serversocket.listen(1)\n clientsocket, address = serversocket.accept() # client socket\n self.logger.info(\"Interactive python connection from %s/%s\" % (address[0], address[1]))\n\n class sw: # socket wrapper\n\n def __init__(self, s):\n self.s = s\n\n def read(self, length):\n return self.s.recv(length)\n\n def write(self, st):\n return self.s.send(st)\n\n def readline(self):\n return 
self.read(256)\n sw = sw(clientsocket)\n sys.stdin = sw\n sys.stdout = sw\n sys.stderr = sw\n mc = self\n terp = code.InteractiveConsole(locals())\n try:\n terp.interact(\n \"Fuglu Python Shell - MainController available as 'mc'\")\n except:\n pass\n self.logger.info(\n \"done talking to %s - closing interactive shell on %s/%s\" % (address[0], bind, port))\n sys.stdin = old_stdin\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n try:\n clientsocket.close()\n except Exception as e:\n self.logger.warning(\n \"Failed to close shell client socket: %s\" % str(e))\n try:\n serversocket.close()\n except Exception as e:\n self.logger.warning(\n \"Failed to close shell server socket: %s\" % str(e))", "def openSocket():\n host, port, pw, nick, channel = getSettings()\n s = socket.socket()\n s.connect((host, port))\n sysMessage(s, \"PASS \" + pw)\n sysMessage(s, \"NICK \" + nick)\n sysMessage(s, \"JOIN #\" + channel)\n return s", "def open_connection(self):\n logging.debug(\"Creating socket connection to host: {0}, port: {1}\".format(\n self.hostname, self.port))\n try:\n self._sock = socket.create_connection((self.hostname, self.port),10)\n except socket.error:\n logging.exception(\"Unable to connect to Munin host {0}, port: {1}\".format(\n self.hostname, self.port))\n sys.exit(1)\n\n self._conn = self._sock.makefile()\n self.hello_string = self._readline()", "def create(self):\r\n\t\tif self.type == 'i': # Internet socket\r\n\t\t\tself.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\t\tself.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\t\t\tself.sock.bind((self.host, self.port))\r\n\t\t\tutil.logMessage('Bound to TCP socket on port %d ' % self.port)\r\n\t\telse:\r\n\t\t\tif os.path.exists(self.file):\r\n\t\t\t\t# if socket already exists, remove it. 
This prevents errors when the socket is corrupt after a crash.\r\n\t\t\t\tos.remove(self.file)\r\n\t\t\tself.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\r\n\t\t\tself.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\t\t\tself.sock.bind(self.file) # Bind BEERSOCKET\r\n\t\t\t# set all permissions for socket\r\n\t\t\tos.chmod(self.file, 0777)", "def load_and_run_shell() -> None:\n sys.path.append(os.getcwd())\n\n parser = argparse.ArgumentParser(\n description=\"Open a shell for a Thrift service with app configuration loaded.\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n parser.add_argument(\n \"--debug\", action=\"store_true\", default=False, help=\"enable extra-verbose debug logging\"\n )\n parser.add_argument(\n \"--app-name\",\n default=\"main\",\n metavar=\"NAME\",\n help=\"name of app to load from config_file (default: main)\",\n )\n parser.add_argument(\n \"config_file\", type=argparse.FileType(\"r\"), help=\"path to a configuration file\"\n )\n\n args = parser.parse_args(sys.argv[1:])\n with args.config_file:\n config = read_config(args.config_file, server_name=None, app_name=args.app_name)\n logging.basicConfig(level=logging.INFO)\n\n env: Dict[str, Any] = {}\n env_banner = {\n \"app\": \"This project's app instance\",\n \"context\": \"The context for this shell instance's span\",\n }\n\n app = make_app(config.app)\n env[\"app\"] = app\n\n baseplate: Baseplate = app.baseplate # type: ignore\n context = baseplate.make_context_object()\n span = baseplate.make_server_span(context, \"shell\")\n env[\"context\"] = span.context\n\n if config.shell and \"setup\" in config.shell:\n setup = _load_factory(config.shell[\"setup\"])\n setup(env, env_banner)\n\n configure_logging(config, args.debug)\n\n # generate banner text\n banner = \"Available Objects:\\n\"\n for var in sorted(env_banner.keys()):\n banner += f\"\\n {var:<12} {env_banner[var]}\"\n\n console_logpath = _get_shell_log_path()\n\n try:\n # try to use IPython if possible\n from IPython import start_ipython\n\n try:\n # IPython 5.x+\n from traitlets.config.loader import Config\n except ImportError:\n # IPython 4 and below\n from IPython import Config\n\n ipython_config = Config()\n ipython_config.InteractiveShellApp.exec_lines = [\n # monkeypatch IPython's log-write() to enable formatted input logging, copying original code:\n # https://github.com/ipython/ipython/blob/a54bf00feb5182fa821bd5457897b3b30a313436/IPython/core/logger.py#L187-L201\n f\"\"\"\n ip = get_ipython()\n from functools import partial\n def log_write(self, data, kind=\"input\", message_id=\"IEXC\"):\n import datetime, os\n if self.log_active and data:\n write = self.logfile.write\n if kind=='input':\n # Generate an RFC 5424 compliant syslog format\n write(f'<13>1 {{datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")}} {{os.uname().nodename}} baseplate-shell {{os.getpid()}} {{message_id}} - {{data}}')\n elif kind=='output' and self.log_output:\n odata = u'\\\\n'.join([u'#[Out]# %s' % s\n for s in data.splitlines()])\n write(u'%s\\\\n' % odata)\n self.logfile.flush()\n ip.logger.logstop = None\n ip.logger.log_write = partial(log_write, ip.logger)\n ip.magic('logstart {console_logpath} append')\n ip.logger.log_write(data=\"Start IPython logging\\\\n\", message_id=\"ISTR\")\n \"\"\"\n ]\n ipython_config.TerminalInteractiveShell.banner2 = banner\n ipython_config.LoggingMagics.quiet = True\n start_ipython(argv=[], user_ns=env, config=ipython_config)\n raise SystemExit\n except ImportError:\n pass\n\n newbanner = 
f\"Baseplate Interactive Shell\\nPython {sys.version}\\n\\n\"\n banner = newbanner + banner\n\n try:\n import readline\n\n readline.set_completer(Completer(env).complete)\n readline.parse_and_bind(\"tab: complete\")\n\n except ImportError:\n pass\n\n shell = LoggedInteractiveConsole(_locals=env, logpath=console_logpath)\n shell.interact(banner)", "def initDomainSocketConnection(self, aShellExecPath, aDomainSocket):\n self.myShellExecPath=aShellExecPath\n self.myDomainSocket=aDomainSocket", "def launch_shell(args, mount,\n interactive_variables,\n print_banner, print_help,\n logger):\n\n discovered_devices = []\n shutdown_token = [False]\n globals().update(interactive_variables)\n\n fibre.libfibre.get_user_name = lambda obj: get_user_name(interactive_variables, obj)\n\n # Connect to device\n with fibre.Domain(args.path) as domain:\n on_discovery = lambda dev: discovered_device(dev, interactive_variables, discovered_devices, mount, shutdown_token, logger)\n discovery = domain.run_discovery(on_discovery)\n\n # Check if IPython is installed\n if args.no_ipython:\n use_ipython = False\n else:\n try:\n import IPython\n use_ipython = True\n except:\n print(\"Warning: you don't have IPython installed.\")\n print(\"If you want to have an improved interactive console with pretty colors,\")\n print(\"you should install IPython\\n\")\n use_ipython = False\n\n interactive_variables[\"help\"] = lambda: print_help(args, len(discovered_devices) > 0)\n\n # If IPython is installed, embed IPython shell, otherwise embed regular shell\n if use_ipython:\n # Override help function # pylint: disable=W0612\n help = lambda: print_help(args, len(discovered_devices) > 0) \n # to fix broken \"%run -i script.py\"\n locals()['__name__'] = globals()['__name__'] \n console = IPython.terminal.embed.InteractiveShellEmbed(banner1='')\n\n # hack to make IPython look like the regular console\n console.runcode = console.run_cell \n interact = console\n\n # Catch ObjectLostError (since disconnect is not always an error)\n default_exception_hook = console._showtraceback\n def filtered_exception_hook(ex_class, ex, trace):\n if(ex_class.__module__+'.'+ex_class.__name__ != 'fibre.libfibre.ObjectLostError'):\n default_exception_hook(ex_class,ex,trace)\n \n console._showtraceback = filtered_exception_hook\n else:\n # Enable tab complete if possible\n try:\n import readline # Works only on Unix\n readline.parse_and_bind(\"tab: complete\")\n except:\n sudo_prefix = \"\" if platform.system() == \"Windows\" else \"sudo \"\n print(\"Warning: could not enable tab-complete. User experience will suffer.\\n\"\n \"Run `{}pip install readline` and then restart this script to fix this.\"\n .format(sudo_prefix))\n\n import code\n console = code.InteractiveConsole(locals=interactive_variables)\n interact = lambda: console.interact(banner='')\n\n # Catch ObjectLostError (since disconnect is not alway an error)\n console.runcode(\"import sys\")\n console.runcode(\"default_exception_hook = sys.excepthook\")\n console.runcode(\"def filtered_exception_hook(ex_class, ex, trace):\\n\"\n \" if ex_class.__module__ + '.' 
+ ex_class.__name__ != 'fibre.libfibre.ObjectLostError':\\n\"\n \" default_exception_hook(ex_class,ex,trace)\")\n console.runcode(\"sys.excepthook=filtered_exception_hook\")\n\n # Launch shell\n print_banner()\n logger._skip_bottom_line = True\n interact()\n\n shutdown_token[0] = True\n discovery.stop()", "def app_shell():\n\n require('hosts')\n\n run(\"invoke shell\")", "def start(self, *args, **kwargs):\n self.socket = Socket(*args, **kwargs)\n self.socket.bind(self.address)\n self.socket.listen(1)", "def open(self, url_component=None):\n if not self.origin_check():\n raise tornado.web.HTTPError(404, \"Websocket origin mismatch\")\n\n self._log.info(\"TermSocket.open: %s\", url_component)\n\n url_component = _cast_unicode(url_component)\n self.term_name = url_component or 'tty'\n self.terminal = self.term_manager.get_terminal(url_component)\n for s in self.terminal.read_buffer:\n self.on_pty_read(s)\n self.terminal.clients.append(self)\n\n self.send_json_message([\"setup\", {}])\n self._log.info(\"TermSocket.open: Opened %s\", self.term_name)", "def open_console(shell_command):\n #print shell_command\n\n # Unique filename\n tmpfile = \"/tmp/tmpscript_34f0195c-1759-11e4-a995-6466b326b337.sh\"\n\n tmpscript =\\\n \"\"\"\n #! /bin/bash\n $@\n sleep 1\n /bin/bash\n \"\"\"\n open(tmpfile,\"w\").write(tmpscript)\n os.chmod(tmpfile, 0777)\n\n cmd= [\"lxterminal\", \"-e\", tmpfile, shell_command]\n #cmd= [\"gnome-terminal\", \"-x\", tmpfile, shell_command]\n\n #print \" \".join(cmd)\n\n Popen(cmd)", "def openSocket(self, device=None):\n # Hard-coded socket port needs to match the one in DefaultVehicleHal\n remotePortNumber = 33452\n extraArgs = '' if device is None else '-s %s' % device\n adbCmd = '/home/himinds/Android/android-pie-compile/out/host/linux-x86/bin/adb %s forward tcp:0 tcp:%d' % (\n extraArgs, remotePortNumber)\n adbResp = subprocess.check_output(adbCmd, shell=True)[0:-1]\n localPortNumber = int(adbResp)\n #localPortNumber = 44567\n #adbCmd1 = 'qemu-system-x86_64 -m 2048 -boot d -enable-kvm -smp 3 -net nic -net user,hostfwd=tcp::4444-:5555,hostfwd=tcp::%d-:33452 -hda /home/himinds/Android/android-image-x86_64/android-oreo.img -cdrom /home/himinds/Android/android-x86-oreo/out/target/product/x86_64android_x86_64.iso' % (extraArgs, localPortNumber)\n #adbResp1 = subprocess.check_output(adbCmd1, shell=True)[0:-1]\n print('Connecting local port %s to remote port %s on %s' %\n (localPortNumber, remotePortNumber,\n 'default device' if device is None else 'device %s' % device))\n # Open the socket and connect\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect(('localhost', localPortNumber))\n print(\"hello\")", "def open(self):\n self.socket.connect(self.addr)\n logger.info(\"%s socket connected to %s\", self.name, self.addr)", "def _open_tunnel(tunnel_cmd) -> None:\n\n # The following doesnt appear to work on ubuntu\n if not 'ubuntu' in platform.platform().lower():\n FNULL = open(os.devnull, 'w')\n s = subprocess.call('lsof -ti:3306',shell=True,stdout=FNULL,stderr=subprocess.STDOUT)\n # Only if the tunnel isnt already open, attempt to open a new one.\n if s == 1:\n os.system(tunnel_cmd)", "def socket_open(self, socket_host, socket_port, socket_name):\n msg = \"socket_open(\\\"{}\\\",{},\\\"{}\\\")\".format(socket_host,\n socket_port,\n socket_name)\n self._add_line_to_program(msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This runs on the inspector's (shell) side. The compiler is used to perform multiline code input.
def code_input(): code = '' compiled = None while not compiled: prompt = PROMPT_INIT if not code else PROMPT_MORE code += input(prompt) # add a line to the code string try: # returns None if the code is valid but not finished compiled = compile(code, '<inspector-shell>', 'single') except (SyntaxError, OverflowError, ValueError): traceback.print_exc(0) # only first entry in the stack code = '' else: code += '\n' return code
[ "def run_compiled(filename):\n bytecode = open(filename, 'rb').read()\n codeobject = Deserializer().deserialize_bytecode(bytecode)\n vm = BobVM(output_stream=sys.stdout)\n vm.run(codeobject)", "def test_cli_compiles_source_file(monkeypatch):\n params = [\"overreact\", \"--compile\", \"data/ethane/B97-3c/model.k\"]\n monkeypatch.setattr(\"sys.argv\", params)\n cli.main()", "def _pre_compile(self, content=None):\r\n pass", "def run(code, input_data = None, do_print = True):\n\t# Set up the scope\n\tglobs = vivarium.scope.global_scope()\n\toutput = vivarium.pipes.Output(scope = globs)\n\tif input_data is not None:\n\t\tvivarium.pipes.Input(input_data, globs)\n\tprogram_scpe = vivarium.scope.Scope(globs)\n\tglobs.lockdown()\n\t# Compile and run\n\tbytecode = vivarium.transform.transform(code)\n\tbytecode.evaluate(program_scpe)\n\t# Return output\n\treturn output.get_data()", "def main(pyfile, overrides={}, initial_options=copy.copy(default_options), \n global_allowed_types=allowed_types):\n # Beware of passing by reference. We need to make copies of options as\n # Much as possible to avoid histerisis effects:\n options = copy.copy(initial_options)\n allowed_types = global_allowed_types.copy()\n\n # Options used to start the parsing:\n parsing_options = copy.copy(options)\n parsing_options._update_loose(overrides)\n # Slice the input file into code blocks\n block_list = code_hasher.iterblocks(pyfile)\n # FIXME: Need to deal with the script's options\n script_options = {}\n\n # Override the options given by the script by the command line switch\n script_options.update(overrides)\n # And now merge this to the default options (! this a not a dict)\n options._update_loose(script_options)\n options = guess_names_and_types(options, allowed_types=allowed_types)\n\n # Process the blocks\n output_list = execute_block_list(block_list, options)\n DEBUGwrite( output_list, 'output_list')\n\n open_outfile(options)\n \n output_list = shape_output_list(output_list, options)\n \n global compilers\n compiler = compilers.get(options.outtype, TexCompiler)(options)\n compiler.compile( output_list, options.outfile, options)", "def main():\r\n #We check if the file ends in .vm else its an error\r\n if len(sys.argv) != 2 or sys.argv[1][-3:] != \".vm\":\r\n badusage()\r\n #We get the filename\r\n filename = str(sys.argv[1])\r\n #Create the parser\r\n parser = Parser(filename)\r\n #We create the codewriter without the .vm extention\r\n codewriter = Codewriter(filename[0:-3])\r\n #Start reading checking if it has more commands advancing and seeing the command\r\n while parser.hasMoreCommands():\r\n parser.advance()\r\n #If it is a arithmetic command we write it or a push or pop command\r\n if parser.command_type() == \"C_ARITHMETIC\":\r\n command = parser.arg1()\r\n codewriter.writeArithmetic(command)\r\n elif parser.command_type() == \"C_PUSH\":\r\n m_segment = parser.arg1()\r\n index = parser.arg2()\r\n codewriter.writePushPop(\"C_PUSH\", m_segment, index)\r\n elif parser.command_type() == \"C_POP\":\r\n m_segment = parser.arg1()\r\n index = parser.arg2()\r\n codewriter.writePushPop(\"C_POP\", m_segment, index)\r\n #Then we close the codewriter and parser\r\n del codewriter\r\n del parser", "def code(self):\n txt = self.stxt[self.section]\n stxt = txt.split('\\n')\n in_code = False\n block = []\n for l in stxt:\n if l == '```python':\n in_code = True\n elif l == '```':\n if in_code:\n in_code = False\n else:\n raise ValueError('end block with no start block')\n elif in_code:\n block.append(l)\n if len(block):\n code 
= '\\n'.join(block)\n self._ip.set_next_input(code) # , replace=True)", "def code_output(self, code):\n try:\n compiled = compile(code, '<inspector-server>', 'single')\n except (SyntaxError, OverflowError, ValueError):\n return traceback.format_exc(0) # only first entry in the stack\n # execute the compiled message and capture the output\n with self.output() as output:\n try:\n exec(compiled, self.namespace, self.namespace)\n except:\n return traceback.format_exc()\n return output.getvalue()", "def main(pyfile, overrides={}, initial_options=copy.copy(default_options), \r\n global_allowed_types=allowed_types):\r\n # Beware of passing by reference. We need to make copies of options as\r\n # Much as possible to avoid histerisis effects:\r\n options = copy.copy(initial_options)\r\n allowed_types = global_allowed_types.copy()\r\n\r\n # Options used to start the parsing:\r\n parsing_options = copy.copy(options)\r\n parsing_options._update_loose(overrides)\r\n # Slice the input file into code blocks\r\n block_list = code_hasher.iterblocks(pyfile)\r\n # FIXME: Need to deal with the script's options\r\n script_options = {}\r\n\r\n # Override the options given by the script by the command line switch\r\n script_options.update(overrides)\r\n # And now merge this to the default options (! this a not a dict)\r\n options._update_loose(script_options)\r\n options = guess_names_and_types(options, allowed_types=allowed_types)\r\n\r\n # Process the blocks\r\n output_list = execute_block_list(block_list, options)\r\n DEBUGwrite( output_list, 'output_list')\r\n\r\n open_outfile(options)\r\n \r\n output_list = shape_output_list(output_list, options)\r\n \r\n global compilers\r\n compiler = compilers.get(options.outtype, TexCompiler)(options)\r\n compiler.compile( output_list, options.outfile, options)", "def compile_script(bogscript):\n\n ## Step 1: Convert source script into tokens\n tokens = lexer(bogscript)\n #print tokens\n\n ## Step 2: Convert tokens into Python Source\n pycode = pygen(tokens)\n #print '\\n' + pycode + '\\n'\n\n ## Step 3: Compile Python Source into bytecode\n bytecode = bytegen(pycode)\n\n return bytecode", "def compile(source, filename='<string>', encoding=None, entity_map=None):\n # XXX all kinds of work to do here catching syntax errors and\n # adjusting line numbers...\n py = kid.codewriter.parse(source, encoding, filename, entity_map)\n return py_compile(py, filename)", "def test_code(self):\r\n code = Element('code')\r\n text = Text()\r\n text.data = u\"print this: twisted.lore.latex\"\r\n code.appendChild(text)\r\n\r\n self.spitter.visitNode(code)\r\n self.assertEqual(\r\n ''.join(self.output),\r\n \"\\\\texttt{print this: twisted.\\\\linebreak[1]lore.\\\\\"\r\n \"linebreak[1]latex}\")", "def program(self, occam_objs, native_objs, source, output):\n\t\tpass", "def run(self):\n answer = 'spam'\n if self.editor.changed():\n answer = tkMessageBox._show(\"Save File\",\"Save the current file?\",icon=tkMessageBox.QUESTION,type=tkMessageBox.YESNOCANCEL)\n if answer == 'yes':\n self.save()\n elif answer == 'cancel':\n return\n code = self.editor.get()\n self.root.withdraw()\n try:\n exec code in vanilla\n except:\n print \"There has been an error during execution\"\n time.sleep(5)\n self.root.deiconify() #!!! on PPC you can close the console and idleCE remains running so\n raise #!!! 
so we need to deiconify on error or you will have an invisable program...\n time.sleep(5)\n self.root.deiconify()", "def codeblock_cmd(self, task, step):\n\n name = self.get_command_params(step.command, \"codeblock\")[0]\n name = self.replace_variables(name)\n self.process_codeblock(task, name.upper())", "def __processCodeBlock(self, parentElem, lines, inList):\r\n detabbed, theRest = self.detectTabbed(lines)\r\n pre = etree.SubElement(parentElem, \"pre\")\r\n code = etree.SubElement(pre, \"code\")\r\n text = \"\\n\".join(detabbed).rstrip()+\"\\n\"\r\n code.text = markdown.AtomicString(text)\r\n self.parseChunk(parentElem, theRest, inList)", "def __startMiniEditor(self):\n self.__startProc(\"eric6_editor.py\")", "def exec_(_code_, _globs_=..., _locs_=...):\n ...", "async def _code(self, ctx):\n await ctx.send(\"The source code for WatchDog can be found here: https://github.com/Fido2603/WatchDog\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads shell history from a file and registers writing it back at exit
def shell_history(): history_file = os.path.expanduser(SHELL_HISTORY_FILE) try: readline.read_history_file(history_file) except IOError: pass atexit.register(readline.write_history_file, history_file)
[ "def readHistory(self):\n\n hist = self.history[self.hposition]\n thist = hist[3]\n if thist is not None:\n hist = thist\n else:\n hist = hist[:3]\n self.string, self.position, self.view = hist\n self.hdirty = False", "def read_history(fn):\n with open(fn, 'rb') as f:\n for row in itertools.islice(csv.reader(f), 1, None):\n yield IshHistory._make(row)", "def loadHistory(histInputfile):\n f = open(histInputfile, 'rb')\n history = pickle.load(f)\n f.close() \n return history", "def dl_history():\n with open(list_file, 'a+t') as f: \n # Nothing to add to our list\n if file_name is None:\n # error for no data\n sys.stderr.write(\"\\n[!] There was no history \" \\\n \"occurance to write to history file.\")\n else:\n print(\"\\n[+] Writing a history occurance to \" \\\n \"{0}.\".format(list_file))\n f.write(\"\\n\" + strftime(\"@ %H:%M:%S on %A %d %B %Y\", \\\n gmtime()) + \"\\n\" + prfx + file_name + \"\\n\")\n f.close()\n print(\"\\n[+] Done!\")", "def test_history(self):\n try:\n import @READLINE@\n except ImportError:\n return\n result=run_debugger(testname='history',\n pythonfile='%shanoi.py' % srcdir)\n self.assertEqual(True, result, \"history\")\n if os.path.exists(\"history.hst\"): os.unlink(\"history.hst\")", "def __load(self):\n historyFile = QFile(self.getFileName())\n if not historyFile.exists():\n return\n if not historyFile.open(QIODevice.ReadOnly):\n E5MessageBox.warning(\n None,\n self.tr(\"Loading History\"),\n self.tr(\n \"\"\"<p>Unable to open history file <b>{0}</b>.<br/>\"\"\"\n \"\"\"Reason: {1}</p>\"\"\")\n .format(historyFile.fileName, historyFile.errorString()))\n return\n \n history = []\n \n # double check, that the history file is sorted as it is read\n needToSort = False\n lastInsertedItem = HistoryEntry()\n data = QByteArray(historyFile.readAll())\n stream = QDataStream(data, QIODevice.ReadOnly)\n stream.setVersion(QDataStream.Qt_4_6)\n while not stream.atEnd():\n ver = stream.readUInt32()\n if ver not in HISTORY_VERSIONS:\n continue\n itm = HistoryEntry()\n itm.url = Utilities.readStringFromStream(stream)\n stream >> itm.dateTime\n itm.title = Utilities.readStringFromStream(stream)\n if ver == HISTORY_VERSION_60:\n itm.visitCount = stream.readUInt32()\n \n if not itm.dateTime.isValid():\n continue\n \n if itm == lastInsertedItem:\n if not lastInsertedItem.title and len(history) > 0:\n history[0].title = itm.title\n continue\n \n if ver == HISTORY_VERSION_42:\n firstEntry = self.__findFirstHistoryEntry(itm.url)\n if firstEntry.isValid():\n visitCount = firstEntry.visitCount + 1\n self.__updateVisitCount(itm.url, visitCount)\n else:\n visitCount = 1\n itm.visitCount = visitCount\n \n if not needToSort and history and lastInsertedItem < itm:\n needToSort = True\n \n history.insert(0, itm)\n lastInsertedItem = itm\n historyFile.close()\n \n if needToSort:\n history.sort()\n \n self.setHistory(history, True)\n \n # if the history had to be sorted, rewrite the history sorted\n if needToSort:\n self.__lastSavedUrl = \"\"\n self.__saveTimer.changeOccurred()", "def save_history(cube, field, filename): \n\n try:\n history.append(cube.attributes['history'])\n except KeyError:\n pass", "def writeHistory(self):\n\n if self.hdirty:\n hist = list(self.history[self.hposition])\n if hist[3] is None:\n self.tmphistory.append(self.hposition)\n hist[3] = (self.string, self.position, self.view)\n self.history[self.hposition] = tuple(hist)\n self.hdirty = False", "def test_single_file() -> None:\n\n def zsh_small_test():\n yield Path(history_file)\n\n items = 
list(history(from_paths=zsh_small_test))\n assert len(items) == 11\n\n from datetime import datetime, timezone\n\n # from the test history file, fine to do\n e = Entry(\n dt=datetime(\n year=2020,\n month=7,\n day=14,\n hour=2,\n minute=21,\n second=37,\n tzinfo=timezone.utc,\n ),\n duration=0,\n command=\"ls\",\n )\n assert e in items", "def copy_chrome_history(path_to_file, current_dir):\n\thistory_file = current_dir+'/'+TEXT_FILE\n\toriginal_file_path = os.path.expanduser(path_to_file)\n\n\tif os.path.exists(original_file_path):\n\t\ttry:\n\t\t\thistory_db = original_file_path+'/History'\n\t\t\thistory_dest = current_dir + '/History'\n\t\t\tshutil.copyfile(history_db, history_dest)\n\t\t\tlogging.info('History sqlite file copied to: '+original_file_path)\n\t\t\tconnection = sqlite3.connect('History')\n\t\t\tlogging.info('Database connected to successfully')\n\t\t\tcursor = connection.execute(\"SELECT url FROM urls ORDER BY last_visit_time DESC\")\n\t\t\twith open(history_file, 'w') as f:\n\t\t\t\tfor row in cursor:\n\t\t\t\t\tf.write(row[0]+'\\n')\n\t\t\tlogging.info('History database has been copied to: '+history_file)\n\t\t\tconnection.close()\n\t\t\tlogging.info('sqlite3 connection closed.')\n\t\texcept sqlite3.Error as e:\n\t\t\tlogging.error('Sqlite3 error: '+str(e))\n\telse:\n\t\tlogging.error('ERROR: Directory for the History file does not exist.')\n\t\traise SystemExit(1)", "def open_job_history (history_file):\n if not history_file:\n history_file = get_default_history_file_name ()\n\n return dbdict.open(history_file, picklevalues=True)", "def get_all_history(path=\"./extracted_history.txt\"):\n with open(path, 'w') as handle:\n soup = uf.get_soup()\n cnt = 0\n print(\"Starting extracting history\")\n handle.write('name1\\tname2\\thistory\\tresult\\n')\n for i in soup.findAll(attrs={'class': 'norm'}):\n cnt += 1\n print(cnt)\n form = get_history('http://www.championat.com' + i['href'])\n if form is not None:\n handle.write('\\t'.join(str(e) for e in form) + '\\n')\n if cnt % 5 == 0:\n handle.flush()\n print(\"History extracting finished\")", "def iter_history():\n logname = '/var/log/dpkg.log'\n if not os.path.exists(logname):\n raise FileNotFoundError('File does not exist: {}'.format(logname))\n try:\n with open(logname, 'r') as f:\n # Going to read these backwards, latest first.\n for line in reversed(f.readlines()):\n historyline = HistoryLine.from_dpkg_line(line)\n if historyline is not None:\n yield historyline\n except EnvironmentError as exenv:\n errfmt = 'Failed to read history: {}\\n{}'\n raise EnvironmentError(errfmt.format(logname, exenv))", "def get_history():\n\n f = open('%s/%s/%s' % (sublime.packages_path(),\n \"TextTransmute\",\n \"History.sublime-project\"), 'r')\n content = f.readlines()\n f.close()\n return [x.strip() for x in content]", "def history(self):\r\n\t\tpass", "def add_path_history(self, dir_path: str):\n history: List[str] = self.get(\"io.history\", [])\n try:\n history.remove(dir_path)\n except ValueError:\n history = history[:9]\n\n self.set(\"io.history\", [dir_path] + history[-9:])", "def save(self):\n historyFile = QFile(self.getFileName())\n if not historyFile.exists():\n self.__lastSavedUrl = \"\"\n \n saveAll = self.__lastSavedUrl == \"\"\n first = len(self.__history) - 1\n if not saveAll:\n # find the first one to save\n for index in range(len(self.__history)):\n if self.__history[index].url == self.__lastSavedUrl:\n first = index - 1\n break\n if first == len(self.__history) - 1:\n saveAll = True\n \n if saveAll:\n # use a temporary file 
when saving everything\n f = QTemporaryFile()\n f.setAutoRemove(False)\n opened = f.open()\n else:\n f = historyFile\n opened = f.open(QIODevice.Append)\n \n if not opened:\n E5MessageBox.warning(\n None,\n self.tr(\"Saving History\"),\n self.tr(\n \"\"\"<p>Unable to open history file <b>{0}</b>.<br/>\"\"\"\n \"\"\"Reason: {1}</p>\"\"\")\n .format(f.fileName(), f.errorString()))\n return\n \n for index in range(first, -1, -1):\n data = QByteArray()\n stream = QDataStream(data, QIODevice.WriteOnly)\n stream.setVersion(QDataStream.Qt_4_6)\n itm = self.__history[index]\n stream.writeUInt32(HISTORY_VERSION_60)\n stream.writeString(itm.url.encode(\"utf-8\"))\n stream << itm.dateTime\n stream.writeString(itm.title.encode('utf-8'))\n stream.writeUInt32(itm.visitCount)\n f.write(data)\n \n f.close()\n if saveAll:\n if historyFile.exists() and not historyFile.remove():\n E5MessageBox.warning(\n None,\n self.tr(\"Saving History\"),\n self.tr(\n \"\"\"<p>Error removing old history file <b>{0}</b>.\"\"\"\n \"\"\"<br/>Reason: {1}</p>\"\"\")\n .format(historyFile.fileName(),\n historyFile.errorString()))\n if not f.copy(historyFile.fileName()):\n E5MessageBox.warning(\n None,\n self.tr(\"Saving History\"),\n self.tr(\n \"\"\"<p>Error moving new history file over old one \"\"\"\n \"\"\"(<b>{0}</b>).<br/>Reason: {1}</p>\"\"\")\n .format(historyFile.fileName(), f.errorString()))\n f.remove() # get rid of the temporary file\n self.historySaved.emit()\n try:\n self.__lastSavedUrl = self.__history[0].url\n except IndexError:\n self.__lastSavedUrl = \"\"", "def hdu_history_write(hdu, message):\n hdu.header['HISTORY'] = message", "def __init__(self, pyshell):\n if 'SPORKPATH' not in os.environ.keys():\n self.ph = Prehistory(os.path.expanduser('~'))\n else:\n self.ph = Prehistory(os.environ['SPORKPATH'])\n self.pyshell = pyshell\n self.text = text = pyshell.text\n self.history = self.ph.get()\n self.super_history = list(enumerate(self.history))\n self.smart_history = []\n self.prefix = None\n self.pointer = None\n self.suggested = []\n self.cyclic = idleConf.GetOption(\"main\", \"History\", \"cyclic\", 1, \"bool\")\n text.bind(\"<<history-previous>>\", self.history_prev)\n text.bind(\"<<history-next>>\", self.history_next)\n text.bind(\"<<history-guess>>\", self.history_guess)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs a server on the importer's side.
def importer_server(): # this behaves strangely for me, so I'm checking the whole stack to make it work for everybody importer_globals = None for frame in inspect.stack(): if frame[0].f_globals['__name__'] != __name__: importer_globals = frame[0].f_globals break if not importer_globals: print('From where are you importing?') return # save file variable for inspector's shell to display importer_file = importer_globals.get('__file__', 'Python shell') importer_globals['__importer_file__'] = importer_file # server variables host = importer_globals.get('INSPECTOR_HOST', HOST) port = importer_globals.get('INSPECTOR_PORT', PORT) timeout = importer_globals.get('INSPECTOR_TIMEOUT', TIMEOUT_SERVER) passphrase = importer_globals.get('INSPECTOR_PASSPHRASE', PASSPHRASE) # does it want to be a shell instead? if importer_globals.get('INSPECTOR_SHELL'): timeout = importer_globals.get('INSPECTOR_TIMEOUT', TIMEOUT_CLIENT) inspector_shell(host, port, timeout, passphrase) return elif importer_globals.get('INSPECTOR_DISABLE'): return # server initialization server = ImporterServer((host, port), importer_globals) # server start-up server.start(timeout=timeout, passphrase=passphrase) server.run() # assure server shutdown at exit atexit.register(server.shutdown)
[ "def main() -> None:\n\n start_server()", "def runserver():\n from web.server import runserver\n runserver()", "def run_server(server):\n\n server.start()\n server.wait()", "def runserver():\n load_app().run()", "def run(self):\n parts = urlparse(HOST_BASE)\n domain, port = parts.netloc.split(\":\")\n self.srv = make_server(domain, int(port), self.app)\n try:\n self.srv.serve_forever()\n except:\n import traceback\n traceback.print_exc()\n # Failed to start\n self.srv = None", "def api_server(db):\n\tProcess(target=run_api_server, args=()).start()\n\ttime.sleep(1)", "def run(self, host='127.0.0.1', port=5000):\n httpd = wsgiref.simple_server.make_server('', port, self)\n log(\"PWF now running on http://%s:%s/\" % (host, port,))\n httpd.serve_forever()", "def runserver(args):\n TestServer().run(args.port)", "def launch_server(self, server):\n gt = eventlet.spawn(self.run_server, server)\n self._services.append(gt)", "def run(page):\n server = Server(page).run()", "def RunServer(self, host='0.0.0.0', port=9992):\n caching = CachingServer(port + 1)\n server = SimpleXMLRPCServer((host, port), allow_none=True,\n requestHandler=ChameleonXMLRPCRequestHandler,\n logRequests=True)\n server.register_introspection_functions()\n # Setting allow_dotted_names=True allows a client to access the object\n # members of self._driver. This is useful to group methods into\n # different objects, e.g., audio, video, bluetooth hid, etc., in addition\n # to some generic methods.\n server.register_instance(self._driver, allow_dotted_names=True)\n\n signal_handler = lambda signum, frame: sys.exit(0)\n signal.signal(signal.SIGTERM, signal_handler)\n\n try:\n # Launch the caching server on the next port, serving cached files.\n logging.info('Start the caching server process.')\n caching.start()\n\n # Launch the XMLRPC server to serve Chameleond APIs.\n logging.info('Listening on %s port %d...', host, port)\n server.serve_forever()\n finally:\n logging.info('Terminate the caching server process.')\n caching.terminate()", "def start_server(port: int):\n run(port)", "def start(self):\n self.action_server.start()", "def __call__(self):\n if self.repo.vreg.config[\"start_sftp_server\"]:\n cube_path = os.path.dirname(os.path.abspath(__file__))\n ftpserver_path = os.path.join(cube_path,\n \"twistedserver/main.py\")\n basedir_opt = \"\"\n sftp_server_basedir = self.repo.vreg.config[\"basedir\"]\n if sftp_server_basedir:\n basedir_opt = \"--base-dir=%s\" % sftp_server_basedir\n subprocess.Popen([sys.executable, ftpserver_path, basedir_opt])", "def run_server(host='localhost'):\n run(server='paste', host=host)", "def runserver():\n #\n # The Flasks's runserver command was overwritten because we are using connexion.\n #\n # Make sure that you do not run the application with multiple processes since we would\n # have multiple scheduler instances. 
If you would like to do so, just create one scheduler\n # that would serve jobs and per-process scheduler would be in paused mode\n # just for creating/listing jobs.\n app.run(\n port=os.environ.get('JOB_SERVICE_PORT', defaults.DEFAULT_SERVICE_PORT),\n server='flask',\n debug=True,\n use_reloader=True,\n threaded=True,\n json_encoder=SafeJSONEncoder,\n processes=1\n )", "def run_server(\n context: Context = None,\n client: dask.distributed.Client = None,\n host: str = \"0.0.0.0\",\n port: int = 8080,\n startup=False,\n log_level=None,\n): # pragma: no cover\n _init_app(app, context=context, client=client)\n\n if startup:\n app.c.sql(\"SELECT 1 + 1\").compute()\n\n uvicorn.run(app, host=host, port=port, log_level=log_level)", "def runserver():\n typer.echo(\"Starting server...\")\n\n # Webserver config settings\n config = container[Configuration]\n event_loop = container[EventLoopBase]\n hostname = config['app']['hostname'].get()\n port = config['app']['port'].get()\n # Webservice application\n app = fastapi_app\n server_config = Config(app=app, host=hostname, port=port, loop=event_loop.get_loop())\n\n # Initialize the webserver\n uvicorn_server = Server(server_config)\n event_loop.run(uvicorn_server.serve())", "def start(self):\n self.log('Server started...')\n self.httpd.serve_forever()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a dict of the top refresh for each source in the queue
def peek(self): return { source_name: self._top_refresh(source_name) for source_name in self.source_to_refresh_queue }
[ "def refresh_queue(self):\n #print(\"REF Q\")\n now_s = time.time()\n state = self.get_state()\n queue = self.queue = self.get_queue()\n for probe in self.get_probes():\n name = probe['name']\n if not name in queue:\n logger.debug(\"Adding entry for %s\", name)\n sched = self.cfg['schedules'][probe['schedule']]\n sched_st = self.mk_sched_entry(\n probe['name'],\n t_next=now_s,\n schedule=sched,\n )\n queue.add(sched_st)\n #print(\"Q: \", self.queue)\n s_queue = OrderedDict()\n for key, val in sorted(queue.items(), \n key=lambda key_val: (key_val[1]['t_next'], key_val[1]['interval'])):\n s_queue[key] = val\n self.queue = s_queue\n #print(\"SQ: \", s_queue)", "def top_forward_activation_producers(self, top: int = 10):\n return sorted(\n self.forward_traces, key=lambda a: a.event.memory_activations, reverse=True\n )[:top]", "def top_forward_activation_producers(self, top: int = 10) -> List[LayerMemoryTrace]:\n return sorted(self.forward_traces, key=lambda a: a.event.memory_activations, reverse=True)[:top]", "def analyse_queue_summary(get_next_queue_info, device_queue_info):\r\n result = {}\r\n if get_next_queue_info and device_queue_info:\r\n result = {\"data_process\": {\"status\": \"normal\"},\r\n \"device_queue_warning\": {\"status\": \"normal\"},\r\n \"data_transmission\": {\"status\": \"normal\"},\r\n \"get_next\": {\"status\": \"normal\"}}\r\n\r\n get_next_queue_empty_count = get_next_queue_info.get(\r\n \"summary\", {}).get(\"queue_summary\", {}).get(\"empty_queue\", 0)\r\n result[\"get_next_queue_info\"] = {\r\n \"summary\": {\r\n \"empty_batch_count\": get_next_queue_empty_count,\r\n \"total_batch\": get_next_queue_info.get(\"size\")\r\n }\r\n }\r\n\r\n device_queue_empty_count = device_queue_info.get(\r\n \"summary\", {}).get(\"queue_summary\", {}).get(\"empty_queue\", 0)\r\n device_queue_full_count = device_queue_info.get(\r\n \"summary\", {}).get(\"queue_summary\", {}).get(\"full_queue\", 0)\r\n\r\n result[\"device_queue_info\"] = {\"summary\": {\r\n \"empty_batch_count\": device_queue_empty_count,\r\n \"full_batch_count\": device_queue_full_count,\r\n \"total_batch\": device_queue_info.get(\"size\")}}\r\n\r\n # Adapt to the case that the first step data in the GPU is always empty\r\n if device_queue_empty_count > device_queue_info.get(\"size\", 0)*\\\r\n MinddataAnalyser.DEVICE_QUEUE_EMPTY_WARNING_THRESHOLD:\r\n result[\"data_process\"][\"status\"] = \"warning\"\r\n elif device_queue_empty_count <= device_queue_info.get(\"size\", 0)*\\\r\n MinddataAnalyser.DEVICE_QUEUE_EMPTY_WARNING_THRESHOLD and get_next_queue_empty_count > 0:\r\n result[\"data_transmission\"][\"status\"] = \"warning\"\r\n else:\r\n result[\"device_queue_warning\"][\"status\"] = \"warning\"\r\n\r\n elif device_queue_info and not get_next_queue_info:\r\n result = {\"data_process\": {\"status\": \"normal\"},\r\n \"device_queue_warning\": {\"status\": \"normal\"},\r\n \"fpbp\": {\"status\": \"normal\"}}\r\n\r\n device_queue_empty_count = device_queue_info.get(\r\n \"summary\", {}).get(\"queue_summary\", {}).get(\"empty_queue\", 0)\r\n device_queue_full_count = device_queue_info.get(\r\n \"summary\", {}).get(\"queue_summary\", {}).get(\"full_queue\", 0)\r\n\r\n result[\"device_queue_info\"] = {\r\n \"summary\": {\r\n \"empty_batch_count\": device_queue_empty_count,\r\n \"full_batch_count\": device_queue_full_count,\r\n \"total_batch\": device_queue_info.get(\"size\")\r\n }\r\n }\r\n\r\n if device_queue_empty_count > device_queue_info.get(\"size\", 0)*\\\r\n 
MinddataAnalyser.DEVICE_QUEUE_EMPTY_WARNING_THRESHOLD:\r\n result[\"data_process\"][\"status\"] = \"warning\"\r\n else:\r\n result[\"device_queue_warning\"][\"status\"] = \"warning\"\r\n\r\n return result", "def get_oldest_task_json_dict():\n # A list holding the different pending tasks.\n tasks = []\n\n for cls in TELEMETRY_DATA_MODELS:\n cls.add_oldest_pending_task(tasks)\n\n task_dict = {}\n if tasks:\n oldest_task = reduce(lambda x, y: x if x.requested_time < y.requested_time\n else y, tasks)\n task_dict = oldest_task.get_json_repr()\n return task_dict", "def get_stats(self, queue):\r\n return self._manager.get_stats(queue)", "def _queue_stat(self):\n\n api_body = self._request('/api/queues')\n\n if api_body:\n\n _queue_stat_objects = [\n 'auto_delete',\n 'consumer_utilisation',\n 'consumers',\n 'durable',\n 'exclusive_consumer_tag',\n 'idle_since',\n 'memory',\n 'state',\n ]\n\n for entry in json.loads(api_body):\n\n # Queue name\n name = entry['name']\n\n # Virtual host this queue belongs to\n vhost = entry['vhost']\n\n for _object in _queue_stat_objects:\n if _object in entry:\n self._enqueue(\n 'rabbitmq.stat.queue[{0},{1},{2}]'\n ''.format(vhost, name, _object),\n entry[_object]\n )\n\n # backing_queue_status\n for key in entry['backing_queue_status']:\n if key == 'delta':\n continue\n self._enqueue(\n 'rabbitmq.stat.queue[{0},{1},backing_queue_status,{2}]'\n ''.format(vhost, name, key),\n entry['backing_queue_status'][key]\n )\n\n # messages\n if 'messages' in entry:\n self._enqueue(\n 'rabbitmq.stat.queue[{0},{1},messages]'\n ''.format(vhost, name),\n entry['messages']\n )\n self._enqueue(\n 'rabbitmq.stat.queue[{0},{1},messages,rate]'\n ''.format(vhost, name),\n entry['messages_details']['rate']\n )\n # messages_ready\n if 'messages_ready' in entry:\n self._enqueue(\n 'rabbitmq.stat.queue[{0},{1},messages_ready]'\n ''.format(vhost, name),\n entry['messages_ready']\n )\n self._enqueue(\n 'rabbitmq.stat.queue[{0},{1},messages_ready,rate]'\n ''.format(vhost, name),\n entry['messages_ready_details']['rate']\n )\n # message_stats\n if 'message_stats' in entry:\n for _object in self._message_stats_objects:\n _object_details = '{0}_details'.format(_object)\n if _object in entry['message_stats']:\n self._enqueue(\n 'rabbitmq.stat.queue[{0},message_stats,{1}]'\n ''.format(vhost, _object),\n entry['message_stats'][_object]\n )\n self._enqueue(\n 'rabbitmq.stat.queuet[{0},message_stats,{1},rate]'\n ''.format(vhost, _object),\n entry['message_stats'][_object_details]['rate']\n\n )", "def get_client_round_trip_times(self) -> dict[str, RoundTripData]:\n # first step: collect all round trip times of subscriptions, group them by notify_to_address\n tmp = defaultdict(list)\n ret = {}\n with self._subscriptions.lock:\n for subscription in self._subscriptions.objects:\n if subscription.max_roundtrip_time > 0:\n tmp[subscription.notify_to_address].append(subscription.get_roundtrip_stats())\n for key, stats in tmp.items():\n all_values = [stat.values for stat in stats]\n ret[key] = RoundTripData(all_values, max([s.max for s in stats]))\n return ret", "def get_heartbeats():\n redis = get_redis_connection()\n\n with redis.pipeline() as pipe:\n for source in HeartbeatSource:\n pipe.get(f\"heartbeat:{source.value}\")\n\n values = pipe.execute()\n\n result = {}\n\n for source, value in zip(HeartbeatSource, values):\n if value is not None:\n value = int(value)\n value = datetime.datetime.fromtimestamp(\n value, datetime.timezone.utc\n )\n\n result[source] = value\n\n return result", "def 
last_come_first_served(\n evs: List[SessionInfo], iface: Interface\n) -> List[SessionInfo]:\n return sorted(evs, key=lambda x: x.arrival, reverse=True)", "def sequence_performance_data(self, c):\n ans = []\n for (server, port), group in sorted(self.boardGroups.items()):\n pageTimes = [lock.times for lock in group.pageLocks]\n runTime = group.runLock.times\n runWaitTime = group.runWaitTimes\n readTime = group.readLock.times\n ans.append(((server, port), (pageTimes[0], pageTimes[1], runTime,\n runWaitTime, readTime)))\n return ans", "def process_base_pollster(sample):\n name = sample[\"name\"]\n payload = sample[\"volume\"]\n (address, sourcedict, timestamp) = get_core_triple(payload, sample, name)\n return (address, sourcedict, timestamp, payload)", "def _payload_aggregator_thread(self):\n aggregated_payload = []\n number_of_payloads = 0\n current_payload_size_in_bytes = 0\n current_target = None\n current_collection_id = None\n current_destination_table = None\n method = None\n while self._closed.value == 0:\n try:\n (\n payload_dict,\n destination_table,\n method,\n target,\n collection_id,\n ) = self._objects_queue.get(\n timeout=self._payload_aggregator_timeout_seconds\n )\n except queue.Empty:\n (\n aggregated_payload,\n number_of_payloads,\n current_payload_size_in_bytes,\n ) = self._queue_payload_for_posting(\n aggregated_payload,\n number_of_payloads,\n destination_table,\n method,\n current_target,\n current_collection_id,\n )\n continue\n\n # First payload will determine the target and collection_id\n if (\n not current_target\n and not current_collection_id\n and not current_destination_table\n ):\n current_target = target\n current_collection_id = collection_id\n current_destination_table = destination_table\n\n # When we get a payload with different than current target/collection_id/destination_table,\n # send the current payload and start aggregating for the new one.\n if (\n current_target != target\n or current_collection_id != collection_id\n or current_destination_table != destination_table\n ):\n (\n aggregated_payload,\n number_of_payloads,\n current_payload_size_in_bytes,\n ) = self._queue_payload_for_posting(\n aggregated_payload,\n number_of_payloads,\n current_destination_table,\n method,\n current_target,\n current_collection_id,\n )\n current_target = target\n current_collection_id = collection_id\n current_destination_table = destination_table\n\n # We are converting to string to get correct memory size. 
This may\n # cause performance issues.\n # TODO: Propose a way to calculate the object's memory footprint without converting to string.\n string_payload = str(payload_dict)\n\n if (\n sys.getsizeof(string_payload) + current_payload_size_in_bytes\n > self._payload_size_bytes_threshold\n ):\n (\n aggregated_payload,\n number_of_payloads,\n current_payload_size_in_bytes,\n ) = self._queue_payload_for_posting(\n aggregated_payload,\n number_of_payloads,\n current_destination_table,\n method,\n current_target,\n current_collection_id,\n )\n aggregated_payload.append(payload_dict)\n number_of_payloads += 1\n current_payload_size_in_bytes += sys.getsizeof(string_payload)", "def get_subscription_round_trip_times(self) -> dict[tuple[str, tuple[str]], RoundTripData]:\n ret = {}\n with self._subscriptions.lock:\n for subscription in self._subscriptions.objects:\n if subscription.max_roundtrip_time > 0:\n ret[(subscription.notify_to_address,\n subscription.short_filter_names())] = subscription.get_roundtrip_stats()\n return ret", "def __best_in_queue(self):\n #return graph, score\n graph = max(self._queue,key=self._queue.get)\n score = self._queue[graph]\n return graph, score", "def get_latest_tweets():\n tweet = twitter.Api(consumer_key=config.twitter_consumer_key, \n consumer_secret = config.twitter_consumer_secret, \n access_token_key = config.twitter_access_key, access_token_secret = config.twitter_access_secret)\n red = redis.Redis(host = 'localhost', db = config.subfeed_db)\n unique_new_list = []\n liverpool_tweet_list = tweet.GetUserTimeline(screen_name = config.twitter_screen_name, count = config.twitter_limit)\n twitter_key = \"lfc_twitter\"\n for lfctweet in liverpool_tweet_list:\n current_time = int(time.time()) \n present_in_db = red.zadd(twitter_key, lfctweet.id, current_time)\n if present_in_db == 1:\n twitter_url = \"https://www.twitter.com/\" + config.twitter_screen_name + \"/status/\" + str(lfctweet.id)\n unique_new_list.append(twitter_url)\n return unique_new_list", "def _queue_analysis(self):", "def _gen_refresh(self, current_sequence_id):\n refresh = []\n for k, v in self._sequence_freshness.items():\n if not v and k != current_sequence_id:\n refresh.append(k)\n return refresh", "def fetch_latencies(hosts, client_id=uuid.uuid4(), debug=False):\n latencies = dict()\n for host in hosts:\n timer_data = dict()\n url = \"https://\" + host['hostname'] + \"/testobject.svg?unique=\"\n url = url + client_id + \"-perfmap&popId=\" + host['popId']\n timer_set(\"start\", timer_data)\n requests.get(url, hooks={'response':timer_set('response', timer_data)})\n timer_set('end', timer_data)\n if debug:\n print(\"PoP: \" + host['popId'])\n print(\"Time Total: %f\"% (timer_data['end'] - timer_data['start']))\n print(\"Time start-response: %f\"% (timer_data['response'] - timer_data['start']))\n print(\"Time response-end: %f\"% (timer_data['end'] - timer_data['response']))\n timer_value = int((timer_data['end'] - timer_data['response']) * 100)\n latencies[host['popId']] = timer_value\n return latencies" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes and returns the top refresh for the given source using its name
def pop(self, source_name): if source_name not in self.source_to_refresh_queue: raise EmptyQueueError(source_name) refresh_id = self.source_to_refresh_queue[source_name].pop(0) item = self.refresh_ref.pop(refresh_id) if not self.source_to_refresh_queue[source_name]: del self.source_to_refresh_queue[source_name] return item
[ "def peek(self):\n return {\n source_name: self._top_refresh(source_name)\n for source_name in self.source_to_refresh_queue\n }", "async def cmd_remove_top(self, ctx, top):\n channel = ctx.channel\n\n if not re.match(r'^-?\\d+$', top):\n await ctx.send(\"Fehler! Der übergebene Parameter muss eine Zahl sein.\")\n else:\n if str(channel.id) in self.tops:\n channel_tops = self.tops.get(str(channel.id))\n\n if 0 < int(top) <= len(channel_tops):\n del channel_tops[int(top) - 1]\n\n if len(channel_tops) == 0:\n self.tops.pop(str(channel.id))\n\n tops_file = open(self.tops_file, mode='w')\n json.dump(self.tops, tops_file)", "def test_02_refresh_specific_source(self):\n completed_proc = self.client.run((\n 'pulp-admin content sources refresh '\n '--source-id {}'.format(self.source_id)\n ).split())\n self.check_error_existing(completed_proc)", "def remove_oldest(own):\n if own.capacityOf==0 or own.capacityOf<0:\n print(\"Capacity of RingBuffer is 0 or less than 1. Can't use this RingBuffer\")\n return\n return own.removeFirst()", "def get_last_snapshot(self):\n name = self.snapshot_names[-1]\n return self.get_snapshot(name)", "def remove_source(src):\n src.stop()\n try:\n src.data.release_data_flag = 1\n src.cell_scalars_name = ''\n src.cell_tensors_name = ''\n src.cell_vectors_name = ''\n src.point_scalars_name = ''\n src.point_tensors_name = ''\n src.point_vectors_name = ''\n except AttributeError:\n pass\n src.start()\n src.stop()\n src.remove()", "def get_oldest(class_name):\n ...", "def _remove_refresh(self) -> None:\n if self._refresh_remove is not None:\n self._refresh_remove()\n self._refresh_remove = None", "def find_most_recent_tag_dockerhub(name, url):\n res = json.loads(requests.get(url).text)\n\n updates_sorted = sorted(res[\"results\"], key=lambda k: k[\"last_updated\"])\n\n if updates_sorted[-1][\"name\"] == \"latest\":\n new_tag = updates_sorted[-2][\"name\"]\n else:\n new_tag = updates_sorted[-1][\"name\"]\n\n return new_tag", "def remove_top_card(self):\n if len(self._cards) == 0:\n print('Deck is empty')\n return\n return self._cards.pop(0)", "def pop_from_loaded_collection(name):\n pymod.collection.pop_from_loaded_collection(name)", "def remove_one_from_local_queue(self):\r\n\r\n next_node: Node = list(self.local_queue)[0]\r\n self.local_queue.remove(next_node)\r\n\r\n return next_node", "async def cmd_clear_tops(self, ctx):\n\n channel = ctx.channel\n\n if str(channel.id) in self.tops:\n self.tops.pop(str(channel.id))\n tops_file = open(self.tops_file, mode='w')\n json.dump(self.tops, tops_file)", "def get_source_by_name(self, name):\r\n sources = self.call(GetSourcesList())\r\n for source in sources.getSources():\r\n if source[\"name\"] == name:\r\n return source\r\n return None", "def pop(self):\n ts = int(time.time())\n item = self.rpop(keys=[self.name], client=self._redis, args=[ts])\n if item is None:\n return item\n msg, ts = item\n ts = int(ts) if ts else None\n return msg, ts", "def remove_edges_by_source(G, source):\n \n nG = G.copy()\n removed_edges = []\n sourcenames = nx.get_edge_attributes(nG, \"source\")\n for edge in nG.edges():\n try:\n if sourcenames[(edge[0], edge[1])] == source:\n removed_edges.append((edge[0], edge[1]))\n except:\n pass\n \n for edge in removed_edges:\n nG.remove_edge(edge[0], edge[1])\n \n return nG", "def get_last_update(name: str) -> float:\n global _feeds\n return _feeds[name]['last_update']", "def get_prev_top(self):\n self._get_next_item(self.tops_image_path, self.top_images, increment=False)", "def get_todays_stats(stock_name):\n\n 
latest_date = get_latest_date_for_stock(stock_name)\n if latest_date:\n return get_stock_stats(stock_name, latest_date)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
coseno = ( V1 · V2 ) / ( ||V1|| × ||V2|| )
def coseno(vector1, vector2): #return float(dot(vector1,vector2) / (math.sqrt(sumarLista(map(cuadrado,vector1))) * math.sqrt(sumarLista(map(cuadrado,vector2)))) return float(dot(vector1,vector2) / (norm(vector1) * norm(vector2)))
[ "def cos_of_angle_between(v1, v2):\n assert len(v1) == len(v2)\n return np.dot(v1, v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))", "def cosAngle(vector1, vector2=ThreeVector(0, 0, 1)):\n\n return (vector1 ^ vector2) / vector1.norm / vector2.norm", "def cosine(fingerprint1, fingerprint2):\n return cosine(fingerprint1, fingerprint1) / 2.0", "def cos(val):\n return math.cos(val)", "def f(x):\r\n\treturn np.cos(2*x)**2-(x**2)", "def cosine_distance(point1, point2):\n return 1 - dot_product(point1, point2) / (norm(point1) * norm(point2))", "def angle_cos(self, other=None) -> float:\n if other is None:\n other = Vector(1, 0)\n return self.cross(other) / (self.length() * other.length())", "def cos_sim(cls, vec_a, vec_b):\n vec_a = vec_a.unsqueeze(1).expand(vec_a.shape[0], vec_b.shape[0], -1)\n vec_b = vec_b.unsqueeze(0).expand_as(vec_a)\n return F.cosine_similarity(vec_a, vec_b, dim=-1)", "def cos(x):\n\ttry:\n\t\tval = np.cos(x.val)\n\t\tders = defaultdict(float)\n\t\tsec_ders = defaultdict(float)\n\t\tfor key in x.der:\n\t\t\tders[key] += -np.sin(x.val) * (x.der[key])\n\t\t\tsec_ders[key] += -x.sec_der[key]*np.sin(x.val)+(x.der[key]**2)*(-np.cos(x.val))\n\t\treturn Variable(val, ders, sec_ders)\n\texcept AttributeError:\n\t\treturn np.cos(x)", "def get_cos_theta(p1x, p1y, p2x, p2y, p3x, p3y):\r\n\r\n return np.dot([p1x - p3x, p1y - p3y], [p2x - p3x, p2y - p3y]) / ( math.sqrt( (p1x - p3x)**2 + (p1y - p3y)**2 ) * math.sqrt( (p2x - p3x)**2 + (p2y - p3y)**2 ) )", "def cos(X, max_order=30):\n op = 1 + 0*X\n X2 = X * X\n X2n = 1 + 0*X\n for n in range(1, max_order):\n X2n = X2n*X2\n op = op + ((-1) ** (n) / math.gamma(2 * n + 1)) * X2n\n return op", "def avcos (x):\n v = abs(math.exp(math.cos(x)))\n #Usong the math module to comput the absolute value of the exponential of \n #the cosine of any given #, x\n return v\n #returns the answer, v, to the euqation ", "def compute_angle(v1,v2):\n length_product = norm(v1) * norm(v2)\n cosine = dot(v1,v2) / length_product\n angle = degrees( acos( cosine ) )\n return angle", "def cos(x):\n\n return math.cos(math.radians(x))", "def cal_cosine_similarity(vec_a, vec_b):\n cosine_similarity = np.divide(np.dot(vec_a, vec_b), (np.linalg.norm(vec_a) * np.linalg.norm(vec_b)))\n cosine_similarity = cosine_similarity * 0.5 + 0.5\n return cosine_similarity", "def ts_cos_x(x, nofapprox): \n \n result = 0\n for i in range(nofapprox):\n coef = (-1)**i\n num = x**(2*i)\n denom = math.factorial(2*i)\n result += (coef) * ((num)/(denom))\n return result", "def arccos(x):\n\ttry:\n\t\tval = np.arccos(x.val)\n\t\tders = defaultdict(float)\n\t\tsec_ders = defaultdict(float)\n\t\tfor key in x.der:\n\t\t\tders[key] += -1/((1 - x.val**2)**0.5) * (x.der[key])\n\t\t\tsec_ders[key] += -(x.val**2*(-x.sec_der[key]) + x.sec_der[key] + x.val*x.der[key]**2)/ \\\n\t\t\t\t\t\t\t ((1-x.val**2)**1.5)\n\t\treturn Variable(val, ders, sec_ders)\n\texcept AttributeError:\n\t\treturn np.arccos(x)", "def calc_ang1(v1,v2):\n ang = np.arccos( np.abs(np.dot(v1,v2))/(np.linalg.norm(v1)*np.linalg.norm(v2)) )\n return ang * 180./np.pi", "def Cos(obj):\n \n branch_root_node = expression_evaluation.EquationNode( \n name = \"Cos ( \" + obj.object.name + \" )\", \\\n base_object = None, \\\n base_operation = expression_evaluation.EquationNode.Cos, \\\n base_operation_name = 'cos'\n )\n\n branch_root = expression_evaluation.ExpressionTree( object_ = branch_root_node )\n\n obj.parent = branch_root\n\n return(branch_root)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test thread creation after process attach.
def test_create_after_attach_with_fork(self): self.build(dictionary=self.getBuildFlags(use_cpp11=False)) self.create_after_attach(use_fork=True)
[ "def start_test(self):\n self.logger.info('Test thread starts')\n self.test_thread.start()", "def test_create_process(self):\n self.assertIsNotNone(self.pid)", "def create_test_thread(self):\n return self.create_thread(\n function=test_function,\n name='test',\n parameters={\n 'delay': 1,\n 'counter': 100,\n 'thread_id': f'thread{self.count}'\n }\n )", "def test_start_watchdog_thread(self):\n thread = FakeThread()\n reactor = FakeReactor()\n loop = EventLoop(lambda: reactor, lambda *args: None,\n watchdog_thread=thread)\n loop.setup()\n self.assertTrue(thread.started)", "def testProcessThreadAwareModule(self,\n mock_post_process,\n mock_pre_process,\n mock_threaded_process):\n cdm = CursesDisplayManager()\n test_state = state.DFTimewolfStateWithCDM(config.Config, cdm)\n test_state.command_line_options = {}\n test_state.LoadRecipe(test_recipe.threaded_no_preflights, TEST_MODULES)\n\n # pylint: disable=line-too-long\n with mock.patch('threading.current_thread') as mock_current_thread, \\\n mock.patch.object(cdm, 'UpdateModuleStatus') as mock_update_status, \\\n mock.patch.object(cdm, 'UpdateModuleThreadState') as module_update_thread_state:\n mock_current_thread.return_value.name = 'ThreadName'\n test_state.SetupModules()\n test_state.RunModules()\n # pylint: enable=line-too-long\n\n mock_update_status.assert_has_calls([\n mock.call('ContainerGeneratorModule', Status.PENDING),\n mock.call('ThreadAwareConsumerModule', Status.SETTINGUP),\n mock.call('ThreadAwareConsumerModule', Status.PENDING),\n mock.call('ContainerGeneratorModule', Status.PROCESSING),\n mock.call('ContainerGeneratorModule', Status.COMPLETED),\n mock.call('ThreadAwareConsumerModule', Status.PREPROCESSING),\n mock.call('ThreadAwareConsumerModule', Status.PENDING),\n mock.call('ThreadAwareConsumerModule', Status.PROCESSING),\n mock.call('ThreadAwareConsumerModule', Status.POSTPROCESSING),\n mock.call('ThreadAwareConsumerModule', Status.COMPLETED)], True)\n self.assertEqual(mock_update_status.call_count, 11)\n\n module_update_thread_state.assert_has_calls([\n mock.call('ThreadAwareConsumerModule', Status.RUNNING,\n 'ThreadName', 'one'),\n mock.call('ThreadAwareConsumerModule', Status.COMPLETED,\n 'ThreadName', 'one'),\n mock.call('ThreadAwareConsumerModule', Status.RUNNING,\n 'ThreadName', 'two'),\n mock.call('ThreadAwareConsumerModule', Status.COMPLETED,\n 'ThreadName', 'two'),\n mock.call('ThreadAwareConsumerModule', Status.RUNNING,\n 'ThreadName', 'three'),\n mock.call('ThreadAwareConsumerModule', Status.COMPLETED,\n 'ThreadName', 'three')], True)\n\n self.assertEqual(module_update_thread_state.call_count, 6)\n\n self.assertEqual(mock_threaded_process.call_count, 3)\n self.assertEqual(mock_post_process.call_count, 1)\n self.assertEqual(mock_pre_process.call_count, 1)\n self.assertEqual(3,\n len(test_state.GetContainers(thread_aware_modules.TestContainer)))", "def testProcessThreadAwareModule(self,\n mock_post_process,\n mock_pre_process,\n mock_threaded_process):\n test_state = state.DFTimewolfState(config.Config)\n test_state.command_line_options = {}\n test_state.LoadRecipe(test_recipe.threaded_no_preflights, TEST_MODULES)\n test_state.SetupModules()\n test_state.RunModules()\n self.assertEqual(mock_threaded_process.call_count, 3)\n self.assertEqual(mock_post_process.call_count, 1)\n self.assertEqual(mock_pre_process.call_count, 1)\n self.assertEqual(3,\n len(test_state.GetContainers(thread_aware_modules.TestContainer)))", "def test_case_run_in_new_process(self):\n BasicMultiprocessCase.pid_queue = self.pid_queue\n 
BasicMultiprocessCase.post_timeout_event = self.post_timeout_event\n\n MockSuite1.components = (BasicMultiprocessCase,)\n\n self.runner.run(MockSuite1)\n\n case_pid = self.pid_queue.get_nowait()\n\n self.assertNotEqual(case_pid, os.getpid(),\n \"%r wasn't run in a new process\" % MockSuite1)", "def test_start_thread(self):\n self.controller.faucet.start = Mock()\n self.controller.faucet.stop = Mock()\n self.controller.stream.save_as_closed = Mock()\n with patch('threading.Thread.start') as mock_start:\n thread = self.controller._start_stream()\n self.assertIs(thread.daemon, True)\n self.assertEqual(mock_start.call_count, 1)", "def test_multithreading():", "def _start_thread(self, queue, tid, tname):\n actorState = self.actorState\n actorState.threads = {}\n actorState.threads[tid] = threading.Thread(target=sopTester.FakeThread, name=tname, args=[actorState.actor,actorState.queues])\n actorState.threads[tid].daemon = True\n actorState.threads[tid].start()", "def prepareThread(self): \n setRuntime(self.runtime)", "def test_runnable_sync(self):\n run = RunAndExit()\n run.start_and_wait_completed(sync=True)", "def test_monitor_creation(processor, measurement, exopy_qtbot, dialog_sleep):\n def run(exopy_qtbot, measurement):\n t = Thread(target=processor._start_monitors, args=(measurement,))\n t.start()\n exopy_qtbot.wait_until(lambda: not t.is_alive(), timeout=10e3)\n exopy_qtbot.wait(dialog_sleep)\n\n processor.engine = processor.plugin.create('engine', 'dummy')\n\n measurement.add_tool('monitor', 'dummy')\n run(exopy_qtbot, measurement)\n assert len(processor.monitors_window.dock_area.dock_items()) == 1\n\n measurement.add_tool('monitor', 'dummy2')\n run(exopy_qtbot, measurement)\n assert len(processor.monitors_window.dock_area.dock_items()) == 2\n\n measurement.remove_tool('monitor', 'dummy2')\n run(exopy_qtbot, measurement)\n assert len(processor.monitors_window.dock_area.dock_items()) == 1\n\n measurement.add_tool('monitor', 'dummy3')\n run(exopy_qtbot, measurement)\n assert len(processor.monitors_window.dock_area.dock_items()) == 2\n\n measurement.add_tool('monitor', 'dummy4')\n run(exopy_qtbot, measurement)\n assert len(processor.monitors_window.dock_area.dock_items()) == 2\n\n processor.plugin.stop()\n assert not processor.monitors_window", "def test_06_switch_thread(self):\n time.sleep(0.5) # allows debugger to start\n self.ikpdb.set_breakpoint(DEBUGGED_PROGRAM,\n line_number=42)\n\n self.ikpdb.run_script()\n debugged_thread = None\n debugged_thread_name = ''\n for i in range(5):\n i_msg = self.ikpdb.receive()\n self.assertEqual(i_msg['command'], \n \"programBreak\", \n \"Received: %s while expecting 'programBreak'\" % (i_msg['command'],))\n if debugged_thread is None:\n debugged_thread = i_msg['frames'][0]['thread']\n debugged_thread_name = i_msg['frames'][0].get('thread_name')\n print(\"thread_ident=%s, thread_name=%s\" % (debugged_thread, debugged_thread_name,))\n else:\n print(\"thread_ident=%s, thread_name=%s\" % (i_msg['frames'][0]['thread'], \n i_msg['frames'][0].get('thread_name'),))\n self.assertEqual(i_msg['frames'][0]['thread'], \n debugged_thread,\n \"Debugged thread has changed (i=%s, \"\n \"first_thread=%s:%s, last_thread=%s:%s)\" % (\n i,\n debugged_thread, debugged_thread_name,\n i_msg['frames'][0]['thread'], i_msg['frames'][0].get('thread_name')))\n if i==4:\n threads_dict = i_msg['threads']\n test_threads = {td['name']:td['ident'] for td in filter(lambda t: t['name'].startswith(\"Thread-\"), [threads_dict[ident] for ident in threads_dict])}\n if debugged_thread_name 
== 'Thread-1':\n next_thread_ident = test_threads['Thread-2']\n next_thread_name = 'Thread-2'\n else:\n next_thread_ident = test_threads['Thread-1']\n next_thread_name = 'Thread-1'\n print(\"Switching to thread: %s, %s\" % (next_thread_ident, next_thread_name))\n reply = self.ikpdb.set_debugged_thread(next_thread_ident)\n self.ikpdb.resume()\n\n\n for i in range(5):\n i_msg = self.ikpdb.receive()\n self.assertEqual(i_msg['command'], \n \"programBreak\", \n \"Received: %s while expecting 'programBreak'\" % (i_msg['command'],))\n print(\"thread_ident=%s, thread_name=%s\" % (i_msg['frames'][0]['thread'], \n i_msg['frames'][0].get('thread_name'),))\n self.assertEqual(i_msg['frames'][0]['thread'], \n next_thread_ident,\n \"Debugged thread has changed (i=%s, \"\n \"first_thread=%s:%s, last_thread=%s:%s)\" % (\n i,\n debugged_thread, debugged_thread_name,\n i_msg['frames'][0]['thread'], i_msg['frames'][0].get('thread_name')))\n self.ikpdb.resume()", "def test_01_same_thread(self):\n time.sleep(0.5) # allows debugger to start\n self.ikpdb.set_breakpoint(DEBUGGED_PROGRAM, line_number=42)\n self.ikpdb.run_script()\n debugged_thread = None\n debugged_thread_name = ''\n for i in range(10):\n \n i_msg = self.ikpdb.receive()\n self.assertEqual(i_msg['command'], \n \"programBreak\", \n \"Received: %s while expecting 'programBreak'\" % (i_msg['command'],))\n if debugged_thread is None:\n debugged_thread = i_msg['frames'][0]['thread']\n debugged_thread_name = i_msg['frames'][0].get('thread_name')\n print(\"thread_ident=%s, thread_name=%s\" % (debugged_thread, debugged_thread_name,))\n else:\n print(\"thread_ident=%s, thread_name=%s\" % (i_msg['frames'][0]['thread'], \n i_msg['frames'][0].get('thread_name'),))\n self.assertEqual(i_msg['frames'][0]['thread'], \n debugged_thread,\n \"Debugged thread has changed (i=%s, \"\n \"first_thread=%s:%s, last_thread=%s:%s)\" % (\n i,\n debugged_thread, debugged_thread_name,\n i_msg['frames'][0]['thread'], i_msg['frames'][0].get('thread_name')))\n self.ikpdb.resume()", "def test_create_producer(self):\n try:\n test_producer = Producer(self.msg_queue, self.queue_lock, self.topic, self.properties_file)\n except Exception as e:\n self.fail(f\"test_create_producer() failed with exception: {e}\")\n\n try:\n test_producer.start()\n except Exception as e:\n self.fail(f\"test_producer.start() in test_create_consumer() failed with exception: {e}\")\n\n # Sleep for a couple seconds to allow the thread to come up.\n time.sleep(2)\n self.assertEqual(3, threading.active_count()) # Main thread, producer thread, kafka-python sender daemon.\n\n test_producer.stop()\n test_producer.join()\n self.assertEqual(2, threading.active_count())", "def wait_test_done(self):\n self.test_thread.join()\n self.logger.info('Test thread is done')", "def test_subprocess_killed(self):\n SubprocessCreationCase.pid_queue = self.pid_queue\n SubprocessCreationCase.post_timeout_event = self.post_timeout_event\n\n MockSuite1.components = (SubprocessCreationCase,)\n\n self.runner.run(MockSuite1)\n\n self.validate_test_processes(2)", "def start_thread_with_cleanup(test_instance, thread_object, timeout=1,\n start_timeout=None):\n test_instance.addCleanup(thread_object.join, timeout=timeout)\n test_instance.addCleanup(thread_object.stop)\n if start_timeout is not None:\n thread_object.start(timeout=start_timeout)\n else:\n thread_object.start()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Starts the engine, which is in charge of running all the routines. This method does not return unless there is a fatal error with the engine.
def start_engine(): global _CONNECTION global _ENGINE _ENGINE = RemoteDispatchEngine(_CONNECTION) engine_results_thread = threading.Thread( target=_remote_dispatch_engine_result_thread) engine_results_thread.start() _ENGINE.start()
[ "def start_engine(self):\n if self.update: # if update specified\n self.start_update() # start the update process\n try:\n # Start the core process\n self.start_core_process()\n except KeyboardInterrupt:\n for process in self.process_pool:\n process.terminate()\n except Exception as e:\n self.logger.log(\n \"Error occurred: \" + str(e),\n logtype=\"error\"\n )\n\n # After completing all the process\n self.logger.log(\n \"Collecting found malicious files\",\n logtype=\"info\"\n )\n print(\"[!] Collecting found malicious files, please wait...\")\n # Sleep for 10 seconds to reset process\n time.sleep(10)\n # Clear screen\n print(chr(27) + \"[2J\")\n # Run the cleaner\n if self.auto_delete:\n # Auto delete all\n self.cleaner_obj.auto_delete()\n else:\n # Manually delete selected\n self.cleaner_obj.clean()", "async def run_runtime(self) -> None:\n self._state.set(RuntimeStates.starting)\n await asyncio.gather(\n self._start_multiplexer(), self._start_agent_loop(), self._start_storage()\n )", "def run(self):\n cherrypy.engine.SIGHUP = None\n cherrypy.engine.SIGTERM = None\n cherrypy.engine.autoreload_on = False\n\n # User config file if specified\n if self.configFile:\n cherrypy.config.update(self.configFile)\n # Override explicitly passed config options\n cherrypy.config.update(self.configDict)\n \n cherrypy.tree.mount(self.httpTree)\n cherrypy.server.quickstart()\n cherrypy.engine.start(blocking=False)\n \n # Loop till done\n finished = False\n while not finished:\n time.sleep(5)\n finished = self.exitFlag\n \n # When done, exit gracefully\n self._suicide()", "def start(self):\r\n from ubcs_auxiliary.threading import new_thread\r\n new_thread(self.run)", "def start_core_process(self):\n # Create scanner engine process\n scanner_engine_process = multiprocessing.Process(target=self.scanner_engine_obj.start_scanner_engine)\n # Create monitor engine process\n monitor_engine_process = multiprocessing.Process(target=self.monitor_engine_obj.start_monitor_engine)\n\n # Add scanner process to process pool\n self.process_pool.append(scanner_engine_process)\n # Add monitor process to process pool\n self.process_pool.append(monitor_engine_process)\n\n # Start all the process\n for process in self.process_pool:\n process.start()\n\n # Complete (join) all the process\n for process in self.process_pool:\n process.join()", "def __init__(self):\n\n self.handle_sysargs()\n self.init_engine()\n self.init_input()\n self.init_caches()\n\n self.start_game()\n self.running = True # When this becomes false, main loop inside run() will quit\n\n self.run()\n self.quit()", "async def start(self):\n self.internal_backend.set_loop(self.loop)\n await self._load_external_backend()\n await self._resolve_promised_settings()", "def create_engine(self):\n g_start()\n if self.engine is None:\n self.engine = create_engine(self.db_engine_path)\n g_end('created engine')", "def main():\n start()", "def start(self) -> None:\n while True:\n # check queue for command\n command: EngineCommand = self._queue.get()\n\n if command.quit:\n break\n elif command.stop:\n continue\n\n self._heuristic = self._choose_heuristic(command.search_options)\n self._start_timer(command.search_options)\n self._search(command.search_options.board, command.search_options.depth)", "def _engine_started(self) -> Deferred:\n return deferred_from_coro(self._launch_browser())", "def start(self):\n self.threadpool.callInThread(self.run)", "def run(self) -> None:\n try:\n if self.jobs > 1 and len(self.filenames) > 1:\n self.run_parallel()\n else:\n 
self.run_serial()\n except KeyboardInterrupt:\n LOG.warning(\"Flake8 was interrupted by the user\")\n raise exceptions.EarlyQuit(\"Early quit while running checks\")", "def _StartSingleThread(self, options):\n collection_queue = queue.SingleThreadedQueue()\n storage_queue = queue.SingleThreadedQueue()\n self._engine = engine.Engine(collection_queue, storage_queue)\n\n self._engine.SetSource(\n self._source_path_spec, resolver_context=self._resolver_context)\n\n logging.debug(u'Starting preprocessing.')\n pre_obj = self.PreprocessSource(options)\n\n # TODO: move FindAllParsers to engine as a class method?\n filter_query = getattr(options, 'parsers', '')\n self._parsers = putils.FindAllParsers(\n pre_obj=pre_obj, config=options, parser_filter_string=filter_query)\n self._parser_names = [parser.parser_name for parser in self._parsers['all']]\n\n self._PreprocessSetCollectionInformation(options, pre_obj)\n\n logging.debug(u'Preprocessing done.')\n\n if 'filestat' in self._parser_names:\n include_directory_stat = True\n else:\n include_directory_stat = False\n\n filter_file = getattr(options, 'file_filter', None)\n if filter_file:\n filter_find_specs = engine_utils.BuildFindSpecsFromFile(\n filter_file, pre_obj=pre_obj)\n else:\n filter_find_specs = None\n\n self._collector = self._engine.CreateCollector(\n include_directory_stat, vss_stores=self._vss_stores,\n filter_find_specs=filter_find_specs,\n resolver_context=self._resolver_context)\n\n self._DebugPrintCollector(options)\n\n logging.debug(u'Starting collection.')\n self._collector.Collect()\n logging.debug(u'Collection done.')\n\n extraction_worker = self._CreateExtractionWorker(0, options, pre_obj)\n\n logging.debug(u'Starting extraction worker.')\n extraction_worker.Run()\n logging.debug(u'Extraction worker done.')\n\n self._engine.SignalEndOfInputStorageQueue()\n\n output_module = getattr(options, 'output_module', None)\n if output_module:\n storage_writer = storage.BypassStorageWriter(\n storage_queue, self._storage_file_path,\n output_module_string=output_module, pre_obj=pre_obj)\n else:\n storage_writer = storage.StorageFileWriter(\n storage_queue, self._storage_file_path,\n buffer_size=self._buffer_size, pre_obj=pre_obj)\n\n logging.debug(u'Starting storage.')\n storage_writer.WriteEventObjects()\n logging.debug(u'Storage done.')\n\n self._resolver_context.Empty()", "def run(self):\n\n\t\ttime.sleep(1)\n\t\tself.state = 'running' \t\t# update function state of biochemistry object\n\n\t\t#----------------------- Flowcell preparation ----------------------------------\n\n\t\tif self.cycle[0:2] == 'WL' and self.flowcell == 0:\t# if white light image cycle on flowcell 0\n\t\t\tself.init()\t\t\t\t\t# do only once at beginning\n\t\t\t#self.exo_start()\n\t\t\tself.logging.info(\"%s\\t%i\\t--> Device initialization and Exonuclease I digestion is done: [%s]\\n\" % (self.cycle_name, self.flowcell, self.state))\n\n\t\telif self.cycle[0:2] == 'WL' and self.flowcell == 1:\t# if white light image cycle on flowcell 1\n\t\t\t#self.exo_start()\n\t\t\tself.logging.info(\"%s\\t%i\\t--> Exonuclease I digestion is done: [%s]\\n\" % (self.cycle_name, self.flowcell, self.state))\n\n\t\telse:\n\t\t\tself.cycle_ligation() # perform query cycle on selected flowcell", "def run(self):\n logger.debug(\"run() was called\")\n \n try:\n self.srv = RPyCServer(CrispyService, hostname=self.host, port=self.port, authenticator=self.auth)\n self.srv.start()\n except Exception as e:\n print e", "def run(self):\n\n self.start_process()\n cwd = os.getcwd()\n\n 
db_file_obj = Database.db_find_by_id(self.file_id)\n Database.db_gui_insert_newtype(db_file_obj['Name'].split(\".\")[-1])\n output_obj = self.check_cuckoo(db_file_obj['location'])\n\n for module in self.modules:\n if module in self.modules_ignore:\n continue\n\n #location main python file in modules folder on system\n location_of_module = '{0}/modules/{1}/{1}.py'.format(cwd, module)\n module_dir = '{0}/modules/{1}/'.format(cwd, module)\n\n os.chdir(module_dir)\n p = subprocess.Popen(['python', \"{0}.py\".format(module), db_file_obj['location']], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n os.chdir(cwd)\n stdoutdata, stderrdata = p.communicate()\n\n #if we get error data the module 'failed'\n module_passed = True\n if stderrdata:\n module_passed = False\n self.modules[module] = module_passed\n\n output = self.processData(stdoutdata)\n output_obj[module] = output\n\n Database.db_update_malware_on_id(db_file_obj[\"_id\"], output_obj)\n Database.db_update_process(self.id, self.to_database_file())\n\n output_obj = self.check_cuckoo(output_obj)\n\n self.finish_process()\n Database.db_update_malware_on_id(db_file_obj[\"_id\"], output_obj)\n Database.db_update_process(self.id, self.to_database_file())", "def main():\n\n # Initialize config directory\n config_dir_path = str(Path.home()) + \"/.config/Kaspa/\"\n if not os.path.exists(config_dir_path):\n os.makedirs(config_dir_path)\n print(\"Created Folder\" + config_dir_path)\n\n # Initialize Logger\n logger = logging.getLogger(\"Kaspa\")\n handler = logging.FileHandler(config_dir_path + '/Kaspa.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n\n # add modules\n logger.info(\"Loading Modules...\")\n init_modules()\n\n # Initialize Config\n Config.set_instance(config_dir_path)\n Config.get_instance().load_modules()\n\n # Initialize signal handling for Sigint\n signal.signal(signal.SIGINT, sigint_handler)\n\n # Start mopidy server\n devnull = open(os.devnull, 'w')\n logger.info(\"Starting mopidy server...\")\n subprocess.call(\"killall mopidy\", shell=True, stderr=devnull, stdout=devnull, stdin=devnull)\n subprocess.call(\"mopidy -q &\", shell=True, stderr=devnull, stdout=devnull, stdin=devnull)\n logger.info(\"Mopidy server started\")\n\n # start communicators\n logger.info(\"Starting Communicators...\")\n start_communicators()", "def start(self):\r\n if self.initial_state == None: # Check that an initial state was declared\r\n raise RuntimeError(\"No initial state set on the state machine.\")\r\n\r\n self.current_state = self.initial_state\r\n\r\n for state in self.states.values():\r\n state.generator = state.handler_func(self)\r\n next(state.generator) # start up the co-routine\r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
It adds an inline comment to selected lines based on the file extension.
def add_inline_comment(area): comment = TABLE.get(os.path.splitext(area.filename)[1], DEFAULT) area.replace_ranges('sel', '^ *|^\t*', lambda data, index0, index1: '%s%s ' % (data, comment)) area.clear_selection() area.chmode('NORMAL')
[ "def _comment_format(self, path):\n _, extension = os.path.splitext(path)\n return '# {}\\n' if extension == '.py' else '<!-- {} -->'", "def test_include_filelist_with_full_line_comment(self):\n self.ParseTest([(\"--include-filelist\", \"file\")],\n [(), ('1',), ('1', '1'), ('1', '1', '2'),\n ('1', '1', '3')],\n ['- testfiles/select/1/1/1\\n'\n '# This is a test\\n'\n 'testfiles/select/1/1\\n'\n '- testfiles/select/1\\n'\n '- **'])", "def ingest_comments(self, raw_lines):\n # First get a dictionary with every existing line of code. That way\n # we know whether to look for an inline comment or a full line comment\n for file in self.output_files:\n all_lines_dict = {}\n for cfunction in file.functions.values():\n # Source: https://stackoverflow.com/questions/38987/how-do-i\n # -merge-two-dictionaries-in-a-single-expression-in-python\n # -taking-union-o\n all_lines_dict = {**all_lines_dict, **cfunction.lines}\n\n # Going through all lines in the script we are parsing\n for index in range(len(raw_lines)):\n # Line numbers count from 1 while list starts from 0, so we need to offset by 1\n if (index+1) in all_lines_dict:\n # Looking for inline comment\n code_line = all_lines_dict[index+1]\n comment = raw_lines[index][code_line.end_char_index:].lstrip()\n\n # Verify there is a comment present\n if len(comment) > 0 and comment[0] == \"#\":\n # Trim off the comment symbol as it will be changed\n # to the C++ style comment\n all_lines_dict[index+1].comment_str = comment[1:].lstrip()\n\n else:\n # Determine which function the line belongs to\n for function in file.functions.values():\n if function.lineno < index + 1 < function.end_lineno:\n line = raw_lines[index]\n comment = line.lstrip()\n if len(comment) > 0 and comment[0] == \"#\":\n # C++ uses '//' to indicate comments instead of '#'\n comment = line.replace(\"#\", \"//\", 1)\n function.lines[index + 1] = cline.CPPCodeLine(index + 1,\n index + 1,\n len(line),\n 0,\n comment)\n break\n else:\n line = raw_lines[index]\n comment = line.lstrip()\n if len(comment) > 0 and comment[0] == \"#\":\n # We add an extra indent on code not in a function\n # since it will go into a function in C++\n comment = cline.CPPCodeLine.tab_delimiter + line.replace(\"#\", \"//\", 1)\n file.functions[\"0\"].lines[index + 1] = cline.CPPCodeLine(index + 1,\n index + 1,\n len(line),\n 0,\n comment)\n\n # Sort function line dictionaries so output is in proper order\n for function in file.functions.values():\n sorted_lines = {}\n for line in sorted(function.lines.keys()):\n sorted_lines[line] = function.lines[line]\n function.lines = sorted_lines", "def include(filename, first=None, after=None, numlines=None, numblanks=None, dedent=True):\n global LAST_LINES, LAST_FILENAME\n if filename != LAST_FILENAME:\n with open(filename) as f:\n lines = LAST_LINES = f.readlines()\n LAST_FILENAME = filename\n else:\n lines = LAST_LINES\n\n including = \"\".join(selected_lines(lines, first, after, numlines, numblanks))\n if dedent:\n including = textwrap.dedent(including)\n\n cog.outl(\"```\")\n cog.out(including)\n cog.outl(\"```\")", "def comment_toggle(code_edit):\n blocks = get_selected_blocks(code_edit)\n\n # iterate through lines in doc commenting or uncommenting\n # based on whether everything is commented or not\n commentAllOut = any([not str(block.text()).lstrip().startswith('#')\n for block in blocks])\n if commentAllOut:\n for block in blocks:\n cursor = QtGui.QTextCursor(block)\n cursor.select(QtGui.QTextCursor.LineUnderCursor)\n selectedText = cursor.selectedText()\n 
right_split = len(selectedText.lstrip())\n count = len(selectedText)\n split_index = count-right_split\n split_text = selectedText[split_index:]\n newText = ' '*split_index + '#' + split_text\n cursor.insertText(newText)\n else:\n for block in blocks:\n cursor = QtGui.QTextCursor(block)\n cursor.select(QtGui.QTextCursor.LineUnderCursor)\n selectedText = cursor.selectedText()\n newText = str(selectedText).replace('#', '', 1)\n cursor.insertText(newText)", "def Comment(self):\n sel = self.GetSelection()\n start = self.LineFromPosition(sel[0])\n end = self.LineFromPosition(sel[1])\n if start>end: #swap around\n start,end=end,start\n #start an undo mark\n self.BeginUndoAction()\n for ln in range(start, end + 1):\n linestart = self.PositionFromLine(ln)\n self.InsertText(linestart, '#')\n #finish the undo mark\n self.EndUndoAction()", "def add_indention_to_include_file_content(block: Block) -> str:\n blanks_to_add = ' ' * block.include_file_number_of_blanks_to_add_to_content\n l_sliced_content = block.include_file_sliced_content.split('\\n')\n l_sliced_content = [''.join((blanks_to_add, line)).rstrip() for line in l_sliced_content]\n content = '\\n'.join(l_sliced_content)\n return content", "def test_indented_code_comment(self):\n \n inp = '1_5_code_comment.txt'\n self.run_single_file_case(inp)", "def show_comments(self):\n\n cf = '\\n'.join(str(self[x]) for x in sorted(\n set(self.selected_ones()) & set(self.commented_ones())))\n ucf = self.uncommented_files_summary()\n if cf and ucf:\n cf += '\\n\\n'\n if cf or ucf:\n print cf + ucf", "def rm_inline_comment(area):\n\n comment = TABLE.get(os.path.splitext(area.filename)[1], DEFAULT)\n area.replace_ranges('sel', '^ *%s ?|^\\t*%s ?' % (comment, comment), \n lambda data, index0, index1: data.replace(\n '%s ' % comment, '').replace(comment, ''))\n area.clear_selection()\n area.chmode('NORMAL')", "def _write_additional_lines(self, f, lines, formats=None):\n if not isinstance(lines, list):\n lines = [lines]\n if formats:\n lines = [i.format(formats) for i in lines]\n f.writelines([i + '\\n' for i in lines])", "def _handleInline(self, line):\r\n\r\n if not(line):\r\n return [self.doc.createTextNode(' ')]\r\n\r\n for pattern in self.inlinePatterns:\r\n list = self._applyPattern( line, pattern)\r\n if list: return list\r\n\r\n return [self.doc.createTextNode(line)]", "def test_already_formatted_block_comment(self):\n \n inp = '2_5_block_comment.txt'\n self.run_single_file_case(inp)", "def handle_file(filename):\n include_block = []\n with open(filename, encoding=\"utf-8\") as filep:\n for i, line in enumerate(filep):\n # find a set of #include lines (with no separation)\n included_filename = get_included_filename(line)\n if included_filename:\n include_block.append(included_filename)\n else:\n # check that block was alphabetical\n if len(include_block) > 0:\n if not is_block_correct_order(include_block):\n print(\"Non-alphabetical include block\")\n print(\"\\t%s\" % filename)\n print(\"\\tstarts line %i\" % i)\n print(\"\\t%s\" % include_block)\n include_block = []", "def cmd_comment(line: str) -> str:\n return f\"@REM {line}\"", "def select_lines(self):\r\n self.embed.description = ''\r\n for line in range(0, self.no_lines()):\r\n self.embed.description += self.lines[line]\r\n if len(self.embed.description) > self.bot.settings.max_chars:\r\n self.embed.description = self.embed.description[:self.bot.settings.max_chars]\r\n if self.embed.description.count('```') % 2 != 0:\r\n self.embed.description += '```'\r\n if len(self.lines) > self.no_lines():\r\n 
self.embed.description += '\\n*Continued...*'", "def merge_continued_lines(lines, f_ext):\n chg = True\n while chg:\n chg = False\n i = 0\n while i < len(lines):\n line = lines[i]\n if line_is_continuation(line, f_ext):\n assert i > 0, \"Weird continuation line (line {}): {}\".format(\n i + 1, line\n )\n prev_line_code = lines[i - 1]\n curr_line_code = line.lstrip()[1:] # remove continuation char\n merged_code = (\n prev_line_code.rstrip()\n + \" \"\n + curr_line_code.lstrip()\n + \"\\n\"\n )\n lines[i - 1] = merged_code\n lines.pop(i)\n chg = True\n elif line_is_continued(line):\n assert i < len(lines) - 1 # there must be a next line\n next_line_code = lines[i + 1]\n curr_line_code = line.rstrip()[\n :-1\n ].rstrip() # remove continuation char\n merged_code = curr_line_code + \" \" + next_line_code.lstrip()\n lines[i] = merged_code\n lines.pop(i + 1)\n chg = True\n\n i += 1\n return lines", "def commentify(lang):\n plaintext = pyperclip.paste().split('\\n')\n\n if lang == 'python':\n comment = ['###\\n']\n char = ' # '\n end = '###\\n'\n\n else:\n comment = ['/*\\n']\n char = ' * '\n end = '*/\\n'\n\n for line in plaintext:\n comment.append(char + line + '\\n')\n\n comment.append(end)\n return ''.join(comment)", "def test_indented_block_comment(self):\n \n inp = '2_4_block_comment.txt'\n self.run_single_file_case(inp)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
It removes the inline comments.
def rm_inline_comment(area): comment = TABLE.get(os.path.splitext(area.filename)[1], DEFAULT) area.replace_ranges('sel', '^ *%s ?|^\t*%s ?' % (comment, comment), lambda data, index0, index1: data.replace( '%s ' % comment, '').replace(comment, '')) area.clear_selection() area.chmode('NORMAL')
[ "def remove_commentlines(self):\n\n tmp = self.main.splitlines()\n tmp = list(itertools.filterfalse(re.compile(r\"^\\s*%.*$\").match, tmp))\n self.main = \"\\n\".join(tmp)", "def remove_inline_comments(_code):\n lines = _code.split('\\n')\n counter = 0\n for i in range(len(lines)):\n inline_comment = parse_line_comment(lines[i])\n if inline_comment != \"\": counter += 1\n lines[i] = lines[i].replace(inline_comment, \"\")\n\n return '\\n'.join(lines), counter", "def remove_comments(self):\n self.main = remove_comments(self.main)", "def remove_comments(text):\n return re.sub(r' //.*\\n', r'', text)", "def strip_comments(text):\n \n # (m?) enables multiline mode\n return re.sub(r'(?m)^ *#.*\\n?', '', text).strip()", "def _remove_comments(source):\n comment_re = r'(/[*].*?[*]/)|(//[^\\n]*)'\n return re.sub(comment_re, '', source, flags=re.MULTILINE | re.DOTALL)", "def removeComments(self,origFile):\n return remove_comments_and_docstrings(origFile)", "def remove_comments(tex):\n return re.sub(r'%(.+)\\n', r'', tex)", "def _strip_comments(code):\n return re.sub(r'(?m)^ *#.*\\n?', '', code)", "def remove_comments(source):\n return re.sub(r'#[^#\\n]+', '', source)", "def testInlineComments(self):\n contents = validate_tag_consistency.TAG_HEADER + \"\"\"\ncrbug.com/1234 [ win ] foo/test [ Failure ]\ncrbug.com/2345 [ win ] foo/test [ Failure ] # finder:disable\ncrbug.com/3456 [ win ] foo/test [ Failure ]\n\"\"\"\n stale_expectations = [\n data_types.Expectation('foo/test', ['win'], ['Failure'])\n ]\n expected_contents = validate_tag_consistency.TAG_HEADER + \"\"\"\ncrbug.com/2345 [ win ] foo/test [ Failure ] # finder:disable\n\"\"\"\n with open(self.filename, 'w') as f:\n f.write(contents)\n removed_urls = expectations.RemoveExpectationsFromFile(\n stale_expectations, self.filename)\n self.assertEqual(removed_urls, set(['crbug.com/1234', 'crbug.com/3456']))\n with open(self.filename) as f:\n self.assertEqual(f.read(), expected_contents)", "def remove_comments(source):\n return re.sub(r\";.*\\n\", \"\\n\", source)", "def remove_comment(self, lines):\r\n return [self.remove_comment_in_a_line(line) for line in lines]", "def cleanup_comment(raw_comment):\n def pop_prepending_empty_lines(lines):\n first_non_empty_line_idx = 0\n for line in lines:\n if line == '':\n first_non_empty_line_idx += 1\n else:\n break\n return lines[first_non_empty_line_idx:]\n\n import string\n lines = raw_comment.split('\\n')\n chars_to_strip = '/' + '*' + '!' 
+ string.whitespace\n lines = [line.lstrip(chars_to_strip) for line in lines]\n lines = pop_prepending_empty_lines(lines)\n clean_lines = []\n is_brief_comment = True\n for line in lines:\n if line == '' and is_brief_comment:\n # Skip lines that belong to brief comment.\n is_brief_comment = False\n continue\n if is_brief_comment:\n continue\n clean_lines.append(line)\n return '\\n'.join(clean_lines)", "def test_remove_comments_singleLine_and_MultiLines():\n javaString = '''\n /**\n * Compares two {@code int} values numerically.\n * The value returned is identical to what would be returned by:\n * <pre>\n * Integer.valueOf(x).compareTo(Integer.valueOf(y))\n * </pre>\n *\n * @param x the first {@code int} to compare\n */\n if(x < y) { // begin if block\n x = y;\n y = 0;\n } // end if block\n\n '''\n actualString = java_to_string.remove_comments(javaString)\n # print(\"actual: \" , repr(actualString))\n correctString = ' if(x < y) { x = y; y = 0; } '\n assert actualString == correctString", "def ClearExternalComment(matchobj):\n\treturn ''", "def UnComment(self):\n sel = self.GetSelection()\n start = self.LineFromPosition(sel[0])\n end = self.LineFromPosition(sel[1])\n if start>end: #swap around\n start,end=end,start\n #start an undo mark\n self.BeginUndoAction()\n for ln in range(start, end + 1):\n linestart = self.PositionFromLine(ln)\n if chr(self.GetCharAt(linestart)) == '#':\n #set cursor to the right of the #\n self.SetCurrentPos(linestart + 1)\n #delete to the beginning of th line\n self.DelLineLeft()\n #finish the undo mark\n self.EndUndoAction()", "def remove_comments_and_spaces(segment):\n pattern = re.compile(r\"\\s+\") # remove spaces\n segment = re.sub(pattern, '', segment)\n pattern = re.compile(r\"//.*\") # remove comments\n segment = re.sub(pattern, '', segment)\n return segment", "def strip_comments(lines: list[str]) -> list[str]:\n global results\n results = []\n for line in lines:\n index = line.find('#')\n if index >= 0:\n modified = line[0:index]\n else:\n modified = line\n modified = modified.strip()\n if len(modified) > 0:\n results.append(modified)\n return results" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the u and f values at the collocation nodes -> corresponds to a single sweep over all nodes
def update_nodes(self): # get current level and problem description L = self.level P = L.prob # only if the level has been touched before assert L.status.unlocked # get number of collocation nodes for easier access M = self.coll.num_nodes # initialize integral terms with zeros, will add stuff later integral = [P.dtype_u(P.init, val=0.0) for l in range(M)] # gather all terms which are known already (e.g. from the previous iteration) # this corresponds to SF(u^k) - SdF(u^k) + tau (note: have integrals in pos and vel!) for m in range(M): for j in range(M + 1): # build RHS from f-terms (containing the E field) and the B field f = P.build_f(L.f[j], L.u[j], L.time + L.dt * self.coll.nodes[j - 1]) # add SQF(u^k) - SxF(u^k) for the position integral[m].pos += L.dt * (L.dt * (self.SQ[m + 1, j] - self.Sx[m + 1, j]) * f) # add SF(u^k) - STF(u^k) for the velocity integral[m].vel += L.dt * (self.S[m + 1, j] - self.ST[m + 1, j]) * f # add tau if associated if L.tau[m] is not None: integral[m] += L.tau[m] # tau is 0-to-node, need to change it to node-to-node here if m > 0: integral[m] -= L.tau[m - 1] # do the sweep for m in range(0, M): # build rhs, consisting of the known values from above and new values from previous nodes (at k+1) tmp = P.dtype_u(integral[m]) for j in range(m + 1): # build RHS from f-terms (containing the E field) and the B field f = P.build_f(L.f[j], L.u[j], L.time + L.dt * self.coll.nodes[j - 1]) # add SxF(u^{k+1}) tmp.pos += L.dt * (L.dt * self.Sx[m + 1, j] * f) # add pos at previous node + dt*v0 tmp.pos += L.u[m].pos + L.dt * self.coll.delta_m[m] * L.u[0].vel # set new position, is explicit L.u[m + 1].pos = tmp.pos # get E field with new positions and compute mean L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m]) ck = tmp.vel # do the boris scheme L.u[m + 1].vel = P.boris_solver(ck, L.dt * self.coll.delta_m[m], L.f[m], L.f[m + 1], L.u[m]) # indicate presence of new values at this level L.status.updated = True return None
[ "def update_nodes(self):\n\n # get current level and problem description\n L = self.level\n P = L.prob\n\n # only if the level has been touched before\n assert L.status.unlocked\n\n # get number of collocation nodes for easier access\n M = self.coll.num_nodes\n\n # gather all terms which are known already (e.g. from the previous iteration)\n # this corresponds to u0 + QF(u^k) - QIFI(u^k) - QEFE(u^k) + tau\n\n # get QF(u^k)\n integral = self.integrate()\n\n # This is somewhat ugly, but we have to apply the mass matrix on u0 only on the finest level\n if L.level_index == 0:\n u0 = P.apply_mass_matrix(L.u[0])\n else:\n u0 = L.u[0]\n\n for m in range(M):\n # subtract QIFI(u^k)_m + QEFE(u^k)_m\n for j in range(M + 1):\n integral[m] -= L.dt * (self.QI[m + 1, j] * L.f[j].impl + self.QE[m + 1, j] * L.f[j].expl)\n # add initial value\n integral[m] += u0\n # add tau if associated\n if L.tau[m] is not None:\n integral[m] += L.tau[m]\n\n # do the sweep\n for m in range(0, M):\n # build rhs, consisting of the known values from above and new values from previous nodes (at k+1)\n rhs = P.dtype_u(integral[m])\n for j in range(m + 1):\n rhs += L.dt * (self.QI[m + 1, j] * L.f[j].impl + self.QE[m + 1, j] * L.f[j].expl)\n\n # implicit solve with prefactor stemming from QI\n L.u[m + 1] = P.solve_system(\n rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m]\n )\n # update function values\n L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])\n\n # indicate presence of new values at this level\n L.status.updated = True\n\n return None", "def sweep(self,color) :\n\n tmp = int(4*self.param.n_cells)\n self.scattering_src = self.scattering_src.reshape(self.param.n_mom,tmp)\n\n flux_moments = np.zeros((4*self.param.n_mom*self.param.n_cells))\n psi_ij = np.zeros((self.quad.n_dir,4*self.param.n_cells))\n \n if color=='red' :\n i_begin = 0\n i_end = self.param.n_x-1\n j_begin = 0\n j_end = self.param.n_y-1\n elif color=='black' :\n i_begin = 1\n i_end = self.param.n_x-2\n j_begin = 0\n j_end = self.param.n_y-1\n elif color=='orange' :\n i_begin = 0\n i_end = self.param.n_x-1\n j_begin = 1\n j_end = self.param.n_y-2\n else :\n i_begin = 1\n i_end = self.param.n_x-2\n j_begin = 1\n j_end = self.param.n_y-2\n \n for i in xrange(i_begin,i_end,2) :\n for j in xrange(j_begin,j_end,2) :\n for idir in xrange(0,self.quad.n_dir) :\n psi = np.zeros((4*self.param.n_cells))\n\n# Direction alias\n omega_x = self.quad.omega[idir,0]\n omega_y = self.quad.omega[idir,1]\n\n# Upwind/downwind indices \n if omega_x>0.0 :\n sx = 0\n x_begin = i\n x_end = i+2\n x_incr = 1\n else :\n sx = 1\n x_begin = i+1\n x_end = i-1\n x_incr = -1\n if omega_y>0.0 :\n sy = 0\n y_begin = j\n y_end = j+2\n y_incr = 1\n else :\n sy = 1\n y_begin = j+1\n y_end = j-1\n y_incr = -1\n\n# Compute the gradient\n gradient = omega_x*(-self.fe.x_grad_matrix+self.x_down[sx,:,:])+\\\n omega_y*(-self.fe.y_grad_matrix+self.y_down[sy,:,:])\n\n for m in xrange(x_begin,x_end,x_incr) :\n for n in xrange(y_begin,y_end,y_incr) : \n i_mat = self.param.mat_id[i,j]\n sig_t = self.param.sig_t[i_mat]\n\n# Volumetric term of the rhs\n i_src = self.param.src_id[m,n]\n rhs = self.param.src[i_src]*self.fe.width_cell[0]*\\\n self.fe.width_cell[1]*np.ones((4))/4.\n\n# Get location in the matrix\n ii = t_s.mapping(m,n,self.param.n_x)\n\n# Add scattering source contribution \n scat_src = np.dot(self.quad.M[idir,:],self.scattering_src[:,ii])\n rhs += scat_src\n\n# Block diagonal term\n L = gradient+sig_t*self.fe.mass_matrix\n\n# Upwind term in 
x\n if m>0 and sx==0 :\n jj = t_s.mapping(m-1,n,self.param.n_x)\n if m==x_begin :\n rhs -= omega_x*np.dot(self.x_up[sx,:,:],\\\n self.all_psi[idir][jj])\n else :\n rhs -= omega_x*np.dot(self.x_up[sx,:,:],psi[jj])\n elif m==0 and idir in self.most_n['left'] :\n rhs -= omega_x*np.dot(self.x_up[sx,:,:],\\\n self.param.inc_left[n]*np.ones((4)))\n if m<self.param.n_x-1 and sx==1 :\n jj = t_s.mapping(m+1,n,self.param.n_x)\n if m==x_begin :\n rhs -=omega_x*np.dot(self.x_up[sx,:,:],\\\n self.all_psi[idir][jj])\n else :\n rhs -= omega_x*np.dot(self.x_up[sx,:,:],psi[jj])\n elif m==self.param.n_x-1 and idir in self.most_n['right'] :\n rhs -= omega_x*np.dot(self.x_up[sx,:,:],\\\n self.param.inc_right[j]*np.ones((4)))\n\n# Upwind term in y\n if n>0 and sy==0 :\n jj = t_s.mapping(m,n-1,self.param.n_x)\n if n==y_begin :\n rhs -= omega_y*np.dot(self.y_up[sy,:,:],\\\n self.all_psi[idir][jj])\n else :\n rhs -= omega_y*np.dot(self.y_up[sy,:,:],psi[jj])\n elif n==0 and idir in self.most_n['bottom'] :\n rhs -= omega_y*np.dot(self.y_up[sy,:,:],\\\n self.param.inc_bottom[i]*np.ones((4)))\n if n<self.param.n_y-1 and sy==1 :\n jj = t_s.mapping(m,n+1,self.param.n_x)\n if n==y_begin :\n rhs -= omega_y*np.dot(self.y_up[sy,:,:],psi[jj])\n else :\n rhs -= omega_y*np.dot(self.y_up[sy,:,:],psi[jj])\n elif n==self.param.n_y-1 and idir in self.most_n['top'] :\n rhs -= omega_y*np.dot(self.y_up[sy,:,:],\\\n self.param.inc_top[i]*np.ones((4)))\n\n psi[ii] = scipy.linalg.solve(L,rhs,sym_pos=False,lower=False,\n overwrite_a=True,overwrite_b=True)\n\n ratio = .25\n if i==0 or i==self.param.n_x-2 :\n ratio *= 2.\n if j==0 or j==self.param.n_y-2 :\n ratio *= 2.\n \n psi_ij[idir][ii] += ratio*psi[ii]\n\n# Update the flux_moments\n for k in xrange(0,self.param.n_mom) :\n k_begin = k*4*self.param.n_cells\n k_end = (k+1)*4*self.param.n_cells\n for idir in xrange(0,self.quad.n_dir) :\n flux_moments[k_begin:k_end] += self.quad.D[k,idir]*psi_ij[idir,:]\n \n# Update the angular flux\n self.update_angular_flux(psi_ij)\n\n return flux_moments", "def updating_ghost_cells(self,uu):\n\n\n for i in range(1, self.Nx +1):\n uu[i,0] = uu[i,2]\n uu[i,self.Ny+1] = uu[i,self.Ny-1]\n\n\n for j in range(1, self.Ny +1):\n uu[0,j] = uu[2,j]\n uu[self.Nx+1,j] = uu[self.Nx-1,j]\n \"\"\"\n\n uu[1:-1,0] = uu[1:-1,2]\n uu[1:-1,self.Ny+1] = uu[1:-1,self.Ny-1]\n uu[0,1:-1] = uu[2,1:-1]\n uu[self.Nx+1,1:-1] = uu[self.Nx-1,1:-1]\n \"\"\"", "def copy_values_to_grid(self):\n self.grid.at_node[\"flow__depth\"] = self.h\n self.grid.at_link[\"flow__horizontal_velocity\"] = self.u\n self.grid.at_link[\"flow__vertical_velocity\"] = self.v\n self.C_i[:, :] = self.C_init\n all_wet_nodes = np.where(self.h > self.h_w)\n self.C_i[:, all_wet_nodes] = self.Ch_i[:,\n all_wet_nodes] / self.h[all_wet_nodes]\n for i in range(self.number_gclass):\n self.grid.at_node[\"flow__sediment_concentration_\" +\n str(i)] = self.C_i[i, :]\n self.grid.at_node[\n \"flow_sediment_volume__horizontal_gradient_\" + str(i)\n ] = self.dChdx_i[i, :]\n self.grid.at_node[\n \"flow_sediment_volume__vertical_gradient_\" + str(i)\n ] = self.dChdy_i[i, :]\n self.grid.at_node[\n \"bed__sediment_volume_per_unit_area_\" + str(i)\n ] = self.bed_thick_i[i, :]\n self.grid.at_node[\n \"bed__active_layer_fraction_\" + str(i)\n ] = self.bed_active_layer[i, :]\n self.C[:] = np.sum(self.C_i, axis=0)\n self.grid.at_node[\"flow__sediment_concentration_total\"] = self.C\n self.grid.at_node[\"topographic__elevation\"] = self.eta\n self.grid.at_node[\"bed__thickness\"] = self.bed_thick\n 
self.grid.at_node[\"flow__surface_elevation\"] = self.eta + self.h\n self.grid.at_node[\"flow__horizontal_velocity_at_node\"] = self.u_node\n self.grid.at_node[\"flow__vertical_velocity_at_node\"] = self.v_node\n self.grid.at_link[\"flow_horizontal_velocity__horizontal_gradient\"] = self.dudx\n self.grid.at_link[\"flow_horizontal_velocity__vertical_gradient\"] = self.dudy\n self.grid.at_link[\"flow_vertical_velocity__horizontal_gradient\"] = self.dvdx\n self.grid.at_link[\"flow_vertical_velocity__vertical_gradient\"] = self.dvdy\n self.grid.at_node[\"flow_depth__horizontal_gradient\"] = self.dhdx\n self.grid.at_node[\"flow_depth__vertical_gradient\"] = self.dhdy\n if self.model == \"4eq\":\n self.grid.at_link[\"flow__TKE\"] = self.Kh\n self.grid.at_link[\"flow_TKE__horizontal_gradient\"] = self.dKhdx\n self.grid.at_link[\"flow_TKE__vertical_gradient\"] = self.dKhdy", "def update_sweep_points(self):\n\n # update self.sweep_points\n swpts = deepcopy(self.sweep_points)\n par_names = []\n vals = []\n for par in swpts.get_parameters(0):\n values = swpts[par]\n if np.unique(values[:3]).size > 1:\n # the sweep points are not repeated 3 times (for each\n # pair of base_ops)\n par_names += [par]\n vals += [np.repeat(values, 3)]\n self.sweep_points.update_property(par_names, values=vals)\n\n # update sweep points in preprocessed_task_list\n for task in self.preprocessed_task_list:\n swpts = task['sweep_points']\n for par in swpts.get_parameters(0):\n values = swpts[par]\n if np.unique(values[:3]).size > 1:\n swpts.update_property([par], values=[np.repeat(values, 3)])", "def update_step():\n # for all states\n for x in range(0, mapper.MAX_CELLS_X):\n for y in range(0, mapper.MAX_CELLS_Y):\n for a in range(0, mapper.MAX_CELLS_A):\n if loc.bel_bar[x, y, a]>0.0001:\n loc.bel[x, y, a] = np.prod(loc.gaussian(loc.obs_range_data, mapper.obs_views[x, y, a, :], loc.sensor_sigma)) * loc.bel_bar[x, y, a]\n loc.bel = loc.bel / np.sum(loc.bel) # normalize belief grid", "def update_sweep_points(self):\n for task in self.preprocessed_task_list:\n swpts = task['sweep_points']\n if swpts.find_parameter('dc_voltage_offsets') is not None:\n if swpts.find_parameter('dc_voltages') is not None:\n # Do not overwrite the values provided by the user\n log.warning(f'Both \"dc_voltages\" and \"dc_voltage_offsets\" '\n f'were provided for {task[\"qb\"]}. 
The latter '\n f'will be ignored.')\n continue\n\n fluxline = task['fluxline']\n values_to_set = np.array(swpts.get_sweep_params_property(\n 'values',\n dimension=swpts.find_parameter('dc_voltage_offsets'),\n param_names='dc_voltage_offsets')) + fluxline()\n # update sweep points\n par_name = f'{task[\"prefix\"]}dc_voltages'\n self.sweep_points.add_sweep_parameter(par_name, values_to_set,\n 'V', 'DC voltage', 1)", "def update_memvecs(self):\n d = self.get_all_leaves_with_path(self.level0)\n for node in d:\n node.set_memvec(d[node])", "def gather_constant_folding(g, node):\n node_to_del = []\n\n pre_data_node = helper.find_node_by_output_name(g, node.input[0])\n pre_indices_node = helper.find_node_by_output_name(g, node.input[1])\n\n shape, data = helper.constant_to_list(pre_data_node)\n indice_shape, indices = helper.constant_to_list(pre_indices_node)\n if type(indice_shape) == int:\n indices = indices[0]\n\n np_data = np.reshape(data, shape)\n if len(node.attribute) < 1:\n axis = 0\n else:\n axis = node.attribute[0].i\n\n new_data = np.take(np_data, indices, axis=axis)\n new_shape = new_data.shape\n new_node = helper.list_to_constant(\n node.output[0],\n new_shape,\n new_data.flatten().tolist(),\n data_type=pre_data_node.attribute[0].t.data_type,\n )\n\n node_to_del.extend([node, pre_data_node, pre_indices_node])\n g.node.extend([new_node])\n\n val_info_1 = helper.find_value_by_name(g, node.input[0])\n val_info_2 = helper.find_value_by_name(g, node.input[1])\n val_info_3 = helper.find_value_by_name(g, node.output[0])\n new_val_info = onnx.helper.make_tensor_value_info(\n new_node.output[0], pre_data_node.attribute[0].t.data_type, new_shape\n )\n\n if val_info_1 is not None:\n g.value_info.remove(val_info_1)\n if val_info_2 is not None:\n g.value_info.remove(val_info_2)\n if val_info_3 is not None:\n g.value_info.remove(val_info_3)\n g.value_info.extend([new_val_info])\n\n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n\n return True", "def update_model(self):\n for j, agent_pos in enumerate(self.aPos):\n agent_index = j + 1\n var_key = new_var('x', self.time, agent_index)\n factor = t.tensor(self.N * [self.MIN], dtype=self.dtype)\n factor[self.get_index(agent_pos)] = self.NEU\n self.mrf.set_unary_factor(var_key, factor)\n self.mrf.create_matrices() # IMPORTANT", "def apply_bc(self):\n nsize = len(self._nodes)\n ncount = 0\n for node in self._nodes:\n for dof in range(3):\n i = nsize*dof + ncount\n if not node._fixed[dof]:\n # not fixed: apply load to right hand side vector\n self._R[i] = node._r[dof]\n else:\n # is fixed: apply displacement and set corresponding equations to identity\n self._R[i] = node._u[dof]\n self._K[i].fill(0)\n self._K[i,i] = 1\n # TODO: apply suture constraints\n ncount = ncount + 1", "def update_cluster(self, cluster_id, values):", "def update_euclidean(self, U, V):\n X = self.X\n W = self.W\n lmbda = self.lmbda\n L = self.L\n D = self.D\n # update V\n V = V * np.divide(U.T @ X + lmbda * (V @ W), U.T @ U @ V + lmbda * (V @ D))\n # update U\n U = U * np.divide(X @ V.T, U @ V @ V.T)\n # calc objective func\n R = X - (U @ V)\n obj_val = np.sum(R * R) + lmbda * np.trace(V @ L @ V.T)\n return(U, V, obj_val)", "def update(self, *args):\n return _vnl_vectorPython.vnl_vectorF_update(self, *args)", "def set_and_calculate(self, new_values):\n self.iteration += 1\n t = 0\n for x in range(0, len(self.nodes)):\n if not np.any(self.nodes[x].optimize):\n continue\n for val in range(0, len(self.nodes[x].optimize)):\n if self.nodes[x].optimize[val] != 0:\n 
self.nodes[x].pos[val] = new_values[t]\n t += 1\n self.set_beams()\n self.get_weight()\n if self.inter_plot:\n try:\n self.plot_construction()\n except:\n print(\"\\nWarning plot failed \\n\")\n return self.weight", "def cgm_update(self, eta, u, v, alpha):\n self.Y = (1 - eta) * self.Y + eta * u.dot(v.dot(self.Omega))\n self.W = (1 - eta) * self.W + eta * (self.Psi.dot(u)).dot(v)", "def sgd_update(trainables, learning_rate=1e-2):\n for node in trainables:\n node.value -= learning_rate * node.gradients[node]", "def grad_free_energy_at_cells(self, uh, c_bcs):\n\n uh_val = self.space.value(uh, c_bcs) # (NQ,NC)\n guh_val = self.space.grad_value(uh, c_bcs) # (NQ,NC,2)\n\n guh_val[..., 0] = 3 * uh_val ** 2 * guh_val[..., 0] - guh_val[..., 0]\n guh_val[..., 1] = 3 * uh_val ** 2 * guh_val[..., 1] - guh_val[..., 1]\n return guh_val # (NQ,NC,2)", "def test_update_C(p1,p2,p3):\n na = (0,0) # placeholder point, not used\n update_C(np.array([[p1,na],[p2,na],[p3,na]]), verbose=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
3. Set Dispersion Size
def measureMomentumSpreadSetDispSize(self): '''Fix Dispersion section needs work!''' print 'does nothing'
[ "def setSize_0(self, size):", "def resize(self, size):\r\n self.instance.resize_volume(size)\r\n self.size = size", "def setSize(self, width, height):", "def _set_resolution(self, size: (float, float)) -> None:\n self._stream.set(CAP_PROP_FRAME_WIDTH, size[0])\n self._stream.set(CAP_PROP_FRAME_HEIGHT, size[1])", "def set_size(self, size):\n self.height = int(size)\n self.width = int(size) * 2", "def updateCatSize(self): \n self.size=(Window.size[0]*1/4,Window.size[1]*2/3)", "def setFilmSize(self, size):\n self.filmSize = size", "def set_width(self, width: float):", "def test_set_size(self):\n self.label.set_size(.75, .8)\n self.assertEqual(self.label.get_size(), (1024, 614))\n self.assertTrue(self.label.width > 0.75 * 0.99)\n self.assertTrue(self.label.width < 0.75 * 1.01)", "def set_size(self,new_width,new_height):\r\n self.SW = (SW[0]-new_width/2,SW[1]-new_height/2)\r\n self.NE = (NE[0]+new_width/2,NE[1]+new_height/2)", "def resize_vdi(self, name, size):\n wrap_popen('collie', 'vdi', 'resize', name, size)", "def _set_pixel_size(self) -> None:\n raise NotImplementedError", "def _set_pixel_size(self) -> None:\n # Using az resolution\n if self.sensor_mode == CapellaSensorMode.SP:\n def_pixel_size = 0.35\n def_res = 0.5\n elif self.sensor_mode == CapellaSensorMode.SM:\n def_pixel_size = 0.6\n def_res = 1.0\n elif self.sensor_mode == CapellaSensorMode.SS:\n def_pixel_size = 0.8\n def_res = 1.2\n else:\n raise InvalidProductError(f\"Unknown sensor mode: {self.sensor_mode}\")\n self.pixel_size = def_pixel_size\n self.resolution = def_res", "def setImagesize(self, W, H) -> None:\n ...", "def _set_size_labels(self):\n\t\t# TODO contrast this size with the size of the saved file\n\t\tpx_width = self._image.get_pixbuf_width()\n\t\tpx_height = self._image.get_pixbuf_height()\n\t\tif self.unit == 'in':\n\t\t\twidth = round(px_width/96.0, 2)\n\t\t\theight = round(px_height/96.0, 2)\n\t\telif self.unit == 'cm':\n\t\t\twidth = round(px_width/37.795275591, 2)\n\t\t\theight = round(px_height/37.795275591, 2)\n\t\telse:\n\t\t\twidth = px_width\n\t\t\theight = px_height\n\t\tself.label_width.set_label(self.units_dict[self.unit] % str(width))\n\t\tself.label_height.set_label(self.units_dict[self.unit] % str(height))", "def setSize(self, newsize: 'SbVec2s') -> \"void\":\n return _coin.SoRenderManager_setSize(self, newsize)", "def onSetVoxelSize(self, obj, evt, arg):\n\t\tself.voxelSize = arg", "def setRenderWindowSize(self, size):\t\t \n\t\tx, y = size\n\t\tif self.visualizer:\n\t\t\tself.visualizer.setRenderWindowSize((x, y))", "def updateSize(self):\n evalDeferred(self.updateSizeDeferred)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
5. Calculate Momentum Spread
def measureMomentumSpreadCalc(self): #if self.view.checkBox_4_s.isChecked()==True: self.pSpread = self.func.calcMomSpread(self.Cmagnets,'DIP01',self.Is,self.I)
[ "def measureMomentumSpreadCalc(self):\n #if self.view.checkBox_4_s.isChecked()==True:\n self.beamSigma = self.cam.getSigX(self.C2Vcam)\n self.Is = self.beamSigma/self.Dispersion\n self.pSpread = self.func.calcMomSpread(self.Cmagnets,self.dipole,self.Is,self.data.values['I_rough'])\n print str(datetime.datetime.now())[:-7], ', Momentum spread =', self.pSpread, 'MeV/c, =>', 100*self.pSpread/self.data.values['I_rough'], '%'\n\n #a = self.func.calcMomSpread(self.Cmagnets,'DIP01',self.Is,self.I)\n #print a\n #else:\n # print 'Not confirmed momentum measurement'", "def momentum(self, n_days):\n \n print(\"Computing momentum...\")\n momentum_list = []\n for idx, row in self.data.iterrows():\n current_idx = int(row['idx'])\n\n if current_idx <= len(self.data['close']) - n_days:\n \n # Formula C_t - C_t-n\n c_t = self.data['close'][current_idx]\n c_t_n_days = self.data['close'][n_days+current_idx-1]\n momentum_list.append(c_t-c_t_n_days)\n \n else:\n # If the index is out of bound. for example if n_days is 10 and we are at index < 10 we won't be able to compute the momentum \n momentum_list.append(0)\n \n \n self.data['momentum'.format(n_days)] = momentum_list \n print(\"Done !\")", "def angular_momentum(snap: SnapLike) -> ndarray:\n mass: ndarray = snap['mass']\n pos: ndarray = snap['position']\n vel: ndarray = snap['velocity']\n return (mass[:, np.newaxis] * np.cross(pos, vel)).sum(axis=0)", "def mpd(data):\n months_length = [\n 31.0,\n 28.0,\n 31.0,\n 30.0,\n 31.0,\n 30.0,\n 31.0,\n 31.0,\n 30.0,\n 31.0,\n 30.0,\n 31.0,\n ]\n mjjas = compute_season(data, [4, 5, 6, 7, 8], months_length)\n ndjfm = compute_season(data, [10, 11, 0, 1, 2], months_length)\n ann = compute_season(data, list(range(12)), months_length)\n\n annrange = MV2.subtract(mjjas, ndjfm)\n\n lat = annrange.getAxis(0)\n i, e = lat.mapInterval((-91, 0, \"con\"))\n if i > e: # reveresedlats\n tmp = i + 1\n i = e + 1\n e = tmp\n\n annrange[slice(i, e)] = -annrange[slice(i, e)]\n annrange.id = data.id + \"_ar\"\n annrange.longname = \"annual range\"\n\n mpi = MV2.divide(annrange, ann)\n mpi.id = data.id + \"_int\"\n mpi.longname = \"intensity\"\n\n return annrange, mpi", "def frame_moment ( frame , order , expression , cuts = '' ) :\n node = as_rnode ( frame )\n return SV.data_moment ( node , order = order , expression = expression , cuts = cuts )", "def FindBunchMeanMomentum(self):\r\n totalMomentum = np.array([0.0, 0.0, 0.0], dtype=float) # dummy variable\r\n\r\n for i in range(len(self.listOfParticles)):\r\n totalMomentum += self.listOfParticles[i].Momentum()\r\n \r\n return (totalMomentum / float(len(self.listOfParticles)))", "def som_eom(self) -> None:\n self.data['dt'] = pd.to_datetime(self.data.index,\n format='%Y-%m-%d')\n self.data['month'] = self.data['dt'].dt.month.diff()\n self.data['month'].fillna(0.0, inplace=True)\n\n self.data['week'] = self.data['dt'].dt.isocalendar().week.diff()\n self.data['week'].fillna(0.0, inplace=True)\n\n # Start of month.\n self.data['is_som'] = self.data['month'].apply(lambda x: 1 if x != 0.0 else 0)\n\n # End of month.\n self.data['is_eom'] = self.data['is_som'].shift(-1)\n self.data['is_eom'].fillna(0.0, inplace=True)\n self.data['is_eom'] = self.data['is_eom'].astype(int)\n\n # Start of week.\n self.data['is_sow'] = self.data['week'].apply(lambda x: 1 if x != 0.0 else 0)\n\n # End of week.\n self.data['is_eow'] = self.data['is_sow'].shift(-1)\n self.data['is_eow'].fillna(0.0, inplace=True)\n self.data['is_eow'] = self.data['is_eow'].astype(int)\n\n # Drop temp columns.\n 
self.data.drop(labels='month',\n axis='columns',\n inplace=True)\n self.data.drop(labels='week',\n axis='columns',\n inplace=True)\n self.data.drop(labels='dt',\n axis='columns',\n inplace=True)", "def guess_angular_momentum_ratios(obs):\n\n from scipy.interpolate import UnivariateSpline\n\n proper_motion = guess_proper_motion(obs)\n tb1 = {field:obs[field][2:-2] for field in obs}\n for comp in ['alpha', 'beta']:\n spl = UnivariateSpline(obs['t'], obs[comp], k=5)\n spl.set_smoothing_factor(0)\n der = spl.derivative()\n tb1['dot '+comp] = der(tb1['t'])\n aux = numpy.vstack((\n numpy.ones_like(tb1['beta']),\n -(tb1['dot alpha']-proper_motion['dot alpha 0']),\n -(tb1['dot beta']-proper_motion['dot beta 0']))).T\n vec = numpy.einsum('n,ni', tb1['vz'], aux)\n mat = numpy.einsum('ni,nj', aux, aux)\n res = numpy.linalg.solve(mat, vec)\n return {'w 0':res[0], 'd*lx/lz': res[1], 'd*ly/lz':res[2]}", "def get_nth_moment (result_times, n):\n if len(result_times) == 0:\n print(\"WARNING: get_nth_moment called with empty result_times list\")\n return float('NaN') # This should not happen\n powers = map(lambda x: x**n, result_times)\n return sum(powers)/float(len(result_times))", "def momentum_resolution(p) :\n return 0.005", "def gen_AngularMomentum (s):\n \n Sz = np.diag(np.arange(-s, s+1))\n eigenvalues = np.arange(-s, s+1)\n\n d = int(2*s) + 1\n I = np.identity(d)\n\n Splus = np.zeros((d, d))\n Sminus = np.zeros((d, d))\n\n for m in range(d - 1):\n splusfactor = sqrt(s*(s + 1) - eigenvalues[m]*(eigenvalues[m] + 1))\n sminusfactor = sqrt(s*(s + 1) - eigenvalues[m+1]*(eigenvalues[m+1] - 1))\n Splus = Splus + splusfactor * np.outer(I[m, :], I[m+1,:])\n Sminus = Sminus + sminusfactor * np.outer(I[m+1, :], I[m, :])\n\n Sx = 1/2 * (Splus + Sminus)\n Sy = -1j/2 * (Splus - Sminus)\n\n return Sx, Sy, Sz", "def profile_spreads(self) -> (float, float):\n return self.moment(2, 0), self.moment(0, 2)", "def get_monthly_data(self, means = True):\n\n delta = self.time[1] - self.time[0]\n if delta == 1:\n # daily data\n day, mon, year = self.extract_day_month_year()\n monthly_data = []\n monthly_time = []\n # if first day of the data is not the first day of month - shift month\n # by one to start with the full month\n if day[0] != 1:\n mi = mon[0]+1 if mon[0] < 12 else 1\n y = year[0] if mon[0] < 12 else year[0] + 1\n else:\n mi = mon[0]\n y = year[0]\n start_idx = self.find_date_ndx(date(y, mi, 1))\n end_idx = self._shift_index_by_month(start_idx)\n while end_idx <= self.data.shape[0] and end_idx is not None:\n if means:\n monthly_data.append(np.nanmean(self.data[start_idx : end_idx, ...], axis = 0))\n else:\n monthly_data.append(np.nansum(self.data[start_idx : end_idx, ...], axis = 0))\n monthly_time.append(self.time[start_idx])\n start_idx = end_idx\n end_idx = self._shift_index_by_month(start_idx)\n if end_idx is None: # last piece, then exit the loop\n if means:\n monthly_data.append(np.nanmean(self.data[start_idx : , ...], axis = 0))\n else:\n monthly_data.append(np.nansum(self.data[start_idx : , ...], axis = 0))\n monthly_time.append(self.time[start_idx])\n self.data = np.array(monthly_data)\n self.time = np.array(monthly_time)\n elif abs(delta - 30) < 3.0:\n # monhtly data\n print('The data are already monthly values. 
Nothing happend.')\n else:\n raise Exception('Unknown temporal sampling in the field.')", "def second_deg_spreads(self) -> float:\n return self.moment(2, 2)", "def momentum_factor_backtest_monthly(price_data, rebal, lookback_period, n_securities, long_short, sample_start, sample_end):\n mom = price_data.pct_change(lookback_period).dropna()\n \n mom.columns= ['IG 1-3Y', 'IG 3-5Y', 'IG 7-10Y', 'US HY', 'Crossover', 'EM HY',\n '1-3Y UST', 'Intermediate UST', '7-10Y UST', 'Long Term UST']\n price_data.columns = ['IG 1-3YM', 'IG 3-5YM', 'IG 7-10YM', 'US HYM', 'CrossoverM', 'EM HYM',\n '1-3Y USTM', 'Intermediate USTM', '7-10Y USTM', 'Long Term USTM']\n\n data = mom.merge(price_data, on='DATE')\n \n #Convert Data Frequency to Rebalancing Frequency\n #data = data.asfreq(\"\"+str(rebalancing_period)+\"D\")\n if rebal =='Monthly':\n month1 = pd.Series(data.index.month)\n month2 = pd.Series(data.index.month).shift(-1)\n elif rebal =='Quarterly':\n month1 = pd.Series(data.index.quarter)\n month2 = pd.Series(data.index.quarter).shift(-1)\n mask = (month1 != month2)\n data = data[mask.values]\n data = data[sample_start:sample_end]\n data.dropna(inplace=True)\n \n rets = data['Crossover'].copy()*0\n rets.name = 'Momentum Strategy'\n \n if long_short == 'No':\n for i in range(len(data)-1):\n if n_securities == 1:\n rets[i+1] = data[str(data.iloc[i,:10].idxmax())+'M'][i+1]/data[str(data.iloc[i,:10].idxmax())+'M'][i]-1\n elif n_securities == 2:\n rets[i+1] = (data[str(data.iloc[i,:10].idxmax())+'M'][i+1]/data[str(data.iloc[i,:10].idxmax())+'M'][i]-1 + data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'M'][i+1]/data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'M'][i]-1)/2\n elif n_securities == 3:\n rets[i+1] = (data[str(data.iloc[i,:10].idxmax())+'M'][i+1]/data[str(data.iloc[i,:10].idxmax())+'M'][i]-1 + data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'M'][i+1]/data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'M'][i]-1 + data[str(data.iloc[i,:10].sort_values()[:8].idxmax())+'M'][i+1]/data[str(data.iloc[i,:10].sort_values()[:8].idxmax())+'M'][i]-1)/3\n \n if long_short == 'Yes':\n for i in range(len(data)-1):\n if n_securities == 1:\n rets[i+1] = data[str(data.iloc[i,:10].idxmax())+'M'][i+1]/data[str(data.iloc[i,:10].idxmax())+'M'][i]-1 - data[str(data.iloc[i,:10].sort_values()[0:].idxmin())+'M'][i+1]/data[str(data.iloc[i,:10].sort_values()[0:].idxmin())+'M'][i]-1\n elif n_securities == 2:\n rets[i+1] = (data[str(data.iloc[i,:10].idxmax())+'M'][i+1]/data[str(data.iloc[i,:10].idxmax())+'M'][i]-1 + data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'M'][i+1]/data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'M'][i]-1)/2 - (data[str(data.iloc[i,:10].sort_values()[0:].idxmin())+'M'][i+1]/data[str(data.iloc[i,:10].sort_values()[0:].idxmin())+'M'][i]-1 + data[str(data.iloc[i,:10].sort_values()[1:].idxmin())+'M'][i+1]/data[str(data.iloc[i,:10].sort_values()[1:].idxmin())+'M'][i]-1)/2\n elif n_securities == 3:\n rets[i+1] = (data[str(data.iloc[i,:10].idxmax())+'M'][i+1]/data[str(data.iloc[i,:10].idxmax())+'M'][i]-1 + data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'M'][i+1]/data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'M'][i]-1 + data[str(data.iloc[i,:10].sort_values()[:8].idxmax())+'M'][i+1]/data[str(data.iloc[i,:10].sort_values()[:8].idxmax())+'M'][i]-1)/3 - (data[str(data.iloc[i,:10].sort_values()[0:].idxmin())+'M'][i+1]/data[str(data.iloc[i,:10].sort_values()[0:].idxmin())+'M'][i]-1 + 
data[str(data.iloc[i,:10].sort_values()[1:].idxmin())+'M'][i+1]/data[str(data.iloc[i,:10].sort_values()[1:].idxmin())+'M'][i]-1 + data[str(data.iloc[i,:10].sort_values()[2:].idxmin())+'M'][i+1]/data[str(data.iloc[i,:10].sort_values()[2:].idxmin())+'M'][i]-1)/3\n \n \n #Merge Value Factor Returns Data with original data and other individual securities returns \n data = data.merge(rets, on='DATE')\n data.columns = ['IG 1-3 Yield', 'IG 3-5 Yield', 'IG 7-10 Yield', 'US HY Yield',\n 'Crossover Yield', 'EM High Yield', 'UST 1-3 Yield', 'UST Int Yield',\n 'UST 7-10 Yield', 'UST Long Yield', 'IG 1-3', 'IG 3-5', 'IG 7-10', 'US HY',\n 'Crossover', 'EM HY', 'UST 1-3', 'UST Int',\n 'UST 7-10', 'UST Long', 'Momentum Strategy']\n m_rets = data[['IG 1-3', 'IG 3-5', 'IG 7-10', 'US HY', 'Crossover', 'EM HY', 'UST 1-3', 'UST Int', 'UST 7-10', 'UST Long']].pct_change().dropna().merge(rets, on='DATE')\n \n #Add Equally Weighted Portfolio Returns for comparison as well\n m_rets['EW'] = m_rets[['IG 1-3', 'IG 3-5', 'IG 7-10', 'US HY', 'Crossover', 'EM HY', 'UST 1-3', 'UST Int', 'UST 7-10', 'UST Long']].mean(axis=1)\n \n return m_rets", "def calculate_monthly_ET0():\n def calc_ET0(RA, tmin, tmax, P, temperature_mult_factor):\n \"\"\"Modified Hargreaves from Droogers and Allen 2002.\n\n Parameters:\n RA (float): extraterrestrial radiation for this month\n tmin (numpy.ndarray): minimum temperature for the month\n IN DEGREES * 10\n tmax (numpy.ndarray): maximum temperature for the month\n IN DEGREES * 10\n P (numpy.ndarray): monthly precipitation, mm\n temperature_mult_factor (float): factor by which temperature inputs\n must be multiplied in order to get degrees C\n\n Returns:\n monthly reference evapotranspiration (mm)\n\n \"\"\"\n valid_mask = (\n (~numpy.isclose(tmin, tmin_nodata)) &\n (~numpy.isclose(tmax, tmax_nodata)) &\n (~numpy.isclose(P, precip_nodata)))\n tavg = numpy.empty(tmin.shape, dtype=numpy.float32)\n tavg[:] = precip_nodata\n tavg[valid_mask] = (\n (tmin[valid_mask] + tmax[valid_mask] / 2.) *\n temperature_mult_factor)\n\n tdiff = numpy.empty(tmin.shape, dtype=numpy.float32)\n tdiff[:] = precip_nodata\n tdiff[valid_mask] = (\n (tmax[valid_mask] - tmin[valid_mask]) * temperature_mult_factor)\n\n result = numpy.empty(tmin.shape, dtype=numpy.float32)\n result[:] = precip_nodata\n result[valid_mask] = (\n 0.0013 * 0.408 * RA * (tavg[valid_mask] + 17.) 
*\n (numpy.power((tdiff[valid_mask] - 0.0123 * P[valid_mask]), 0.76)) *\n 29.5)\n return result\n\n # monthly extraterrestrial radiation for 10 deg S latitude\n # from table 2.6, FAO guidance\n radiation_dict = {\n 1: 39.5,\n 2: 39.3,\n 3: 37.8,\n 4: 34.6,\n 5: 31.1,\n 6: 29.1,\n 7: 29.8,\n 8: 32.8,\n 9: 36.3,\n 10: 38.5,\n 11: 39.3,\n 12: 39.4,\n }\n\n intermediate_dir = tempfile.mkdtemp()\n outer_ET_dir = \"F:/Moore_Amazon_backups/ET0\"\n\n # current\n current_precip_dir = \"F:/Moore_Amazon_backups/precipitation/current\"\n current_tmin_dir = \"E:/GIS_local_archive/General_useful_data/Worldclim_2.1/tmin\"\n current_tmax_dir = \"E:/GIS_local_archive/General_useful_data/Worldclim_2.1/tmax\"\n precip_eg = os.path.join(current_precip_dir, 'wc2.1_5m_prec_1.tif')\n tmin_eg = os.path.join(current_tmin_dir, 'wc2.1_5m_tmin_01.tif')\n tmax_eg = os.path.join(current_tmax_dir, 'wc2.1_5m_tmax_01.tif')\n tmin_nodata = pygeoprocessing.get_raster_info(tmin_eg)['nodata'][0]\n tmax_nodata = pygeoprocessing.get_raster_info(tmax_eg)['nodata'][0]\n\n target_info = pygeoprocessing.get_raster_info(precip_eg)\n precip_nodata = target_info['nodata'][0]\n clipping_box = target_info['bounding_box']\n target_srs_wkt = target_info['projection_wkt']\n model_resolution = target_info['pixel_size'][0]\n temperature_mult_factor = 1. # current temperature in units of deg C\n for m in range(1, 13):\n out_path = os.path.join(\n outer_ET_dir, 'current', 'ET0_{}.tif'.format(m))\n if not os.path.isfile(out_path):\n raw_tmin_path = os.path.join(\n current_tmin_dir, 'wc2.1_5m_tmin_{:02}.tif'.format(m))\n tmin_path = os.path.join(intermediate_dir, 'tmin_proj.tif')\n clip_and_project_raster(\n raw_tmin_path, clipping_box, target_srs_wkt,\n model_resolution, intermediate_dir, '', tmin_path)\n\n raw_tmax_path = os.path.join(\n current_tmax_dir, 'wc2.1_5m_tmax_{:02}.tif'.format(m))\n tmax_path = os.path.join(intermediate_dir, 'tmax_proj.tif')\n clip_and_project_raster(\n raw_tmax_path, clipping_box, target_srs_wkt,\n model_resolution, intermediate_dir, '', tmax_path)\n\n precip_path = os.path.join(\n current_precip_dir, 'wc2.1_5m_prec_{}.tif'.format(m))\n # align tmin, tmax, precip\n base_raster_path_list = [tmin_path, tmax_path, precip_path]\n aligned_raster_path_list = [\n os.path.join(intermediate_dir, 'aligned_tmin.tif'),\n os.path.join(intermediate_dir, 'aligned_tmax.tif'),\n os.path.join(intermediate_dir, 'aligned_precip.tif')]\n pygeoprocessing.align_and_resize_raster_stack(\n base_raster_path_list, aligned_raster_path_list,\n ['near'] * len(base_raster_path_list),\n target_info['pixel_size'], 'intersection')\n\n radiation = radiation_dict[m]\n pygeoprocessing.raster_calculator(\n [(radiation, 'raw')] + [\n (path, 1) for path in aligned_raster_path_list] +\n [(temperature_mult_factor, 'raw')],\n calc_ET0, out_path, gdal.GDT_Float32, precip_nodata)\n\n # future\n precip_dir = \"F:/Moore_Amazon_backups/precipitation\"\n future_dir = 'E:/GIS_local_archive/General_useful_data/Worldclim_future_climate/cmip5/MIROC-ESM-CHEM'\n precip_eg = os.path.join(\n precip_dir, 'year_2050', 'rcp_2.6', \"mi26pr501.tif\")\n tmin_eg = \"E:/GIS_local_archive/General_useful_data/Worldclim_future_climate/cmip5/MIROC-ESM-CHEM/RCP2.6/2050/tmin/mi26tn505.tif\"\n tmax_eg = \"E:/GIS_local_archive/General_useful_data/Worldclim_future_climate/cmip5/MIROC-ESM-CHEM/RCP2.6/2050/tmax/mi26tx505.tif\"\n tmin_nodata = pygeoprocessing.get_raster_info(tmin_eg)['nodata'][0]\n tmax_nodata = pygeoprocessing.get_raster_info(tmax_eg)['nodata'][0]\n\n target_info = 
pygeoprocessing.get_raster_info(precip_eg)\n precip_nodata = target_info['nodata'][0]\n clipping_box = target_info['bounding_box']\n target_srs_wkt = target_info['projection_wkt']\n model_resolution = target_info['pixel_size'][0]\n temperature_mult_factor = 0.1 # future temperature in units of deg C * 10\n for year in ['50', '70']: # year after 2000\n for rcp in [2.6, 6.0, 8.5]: # RCP\n for m in range(1, 13):\n out_path = os.path.join(\n outer_ET_dir, 'year_20{}'.format(year),\n 'rcp_{}'.format(rcp), 'ET0_{}.tif'.format(m))\n if not os.path.isfile(out_path):\n # project and clip tmin and tmax to match precip\n raw_tmin_path = os.path.join(\n future_dir, \"RCP{}\".format(rcp), '20{}'.format(year),\n 'tmin', \"mi{}tn{}{}.tif\".format(\n int(rcp * 10), year, m))\n tmin_path = os.path.join(intermediate_dir, 'tmin_proj.tif')\n print(\"Processing raster {}\".format(tmin_path))\n clip_and_project_raster(\n raw_tmin_path, clipping_box, target_srs_wkt,\n model_resolution, intermediate_dir, '', tmin_path)\n\n raw_tmax_path = os.path.join(\n future_dir, \"RCP{}\".format(rcp), '20{}'.format(year),\n 'tmax', \"mi{}tx{}{}.tif\".format(\n int(rcp * 10), year, m))\n tmax_path = os.path.join(intermediate_dir, 'tmax_proj.tif')\n clip_and_project_raster(\n raw_tmax_path, clipping_box, target_srs_wkt,\n model_resolution, intermediate_dir, '', tmax_path)\n precip_path = os.path.join(\n precip_dir, 'year_20{}'.format(year),\n 'rcp_{}'.format(rcp),\n \"mi{}pr{}{}.tif\".format(int(rcp * 10), year, m))\n\n # align tmin, tmax, precip\n base_raster_path_list = [tmin_path, tmax_path, precip_path]\n aligned_raster_path_list = [\n os.path.join(intermediate_dir, 'aligned_tmin.tif'),\n os.path.join(intermediate_dir, 'aligned_tmax.tif'),\n os.path.join(intermediate_dir, 'aligned_precip.tif')]\n pygeoprocessing.align_and_resize_raster_stack(\n base_raster_path_list, aligned_raster_path_list,\n ['near'] * len(base_raster_path_list),\n target_info['pixel_size'], 'intersection')\n\n radiation = radiation_dict[m]\n pygeoprocessing.raster_calculator(\n [(radiation, 'raw')] + [\n (path, 1) for path in aligned_raster_path_list] +\n [(temperature_mult_factor, 'raw')],\n calc_ET0, out_path, gdal.GDT_Float32, precip_nodata)", "def linear_momentum(matchID, team, gd_vectors, window = 5, boolean = False):\n previous_results = get_window(matchID, team, gd_vectors, window, boolean)\n if not previous_results:\n return 0\n return sum(previous_results)", "def momentum(snap: SnapLike) -> ndarray:\n mass: ndarray = snap['mass']\n vel: ndarray = snap['velocity']\n return (mass[:, np.newaxis] * vel).sum(axis=0)", "def moment1_global(self) -> float:\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the auth_ims_client_secret of this ComAdobeGraniteAuthImsImplIMSAccessTokenRequestCustomizerImplProperties.
def auth_ims_client_secret(self, auth_ims_client_secret): self._auth_ims_client_secret = auth_ims_client_secret
[ "def set_oauth_app_info(self, client_id, client_secret, redirect_uri):\n self.client_id = client_id\n self.client_secret = client_secret\n self.redirect_uri = redirect_uri", "def get_client_secret_authorizer(self):\n client = globus_sdk.ConfidentialAppAuthClient(self.CLIENT_ID, self.CLIENT_SECRET)\n token_response = client.oauth2_client_credentials_tokens()\n\n # the useful values that you want at the end of this\n globus_transfer_data = token_response.by_resource_server['transfer.api.globus.org']\n globus_transfer_token = globus_transfer_data['access_token']\n\n return globus_sdk.AccessTokenAuthorizer(globus_transfer_token)", "def extra_authorize_data(self) -> dict[str, Any]:\n return {\"client_secret\": self.client_secret}", "def set_AccessTokenSecret(self, value):\n super(UpdateAccountSettingsInputSet, self)._set_input('AccessTokenSecret', value)", "def get_access_token_secret(self, client_key, token, request):\r\n log.debug('Get access token secret of %r for %r',\r\n token, client_key)\r\n tok = request.access_token or self._tokengetter(\r\n client_key=client_key,\r\n token=token,\r\n )\r\n if tok:\r\n request.access_token = tok\r\n return tok.secret\r\n return None", "def __create_access_token(self):\n authHeader = {}\n authData = {}\n # Encoding clientID and clientSecret in base64\n message = f\"{self.clientID}:{self.clientSecret}\"\n message_bytes = message.encode('ascii')\n base64_bytes = base64.b64encode(message_bytes)\n base64_message = base64_bytes.decode('ascii')\n\n authHeader['Authorization'] = \"Basic \" + base64_message\n authData['grant_type'] = 'client_credentials'\n response = requests.post(self.url.authUrl, headers=authHeader, data=authData)\n # request returns json\n responseObject = response.json()\n self.access_token = responseObject['access_token']", "def get_access_token(self, oauth_verifier):\n access_pair = super(SGAuthService, self).get_access_token(\n self._request_token,\n self._request_token_secret,\n data={'oauth_verifier': oauth_verifier}\n )\n self.access_token, self.access_token_secret = access_pair\n return access_pair", "def __init__(self, client_id, client_secret, access_token=None,\n refresh_token=None, expires_at=None, refresh_cb=None,\n redirect_uri=None, **kwargs):\n self.client = FitBarkOauth2Client(\n client_id,\n client_secret,\n access_token=access_token,\n refresh_token=refresh_token,\n expires_at=expires_at,\n refresh_cb=refresh_cb,\n redirect_uri=redirect_uri,\n **kwargs\n )", "def set_AWSAccessKeyId(self, value):\n super(UploadServerCertificateInputSet, self)._set_input('AWSAccessKeyId', value)", "def get_access_token(self):\n url = self.url + \"/tokens\"\n if not self.api_key or not self.client_id or not self.client_secret:\n raise DAAuthException(\n \"API_KEY, CLIENT_ID and CLIENT_SECRET are required to generate an access token\"\n )\n self.session.headers[\"Authorization\"] = \"Basic {}\".format(\n base64.b64encode(\n \":\".join([self.client_id, self.client_secret]).encode()\n ).decode()\n )\n self.session.headers[\"Content-Type\"] = \"application/x-www-form-urlencoded\"\n\n payload = {\"grant_type\": \"client_credentials\"}\n response = self.session.post(url, params=payload)\n self.logger.debug(\"Token response: \" + json.dumps(response.json(), indent=2))\n self.access_token = response.json()[\"access_token\"]\n self.session.headers[\"Authorization\"] = \"bearer {}\".format(self.access_token)\n return response.json()", "def _configure_access_token(client, access_token, user):\n # pylint: disable=protected-access\n\n # if we have a 
valid access token, use it\n # otherwise force a fetch for a new one and persist it\n authorizer = client._core._authorizer\n\n if access_token:\n # \"hydrate\" the authorizer from our stored access token\n authorizer.access_token = access_token.token_value\n authorizer._expiration_timestamp = access_token.token_expires_at.timestamp()\n authorizer.scopes = set([FULL_ACCESS_SCOPE])\n else:\n authorizer = client._core._authorizer\n authorizer.refresh()\n expires_at = datetime.fromtimestamp(authorizer._expiration_timestamp)\n RedditAccessToken.objects.create(\n user=user,\n token_value=authorizer.access_token,\n token_expires_at=expires_at.replace(tzinfo=pytz.utc),\n )\n\n return client", "def set_access_token(self, access_token: str):\n self.headers = {\n 'Authorization': f'{access_token}'\n }", "def get_client_credentials(self):\n if self.client_id == None or self.client_secret == None:\n raise Exception(\"You must set client_id and client_secret.\")\n else: \n client_creds = f\"{self.client_id}:{self.client_secret}\"\n client_creds_b64 = base64.b64encode(client_creds.encode())\n return client_creds_b64.decode()", "def update_body(self, body):\n body[\"data\"][\"AUTHENTICATOR\"] = OAUTH_AUTHENTICATOR\n body[\"data\"][\"TOKEN\"] = self._oauth_token", "def _get_access_token_claims(self, user, **options):\n\n return {}", "def set_access_token(access_token):\n global _access_token\n _access_token = access_token", "def set_client_credentials(client_id=None,\n client_secret=None,\n redirect_uri=None,\n scope=None):\n sessionenv.set('spotify_credentials',\n {'client_id': client_id,\n 'client_secret': client_secret,\n 'redirect_uri': redirect_uri,\n 'scope': scope})", "def __init__(\n self,\n # rest needed to be able to refresh access_token\n client_id=None,\n client_secret=None,\n expiration_buffer=EXPIRATION_BUFFER_DEFAULT,\n ):\n self.client_id = client_id\n self.client_secret = client_secret\n self.expiration_buffer = expiration_buffer\n\n # initialize oauth2 token attributes.\n # use self.update_token(...) to set those attributes\n self.token_type = \"Bearer\"\n self.access_token = self.id_token = self.refresh_token = self.scope = None\n self.expires_at = self.expires_in = None", "def set_refresh_token(self, refresh_token: str, gcid: Optional[str] = None) -> None:\n self.config.authentication.refresh_token = refresh_token\n self.config.authentication.gcid = gcid" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the customizer_type of this ComAdobeGraniteAuthImsImplIMSAccessTokenRequestCustomizerImplProperties.
def customizer_type(self, customizer_type): self._customizer_type = customizer_type
[ "def setCCM_TYPE(self, ccm_type) -> None:\n ...", "def client_type(self, client_type):\n \n self._client_type = client_type", "def set_image_type(self, content_type):\n content_types = RedditWallpaperChooser.constants.ACCEPTED_CONTENT_TYPES\n if content_type not in content_types:\n logger.warning(\n \"Unknown content type %s. Falling back to JPG.\",\n content_type\n )\n\n self.image_type = content_types.get(content_type, \"jpg\")", "def set_ContentType(self, value):\n super(UploadSessionFinishInputSet, self)._set_input('ContentType', value)", "def set_content_type( self, type ):\n self.headers[ \"content-type\" ] = type", "def set_override_type(self, override_type):\n if not isinstance(override_type, str) and override_type in [\"SHA256\", \"CERT\", \"IT_TOOL\"]:\n raise ApiError(\"Invalid override_type must be one of SHA256, CERT, IT_TOOL\")\n self._criteria[\"override_type\"] = override_type\n return self", "def set_type(self, ttype):\n self.type = ttype\n self.token.type = ttype", "def save_access_token(self, token, request):\r\n log.debug('Save access token %r', token)\r\n self._tokensetter(token, request)", "def set_access_token(self, access_token: str):\n self.headers = {\n 'Authorization': f'{access_token}'\n }", "def consent_token(self, consent_token):\n\n self._consent_token = consent_token", "def update_body(self, body):\n body[\"data\"][\"AUTHENTICATOR\"] = OAUTH_AUTHENTICATOR\n body[\"data\"][\"TOKEN\"] = self._oauth_token", "def set_AccessToken(self, value):\n super(UploadSessionFinishInputSet, self)._set_input('AccessToken', value)", "def optimization_type(self, optimization_type):\n\n self._optimization_type = optimization_type", "def _set_cameraType(self, *args) -> \"bool\" :\n return _core.Camera__set_cameraType(self, *args)", "def _get_access_token_claims(self, user, **options):\n\n return {}", "def zmi_update_resource_types(self, type_info=None, preview_action=None, default_resource=None, REQUEST=None):\n\n if type_info:\n self.updateResourceTypes(type_info)\n\n if preview_action:\n self.updatePreviewActions(preview_action)\n\n if default_resource is not None:\n self.default_resource = default_resource\n\n if REQUEST:\n REQUEST.RESPONSE.redirect(self.absolute_url() + '/zmi_resource_types')", "def _add_access_token_to_response(self, response, access_token):\n # type: (oic.message.AccessTokenResponse, se_leg_op.access_token.AccessToken) -> None\n response['access_token'] = access_token.value\n response['token_type'] = access_token.type\n response['expires_in'] = access_token.expires_in", "def set_actuator_type(self, context, type):\n editor = self._parent\n obj = editor.getSelected()[0]\n fsm, sensor = self._get_fsm_sensor()\n actuator = sensor.actuators[fsm.selected_actuator]\n actuator.type = type\n actuator.name = type\n self._initialize_actuator(obj, actuator)", "def patch_o_auth_access_token(self, name, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.patch_o_auth_access_token_with_http_info(name, body, **kwargs)\n else:\n (data) = self.patch_o_auth_access_token_with_http_info(name, body, **kwargs)\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
update the external IP information for the service
def _process_external_ip(self): self.infos.external_ip = self._find_external_ip() self._log_information(key='External IP', value=self.infos.external_ip, ljust=18)
[ "def test_ip_addresses_update(self):\n pass", "def test_ip_addresses_partial_update(self):\n pass", "def set_apiip_to_ext(self):\n ip = self.s.get(GET_IP_URL).text.strip()\n # TODO test if ipv4 returned\n return self.set_apiip(ip)", "def patch(self, request):\n address = request.DATA['address_id']\n port = request.DATA.get('port_id')\n if port is None:\n api.neutron.floating_ip_disassociate(request, address)\n else:\n api.neutron.floating_ip_associate(request, address, port)", "def set_mgmt_address(self, ip):\n self.update(mgmt_address=ip)", "def update_host(self):\n\t\tc = Common()\n\t\tl = Logger(c.client_name())\n\n\t\ttry:\n\n\t\t\tif self.__noop!='apply':\n\t\t\t\tself.noop_puppet()\n\n\t\t\telse:\n\t\t\t\tupdatePuppet = AddCert()\n\t\t\t\tupdatePuppet.start()\n\t\t\t\tupdatePuppet.join()\n\n\t\t\t\tprint '\\n' + '-'*60 + '\\n'\n\t\t\t\treturn 0\n\n\t\texcept Exception, e:\n\t\t\tprint 'error :', e", "def RefreshIPList(self): \n \n self.Counter = 0 \n self.AssignedIP = \"\"", "def update_address(self, address_details):\n pass", "def test_update_port_add_additional_ip(self):\n with self.subnet() as subnet:\n with self.port(subnet=subnet) as port:\n data = {'port': {'admin_state_up': False,\n 'fixed_ips': [{'subnet_id':\n subnet['subnet']['id']},\n {'subnet_id':\n subnet['subnet']['id']}]}}\n req = self.new_update_request('ports', data,\n port['port']['id'])\n res = self.deserialize(self.fmt, req.get_response(self.api))\n self.assertEqual(data['port']['admin_state_up'],\n res['port']['admin_state_up'])\n ips = res['port']['fixed_ips']\n self.assertEqual(2, len(ips))\n self.assertNotEqual(ips[0]['ip_address'],\n ips[1]['ip_address'])\n network_ip_net = netaddr.IPNetwork(subnet['subnet']['cidr'])\n self.assertIn(ips[0]['ip_address'], network_ip_net)\n self.assertIn(ips[1]['ip_address'], network_ip_net)", "def update_all(self):\n\n\t\ttry:\n\t\t\tclientListPipe = subprocess.Popen(['/usr/sbin/puppetca','-la'], stdout=subprocess.PIPE)\n\t\t\tclientList = clientListPipe.communicate()[0]\n\t\t\tpattern = re.compile(r'(\\S+)\\.%s' % self.re_domain)\n\t\t\tmatcher = re.findall(pattern,clientList)\n\n\t\t\tfor result in matcher:\n\t\t\t\tif re.match(r'^DNS:',result):\n\t\t\t\t\tpass\n\n\t\t\t\telse:\n\n\t\t\t\t\thost = '%s' % result\n\t\t\t\t\tself.__host = host\n\t\t\t\t\tenv.host_string = host\n\t\t\t\t\tself.update_host()\n\n\t\texcept Exception, e:\n\t\t\tprint 'error :', e", "def ex_update_cloud_ip(self, cloud_ip_id, reverse_dns):\r\n response = self._put('/%s/cloud_ips/%s' % (self.api_version,\r\n cloud_ip_id),\r\n {'reverse_dns': reverse_dns})\r\n return response.status == httplib.OK", "def post(self, service):\n\n ip_address = self._get_param('ip', None)\n if not ip_address and self._get_param('auto_ip', None):\n # Discovery ELB is the single proxy, take last ip in route\n forwarded_for = request.remote_addr\n parts = forwarded_for.split('.')\n # 192.168.0.0/16\n valid = (len(parts) == 4 and\n int(parts[0]) == 192 and\n int(parts[1]) == 168 and\n 0 <= int(parts[2]) <= 255 and\n 0 <= int(parts[3]) <= 255)\n if valid:\n ip_address = forwarded_for\n logger.info('msg=\"auto_ip success\" service={}, auto_ip={}'\n .format(service, ip_address))\n else:\n logger.warn('msg=\"auto_ip invalid\" service={} auto_ip={}'\n .format(service, ip_address))\n service_repo_name = self._get_param('service_repo_name', '')\n port = int(self._get_param('port', -1))\n revision = self._get_param('revision', None)\n last_check_in = datetime.utcnow()\n tags = self._get_param('tags', '{}')\n\n try:\n tags = 
json.loads(tags)\n except ValueError as ex:\n logger.exception(\"Failed to parse tags json: {}. Exception: {}\".format(tags, ex))\n return {\"error\": \"Invalid json supplied in tags\"}, 400\n\n host_service = host.HostService(BACKEND_STORAGE)\n success = host_service.update(service, ip_address, service_repo_name,\n port, revision, last_check_in, tags)\n\n statsd = get_stats(\"registration\")\n if success:\n response_code = 200\n statsd.incr(\"%s.success\" % service)\n else:\n response_code = 400\n statsd.incr(\"%s.failure\" % service)\n return {}, response_code", "def test_update_port_update_ips(self):\n with self.subnet() as subnet:\n with self.port(subnet=subnet) as port:\n data = {'port': {'admin_state_up': False,\n 'fixed_ips': [{'subnet_id':\n subnet['subnet']['id'],\n 'ip_address': '10.0.0.3'}]}}\n req = self.new_update_request('ports', data,\n port['port']['id'])\n res = self.deserialize(self.fmt, req.get_response(self.api))\n self.assertEqual(data['port']['admin_state_up'],\n res['port']['admin_state_up'])\n ips = res['port']['fixed_ips']\n self.assertEqual(1, len(ips))\n self.assertEqual('10.0.0.3', ips[0]['ip_address'], '10.0.0.3')\n self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])", "def test_azure_service_api_vm_floating_ip_put(self):\n pass", "def iplocation(ip):\r\n\r\n\r\n \r\n data = requests.get(api+ip).json()\r\n sys.stdout.flush()\r\n \r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Ip]:\", data['query'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Operateur]:\", data['isp'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Organisation]:\", data['org'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Ville]:\", data['city'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Region]:\", data['region'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Code postal]:\", data['zip'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Pays]:\", data['country'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Longitude]:\", data['lon'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Latitude]:\", data['lat'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Time zone]:\", data['timezone'])\r\n print(\"\")", "def update_record_ip(record, ip):\n if ip != record.data:\n print(\"Updating Record\")\n record.update(data=ip)\n print(\"IP updated to {ip}\".format(ip=ip))\n else:\n print(\"IP Address up to date.\")", "def update_ip_list():\n logger.info(\"Updating TOR exit node list from %s\" % CSV_URL)\n ips = fetch_tor_ip_list()\n cache.set(CACHE_KEY, ips, 3600*24*3)", "def service_ip(service):\n service_type = service['type']\n service_protocol = service['data']['proto']\n data = {\n \"type\":service_type,\n \"protocol\":service_protocol\n }\n return data", "def update_subnet(self, request):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
update the name information for the service
def _process_service_name(self): self.infos.service_name = self._bind_data(self.configuration['service']['name']) self.infos.green_infos.stack['Parameters']['ServiceName']['Default'] = self.infos.service_name self.infos.init_infos.stack['Parameters']['ServiceName']['Default'] = self.infos.service_name self._log_information(key='Service', value=self.infos.service_name, ljust=18)
[ "def update_service_definition(service_name):\n data = request.json\n if not data:\n abort(400, 'No data received')\n try:\n services[service_name] = {}\n services[service_name]['name'] = service_name\n services[service_name]['image'] = data['image']\n except KeyError:\n abort(400, 'Missing parameter.')\n services[service_name]['command'] = data.get('command', None)\n services[service_name]['environment'] = data.get('environment', None)\n services[service_name]['ports'] = data.get('ports', None)", "def updateSurgeonName(self):\n\t\tpass", "def update_name(self):\n try:\n rc, result = self.request(\"storage-systems/%s/configuration\" % self.ssid, method=\"POST\", data={\"name\": self.name})\n except Exception as err:\n self.module.fail_json(msg=\"Failed to set the storage array name! Array Id [%s]. Error [%s].\" % (self.ssid, to_native(err)))", "def set_name(self, new_name):\n self.name = new_name", "def nameChanged(self, oldName, newName):", "def update_name(self, name: str, password: list):\r\n global Hotel_Name\r\n\r\n # Sets title, `Hotel_Name` to the user entered hotel name\r\n # and sets password variable in credentials dictionary.\r\n self.title(f\"M.Y. Hotel | {name}\")\r\n Hotel_Name = creds_rtypes['name'] = name\r\n creds_rtypes['password'] = password\r\n\r\n self.show_frame(LoginPage)\r\n self.current_visible_frame = LoginPage\r\n write_to_json()", "def rename(cls, client, resource, new_servicename) :\n\t\ttry :\n\t\t\trenameresource = gslbservice()\n\t\t\tif type(resource) == cls :\n\t\t\t\trenameresource.servicename = resource.servicename\n\t\t\telse :\n\t\t\t\trenameresource.servicename = resource\n\t\t\treturn renameresource.rename_resource(client,new_servicename)\n\t\texcept Exception as e :\n\t\t\traise e", "def _updateList(self):\r\n for i in self._control.get_children():\r\n self._control.delete(i)\r\n sorted_names = sorted(self._services.iterkeys())\r\n for name in sorted_names:\r\n info = self._services[name]\r\n self._control.insert(\"\" , 'end', text=name, \r\n values=(name[0:name.rfind(\"._http._tcp.local.\")], \r\n info.getServer()[0:info.getServer().rfind(\".local\")],\r\n str(socket.inet_ntoa(info.getAddress())),\r\n info.getPort()))", "def change_name(self):\n if self.user_can_update_information():\n old_firstname = self.user.firstname\n old_surname = self.user.surname\n self.user.firstname = input(\"What is your firstname?\\n\")\n self.user.surname = input(\"What is your lastname?\\n\")\n update_user(self.user)\n print_message(f\"The name '{old_firstname} {old_surname}' has been updated to \"\n f\"'{self.user.firstname}' {self.user.surname}'\")\n else:\n print_error(\"Password is incorrect. Cannot update name.\")", "def CustomServiceNames(self) -> ServiceNameCollection:", "def set_name(self, name):\n self.recipe_proto[\"name\"] = name", "def _setName(self,name,value):\n\n if name in SDS['COP']:\n self.COP.__dict__[name] = value\n else:\n self.__dict__[name] = value", "async def _name(self, ctx, new_name: str):\n mother = ctx.message.author\n if common.has_mon(str(mother)):\n common.user_data['players'][str(mother)]['mon']['name'] = new_name\n await self.bot.say(\"Congratulations, {0}, your mon has been named {1}!\".format(mother.mention, new_name))\n else:\n await self.bot.say(\"{0}, you have no mon. 
You need to hatch an egg first.\".format(mother.mention))", "def update_prod_name(self, prod_id):\n if self.cursor:\n self.cursor.execute(\"\"\"UPDATE products SET \n prod_name = %s where prod_id = %s\"\"\",\n (self.data[\"prod_name\"], prod_id), )", "def set_name(self, new_name):\n\n self.img.attrib['Name'] = new_name", "def setName(self, newname: 'SbName') -> \"void\":\n return _coin.SoBase_setName(self, newname)", "def productionalize_schedulename(self):\n logging.debug(self.production_name)\n self.obj.Name = self.production_name\n self.obj.Label = self.production_name\n self.obj.Update()", "def updateOpticName(self, string: str) -> None:\n ...", "def rename(server, name):\r\n server.update(name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
update the version information for the service
def _process_version(self): version = 'latest' if 'version' in self.configuration['service']: version = str(self.configuration['service']['version']) self.infos.service_version = version self.infos.green_infos.stack['Parameters']['Version']['Default'] = self.infos.service_version self._log_information(key='Version', value=self.infos.service_version, ljust=18)
[ "def _update_info(self):", "def update_version(self):\n if not hasattr(self, 'versions') and not hasattr(self, 'version_number'):\n self.version_number = 1\n \n if hasattr(self, 'version_number') and self.version_number < 2:\n try:\n if 'short_desc' in self.__dict__:\n self._short_desc = self.short_desc\n del self.__dict__['short_desc']\n if 'long_desc' in self.__dict__:\n self._long_desc = self.long_desc\n del self.__dict__['long_desc']\n self.version_number = 2\n except KeyError:\n self.log.error('Error updating object %s in Thing.update_version()' % self)\n \n if hasattr(self, 'version_number'):\n # Changing to dictionary-based versioning system\n self.versions[gametools.findGamePath(__file__)] = 3\n del self.__dict__['version_number']\n \n if self.versions[gametools.findGamePath(__file__)] <= 5:\n self.adjectives = set(self.adjectives)\n self.versions[gametools.findGamePath(__file__)] = 6", "def test_services_update(self):\n pass", "def determine_latest_version(self, service_name, type_name):\n ...", "def update_version_file(self) -> \"ProductionPrep\":\n\n PyFunceble.facility.Logger.info(\n \"Started to update version file.\",\n )\n\n if self.should_be_deprecated(self.previous_version):\n to_append = \".\".join(\n self.version_utility.get_splitted(self.version_utility.local_version)[0]\n )\n\n if to_append not in self.version_file_content[\"deprecated\"]:\n self.version_file_content[\"deprecated\"].append(to_append)\n\n self.version_file_content[\n \"current_version\"\n ] = PyFunceble.storage.PROJECT_VERSION\n\n self.dict_helper.set_subject(self.version_file_content).to_yaml_file(\n self.VERSION_FILE_PATH\n )\n\n PyFunceble.facility.Logger.info(\n \"Finished to update version file.\",\n )\n\n return self", "def update_control_version(file_path, version):\n for line in fileinput.input(file_path, inplace=1):\n if 'Version: ' in line:\n old_ver = line.split(' ')[1]\n line = line.replace(old_ver, version) + '\\n'\n sys.stdout.write(line)", "def _update_versions(self, root):\n nodes = self._get_versioned_nodes(root)\n for node in nodes:\n name = utils.get_localname(node)\n\n if name == \"Indicator\":\n node.attrib['version'] = '2.2'\n else:\n node.attrib['version'] = '1.2'", "def _update_version_file(self):\n file_path = os.path.join(self.repo.working_tree_dir, self.version_file)\n self._update_version_numbers(file_path)\n return self._commit_file(\n self.version_file,\n 'Version updated for release {}.{}.{}{}.'.format(\n self.major, self.minor, self.patch, self.release\n ),\n )", "def edit_version(self):\n\n self.version.click()", "def get_version(self) -> GoProResp:", "def version(self, value):\n self.set(\"ver\", value)", "def list_api_versions(self, service_name, type_name):\n ...", "def _update_ver_nsi(self, nsiFilePath, ver):\r\n t_ver = ver.split('.')\r\n while len(t_ver) < 3:\r\n t_ver.append('0')\r\n\r\n if len(t_ver) > 3:\r\n raise RuntimeError('Invalid version for nsis file.')\r\n\r\n bOK = False\r\n try:\r\n # open nsis file\r\n nsiFile = open(nsiFilePath, 'r', encoding='utf-16-le')\r\n # read out all lines of nsi file\r\n nsiLines = nsiFile.readlines()\r\n nsiFile.close()\r\n\r\n # for nsiline in nsiLines:\r\n for x in range(len(nsiLines)):\r\n nsiline = nsiLines[x]\r\n if nsiline.find('\\n') != -1:\r\n nsiline = nsiline[:-1]\r\n\r\n if nsiline.startswith(\"!define FILE_VER\"):\r\n nsiline = '!define FILE_VER \\\"%s.%s.%s.0\\\"\\n' % (t_ver[0], t_ver[1], t_ver[2])\r\n\r\n nsiLines[x] = \"\"\r\n nsiLines[x] = nsiline\r\n # cc.v('[ver] new ver: %s' % nsiLines[x])\r\n bOK = 
True\r\n\r\n elif nsiline.startswith(\"!define OUT_VER\"):\r\n nsiline = '!define OUT_VER \\\"%s.%s.%s\\\"\\n' % (t_ver[0], t_ver[1], t_ver[2])\r\n\r\n nsiLines[x] = \"\"\r\n nsiLines[x] = nsiline\r\n # cc.v('[ver] new ver: %s' % nsiLines[x])\r\n bOK = True\r\n elif nsiline.startswith(\"!define PRODUCT_VER\"):\r\n nsiline = '!define PRODUCT_VER \\\"%s.%s\\\"\\n' % (t_ver[0], t_ver[1])\r\n\r\n nsiLines[x] = \"\"\r\n nsiLines[x] = nsiline\r\n # cc.v('[ver] new ver: %s' % nsiLines[x])\r\n bOK = True\r\n\r\n else:\r\n continue\r\n\r\n if bOK:\r\n cc.v(' update {}...'.format(nsiFilePath))\r\n wnsiFile = open(nsiFilePath, 'w', encoding='utf-16-le')\r\n wnsiFile.writelines(nsiLines)\r\n wnsiFile.close()\r\n return bOK\r\n\r\n except IOError:\r\n raise RuntimeError('can not process nsi file.')", "def service_version(self, service_version):\n\n self._service_version = service_version", "def update_versions_file():\n\n config_dir = os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))) + '/core/config/'\n\n with open(config_dir + 'versions.json', 'r') as u_vs:\n up_to_date_versions = json.load(u_vs)\n\n with open(JUMBODIR + 'versions.json', 'r') as c_vs:\n current_versions = json.load(c_vs)\n\n # Merge current services config\n for service in up_to_date_versions['services']:\n current_service = [s for s in current_versions['services']\n if s['name'] == service['name']]\n if current_service:\n for vers, _ in service['versions'].items():\n current_url = current_service[0]['versions'].get(vers, False)\n if current_url:\n service['versions'][vers] = current_url\n service['default'] = current_service[0]['default']\n\n # Merge current platforms config\n for platform in up_to_date_versions['platforms']:\n current_platform = [p for p in current_versions['platforms']\n if p['name'] == platform['name']]\n if current_platform:\n platform['default'] = current_platform[0]['default']\n\n # Merge current resources config\n for resource in up_to_date_versions['resources']:\n current_resource = [r for r in current_versions['resources']\n if r['name'] == resource['name']]\n if current_resource:\n for vers, _ in resource['versions'].items():\n current_url = current_resource[0]['versions'].get(vers, False)\n if current_url:\n resource['versions'][vers] = current_url\n\n with open(JUMBODIR + 'versions.json', 'w') as c_vs:\n json.dump(up_to_date_versions, c_vs, indent=2)", "def __update_communication_service(args):\n print(\"\\nUpdate...\")\n\n acs_client = __get_communication_management_client()\n\n tags = {}\n if args.keyvalues is not None:\n tags = {\"tags\": dict(args.keyvalues)}\n\n resource = acs_client.communication_service.update(args.resource_group_name, args.resource_name, TaggedResource(**tags))\n print(\"Resource Updated: \")\n __print_resource(resource)", "def _update_versions(self, root):\n nodes = self._get_versioned_nodes(root)\n\n for node in nodes:\n attribs = node.attrib\n attribs[common.TAG_CYBOX_MAJOR] = '2'\n attribs[common.TAG_CYBOX_MINOR] = '1'\n\n with utils.ignored(KeyError):\n del attribs[common.TAG_CYBOX_UPDATE]", "def update(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tupdateresource = gslbservice()\n\t\t\t\tupdateresource.servicename = resource.servicename\n\t\t\t\tupdateresource.ipaddress = resource.ipaddress\n\t\t\t\tupdateresource.publicip = resource.publicip\n\t\t\t\tupdateresource.publicport = resource.publicport\n\t\t\t\tupdateresource.cip = resource.cip\n\t\t\t\tupdateresource.cipheader = 
resource.cipheader\n\t\t\t\tupdateresource.sitepersistence = resource.sitepersistence\n\t\t\t\tupdateresource.siteprefix = resource.siteprefix\n\t\t\t\tupdateresource.maxclient = resource.maxclient\n\t\t\t\tupdateresource.healthmonitor = resource.healthmonitor\n\t\t\t\tupdateresource.maxbandwidth = resource.maxbandwidth\n\t\t\t\tupdateresource.downstateflush = resource.downstateflush\n\t\t\t\tupdateresource.maxaaausers = resource.maxaaausers\n\t\t\t\tupdateresource.viewname = resource.viewname\n\t\t\t\tupdateresource.viewip = resource.viewip\n\t\t\t\tupdateresource.monthreshold = resource.monthreshold\n\t\t\t\tupdateresource.weight = resource.weight\n\t\t\t\tupdateresource.monitor_name_svc = resource.monitor_name_svc\n\t\t\t\tupdateresource.hashid = resource.hashid\n\t\t\t\tupdateresource.comment = resource.comment\n\t\t\t\tupdateresource.appflowlog = resource.appflowlog\n\t\t\t\tupdateresource.naptrorder = resource.naptrorder\n\t\t\t\tupdateresource.naptrpreference = resource.naptrpreference\n\t\t\t\tupdateresource.naptrservices = resource.naptrservices\n\t\t\t\tupdateresource.naptrreplacement = resource.naptrreplacement\n\t\t\t\tupdateresource.naptrdomainttl = resource.naptrdomainttl\n\t\t\t\treturn updateresource.update_resource(client)\n\t\t\telse :\n\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\tupdateresources = [ gslbservice() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\tupdateresources[i].servicename = resource[i].servicename\n\t\t\t\t\t\tupdateresources[i].ipaddress = resource[i].ipaddress\n\t\t\t\t\t\tupdateresources[i].publicip = resource[i].publicip\n\t\t\t\t\t\tupdateresources[i].publicport = resource[i].publicport\n\t\t\t\t\t\tupdateresources[i].cip = resource[i].cip\n\t\t\t\t\t\tupdateresources[i].cipheader = resource[i].cipheader\n\t\t\t\t\t\tupdateresources[i].sitepersistence = resource[i].sitepersistence\n\t\t\t\t\t\tupdateresources[i].siteprefix = resource[i].siteprefix\n\t\t\t\t\t\tupdateresources[i].maxclient = resource[i].maxclient\n\t\t\t\t\t\tupdateresources[i].healthmonitor = resource[i].healthmonitor\n\t\t\t\t\t\tupdateresources[i].maxbandwidth = resource[i].maxbandwidth\n\t\t\t\t\t\tupdateresources[i].downstateflush = resource[i].downstateflush\n\t\t\t\t\t\tupdateresources[i].maxaaausers = resource[i].maxaaausers\n\t\t\t\t\t\tupdateresources[i].viewname = resource[i].viewname\n\t\t\t\t\t\tupdateresources[i].viewip = resource[i].viewip\n\t\t\t\t\t\tupdateresources[i].monthreshold = resource[i].monthreshold\n\t\t\t\t\t\tupdateresources[i].weight = resource[i].weight\n\t\t\t\t\t\tupdateresources[i].monitor_name_svc = resource[i].monitor_name_svc\n\t\t\t\t\t\tupdateresources[i].hashid = resource[i].hashid\n\t\t\t\t\t\tupdateresources[i].comment = resource[i].comment\n\t\t\t\t\t\tupdateresources[i].appflowlog = resource[i].appflowlog\n\t\t\t\t\t\tupdateresources[i].naptrorder = resource[i].naptrorder\n\t\t\t\t\t\tupdateresources[i].naptrpreference = resource[i].naptrpreference\n\t\t\t\t\t\tupdateresources[i].naptrservices = resource[i].naptrservices\n\t\t\t\t\t\tupdateresources[i].naptrreplacement = resource[i].naptrreplacement\n\t\t\t\t\t\tupdateresources[i].naptrdomainttl = resource[i].naptrdomainttl\n\t\t\t\tresult = cls.update_bulk_request(client, updateresources)\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e", "def update(self):\n\n # config folder path is usually something like: /var/lib/waagent/Microsoft.CPlat.Core.LinuxPatchExtension-<version>/config\n try:\n 
self.setup(action=Constants.UPDATE, log_message=\"Extension is being updated to the latest version. Copying the required extension artifacts from preceding version to the current one\")\n\n # fetch all earlier extension versions available on the machine\n new_version_config_folder = self.ext_env_handler.config_folder\n extension_pardir = os.path.abspath(os.path.join(new_version_config_folder, os.path.pardir, os.path.pardir))\n self.logger.log(\"Parent directory for all extension version artifacts [Directory={0}]\".format(str(extension_pardir)))\n paths_to_all_versions = self.filter_files_from_versions(self.get_all_versions(extension_pardir))\n self.logger.log(\"List of all extension versions found on the machine. [All Versions={0}]\".format(paths_to_all_versions))\n if len(paths_to_all_versions) <= 1:\n # Extension Update action called when\n # a) artifacts for the preceding version do not exist on the machine, or\n # b) after all artifacts from the preceding versions have been deleted\n error_msg = \"No earlier versions for the extension found on the machine. So, could not copy any references to the current version.\"\n self.logger.log_error(error_msg)\n self.ext_output_status_handler.write_status_file(\"\", self.seq_no, status=Constants.Status.Error.lower(), message=error_msg, code=Constants.ExitCode.HandlerFailed)\n return Constants.ExitCode.HandlerFailed\n\n # identify the version preceding current\n self.logger.log(\"Fetching the extension version preceding current from all available versions...\")\n paths_to_all_versions.sort(reverse=True, key=LooseVersion)\n preceding_version_path = paths_to_all_versions[1]\n if preceding_version_path is None or preceding_version_path == \"\" or not os.path.exists(preceding_version_path):\n error_msg = \"Could not find path where preceding extension version artifacts are stored. Hence, cannot copy the required artifacts to the latest version. \"\\\n \"[Preceding extension version path={0}]\".format(str(preceding_version_path))\n self.logger.log_error(error_msg)\n self.ext_output_status_handler.write_status_file(\"\", self.seq_no, status=Constants.Status.Error.lower(), message=error_msg, code=Constants.ExitCode.HandlerFailed)\n return Constants.ExitCode.HandlerFailed\n\n self.logger.log(\"Preceding version path. [Path={0}]\".format(str(preceding_version_path)))\n\n # copy all required files from preceding version to current\n self.copy_config_files(preceding_version_path, new_version_config_folder)\n\n # Delete temp_folder\n self.ext_env_handler.delete_temp_folder()\n\n self.logger.log(\"All update actions from extension handler completed.\")\n self.ext_output_status_handler.write_status_file(\"\", self.seq_no, status=Constants.Status.Success.lower())\n return Constants.ExitCode.Okay\n\n except Exception as error:\n self.logger.log_error(\"Error occurred during extension update. [Error={0}]\".format(repr(error)))\n self.ext_output_status_handler.write_status_file(\"\", self.seq_no, status=Constants.Status.Error.lower(), message=\"Error occurred during extension update\", code=Constants.ExitCode.HandlerFailed)\n return Constants.ExitCode.HandlerFailed\n\n finally:\n self.tear_down()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
update the AWS VPC ID information for the service
def _process_vpc_id(self): self.infos.vpc_id = self._find_vpc_Id() self._log_information(key='Vpc ID', value=self.infos.vpc_id, ljust=18)
[ "def modify_vpc_attribute(VpcId=None, EnableDnsSupport=None, EnableDnsHostnames=None):\n pass", "def updateVpcTable(tableName,data,paGroupName):\n try:\n #VpcCidr is the primary key for VpcTable\n table=dynamodb.Table(tableName)\n item={\n 'VpcId': data['VpcId'],\n 'VpcCidr': data['VpcCidr'],\n 'Region': data['Region'],\n 'SubscriberSnsArn': data['SubscriberSnsArn'],\n 'SubscriberAssumeRoleArn': data['SubscriberAssumeRoleArn'],\n 'PaGroupName': paGroupName,\n 'CurrentStatus': 'Inprogress'\n }\n response=table.put_item(Item=item)\n except Exception as e:\n logger.error(\"Updating Transit VpcTalbe is Failed, Error: {}\".format(str(e)))", "def updateVpcTable(tableName,data,status):\n try:\n #VpcId is the primary key for VpcTable\n table=dynamodb.Table(tableName)\n response=table.update_item(Key={'VpcId':data['VpcId']},AttributeUpdates={'CurrentStatus':{'Value':status,'Action':'PUT'},'Node1VpnId':{'Value':data['VpnN1'],'Action':'PUT'},'Node2VpnId':{'Value':data['VpnN2'],'Action':'PUT'}, 'Region':{'Value':data['Region'],'Action':'PUT'}, 'IpSegment':{'Value':data['IpSegment'],'Action':'PUT'}})\n logger.info('Updated table {} with '.format(tableName,data['VpnN1'],data['VpnN2'],data['Region'],data['IpSegment']))\n except Exception as e:\n logger.error(\"Updating Transit VpcTalbe is Failed, Error: {}\".format(str(e)))", "def _find_vpc_Id(self):\n ec2 = boto3.resource('ec2', region_name=self.infos.region)\n client = boto3.client('ec2', region_name=self.infos.region)\n ids = map(lambda x: x.id, list(ec2.vpcs.filter(Filters=[])))\n for id in ids:\n response = client.describe_vpcs(VpcIds=[id])\n if 'Tags' in response['Vpcs'][0]:\n for tag in response['Vpcs'][0]['Tags']:\n if tag['Key'] == 'Environment' and tag['Value'] == self.infos.environment:\n return id\n raise ValueError('vpc id {} not found for environment'.format(self.infos.environment))", "def describe(\n vpc_id=None,\n vpc_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n vpc_id = _find_vpcs(\n vpc_id=vpc_id,\n vpc_name=vpc_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n except BotoServerError as err:\n boto_err = __utils__[\"boto.get_error\"](err)\n if boto_err.get(\"aws\", {}).get(\"code\") == \"InvalidVpcID.NotFound\":\n # VPC was not found: handle the error and return None.\n return {\"vpc\": None}\n return {\"error\": boto_err}\n\n if not vpc_id:\n return {\"vpc\": None}\n\n filter_parameters = {\"vpc_ids\": vpc_id}\n\n try:\n vpcs = conn.get_all_vpcs(**filter_parameters)\n except BotoServerError as err:\n return {\"error\": __utils__[\"boto.get_error\"](err)}\n\n if vpcs:\n vpc = vpcs[0] # Found!\n log.debug(\"Found VPC: %s\", vpc.id)\n\n keys = (\n \"id\",\n \"cidr_block\",\n \"is_default\",\n \"state\",\n \"tags\",\n \"dhcp_options_id\",\n \"instance_tenancy\",\n )\n _r = {k: getattr(vpc, k) for k in keys}\n _r.update({\"region\": getattr(vpc, \"region\").name})\n return {\"vpc\": _r}\n else:\n return {\"vpc\": None}", "def get_vpc_info(self):\n if not self.base['cluster'].get('vpc'):\n res = self.ec2.describe_vpcs()\n self.base['cluster']['vpc'] = [vpc['VpcId'] for vpc in res['Vpcs'] if vpc['IsDefault']][0]\n logger.info('No vpc selected, using default vpc')\n logger.info(self.base['cluster']['vpc'])", "def ModifyVpcEndPointServiceAttribute(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyVpcEndPointServiceAttribute\", params, 
headers=headers)\n response = json.loads(body)\n model = models.ModifyVpcEndPointServiceAttributeResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def modify_vpc_endpoint(DryRun=None, VpcEndpointId=None, ResetPolicy=None, PolicyDocument=None, AddRouteTableIds=None, RemoveRouteTableIds=None):\n pass", "def delete(\n vpc_id=None,\n name=None,\n vpc_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if name:\n log.warning(\n \"boto_vpc.delete: name parameter is deprecated use vpc_name instead.\"\n )\n vpc_name = name\n\n if not _exactly_one((vpc_name, vpc_id)):\n raise SaltInvocationError(\n \"One (but not both) of vpc_name or vpc_id must be provided.\"\n )\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n if not vpc_id:\n vpc_id = _get_id(\n vpc_name=vpc_name,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not vpc_id:\n return {\n \"deleted\": False,\n \"error\": {\"message\": \"VPC {} not found\".format(vpc_name)},\n }\n\n if conn.delete_vpc(vpc_id):\n log.info(\"VPC %s was deleted.\", vpc_id)\n if vpc_name:\n _cache_id(\n vpc_name,\n resource_id=vpc_id,\n invalidate=True,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n return {\"deleted\": True}\n else:\n log.warning(\"VPC %s was not deleted.\", vpc_id)\n return {\"deleted\": False}\n except BotoServerError as e:\n return {\"deleted\": False, \"error\": __utils__[\"boto.get_error\"](e)}", "def deletevpc(vpc_choices):\n progressbar(\"Deleting VPC\")\n vpcname=vpc_choices['vpc'][0]\n try:\n ec2.delete_vpc(VpcId=str(vpcname))\n print(\"\\n \\n vpc \" +vpcname +\" has been deleted \\n \\n\")\n except botocore.exceptions.ClientError as e:\n coloredtext(\"There was an error while deleting vpc: \\n\\n\\n\")\n print(e)", "def check_vpc(\n vpc_id=None,\n vpc_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if not _exactly_one((vpc_name, vpc_id)):\n raise SaltInvocationError(\n \"One (but not both) of vpc_id or vpc_name must be provided.\"\n )\n if vpc_name:\n vpc_id = _get_id(\n vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile\n )\n elif not _find_vpcs(\n vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile\n ):\n log.info(\"VPC %s does not exist.\", vpc_id)\n return None\n return vpc_id", "def update_all_clusters_in_vpc(self):\n sections = [section for section in self.config_rds.sections()\n if section.split(\"-\")[0] == self.vpc_name]\n logging.debug(\"The following RDS clusters will be updated: %s\", \", \".join(sections))\n for section in sections:\n self.update_cluster(section)", "def _get_id(\n vpc_name=None,\n cidr=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if not any((vpc_name, tags, cidr)):\n raise SaltInvocationError(\n \"At least one of the following must be provided: vpc_name, cidr or tags.\"\n )\n\n if vpc_name and not any((cidr, tags)):\n vpc_id = _cache_id(\n vpc_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if vpc_id:\n return vpc_id\n\n vpc_ids = _find_vpcs(\n vpc_name=vpc_name,\n cidr=cidr,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if vpc_ids:\n log.debug(\"Matching VPC: %s\", \" \".join(vpc_ids))\n if len(vpc_ids) == 1:\n vpc_id = vpc_ids[0]\n if vpc_name:\n _cache_id(\n 
vpc_name,\n vpc_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n return vpc_id\n else:\n raise CommandExecutionError(\n \"Found more than one VPC matching the criteria.\"\n )\n else:\n log.info(\"No VPC found.\")\n return None", "def update(cls, api_client, id, **kwargs):\n\n cmd = {'id': id}\n cmd.update(kwargs)\n return api_client.updateNetworkServiceProvider(**cmd)", "def getvpcs(show):\n vpclist=[]\n \n try:\n vpcs=ec2.describe_vpcs()\n except botocore.exceptions.ClientError as e:\n coloredtext(\"There was an error while getting vpc data: \\n\\n\\n\")\n print(e)\n for vpc in vpcs['Vpcs']:\n name=vpc['VpcId']\n cidr=vpc['CidrBlock']\n if show:\n print(\"VPC Id: \"+name+\" CIDR: \"+cidr)\n vpclist.append({ \"name\":name})\n return vpclist", "def describe_vpcs(DryRun=None, VpcIds=None, Filters=None):\n pass", "def main(azs, region, keyid, secret, cidr, owner, env):\n\n # Validate the region\n myregion = boto.ec2.get_region(region_name=region)\n if myregion == None:\n print(\"Unknown region.\")\n exit(1)\n\n # Establish a VPC service connection\n try:\n conn = boto.vpc.VPCConnection(aws_access_key_id=keyid, aws_secret_access_key=secret, region=myregion)\n except boto.exception.EC2ResponseError as e:\n print(e.message)\n exit(1)\n\n # Grab the availability-zones\n zones = []\n all_zones = conn.get_all_zones()\n for zone in all_zones:\n if zone.state != 'available':\n continue\n zones.append(zone.name)\n\n subnets = subnet_sizes(azs, cidr) # Calculate the subnet sizes\n name = owner.lower() + '-' + env.lower() + '-' # Used for tagging\n\n vpc_id = create_vpc(conn, name, region, cidr)\n igw_id = create_igw(conn, name, region, vpc_id)\n sub_ids = create_sub(conn, name, region, vpc_id, azs, subnets, zones)\n rtb_ids = create_rtb(conn, name, region, vpc_id, azs, sub_ids, igw_id)\n acl_ids = create_acl(conn, name, region, vpc_id, azs, sub_ids, cidr)\n flow_id = create_flows(vpc_id, keyid, secret, region)", "def _find_vpcs(\n vpc_id=None,\n vpc_name=None,\n cidr=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if all((vpc_id, vpc_name)):\n raise SaltInvocationError(\"Only one of vpc_name or vpc_id may be provided.\")\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n filter_parameters = {\"filters\": {}}\n\n if vpc_id:\n filter_parameters[\"vpc_ids\"] = [vpc_id]\n\n if cidr:\n filter_parameters[\"filters\"][\"cidr\"] = cidr\n\n if vpc_name:\n filter_parameters[\"filters\"][\"tag:Name\"] = vpc_name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n vpcs = conn.get_all_vpcs(**filter_parameters)\n log.debug(\n \"The filters criteria %s matched the following VPCs:%s\", filter_parameters, vpcs\n )\n\n if vpcs:\n if not any((vpc_id, vpc_name, cidr, tags)):\n return [vpc.id for vpc in vpcs if vpc.is_default]\n else:\n return [vpc.id for vpc in vpcs]\n else:\n return []", "def get_vpc_id(account, region):\n uri = '/networks/aws'\n response = gate_request(uri=uri)\n\n if not response.ok:\n raise SpinnakerVPCNotFound(response.text)\n\n vpcs = response.json()\n\n for vpc in vpcs:\n LOG.debug('VPC Response: %s', vpc)\n if 'name' in vpc and all([vpc['name'] == VPC_NAME, vpc['account'] == account, vpc['region'] == region]):\n LOG.info('Found VPC ID for %s in %s: %s', account, region, vpc['id'])\n vpc_id = vpc['id']\n break\n else:\n LOG.fatal('VPC list: %s', vpcs)\n raise SpinnakerVPCIDNotFound('No VPC available for {0} [{1}].'.format(account, 
region))\n\n return vpc_id" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
find the AWS VPC by environment
def _find_vpc_Id(self): ec2 = boto3.resource('ec2', region_name=self.infos.region) client = boto3.client('ec2', region_name=self.infos.region) ids = map(lambda x: x.id, list(ec2.vpcs.filter(Filters=[]))) for id in ids: response = client.describe_vpcs(VpcIds=[id]) if 'Tags' in response['Vpcs'][0]: for tag in response['Vpcs'][0]['Tags']: if tag['Key'] == 'Environment' and tag['Value'] == self.infos.environment: return id raise ValueError('vpc id {} not found for environment'.format(self.infos.environment))
[ "def get_vpc_from_env(self, env):\n vpcs = self.get_all_vpcs(filters=[{\"Name\": \"tag:Environment\", 'Values': [env]}])\n if len(vpcs) == 1:\n return vpcs[0]\n else:\n logger.error(\"Multiple envs found: %s\" % (env,))\n raise ValueError", "def get_vpc_id(account, region):\n uri = '/networks/aws'\n response = gate_request(uri=uri)\n\n if not response.ok:\n raise SpinnakerVPCNotFound(response.text)\n\n vpcs = response.json()\n\n for vpc in vpcs:\n LOG.debug('VPC Response: %s', vpc)\n if 'name' in vpc and all([vpc['name'] == VPC_NAME, vpc['account'] == account, vpc['region'] == region]):\n LOG.info('Found VPC ID for %s in %s: %s', account, region, vpc['id'])\n vpc_id = vpc['id']\n break\n else:\n LOG.fatal('VPC list: %s', vpcs)\n raise SpinnakerVPCIDNotFound('No VPC available for {0} [{1}].'.format(account, region))\n\n return vpc_id", "def _get_id(\n vpc_name=None,\n cidr=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if not any((vpc_name, tags, cidr)):\n raise SaltInvocationError(\n \"At least one of the following must be provided: vpc_name, cidr or tags.\"\n )\n\n if vpc_name and not any((cidr, tags)):\n vpc_id = _cache_id(\n vpc_name, region=region, key=key, keyid=keyid, profile=profile\n )\n if vpc_id:\n return vpc_id\n\n vpc_ids = _find_vpcs(\n vpc_name=vpc_name,\n cidr=cidr,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if vpc_ids:\n log.debug(\"Matching VPC: %s\", \" \".join(vpc_ids))\n if len(vpc_ids) == 1:\n vpc_id = vpc_ids[0]\n if vpc_name:\n _cache_id(\n vpc_name,\n vpc_id,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n return vpc_id\n else:\n raise CommandExecutionError(\n \"Found more than one VPC matching the criteria.\"\n )\n else:\n log.info(\"No VPC found.\")\n return None", "def getvpcs(show):\n vpclist=[]\n \n try:\n vpcs=ec2.describe_vpcs()\n except botocore.exceptions.ClientError as e:\n coloredtext(\"There was an error while getting vpc data: \\n\\n\\n\")\n print(e)\n for vpc in vpcs['Vpcs']:\n name=vpc['VpcId']\n cidr=vpc['CidrBlock']\n if show:\n print(\"VPC Id: \"+name+\" CIDR: \"+cidr)\n vpclist.append({ \"name\":name})\n return vpclist", "def get_vpc_info(self):\n if not self.base['cluster'].get('vpc'):\n res = self.ec2.describe_vpcs()\n self.base['cluster']['vpc'] = [vpc['VpcId'] for vpc in res['Vpcs'] if vpc['IsDefault']][0]\n logger.info('No vpc selected, using default vpc')\n logger.info(self.base['cluster']['vpc'])", "def describe(\n vpc_id=None,\n vpc_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n vpc_id = _find_vpcs(\n vpc_id=vpc_id,\n vpc_name=vpc_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n except BotoServerError as err:\n boto_err = __utils__[\"boto.get_error\"](err)\n if boto_err.get(\"aws\", {}).get(\"code\") == \"InvalidVpcID.NotFound\":\n # VPC was not found: handle the error and return None.\n return {\"vpc\": None}\n return {\"error\": boto_err}\n\n if not vpc_id:\n return {\"vpc\": None}\n\n filter_parameters = {\"vpc_ids\": vpc_id}\n\n try:\n vpcs = conn.get_all_vpcs(**filter_parameters)\n except BotoServerError as err:\n return {\"error\": __utils__[\"boto.get_error\"](err)}\n\n if vpcs:\n vpc = vpcs[0] # Found!\n log.debug(\"Found VPC: %s\", vpc.id)\n\n keys = (\n \"id\",\n \"cidr_block\",\n \"is_default\",\n \"state\",\n \"tags\",\n \"dhcp_options_id\",\n \"instance_tenancy\",\n )\n _r = {k: getattr(vpc, k) for k 
in keys}\n _r.update({\"region\": getattr(vpc, \"region\").name})\n return {\"vpc\": _r}\n else:\n return {\"vpc\": None}", "def _find_vpcs(\n vpc_id=None,\n vpc_name=None,\n cidr=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if all((vpc_id, vpc_name)):\n raise SaltInvocationError(\"Only one of vpc_name or vpc_id may be provided.\")\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n filter_parameters = {\"filters\": {}}\n\n if vpc_id:\n filter_parameters[\"vpc_ids\"] = [vpc_id]\n\n if cidr:\n filter_parameters[\"filters\"][\"cidr\"] = cidr\n\n if vpc_name:\n filter_parameters[\"filters\"][\"tag:Name\"] = vpc_name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n vpcs = conn.get_all_vpcs(**filter_parameters)\n log.debug(\n \"The filters criteria %s matched the following VPCs:%s\", filter_parameters, vpcs\n )\n\n if vpcs:\n if not any((vpc_id, vpc_name, cidr, tags)):\n return [vpc.id for vpc in vpcs if vpc.is_default]\n else:\n return [vpc.id for vpc in vpcs]\n else:\n return []", "def get_ec2_connection():\n access = os.environ[config(\"access-environment-var\")]\n secret= os.environ[config(\"secret-environment-var\")]\n return ec2.connect_to_region(config(\"region\"), \n aws_access_key_id=access, aws_secret_access_key=secret)", "def query(config):\n # Check if env already exists\n env_name = config['environment'].get('name')\n env_vers = config['environment'].get('version', None)\n env = env_name\n\n if env_vers:\n env = \"-\".join([env_name, env_vers])\n\n system_type = config['tags'].get('system_type', None)\n resources = aws.environment_exists(env_name, env_vers, system_type)\n\n if system_type:\n env = \"-\".join([system_type, env])\n\n if (resources):\n msg = \"{} exists.\"\n msg += \"\\n\\n{}\"\n resources_json = json.dumps(resources,indent=4)\n message = colored(msg.format(env,resources_json), 'red')\n print(message)\n\n return", "def main(azs, region, keyid, secret, cidr, owner, env):\n\n # Validate the region\n myregion = boto.ec2.get_region(region_name=region)\n if myregion == None:\n print(\"Unknown region.\")\n exit(1)\n\n # Establish a VPC service connection\n try:\n conn = boto.vpc.VPCConnection(aws_access_key_id=keyid, aws_secret_access_key=secret, region=myregion)\n except boto.exception.EC2ResponseError as e:\n print(e.message)\n exit(1)\n\n # Grab the availability-zones\n zones = []\n all_zones = conn.get_all_zones()\n for zone in all_zones:\n if zone.state != 'available':\n continue\n zones.append(zone.name)\n\n subnets = subnet_sizes(azs, cidr) # Calculate the subnet sizes\n name = owner.lower() + '-' + env.lower() + '-' # Used for tagging\n\n vpc_id = create_vpc(conn, name, region, cidr)\n igw_id = create_igw(conn, name, region, vpc_id)\n sub_ids = create_sub(conn, name, region, vpc_id, azs, subnets, zones)\n rtb_ids = create_rtb(conn, name, region, vpc_id, azs, sub_ids, igw_id)\n acl_ids = create_acl(conn, name, region, vpc_id, azs, sub_ids, cidr)\n flow_id = create_flows(vpc_id, keyid, secret, region)", "def get_usable_vpc(config):\n _, _, compute, _ = construct_clients_from_provider_config(config[\"provider\"])\n\n # For backward compatibility, reuse the VPC if the VM is launched.\n resource = GCPCompute(\n compute,\n config[\"provider\"][\"project_id\"],\n config[\"provider\"][\"availability_zone\"],\n config[\"cluster_name\"],\n )\n node = resource._list_instances(label_filters=None, status_filter=None)\n if len(node) 
> 0:\n netInterfaces = node[0].get(\"networkInterfaces\", [])\n if len(netInterfaces) > 0:\n vpc_name = netInterfaces[0][\"network\"].split(\"/\")[-1]\n return vpc_name\n\n vpcnets_all = _list_vpcnets(config, compute)\n\n usable_vpc_name = None\n for vpc in vpcnets_all:\n if _check_firewall_rules(vpc[\"name\"], config, compute):\n usable_vpc_name = vpc[\"name\"]\n break\n\n proj_id = config[\"provider\"][\"project_id\"]\n if usable_vpc_name is None:\n logger.info(f\"Creating a default VPC network, {SKYPILOT_VPC_NAME}...\")\n\n # Create a SkyPilot VPC network if it doesn't exist\n vpc_list = _list_vpcnets(config, compute, filter=f\"name={SKYPILOT_VPC_NAME}\")\n if len(vpc_list) == 0:\n body = VPC_TEMPLATE.copy()\n body[\"name\"] = body[\"name\"].format(VPC_NAME=SKYPILOT_VPC_NAME)\n body[\"selfLink\"] = body[\"selfLink\"].format(\n PROJ_ID=proj_id, VPC_NAME=SKYPILOT_VPC_NAME\n )\n _create_vpcnet(config, compute, body)\n\n _create_rules(\n config, compute, FIREWALL_RULES_TEMPLATE, SKYPILOT_VPC_NAME, proj_id\n )\n\n usable_vpc_name = SKYPILOT_VPC_NAME\n logger.info(f\"A VPC network {SKYPILOT_VPC_NAME} created.\")\n\n # Configure user specified rules\n ports = config[\"provider\"].get(\"ports\", [])\n user_rules = []\n for port in ports:\n cluster_name_hash = common_utils.truncate_and_hash_cluster_name(\n config[\"cluster_name\"]\n )\n name = f\"user-ports-{cluster_name_hash}-{port}\"\n user_rules.append(\n {\n \"name\": name,\n \"description\": f\"Allow user-specified port {port} for cluster {config['cluster_name']}\",\n \"network\": \"projects/{PROJ_ID}/global/networks/{VPC_NAME}\",\n \"selfLink\": \"projects/{PROJ_ID}/global/firewalls/\" + name,\n \"direction\": \"INGRESS\",\n \"priority\": 65534,\n \"allowed\": [\n {\n \"IPProtocol\": \"tcp\",\n \"ports\": [str(port)],\n },\n ],\n \"sourceRanges\": [\"0.0.0.0/0\"],\n \"targetTags\": [config[\"cluster_name\"]],\n }\n )\n\n _create_rules(config, compute, user_rules, usable_vpc_name, proj_id)\n\n return usable_vpc_name", "def get_aws(verbosity):\n print(\"# AWS Start\")\n try:\n response = requests.get(AWSAPIURL)\n if verbosity:\n print(response.status_code)\n if response.status_code == 200:\n cidrdata = json.loads(response.content)\n for i in range(0, len(cidrdata[\"prefixes\"])):\n print(cidrdata[\"prefixes\"][i][\"ip_prefix\"])\n for i in range(0, len(cidrdata[\"ipv6_prefixes\"])):\n print(cidrdata[\"ipv6_prefixes\"][i][\"ipv6_prefix\"])\n except Exception as get_exception:\n print(\"Exception\")\n print(get_exception)\n print(\"# AWS End\")", "def vpc_configuration(self) -> Optional[pulumi.Input['HostVpcConfigurationArgs']]:\n return pulumi.get(self, \"vpc_configuration\")", "def check_vpc(\n vpc_id=None,\n vpc_name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n\n if not _exactly_one((vpc_name, vpc_id)):\n raise SaltInvocationError(\n \"One (but not both) of vpc_id or vpc_name must be provided.\"\n )\n if vpc_name:\n vpc_id = _get_id(\n vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile\n )\n elif not _find_vpcs(\n vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile\n ):\n log.info(\"VPC %s does not exist.\", vpc_id)\n return None\n return vpc_id", "def get_security_group_vpc_id(security_group_details: dict) -> str:\n return security_group_details['VpcId']", "def get_region():\n url = 'http://169.254.169.254/latest/dynamic/instance-identity/document'\n region = None\n try:\n resp = requests.get(url, timeout=0.5)\n resp.raise_for_status()\n region = resp.json()['region']\n except 
(requests.exceptions.HTTPError,\n requests.exceptions.ReadTimeout,\n KeyError):\n logger.exception('Trying to access {0} failed'.format(url))\n finally:\n return region", "def describe_vpcs(DryRun=None, VpcIds=None, Filters=None):\n pass", "def get_cluster(ctx, name, region, verbosity):\n cp = ControlPlane(name, region=region)\n ci = cp.query()\n headers = ['NAME', 'ENDPOINT', 'VPC', 'SUBNETS']\n print(tabulate([[ci.name, ci.endpoint, ci.vpc, ','.join(ci.subnets)]], headers, tablefmt='plain'))", "def vpc_settings(self) -> pulumi.Input['SimpleAdVpcSettingsArgs']:\n return pulumi.get(self, \"vpc_settings\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
find the AWS ECS cluster by name
def _find_cluster(self, clusterName):
    client = boto3.client('ecs', region_name=self.infos.region)
    response = client.list_clusters()
    for arn in response['clusterArns']:
        if arn.endswith(clusterName):
            return arn
    raise ValueError(f'Cluster "{clusterName}" not found.')
[ "def get_cb_cluster_by_name(self, name):\n for cluster in self.clusters:\n if cluster.name == name:\n return cluster\n raise Exception(\"Couchbase Cluster %s does not exist\" % name)", "def elasticsearch_cluster(self, name, site):\n try:\n endpoint = elasticsearch_clusters[name][site]['endpoint']\n suffix = elasticsearch_clusters[name][site]['suffix']\n dc_name = elasticsearch_clusters[name][site]['dc_name']\n return ElasticsearchCluster(Elasticsearch(endpoint), dc_name, self.script_node(), self.cumin_config, suffix, self.icinga(), self.sudo, self.dry_run)\n except KeyError:\n raise ConfigError('No cluster named {name} exist in DC {site}'.format(name=name, site=site))", "def find_cluster(self, name_or_id, ignore_missing=True):\n return self._find(\n _cluster.Cluster, name_or_id,\n ignore_missing=ignore_missing,\n )", "def check_for_cluster():\n emr_client = boto3.client('emr')\n\n return check_for_existing_emr_cluster(\n emr_client=emr_client, cluster_id=get_config('emr')['emr_cluster_id'])", "def test_get_cluster_id_by_name(self):\n hook = EmrHook(aws_conn_id=\"aws_default\", emr_conn_id=\"emr_default\")\n\n job_flow = hook.create_job_flow(\n {\"Name\": \"test_cluster\", \"Instances\": {\"KeepJobFlowAliveWhenNoSteps\": True}}\n )\n\n job_flow_id = job_flow[\"JobFlowId\"]\n\n matching_cluster = hook.get_cluster_id_by_name(\"test_cluster\", [\"RUNNING\", \"WAITING\"])\n\n assert matching_cluster == job_flow_id\n\n no_match = hook.get_cluster_id_by_name(\"foo\", [\"RUNNING\", \"WAITING\", \"BOOTSTRAPPING\"])\n\n assert no_match is None", "def get_cluster(ctx, name, region, verbosity):\n cp = ControlPlane(name, region=region)\n ci = cp.query()\n headers = ['NAME', 'ENDPOINT', 'VPC', 'SUBNETS']\n print(tabulate([[ci.name, ci.endpoint, ci.vpc, ','.join(ci.subnets)]], headers, tablefmt='plain'))", "def cluster_name(self):\n return self.base_config.cluster_name if hasattr(self.base_config, \"cluster_name\") else None", "def test_clusters_cluster_name_get(self):\n MockAmbari = Ambari\n Ambari.get_cluster_info = Mock(return_value={'cluster_name': 'cluster_name'})\n response = self.client.open(\n '/detapi/{version}/clusters/{cluster_name}'.format(version=__version__, \n cluster_name='cluster_name_example'),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get_sdc_by_name(self, name):\n for sdc in self.sdc:\n if sdc.name == name:\n return sdc\n raise KeyError(\"SDC of that name not found\")", "def aws_ecsclusters(self):\n cluster_info = get_ecs_info()\n return_msg = '**ADS AWS ECS Clusters**\\n'\n for entry in cluster_info.get('clusterArns'):\n return_msg += '> {}: {}\\n'.format(entry.split('/')[1], entry)\n return return_msg", "def get_cluster():\n return nifi.ControllerApi().get_cluster()", "def get_index(self, name, verbose=False):\n\n try:\n idx = self.name.index(name)\n except ValueError:\n if verbose == True:\n print('Cluster \\'{}\\' not found in list'.format(name))\n idx = -1\n \n return idx", "def get_cluster_template(self, template_name, tag_name=None,\n ec2_conn=None):\n try:\n kwargs = {}\n if tag_name:\n kwargs.update(dict(cluster_tag=tag_name))\n kwargs.update(self.clusters[template_name])\n if not ec2_conn:\n ec2_conn = self.get_easy_ec2()\n clust = Cluster(ec2_conn, **kwargs)\n return clust\n except KeyError:\n raise exception.ClusterTemplateDoesNotExist(template_name)", "def test_get_cluster_id_by_name_pagination(self):\n hook = EmrHook(aws_conn_id=\"aws_default\", emr_conn_id=\"emr_default\")\n\n # Create enough clusters to 
trigger pagination\n for index in range(51):\n hook.create_job_flow(\n {\"Name\": f\"test_cluster_{index}\", \"Instances\": {\"KeepJobFlowAliveWhenNoSteps\": True}}\n )\n\n # Fetch a cluster from the second page using the boto API\n client = boto3.client(\"emr\", region_name=\"us-east-1\")\n response_marker = client.list_clusters(ClusterStates=[\"RUNNING\", \"WAITING\", \"BOOTSTRAPPING\"])[\n \"Marker\"\n ]\n second_page_cluster = client.list_clusters(\n ClusterStates=[\"RUNNING\", \"WAITING\", \"BOOTSTRAPPING\"], Marker=response_marker\n )[\"Clusters\"][0]\n\n # Now that we have a cluster, fetch the id with the name\n second_page_cluster_id = hook.get_cluster_id_by_name(\n second_page_cluster[\"Name\"], [\"RUNNING\", \"WAITING\", \"BOOTSTRAPPING\"]\n )\n\n # Assert that the id we got from the hook is the same as the one we got\n # from the boto api\n assert second_page_cluster_id == second_page_cluster[\"Id\"]", "def get_cluster(cluster_identifier: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:\n __args__ = dict()\n __args__['clusterIdentifier'] = cluster_identifier\n __args__['tags'] = tags\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('aws:redshift/getCluster:getCluster', __args__, opts=opts, typ=GetClusterResult).value\n\n return AwaitableGetClusterResult(\n allow_version_upgrade=pulumi.get(__ret__, 'allow_version_upgrade'),\n aqua_configuration_status=pulumi.get(__ret__, 'aqua_configuration_status'),\n arn=pulumi.get(__ret__, 'arn'),\n automated_snapshot_retention_period=pulumi.get(__ret__, 'automated_snapshot_retention_period'),\n availability_zone=pulumi.get(__ret__, 'availability_zone'),\n availability_zone_relocation_enabled=pulumi.get(__ret__, 'availability_zone_relocation_enabled'),\n bucket_name=pulumi.get(__ret__, 'bucket_name'),\n cluster_identifier=pulumi.get(__ret__, 'cluster_identifier'),\n cluster_nodes=pulumi.get(__ret__, 'cluster_nodes'),\n cluster_parameter_group_name=pulumi.get(__ret__, 'cluster_parameter_group_name'),\n cluster_public_key=pulumi.get(__ret__, 'cluster_public_key'),\n cluster_revision_number=pulumi.get(__ret__, 'cluster_revision_number'),\n cluster_security_groups=pulumi.get(__ret__, 'cluster_security_groups'),\n cluster_subnet_group_name=pulumi.get(__ret__, 'cluster_subnet_group_name'),\n cluster_type=pulumi.get(__ret__, 'cluster_type'),\n cluster_version=pulumi.get(__ret__, 'cluster_version'),\n database_name=pulumi.get(__ret__, 'database_name'),\n default_iam_role_arn=pulumi.get(__ret__, 'default_iam_role_arn'),\n elastic_ip=pulumi.get(__ret__, 'elastic_ip'),\n enable_logging=pulumi.get(__ret__, 'enable_logging'),\n encrypted=pulumi.get(__ret__, 'encrypted'),\n endpoint=pulumi.get(__ret__, 'endpoint'),\n enhanced_vpc_routing=pulumi.get(__ret__, 'enhanced_vpc_routing'),\n iam_roles=pulumi.get(__ret__, 'iam_roles'),\n id=pulumi.get(__ret__, 'id'),\n kms_key_id=pulumi.get(__ret__, 'kms_key_id'),\n log_destination_type=pulumi.get(__ret__, 'log_destination_type'),\n log_exports=pulumi.get(__ret__, 'log_exports'),\n maintenance_track_name=pulumi.get(__ret__, 'maintenance_track_name'),\n manual_snapshot_retention_period=pulumi.get(__ret__, 'manual_snapshot_retention_period'),\n master_username=pulumi.get(__ret__, 'master_username'),\n node_type=pulumi.get(__ret__, 'node_type'),\n number_of_nodes=pulumi.get(__ret__, 'number_of_nodes'),\n port=pulumi.get(__ret__, 'port'),\n 
preferred_maintenance_window=pulumi.get(__ret__, 'preferred_maintenance_window'),\n publicly_accessible=pulumi.get(__ret__, 'publicly_accessible'),\n s3_key_prefix=pulumi.get(__ret__, 's3_key_prefix'),\n tags=pulumi.get(__ret__, 'tags'),\n vpc_id=pulumi.get(__ret__, 'vpc_id'),\n vpc_security_group_ids=pulumi.get(__ret__, 'vpc_security_group_ids'))", "def create_ecs_cluster(aws_conn_id: str, cluster_name: str) -> None:\n hook = AwsBaseHook(\n aws_conn_id=aws_conn_id,\n client_type=\"ecs\",\n )\n hook.conn.create_cluster(\n clusterName=cluster_name,\n capacityProviders=[\n \"FARGATE_SPOT\",\n \"FARGATE\",\n ],\n defaultCapacityProviderStrategy=[\n {\n \"capacityProvider\": \"FARGATE_SPOT\",\n \"weight\": 1,\n \"base\": 0,\n },\n {\n \"capacityProvider\": \"FARGATE\",\n \"weight\": 1,\n \"base\": 0,\n },\n ],\n )", "def cli_cosmosdb_mongocluster_get(client,\r\n resource_group_name, cluster_name):\r\n\r\n return client.get(resource_group_name, cluster_name)", "def get_cluster_operator(self, name):\n cluster_operator = None\n try:\n cluster_operator = self.ocp_co.get(name=name)\n except ApiException as e:\n logger.error(\"Exception while getting cluster operator %s : %s\\n\", name, e)\n\n return cluster_operator", "def test_get_cluster_id_by_name_duplicate(self):\n hook = EmrHook(aws_conn_id=\"aws_default\", emr_conn_id=\"emr_default\")\n\n hook.create_job_flow({\"Name\": \"test_cluster\", \"Instances\": {\"KeepJobFlowAliveWhenNoSteps\": True}})\n\n hook.create_job_flow({\"Name\": \"test_cluster\", \"Instances\": {\"KeepJobFlowAliveWhenNoSteps\": True}})\n\n with pytest.raises(AirflowException):\n hook.get_cluster_id_by_name(\"test_cluster\", [\"RUNNING\", \"WAITING\", \"BOOTSTRAPPING\"])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
find the external ip
def _find_external_ip(self):
    data = None
    try:
        data = json.loads(urllib.request.urlopen("https://api.ipify.org?format=json").read())
        if 'ip' in data:
            return data['ip']
    except Exception:
        pass
    return data
[ "def getPublicIp():\n try:\n data = str(urlopen('http://checkip.dyndns.com/').read())\n #data = '<html><head><title>Current IP Check</title></head><body>Current IP Address: 65.96.168.198</body></html>\\r\\n'\n externalip = re.compile(r'Address: (\\d+\\.\\d+\\.\\d+\\.\\d+)').search(data).group(1)\n\n except:\n externalip = None\n raise\n finally:\n return externalip", "def getip():\n\tsi='Address: '\n\tr=urlopen('http://checkip.dyndns.org').read()\n\ti=r.find(si)+len(si)\n\te=r.find('<',i)\n\treturn r[i:e]", "def get_ipaddr():\n return get('https://api.ipify.org').text", "def get_ip_address(self, urlorip):\n res = self.is_valid_ip(urlorip)\n if (res):\n return urlorip\n else:\n try:\n ip = socket.gethostbyname(urlorip)\n return ip\n except socket.gaierror:\n return None", "def check_local_ip(self):\n (retcode,output) = run('ip addr show dev {device}'.format(device=self.device))\n IPV4ADDR = re.compile(r'\\sinet\\s+(?P<ipv4>\\d+.\\d+.\\d+.\\d+)[/\\s]')\n MACADDR = re.compile(r'link/(?P<type>\\S+)(\\s(?P<mac>\\S+))?')\n ipaddr = IPV4ADDR.search(output)\n if ipaddr:\n self.last_ip = ipaddr.groupdict()['ipv4']\n else:\n self.last_ip = None\n macaddr = MACADDR.search(output)\n if macaddr:\n self.device_mac = macaddr.groupdict()['mac']\n self.device_type = macaddr.groupdict()['type']\n else:\n self.device_mac = None\n self.device_type = None\n return self.last_ip", "def getIpAddress():\n # type: () -> String\n return socket.gethostbyname(str(getHostName()))", "def find_linux_ip():\n # executing ifconfig built in command\n out=subprocess.check_output([\"sudo\", \"ifconfig\"])\n # finding how many ip addresses exist\n num=out.count(\"inet addr\")\n ip=[]\n for i in range(num):\n # finding position of ip addresses\n position=out.find(\"inet addr\")\n # executing string that contains nth ip address (minimum 15 digits)\n string=out[position+10:position+25]\n # using regexp def to obtain exact ip occurance\n find=regexp(string)\n # appending to ip list\n ip.append(find[0])\n # decreasing out string's length\n out=out[position+25:]\n print ip\n return ip", "def _findNameIP(self, name):\n _ipMatchRegex = re.compile( r'\\d+\\.\\d+\\.\\d+\\.\\d+' )\n\n # First, check for an IP address\n ipmatch = _ipMatchRegex.findall( name )\n if ipmatch:\n return ipmatch[ 0 ]\n # Otherwise, look up remote server\n output = self.masternode.cmd('getent ahostsv4 {}'.format(name))\n\n ips = _ipMatchRegex.findall( output )\n\n ip = ips[ 0 ] if ips else None\n return ip", "def find_with_ip():\n state_filter = \" nud \" + \" nud \".join(HOME_STATES.values()).lower()\n cmd = f\"ip neigh show {state_filter}\".split()\n neighbours = subprocess.run(cmd, shell=False, capture_output=True, text=True)\n neighbours_ip = [_.split()[0] for _ in neighbours.stdout.splitlines()]\n return neighbours_ip", "def get_ip_address():\n ip_address = subprocess.check_output(\n [\"unit-get\", \"private-address\"])\n return ip_address.decode().strip()", "def show_ip(): #TODO\n pass", "def get_ip():\n try:\n r = requests.get('https://api.ipify.org').text\n return r\n except ConnectionError:\n return 'No Connection'", "def ip(self):\n\t\tif self.rr_type() in [\"A\", \"AAAA\"]:\n\t\t\treturn self[4]\n\t\telse:\n\t\t\traise Exception(\"ldnsx does not support ip for records other than A/AAAA\")", "def get_ip(self) -> str:\n try:\n self.remote_exec(\"from network import WLAN\")\n self.remote_exec(\"wlan=WLAN()\")\n ret = self.remote_exec(\"print(wlan.ifconfig()[0])\")\n ip = ret.decode(\"utf-8\")\n return ip\n except Exception as err:\n debug(f\"Exception 
{err=}. Could not retrieve WLAN ip address\")\n return \"\"", "def get_network_ip():\n comm = \"\"\" docker network inspect tada-gam_default | grep \"Gateway\" \"\"\"\n a = subprocess.Popen(comm, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0]\n ip = a.split(\":\")[1].replace('\"', '').strip()\n return ip", "def __internal_ip_from_app(self, app):\n try:\n nic = self.__primary_nic_from_app(app)\n return IPv4Address(nic.find('vcd:IpAddress', _NS).text)\n except (AttributeError, AddressValueError):\n return None", "def get_ip(ifname):\n # TODO: what about AFINET6 / IPv6?\n return netifaces.ifaddresses(ifname)[netifaces.AF_INET][0]['addr']", "def get_ip_publica(self):\n \n self.ip_origen = urllib.request.urlopen('http://ip.42.pl/raw').read().decode('utf-8')", "def _process_external_ip(self):\n self.infos.external_ip = self._find_external_ip()\n self._log_information(key='External IP', value=self.infos.external_ip, ljust=18)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Counts the number of vowels in an English word.
def countVowels(word):
    # initialize count
    count = 0

    # loop over letters in word
    for letter in word.lower():  # convert word to all lower-case
        # increment counter if letter is a vowel
        if letter in ('a', 'e', 'i', 'o', 'u'):
            count = count + 1

    return count
[ "def num_vowels(word):\n return sum(char in VOWELS for char in word.lower())", "def count_vowel(s):\n count = 0\n for i in s:\n\tif i == 'a' or i == 'e' or i == 'i' or i == 'o' or i == 'u':\n\t count += 1\n print \"Number of vowels:%d\" %count", "def count_vowels(s):\r\n\r\n count_vowels = 0\r\n\r\n for char in s:\r\n if char in \"aeiouAEIOU\":\r\n count_vowels = count_vowels + 1\r\n return count_vowels", "def count_vowels(string):\n count = 0\n for character in string.lower():\n if character in \"aeiou\":\n count += 1\n return count", "def vowel_count(phrase):\n # Add a comment to practice committing from VS Code\n low_phrase = phrase.lower()\n d = {}\n for l in low_phrase:\n if l in \"aeiou\":\n if l in d:\n d[l] = d[l] + 1\n else:\n d[l] = 1\n return d", "def CountVowels(phrase):\n ALWAYS_VOWELS = \"aeiou\"\n spurious = string.punctuation + '0123456789_'\n count = 0\n for word in phrase.lower().split():\n word = word.strip(spurious)\n l_word = len(word)\n for index, char in enumerate(word):\n if char in ALWAYS_VOWELS:\n count += 1\n continue\n if char != 'y' or index == 0:\n # now, char is 'y' and not the first char\n continue\n if word[index-1] in ALWAYS_VOWELS:\n # preceded by a vowel\n continue\n if word.endswith('ying') and index == l_word - 4:\n count += 1\n continue\n # now, it is a 'y' preceded by a consonant\n if (index == l_word - 1 # at end of word\n or word[index+1] not in ALWAYS_VOWELS):\n # or followed by a consonant\n count += 1\n continue\n return count", "def test_example_5(self):\n self.assertEqual(0, countVowels.count_lowercase_vowels('pffffft'))", "def test_example_3(self):\n self.assertEqual(0, countVowels.count_lowercase_vowels('A'))", "def count_vowels(string):\n vowel_counter = Counter()\n char_list = []\n\n for char in string:\n if char in vowels:\n char_list.append(char)\n vowel_counter[char] += 1\n str_list = list(set(char_list))\n vowel_string = ''.join(str_list)\n\n duplicates = 0\n\n for value in vowel_counter.values():\n if value >= 2:\n duplicates += 1\n return (vowel_string, duplicates)", "def count_words_letters(words, letters):\n return 0", "def test_example_6(self):\n self.assertEqual(5, countVowels.count_lowercase_vowels('aeioua'))", "def vowel_indices(word):\n return [i + 1 for i, j in enumerate(word) if j.lower() in \"aeiouy\"]", "def single_letter_count(word, letter):\n# count = 0\n# for letter in word:\n# if letter[i] == letter:\n# count = count + 1\n\n# print(single_letter_count('hello', 'h'))\n\n return word.lower().count(letter.lower())", "def count_syllables(word):\n word_lower = word.lower () #make function non-case sensetive\n vowels = 'aeiouy'\n vowels_position = []\n i = 0\n while i < len (word_lower):\n if word_lower [i] in vowels:\n vowels_position += [i] #put each vowel posistion in a list\n i += 1\n syllables = len (vowels_position)\n k = 0\n while k < len (vowels_position) - 1:\n if vowels_position [k+1] - vowels_position [k] == 1: #vowels next to each other\n syllables -= 1 #no new syllable, substract one from previous total\n k += 1\n if word_lower [-1] == 'e' and word_lower [-2] not in vowels: # 'e' is last and preceded by non-vowel\n syllables -= 1 #no new syllable, substract one from previous total\n syllables_final = max (syllables, 1) #at least one syllable\n return (syllables_final)", "def letter_count(word):\r\n count = 0\r\n # Could have used string.punctation instead of string of punctuation_marks\r\n # Counting letters in each word vs stripping punctuation from file and counting word length\r\n punctuation_marks = 
\"\"\"!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~)\"\"\"\r\n for c in word:\r\n if c not in punctuation_marks:\r\n count += 1\r\n return count", "def CountVowelsInFile(file_name):\n vowel_count = 0\n file_obj = open(file_name)\n try:\n for line in file_obj:\n vowel_count += count_vowels.CountVowels(line)\n finally:\n file_obj.close()\n return vowel_count", "def vowel_frequency_consonant_sum(s):\n s = s.lower()\n\n stats = {}\n\n for v in VOWELS:\n stats[v] = s.count(v)\n\n stats['consonants'] = len(CONS_RE.findall(s))\n\n return stats", "def num_oov_words(self, words: VocabularyTagging):\n n_oov = 0\n for s, t in self.sentences:\n n_oov += (s == words.OOV_ID).sum().item()\n return n_oov", "def letter_counter(s):\n upper_case_letters = [c for c in s if c.isupper()]\n lower_case_letters = [c for c in s if c.islower()]\n return len(upper_case_letters),len(lower_case_letters)", "def has_three_vowels(s):\n count = 0\n for v in 'aeiou':\n count += s.count(v)\n return count >= 3" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Main function to execute the vowelcounting program. Returns None.
def main():
    # program greeting
    print('*'*58)
    print('Welcome to the vowel-counting program!\n')
    print('This program counts the number of vowels in English words.')
    print('*'*58)

    # get word
    word = getWord()

    # count vowels
    count = countVowels(word)

    # display result
    print('\n"{}" contains {} vowel(s).'.format(word.capitalize(), count))
[ "def count_vowel(s):\n count = 0\n for i in s:\n\tif i == 'a' or i == 'e' or i == 'i' or i == 'o' or i == 'u':\n\t count += 1\n print \"Number of vowels:%d\" %count", "def main():\n # set up the program to take in arguments from the command line", "def test_example_5(self):\n self.assertEqual(0, countVowels.count_lowercase_vowels('pffffft'))", "def main():\n try:\n uchart(sys.argv[1:])\n except Exception as err:\n logging.error('%s', err)\n traceback.print_exc()\n sys.exit(1)", "def test_example_6(self):\n self.assertEqual(5, countVowels.count_lowercase_vowels('aeioua'))", "def main():\r\n #We check if the file ends in .vm else its an error\r\n if len(sys.argv) != 2 or sys.argv[1][-3:] != \".vm\":\r\n badusage()\r\n #We get the filename\r\n filename = str(sys.argv[1])\r\n #Create the parser\r\n parser = Parser(filename)\r\n #We create the codewriter without the .vm extention\r\n codewriter = Codewriter(filename[0:-3])\r\n #Start reading checking if it has more commands advancing and seeing the command\r\n while parser.hasMoreCommands():\r\n parser.advance()\r\n #If it is a arithmetic command we write it or a push or pop command\r\n if parser.command_type() == \"C_ARITHMETIC\":\r\n command = parser.arg1()\r\n codewriter.writeArithmetic(command)\r\n elif parser.command_type() == \"C_PUSH\":\r\n m_segment = parser.arg1()\r\n index = parser.arg2()\r\n codewriter.writePushPop(\"C_PUSH\", m_segment, index)\r\n elif parser.command_type() == \"C_POP\":\r\n m_segment = parser.arg1()\r\n index = parser.arg2()\r\n codewriter.writePushPop(\"C_POP\", m_segment, index)\r\n #Then we close the codewriter and parser\r\n del codewriter\r\n del parser", "def print_vowels():\n\n print(\"a\")\n print(\"e\")\n print(\"E\")\n print(\"i\")\n print(\"o\")\n print(\"u\")\n print(\"y\")\n print(\"@\")\n print(\"2\")\n print(\"9\")\n print(\"a~\")\n print(\"o~\")\n print(\"U~\")", "def count_vowels(string):\n count = 0\n for character in string.lower():\n if character in \"aeiou\":\n count += 1\n return count", "def main():\r\n index(parserCmdLine())", "def main(argv):\n\n # check for arguments\n try:\n file = argv[1]\n except IndexError:\n print(\"Expected one file name argument to be passed, e.g. 
./tiger-parser program.tig\")\n return 40\n\n program_contents = read_file(argv[1])\n\n # parse input program\n try:\n program = Parser(program_contents, argv[1]).parse()\n except ParseError as e:\n print(\"Parse failure: %s\" % e.to_string())\n return 42\n\n # print the program\n print(program.to_string())\n\n return 0", "def test_example_3(self):\n self.assertEqual(0, countVowels.count_lowercase_vowels('A'))", "def main():\n\tif len(sys.argv) != 3:\n\t\tprint(\"Usage: python3 solver CENTER_CHAR OTHER_CHARS\")\n\t\tsys.exit(-1)\n\t\n\tsolver = SpellingBeeSolver(sys.argv[1], sys.argv[2])\n\tprint(solver.solve())", "def search_vowel(args): #passes the arguments from the previous function into this function\n\n text = args.text #sets the \"some text\" argument from the parser as the variable 'text' to be used in the for loop\n vowel = args.vowel #sets the \"vowel\" argument from the parser as the variable 'vowel' to be used in the for loop\n vowelinword = False #this is false unless proven true by the for loop - only proven true if the letter at that index is equal to the vowel variable\n thisindex = 0 #establishes 'thisindex' as a variable that changes to the value of the index where the letter equals the vowel variable\n for i in range(len(text)): #for loop for the length of the text variable\n if text[i] == vowel: #if the letter at index position i is exactly equal to the given value, then the variable 'vowelinword' becomes true\n vowelinword = True\n thisindex = i #sets the variable 'thisindex' equal to the index at which the vowel is located to call later\n if vowelinword: #if this statement becomes true by the vowel being in the text, then it will print the following statement\n return print(f'Found \"{vowel}\" in \"{text}\" at index {thisindex}.')\n else: #if 'vowelinword' is still false by the vowel not being in the text, then it will print the follwing statement\n print(f'\"{vowel}\" is not found in \"{text}\".')", "def count_vowels(s):\r\n\r\n count_vowels = 0\r\n\r\n for char in s:\r\n if char in \"aeiouAEIOU\":\r\n count_vowels = count_vowels + 1\r\n return count_vowels", "def test_z(self):\r\n list.extend(sys.argv, ['-z', 'count_txt'])\r\n self.assertEqual(len(count.og_function()), 26*2) # Check length of z:\r", "def CountVowels(phrase):\n ALWAYS_VOWELS = \"aeiou\"\n spurious = string.punctuation + '0123456789_'\n count = 0\n for word in phrase.lower().split():\n word = word.strip(spurious)\n l_word = len(word)\n for index, char in enumerate(word):\n if char in ALWAYS_VOWELS:\n count += 1\n continue\n if char != 'y' or index == 0:\n # now, char is 'y' and not the first char\n continue\n if word[index-1] in ALWAYS_VOWELS:\n # preceded by a vowel\n continue\n if word.endswith('ying') and index == l_word - 4:\n count += 1\n continue\n # now, it is a 'y' preceded by a consonant\n if (index == l_word - 1 # at end of word\n or word[index+1] not in ALWAYS_VOWELS):\n # or followed by a consonant\n count += 1\n continue\n return count", "def main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"data_path\",\n help=\"top directory of WHO data\", default=\"./COVID-19/\")\n args = parser.parse_args()\n\n\n run_analysis(args.data_path)", "def main():", "def vowel_count(phrase):\n # Add a comment to practice committing from VS Code\n low_phrase = phrase.lower()\n d = {}\n for l in low_phrase:\n if l in \"aeiou\":\n if l in d:\n d[l] = d[l] + 1\n else:\n d[l] = 1\n return d", "def main():\n # TODO: ask the user to input some `text`\n\n\n # TODO: count the number of occurences 
of each word in the text\n\n\n # TODO: sort by descending order of occurences and display the result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the severity ranking of an SO term.
def severity(so_term):
    try:
        return ENSEMBL_SO_SEVERITY_ORDER.index(so_term)
    except ValueError:
        warnings.warn('Unexpected Sequence Ontology term: %s' % so_term)
        # If the SO term is not in ENSEMBL_SO_SEVERITY_ORDER, its severity
        # ranking is assumed to be +inf (least severe)
        return np.inf
[ "def getSeverity(self):\n if not self.lastType:\n return RO.Constants.sevNormal\n return TypeDict[self.lastType][1]", "def translate_score(self, severity):\n\n if not self.score_stats.has_key(severity):\n self.score_stats[severity] = 1\n else:\n self.score_stats[severity] += 1\n\n # per JM and ED of FireEye, only criticality rankings are\n # critical, major, and minor\n #\n if 'crit' == severity:\n return 100\n elif 'majr' == severity:\n return 75\n elif 'minr' == severity:\n return 50\n else:\n return 25", "def calculate_vader_rank(sentence):\r\n score = analyser.polarity_scores(sentence)\r\n return score[\"compound\"]", "def rarity(self, word):\r\n if not isinstance(word, str):\r\n raise TypeError(\"Word given must be a string\")\r\n\r\n try:\r\n occurrence = self.word_frequency[word]\r\n except KeyError: # misspelling or missing word\r\n return 3\r\n\r\n if self.max/100 <= occurrence: # common\r\n return 0\r\n elif self.max/1000 <= occurrence < self.max/100: # uncommon\r\n return 1\r\n elif 0 < occurrence < self.max/1000: # rare\r\n return 2", "def occurrence(self) -> Optional[int]:\n return pulumi.get(self, \"occurrence\")", "def seniority(self):\n s = sum(map(abs,self.occ['alpha'] - self.occ['beta']))\n return s", "def rank_skill_support():\n import collections\n score_dict = collections.defaultdict(int)\n for item in classes.Necromancer.items:\n for name in classes.Necromancer.skill_names:\n if name in inspect.getsource(item):\n score_dict[name] += 1\n\n for name, freq in sorted(score_dict.items(), key=lambda x: -x[1]):\n print(name, freq)\n\n\n # Necromancer_belts = [x for x in classes.Necromancer.items if x.type == 'waist']\n # print(len(Necromancer_belts))\n # for x in Necromancer_belts:\n # print(x.__doc__)\n # print(x.text)", "def rank(self):\n return self._get_data_value(\"Paper rank\")", "def get_relative_frequent_senses(word):\n rfss = []\n if word.lang in general_mfs_statistics and word.lang in ('ita', 'ron', 'jpn'):\n if word.lemma in general_mfs_statistics[word.lang]:\n # given the input lemma, for each sense s retrieves <sense, sum(occurrences in texts, excluded text)> pairs\n sid_scores = [(s, sum(general_mfs_statistics[word.lang][word.lemma][s].values()) -\n general_mfs_statistics[word.lang][word.lemma][s].get(word.document, 0))\n for s in general_mfs_statistics[word.lang][word.lemma]]\n # first one is the sense with highest number of occurrences\n rfss = sorted(sid_scores, key=lambda x: x[1], reverse=True)\n # scores not needed\n if rfss and isinstance(rfss[0], tuple):\n rfss = [i for (i, j) in sorted(sid_scores, key=lambda x: x[1], reverse=True)]\n\n elif word.lang in general_mfs_statistics and word.lang == 'eng':\n if word.lemma + '-' + word.pos in general_mfs_statistics[word.lang]:\n rfss = general_mfs_statistics[word.lang][word.lemma + '-' + word.pos]\n\n try:\n rfss = [i.replace('-s', '-a') for i in rfss]\n except:\n import pdb; pdb.set_trace()\n return rfss", "def get_rank(role): # -> Literal[0]:\n ...", "def get_pathology_severity(self):\n return self.pathology_severity_data", "def get_sev(sev_str):\n try:\n sev = Severities[sev_str]\n except KeyError:\n # invalid log message severity, ignoring log record\n return\n return sev", "def __categorizeSentiment(self):\n score = self.__overallSentiment\n if score < -0.4:\n return \"Worst\"\n elif score < -0.3:\n return \"Very Bad\"\n elif score < -0.1:\n return \"Bad\"\n elif score < 0.1:\n return \"Neutral\"\n elif score < 0.3:\n return \"Good\"\n elif score < 0.4:\n return \"Very Good\"\n else:\n return \"Best\"", 
"def get_score(self):\n jira = JiraServer()\n jira_response = jira.make_api_call(self.query)\n return self.process_jira_response(jira_response)", "def precedent(cls, stats, rule=STATUS_PRECEDENCE):\n return min(\n [cls.STATUS_CATEGORY[stat] for stat in stats],\n key=lambda stat: rule.index(stat),\n )", "def get_polarity_score(self, doc):\n\n if self.algorithm == \"nltk_vader\":\n return self._sia.polarity_scores(doc)[\"compound\"]\n elif self.algorithm == \"ML-Senticon\":\n return self.spa_polarity_score(doc)", "def get_text_rank_summary(self, doc, limit_sentences=20, verbose = True):\n result = doc._.textrank.summary(limit_sentences=limit_sentences)\n res = ''\n \n for sent in result:\n res+='{} '.format(sent)\n if verbose:\n print(sent)\n return res", "def get_risk(self):\n return str(Decimal(str(self.gui.spn_risk.textFromValue(\n self.gui.spn_risk.value())))\n )", "def get_sense_pos(self, tree):\n lem = tree.label()\n #word = tree.leaves()[0]\n sense = str(lem).split('.')[2]\n pos = str(lem).split('.')[1]\n return (pos, sense)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Close the file if it's already open.
def closefile_ifopen(self):
    if (self.filep == None):
        return

    # close file and clear it
    self.filep.close()
    self.filep = None
[ "def _close_file(self):\n self._iostream.close()\n self._fileobj.close()", "def close_file():\n lib_close.close_file()", "def close_file(self):\n self.file_handler.close()", "def close (self):\r\n\r\n self.file.close ()\r\n self.file = None\r\n self.filename = None\r\n self.current_line = None", "def close(self):\n self._fileobj.close()", "def close_file(self, file):\n try:\n try:\n os.close(file)\n except:\n file.close()\n except GeneratorExit:\n print(\"Closing the file: \" + str(file) + \" was not possible\")\n except:\n print(\"Unknown error occured, while closing file \" + str(file) + \"Error: \", sys.exc_info()[0])", "def close(self):\n \n self.__fh.close()", "def close_file(self):\n self.hdf.close()", "def close(self):\n self._read_buf = None\n if self._writable_file:\n self._writable_file.close()\n self._writable_file = None", "def close(self):\n if self._rfile is not None:\n self._rfile.close()\n self._reader = None", "def close(self):\n print((\"Closing exodus file: \" + self.fileName))\n errorInt = EXODUS_LIB.ex_close(self.fileId)\n if errorInt != 0:\n raise Exception(\n \"ERROR: Closing file \" +\n self.fileName +\n \" had problems.\")", "def close(self):\n # Free memory as best we can\n del self._file\n self._file = None", "def close_files(self):\n\t\tpass", "def close(self) -> None:\n if self.fd is None:\n return\n\n os.close(self.fd)\n self.fd = None", "def closed(self) -> bool:\r\n return self._file.closed", "def close(self):\n self.compressed_file.close()", "def Close(self):\n label = self._UniqueLabel()\n self._WriteCode('(%s), @%s, 0;JMP' % (label, label))\n self.file.close()", "def __del__(self):\n self.close() # does nothing if called twice (on an already closed file), so no worries", "def closeFile():\n\n\tglobal statusFileHandler\n\tstatusFileHandler.close()\n\t# NOTE - using print() instead of printWithTime(), as file handler\n\t# closed on previous statement.\n\tprint (\"Server status file : CLOSED.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adjusts the simulation configuration to the current independent variable value.
def configure_simulation(independent_variable_value, input_params, empirical_profile, original_team_size,
                         configuration_function, simulation_configuration):
    return configuration_function(independent_variable_value=independent_variable_value,
                                  input_params=input_params,
                                  empirical_profile=empirical_profile,
                                  original_team_size=original_team_size,
                                  simulation_configuration=simulation_configuration)
[ "def reset_dynamic_variables(self):\n\n start_ix = 0\n end_ix = self.episode_tracker.episode_time_steps\n self.energy_simulation.cooling_demand[start_ix:end_ix] = self.energy_simulation.cooling_demand_without_control.copy()[start_ix:end_ix]\n self.energy_simulation.heating_demand[start_ix:end_ix] = self.energy_simulation.heating_demand_without_control.copy()[start_ix:end_ix]\n self.energy_simulation.indoor_dry_bulb_temperature[start_ix:end_ix] = self.energy_simulation.indoor_dry_bulb_temperature_without_control.copy()[start_ix:end_ix]", "def observation_config(self):\n self.observation.planner = GLConfig().observation\n self.observation.actor = BCConfig().observation", "def UpdateDynamicVars(self, dt, step):\n self.v_m += dt * (-(self.v_m - self.e_l)) / self.tau_m + self.noise_exc[:, step]\n self.rho += dt * (self.rho_null - self.rho) / self.tau_rho + self.noise_rho[:, step]", "def setup_sim_properties(self):\n self.sim_model.set_sim_property(self.properties_info)", "def set_initial_value(self, initial_value, time=0.0):\n\t\tif isinstance(initial_value,dict):\n\t\t\tinitial_value = self._list_from_dynvar_dict(\n\t\t\t\t\tinitial_value,\n\t\t\t\t\t\"initial value\",\n\t\t\t\t\tself.n,\n\t\t\t\t)\n\t\t\n\t\tif self.n != len(initial_value):\n\t\t\traise ValueError(\"The dimension of the initial value does not match the dimension of your differential equations.\")\n\t\t\n\t\tself.y = np.array( initial_value, copy=True, dtype=float )\n\t\tself.t = time\n\t\treturn self", "def reassignOutputVariables(self):\n outVars = self.varMap[self.outputName].reshape(-1)\n numInVars = np.sum([np.prod(self.shapeMap[inputName]) for inputName in self.inputNames])\n numOutVars = len(outVars)\n newOutVars = np.array(range(numInVars,numInVars+numOutVars))\n \n # Adjust equation variables\n for eq in self.equList:\n for i, (c,var) in enumerate(eq.addendList):\n eq.addendList[i] = (c, self.reassignVariable(var, numInVars, outVars, newOutVars))\n \n # Adjust relu list\n for i, variables in enumerate(self.reluList):\n self.reluList[i] = tuple([self.reassignVariable(var, numInVars, outVars, newOutVars) for var in variables])\n \n # Adjust max pool list\n for i, (elements, outVar) in enumerate(self.maxList):\n newOutVar = self.reassignVariable(outVar, numInVars, outVars, newOutVars)\n newElements = set()\n for var in elements:\n newElements.add(self.reassignVariable(var, numInVars, outVars, newOutVars))\n self.maxList[i] = (newElements, newOutVar)\n \n # Adjust upper/lower bounds\n newLowerBounds = dict()\n newUpperBounds = dict()\n for var in self.lowerBounds:\n newLowerBounds[self.reassignVariable(var, numInVars, outVars, newOutVars)] = self.lowerBounds[var]\n for var in self.upperBounds:\n newUpperBounds[self.reassignVariable(var, numInVars, outVars, newOutVars)] = self.upperBounds[var]\n self.lowerBounds = newLowerBounds\n self.upperBounds = newUpperBounds\n \n # Adjust constraint variables list\n newVarsParticipatingInConstraints = set()\n for var in self.varsParticipatingInConstraints:\n newVarsParticipatingInConstraints.add(self.reassignVariable(var, numInVars, outVars, newOutVars))\n self.varsParticipatingInConstraints = newVarsParticipatingInConstraints\n \n # Assign output variables to the new array\n self.varMap[self.outputName] = newOutVars.reshape(self.shapeMap[self.outputName])\n self.outputVars = self.varMap[self.outputName]", "def _setSimulation(self, simulation):\n\n self.simulation = simulation", "def update_initial(self, illumination=0):\n if self.fit_result_values is None:\n raise ValueError(\"Fit 
has not yet been performed\")\n for param in self.parameters:\n if param.multi:\n param.initial = self.fit_result_values[f\"{param.name}{illumination}\"]\n else:\n param.initial = self.fit_result_values[param.name]", "def update_inverse_temperature(self):\n\n if settings.annealing_method == \"multiplication\":\n self.inverse_temperature *= self.inverse_temperature_increment\n elif settings.annealing_method == \"addition\":\n self.inverse_temperature += self.inverse_temperature_increment\n else:\n eprint(1, \"ERROR: Annealing method %s not recognized\" % settings.annealing_method)\n quit()", "def change_hp_value(configuration_space: ConfigurationSpace,\n configuration_array: np.ndarray,\n hp_name: str, hp_value: float, index: int) -> np.ndarray:\n\n configuration_array[index] = hp_value\n\n # Hyperparameters which are going to be set to inactive\n disabled = []\n\n # Activate hyperparameters if their parent node got activated\n children = configuration_space._children_of[hp_name]\n if len(children) > 0:\n to_visit = deque() # type: deque\n to_visit.extendleft(children)\n visited = set() # type: Set[str]\n activated_values = dict() # type: Dict[str, Union[int, float, str]]\n\n while len(to_visit) > 0:\n current = to_visit.pop()\n if current.name in visited:\n continue\n visited.add(current.name)\n if current.name in disabled:\n continue\n\n current_idx = configuration_space.get_idx_by_hyperparameter_name(current.name)\n current_value = configuration_array[current_idx]\n\n conditions = configuration_space._parent_conditions_of[current.name]\n\n active = True\n for condition in conditions:\n if not condition.evaluate_vector(configuration_array):\n active = False\n break\n\n if active and (current_value is None or\n not np.isfinite(current_value)):\n default = current._inverse_transform(current.default)\n configuration_array[current_idx] = default\n children_ = configuration_space._children_of[current.name]\n if len(children_) > 0:\n to_visit.extendleft(children_)\n\n # If the hyperparameter was made inactive,\n # all its children need to be deactivade as well\n if not active and (current_value is not None\n or np.isfinite(current_value)):\n configuration_array[current_idx] = np.NaN\n\n children = configuration_space._children_of[current.name]\n\n if len(children) > 0:\n to_disable = set()\n for ch in children:\n to_disable.add(ch.name)\n while len(to_disable) > 0:\n child = to_disable.pop()\n child_idx = configuration_space. \\\n get_idx_by_hyperparameter_name(child)\n disabled.append(child_idx)\n children = configuration_space._children_of[child]\n\n for ch in children:\n to_disable.add(ch.name)\n\n for idx in disabled:\n configuration_array[idx] = np.NaN\n\n return configuration_array", "def update(self):\n # TODO: Check here if there even is new data. Skip if not.\n try:\n current_sim_time = self.current_sim_time()\n except IndexError:\n Warning(\"Current sim time is empty. This should not happen with proper initialization. 
\"\n \"Current sim time is set to 0.\")\n current_sim_time = 0\n if current_sim_time == 0:\n self._init_internal_variables()\n else:\n self._matlab_bridge.isolate_recent_data_in_workspace(current_sim_time)\n self._update_process_data()\n self._update_setpoint_data()\n self._update_idv_data()\n self._update_cost_data()\n self._update_manipulated_variables()", "def update_target_model(self):\n self.target_network.set_weights(self.q_network.get_weights())\n # vedere se funziona invece questo\n #for t, e in zip(self.target_network.trainable_variables,\n # self.primary_network.trainable_variables): t.assign(t * (1 - TAU) + e * TAU)", "def accept(self):\n\n if self.feature_patch_grid_size_y_new != self.configuration.feature_patch_grid_size_y:\n self.configuration.feature_patch_grid_size_y = self.feature_patch_grid_size_y_new\n self.configuration_changed = True\n\n if self.feature_patch_grid_size_x_new != self.configuration.feature_patch_grid_size_x:\n self.configuration.feature_patch_grid_size_x = self.feature_patch_grid_size_x_new\n self.configuration_changed = True\n\n if self.max_features_new != self.configuration.max_features:\n self.configuration.max_features = self.max_features_new\n self.configuration_changed = True\n\n if self.good_match_fraction_new != self.configuration.good_match_fraction:\n self.configuration.good_match_fraction = self.good_match_fraction_new\n self.configuration_changed = True\n\n if self.match_weighting_new != self.configuration.match_weighting:\n self.configuration.match_weighting = self.match_weighting_new\n self.configuration_changed = True\n\n if self.pyramid_scale_new != self.configuration.pyramid_scale:\n self.configuration.pyramid_scale = self.pyramid_scale_new\n self.configuration_changed = True\n\n if self.levels_new != self.configuration.levels:\n self.configuration.levels = self.levels_new\n self.configuration_changed = True\n\n if self.winsize_new != self.configuration.winsize:\n self.configuration.winsize = self.winsize_new\n self.configuration_changed = True\n\n if self.iterations_new != self.configuration.iterations:\n self.configuration.iterations = self.iterations_new\n self.configuration_changed = True\n\n if self.poly_n_new != self.configuration.poly_n:\n self.configuration.poly_n = self.poly_n_new\n self.configuration_changed = True\n\n if self.poly_sigma_new != self.configuration.poly_sigma:\n self.configuration.poly_sigma = self.poly_sigma_new\n self.configuration_changed = True\n\n if self.use_gaussian_filter_new != self.configuration.use_gaussian_filter:\n self.configuration.use_gaussian_filter = self.use_gaussian_filter_new\n self.configuration_changed = True\n\n if self.skip_rigid_transformation_new != self.configuration.skip_rigid_transformation:\n self.configuration.skip_rigid_transformation = self.skip_rigid_transformation_new\n self.configuration_changed = True\n\n if self.skip_optical_flow_new != self.configuration.skip_optical_flow:\n self.configuration.skip_optical_flow = self.skip_optical_flow_new\n self.configuration_changed = True\n\n self.close()", "def set_fluid_props(self):\n \n self.nu = self.mu / self.rho", "def reassign_val(self):\n self.val = Term.values[self.x]", "def prepare_simulation(self, components):\n # Set the var. art. 
costs.\n vac_in = self.vac_in\n vac_out = self.vac_out\n\n if self.storage_level_wanted is not None and self.storage_level < self.storage_level_wanted:\n # If a wanted storage level is set and the storage level fell below\n # that wanted level, the low VAC apply.\n vac_in = self.vac_low_in\n vac_out = self.vac_low_out\n\n self.current_vac = [vac_in, vac_out]", "def reset_dynamic(self):\n self.A = np.random.normal(size=(self.dim_x, self.dim_x))\n self.B = np.random.normal(size=(self.dim_x, self.dim_u))\n logger.info(\"Dynamic resetted to: \")\n logger.info(\"A: {}\".format(self.A))\n logger.info(\"B: {}\".format(self.B))", "def upd_main_parameters(self):\n self.steps += 1\n self.k += 1\n self.mode = 'PS'\n\n self.gamma = 3 / (self.k + 2)\n self.gamma_next = 3 / (self.k + 3)\n \n L = self.defaults['L']\n M = self.defaults['M']\n D_tilde = self.defaults['D_tilde']\n T = ceil(M**2 * (self.k + 1)**3 / (D_tilde * L**2))\n self.T = int(T)\n \n self.P = 2 / ((self.T + 1) * (self.T + 2))\n self.beta = 9 * L * (1 - self.P) / (2 * (self.k + 1))", "def activate(self):\r\n for consitit_name in self: # for each neuron group:\r\n self[consitit_name].activate() # find how future net values are affected from propagations this timestep.\r\n # set net values for the next timestep.\r\n self.clock += 1 # advance net's clock.\r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test orientation of PNG files
def testOrientation( self ) :

    img = Reader.create( "test/IECore/data/png/uvMap.512x256.8bit.png" ).read()

    ipe = PrimitiveEvaluator.create( img )
    self.assert_( ipe.R() )
    self.assert_( ipe.G() )
    self.assert_( ipe.B() )
    self.failIf ( ipe.A() )

    result = ipe.createResult()

    colorMap = {
        V2i( 0 , 0 ) : V3f( 0, 0, 0 ),
        V2i( 511, 0 ) : V3f( 1, 0, 0 ),
        V2i( 0, 255 ) : V3f( 0, 1, 0 ),
        V2i( 511, 255 ) : V3f( 1, 1, 0 ),
    }

    for point, expectedColor in colorMap.items() :

        found = ipe.pointAtPixel( point, result )
        self.assert_( found )

        color = V3f(
            result.floatPrimVar( ipe.R() ),
            result.floatPrimVar( ipe.G() ),
            result.floatPrimVar( ipe.B() )
        )

        self.assert_( ( color - expectedColor).length() < 1.e-6 )
[ "def testOrientation( self ) :\n\n\t\timg = Reader.create( \"test/IECore/data/tiff/uvMap.512x256.8bit.tif\" ).read()\n\n\t\tipe = PrimitiveEvaluator.create( img )\n\t\tself.assert_( ipe.R() )\n\t\tself.assert_( ipe.G() )\n\t\tself.assert_( ipe.B() )\n\t\tself.failIf ( ipe.A() )\n\n\t\tresult = ipe.createResult()\n\n\t\tcolorMap = {\n\t\t\tV2i( 0 , 0 ) : V3f( 0, 0, 0 ),\n\t\t\tV2i( 511, 0 ) : V3f( 1, 0, 0 ),\n\t\t\tV2i( 0, 255 ) : V3f( 0, 1, 0 ),\n\t\t\tV2i( 511, 255 ) : V3f( 1, 1, 0 ),\n\t\t}\n\n\t\tfor point, expectedColor in colorMap.items() :\n\n\t\t\tfound = ipe.pointAtPixel( point, result )\n\t\t\tself.assert_( found )\n\n\t\t\tcolor = V3f(\n\t\t\t\tresult.floatPrimVar( ipe.R() ),\n\t\t\t\tresult.floatPrimVar( ipe.G() ),\n\t\t\t\tresult.floatPrimVar( ipe.B() )\n\t\t\t)\n\n\t\t\tself.assert_( ( color - expectedColor).length() < 1.e-6 )", "def find_correct_image_orientation(grid):\n image = stitch_tiles(grid)\n for test_image in all_image_orientations(image):\n count = count_monsters(test_image)\n if count > 0:\n return test_image, count\n\n return None, None", "def test_other_image_modes(self):\n with testing_utils.tempdir() as tmp:\n image_file = 'tmp.jpg'\n image_path = os.path.join(tmp, image_file)\n image_zip_path = os.path.join(tmp, 'tmp.zip')\n image = Image.new('RGB', (16, 16), color=0)\n\n with PathManager.open(image_path, 'wb') as fp:\n image.save(fp, 'JPEG')\n\n with zipfile.ZipFile(\n PathManager.open(image_zip_path, 'wb'), mode='w'\n ) as zipf:\n zipf.write(image_path, arcname=image_file)\n\n for im in ['raw', 'ascii']:\n loader = ImageLoader({\"image_mode\": im})\n loader.load(image_path)\n loader.load(f\"{image_zip_path}/{image_file}\")", "def test_image_rotate(self):\n self.image_rotate.rotate(90)\n self.assertEqual(str(self.image_rotate.size()), str((1024, 1280)))\n\n self.image_rotate.rotate(120)\n self.assertEqual(str(self.image_rotate.size()), str((1622, 1528)))", "def _exif_orientation(im):\n try:\n exif = im._getexif()\n except (AttributeError, IndexError, KeyError, IOError):\n exif = None\n if exif:\n orientation = exif.get(0x0112)\n if orientation == 2:\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation == 3:\n im = im.rotate(180)\n elif orientation == 4:\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n elif orientation == 5:\n im = im.rotate(-90).transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation == 6:\n im = im.rotate(-90)\n elif orientation == 7:\n im = im.rotate(90).transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation == 8:\n im = im.rotate(90)\n return im", "def handle_exif_rotation(image: Image.Image) -> Image.Image:\n\n def get_key_by_value(dictionary: Mapping[int, str], value: str) -> int:\n for k, v in dictionary.items():\n if v == value:\n return k\n raise ValueError(f\"No such value {value}.\")\n\n try:\n orientation = get_key_by_value(ExifTags.TAGS, \"Orientation\")\n exif = dict(image.getexif().items())\n if exif[orientation] == 3:\n image = image.transpose(Image.ROTATE_180)\n elif exif[orientation] == 6:\n image = image.transpose(Image.ROTATE_270)\n elif exif[orientation] == 8:\n image = image.transpose(Image.ROTATE_90)\n return image\n except (AttributeError, KeyError, IndexError, ValueError):\n return image", "def exif_orientation(im):\n orientation = get_exif_orientation(im)\n if orientation == 2:\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation == 3:\n im = im.rotate(180)\n elif orientation == 4:\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n elif orientation == 5:\n im = im.rotate(-90).transpose(Image.FLIP_LEFT_RIGHT)\n elif 
orientation == 6:\n im = im.rotate(-90)\n elif orientation == 7:\n im = im.rotate(90).transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation == 8:\n im = im.rotate(90)\n return im", "def get_image_rotation(filename):\n\n file = open(filename, 'rb')\n tags = exifread.process_file(file)\n\n for tag in tags.keys():\n if tag == 'Image Orientation':\n # print(f\"{tag}, value {tags[tag]}\")\n return tags[tag]", "def get_format_image(filename):\n imagename = \"app/static/img/\" + str(filename)\n image = Picture.open(imagename)\n\n width,height=image.size \n\n if width >= height :\n return \"horizontal\"\n return \"vertical\"", "def orientation(self):\r\n tag=self.readinfo('Image Orientation Patient')\r\n \r\n if tag==None:\r\n name=None\r\n elif tag==[-0,1,0,-0,-0,-1]:\r\n name=1 #Sagittal\r\n elif tag==[-1,-0,0,-0,-1,0]:\r\n name=2 #Axial\r\n elif tag==[1,0,0,0,0,-1]:\r\n name=3 #Coronal\r\n else:\r\n name=4 #Oblique\r\n self.orient=name\r\n return", "def test_rank1d_orientation(self):\n oz = Rank1D(orient='v')\n npt.assert_array_equal(oz.fit_transform(self.dataset), self.dataset)\n\n # Image similarity comparison\n oz.finalize()\n self.assert_images_similar(oz)", "def test_png(self):\n\n test_image = np.random.randint(0, 256, size=(256, 224, 3)).astype(\"uint8\")\n with tempfile.TemporaryDirectory() as tempdir:\n filename = os.path.join(tempdir, \"test_image.png\")\n itk_np_view = itk.image_view_from_array(test_image, is_vector=True)\n itk.imwrite(itk_np_view, filename)\n output_name = \"test_image/test_image_trans.png\"\n self._cmp(filename, (3, 224, 256), \"itkreader\", \"itkreader\", output_name, \".png\")\n self._cmp(filename, (3, 224, 256), \"itkreader\", \"PILReader\", output_name, \".png\")\n self._cmp(filename, (3, 224, 256), \"itkreader\", \"nibabelreader\", output_name, \".png\")", "def test_portrait_check():\n portrait_angles = [90, 270, -90]\n landscape_angles = [0, 180, -180, 360]\n\n for angle in portrait_angles:\n compass = orientation.Compass()\n compass.set_angle(angle)\n assert compass.is_portrait_frame()\n assert not compass.is_landscape_frame()\n\n for angle in landscape_angles:\n compass = orientation.Compass()\n compass.set_angle(angle)\n assert compass.is_landscape_frame()\n assert not compass.is_portrait_frame()", "def set_orient(dst, orient, src=None):\n exiftool = 'exiftool -overwrite_original -n'\n if src:\n system('%s -TagsFromFile %s -Orientation=%d %s >/dev/null' %\n (exiftool, shell_quote(src), orient, shell_quote(dst)))\n elif get_orient(dst) != orient:\n system('jpegexiforient -%d %s >/dev/null' % (orient, shell_quote(dst)))\n if get_orient(dst) != orient:\n system('%s -n -Orientation=%d %s >/dev/null' %\n (exiftool, orient, shell_quote(dst)))\n return get_orient(dst) == orient", "def read_angle(image_path):\n metadata = Image(image_path)\n\n try:\n if metadata.has_exif:\n angle = metadata.orientation.value\n # checking possible angles for images.\n angles = {1: 0, # (top, left)\n 6: 90, # (right, top)\n 3: 180, # (bottom, right)\n 8: 270} # (left, bottom)\n return angles.get(angle, 0)\n else:\n print(f'Cannot evaluate orientation for {image_path}.')\n return None\n except ValueError: # ... 
is not a valid TiffByteOrder\n print(f'Cannot evaluate orientation for {image_path}.')\n return None", "def all_image_orientations(image: Image) -> Generator:\n yield image\n yield vertical_flip_image(image)\n\n for degree in (90, 180, 270):\n rotated_image: Image = rotate(image, degree)\n yield rotated_image\n\n v_flipped_image: Image = vertical_flip_image(rotated_image)\n yield v_flipped_image", "def test_fix_orientation_via_rest_api(self):\n client = app.test_client()\n client.testing = True\n img = os.path.join(os.path.dirname(__file__), 'res', 'gates_up.jpg')\n\n # Fix orientation locally and store result in memory\n img_tmp = mktemp(suffix=os.path.splitext(img)[1])\n FaceOrienter(img).fix_orientation(img_tmp)\n with open(img_tmp, 'rb') as img_tmp_fp:\n result_local = img_tmp_fp.read()\n\n # Fix orientation using the REST API\n with open(img, 'rb') as img_fp:\n response = client.post('/orient', content_type='multipart/form-data',\n data={'image': (img_fp, 'gates_up.jpg')})\n\n # Check if equal\n self.assertEqual(result_local, response.data)", "def _apply_exif_orientation(image):\n if not hasattr(image, \"getexif\"):\n return image\n\n exif = image.getexif()\n\n if exif is None:\n return image\n\n orientation = exif.get(_EXIF_ORIENT)\n\n method = {\n 2: Image.FLIP_LEFT_RIGHT,\n 3: Image.ROTATE_180,\n 4: Image.FLIP_TOP_BOTTOM,\n 5: Image.TRANSPOSE,\n 6: Image.ROTATE_270,\n 7: Image.TRANSVERSE,\n 8: Image.ROTATE_90,\n }.get(orientation)\n\n if method is not None:\n return image.transpose(method)\n return image", "def test_properties(self):\r\n file = SAMPLE_IMAGES[0]\r\n img = Image.from_file(str(file))\r\n assert img.name == file.stem\r\n\r\n assert img.h == 260\r\n assert img.w == 260\r\n assert len(img.shape) == 3\r\n assert img.ndim == 3\r\n assert img.c == 3" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a FITS image file and returns a numpy array
def readFITS(fn,hdr=False):
    hdulist=pf.open(fn)
    im=hdulist[0].data
    hdulist.close()
    if hdr:
        return im[0,0],getFITSInfo(fn)
    else:
        return im[0,0]
[ "def open_image(infile):\n with fits.open(infile) as f:\n header = f[0].header\n data = f[0].data\n if ((data.ndim == 3 and data.shape[0] != 1) or\n (data.ndim == 4 and data.shape[1] != 1)):\n # NAXIS=3: [FREQ!=1, Y, X]\n # NAXIS=4: [STOKES, FREQ!=1, Y, X]\n raise ValueError(\"input file '{0}' has invalid dimensions: {1}\".format(\n infile, data.shape))\n print(\"Read in FITS image from: %s\" % infile)\n return (header, data)", "def read_data(files):\n images = None\n for i,fn in enumerate(files):\n hdulist = pyfits.open(fn)\n image = hdulist[0].data\n hdulist.close()\n if images is None:\n images = np.zeros((len(files), image.shape[0], image.shape[1]))\n images[i,:,:] = image\n return images", "def read_in_1d_fits(path):\n data_arr = pf.open(path)\n hdf = data_arr[0].header\n hde = data_arr[0].header\n F = data_arr[0].data\n E = data_arr[1].data\n W = (hdf['CRVAL1'] + (hdf['CRPIX1'] - 1 + np.arange(hdf['NAXIS1']))*hdf['CDELT1'])*10 \n return W, F, E, hdf, hde", "def get_image_data(filename):\n #\n # open fits file and define x and y in arcsec\n #\n from astropy.io import fits\n f = fits.open(filename)\n h = f[0].header\n if h['CUNIT1']!='deg' or h['CUNIT2']!='deg' \\\n or h['NAXIS1']!=h['NAXIS2'] or h['BUNIT']!='JY/PIXEL': \n raise NameError('Something wrong with the image, check units & shape!')\n x = (np.arange(h['NAXIS1'])-h['CRPIX1'])*h['CDELT1']*pi/180./arcsec\n y = (np.arange(h['NAXIS2'])-h['CRPIX2'])*h['CDELT2']*pi/180./arcsec\n #\n # get image data (in Jy/pix) and convert to Jy/arcsec^2\n #\n img = f[0].data.copy()/(h['CDELT1']*h['CDELT2'])*(180./pi)**2*arcsec_sq\n img_lam = c_light/h['RESTFREQ']\n #\n # close fits file\n #\n f.close()\n return x,y,img,img_lam,h", "def read_fits_data(fname):\n hdulist = fits.open(fname)\n hdulist.verify(\"silentfix+warn\")\n return hdulist[1].data", "def _to_array(path_to_image):\n return im.imread(path_to_image) # Read image as is", "def image_as_numpy(filename):\n return np.array(Image.open(filename), dtype=np.float)", "def openfits(filename):\n hdulist = fits.open(filename)\n data = hdulist[0].data\n header = hdulist[0].header\n \n return data, header", "def main():\n \n with open(\"a1422866084343.jpg\",\"rb\") as fileh:\n \n s = fileh.read()\n \n image_file = StringIO(s)\n \n #print dir(image_file)\n \n image = Image.open(image_file)\n \n array = numpy.array(image) \n \n print type( array )", "def test_ReadFITSImage(self):\n if stile.file_io.has_fits:\n # Test that it read in the contents correctly, basically\n numpy.testing.assert_array_equal(stile.ReadFITSImage('test_data/image_int.fits'),\n self.fits_int_image)\n numpy.testing.assert_array_equal(stile.ReadFITSImage('test_data/image_and_table.fits'),\n self.fits_int_image)\n numpy.testing.assert_array_equal(stile.ReadFITSImage('test_data/image_float.fits'),\n self.fits_float_image)\n self.assertRaises(IOError, stile.ReadFITSImage, 'test_data/data_table.dat')", "def readmask(name) :\n f=open(name)\n data=[]\n for line in f :\n data.append(float(line))\n return np.array(data)", "def get_im(self):\n # Clear out HDUList in case we fail\n self.HDUList = None\n if not self.CCDCamera.ImageReady:\n raise EnvironmentError('CCD Camera image is not ready')\n # For some reason, we can't get at the image array or its FITS\n # header through CCDCamera.ImageArray, but we can through\n # Document.ImageArray\n self.getDocument()\n \n # Make sure we have an array to work with\n c_im = self.Document.ImageArray\n if c_im is None:\n raise EnvironmentError('There is no image array')\n # Create a basic FITS 
image out of this and copy in the FITS\n # keywords we want\n\n # TRANSPOSE ALERT. Document.ImageArray returns a tuple of\n # tuples shaped just how you would want it for X, Y. Since\n # Python is written in C, this is stored in memory in in \"C\n # order,\" which is the transpose of how they were intended to\n # be written into a FITS file. Since all the FITS stuff\n # assumes that we are reading/writing FORTRAN-ordered arrays\n # bytes from/to a C language, we need to transpose our array\n # here so that the FITS stuff has the bytes in the order it\n # expects. This seems less prone to generating bugs than\n # making users remember what state of transpose they are in\n # when dealing with arrays generated here vs. data read in\n # from disk for debugging routines. This is also faster than\n # writing to disk and re-reading, since the ndarray order='F'\n # doesn't actually do any movement of data in memory, it just\n # tells numpy how to interpret the order of indices.\n c_im = np.asarray(c_im)\n adata = c_im.flatten()#order='K')# already in C order in memory\n # The [::-1] reverses the indices\n adata = np.ndarray(shape=c_im.shape[::-1],\n buffer=adata, order='F')\n \n hdu = fits.PrimaryHDU(adata)\n self.get_keys()\n for k in self.FITS_keys:\n hdu.header[k[0]] = k[1]\n self.HDUList = fits.HDUList(hdu)\n return self.HDUList", "def read_image( filename ):\n new_dict = scipy.io.loadmat( filename )\n print \"loaded\", filename, \".mat\"\n return new_dict[filename]", "def read_image(self, verbose=False, as3d=True):\n if not as3d:\n return TIFF.read_image(self, verbose)\n \n # Code is initially copy-paste from TIFF:\n width = self.GetField('ImageWidth')\n height = self.GetField('ImageLength')\n bits = self.GetField('BitsPerSample')\n sample_format = self.GetField('SampleFormat')\n compression = self.GetField('Compression')\n \n typ = self.get_numpy_type(bits, sample_format)\n \n if typ is None:\n if bits==1:\n typ = np.uint8\n itemsize = 1\n elif bits==4:\n typ = np.uint32\n itemsize = 4\n else:\n raise NotImplementedError (`bits`)\n else:\n itemsize = bits/8\n \n \n # in order to allocate the numpy array, we must count the directories:\n # code borrowed from TIFF.iter_images():\n depth = 0\n while True:\n depth += 1\n if self.LastDirectory():\n break\n self.ReadDirectory()\n self.SetDirectory(0)\n \n # we proceed assuming all directories have the same properties from above.\n layer_size = width * height * itemsize\n total_size = layer_size * depth\n arr = np.zeros((depth, height, width), typ)\n \n if compression == COMPRESSION_NONE:\n ReadStrip = self.ReadRawStrip\n else:\n ReadStrip = self.ReadEncodedStrip\n \n layer = 0\n while True:\n pos = 0\n elem = None\n for strip in range (self.NumberOfStrips()):\n if elem is None:\n elem = ReadStrip(strip, arr.ctypes.data + layer * layer_size + pos, layer_size)\n elif elem:\n elem = ReadStrip(strip, arr.ctypes.data + layer * layer_size + pos, min(layer_size - pos, elem))\n pos += elem\n if self.LastDirectory():\n break\n self.ReadDirectory()\n layer += 1\n self.SetDirectory(0)\n return arr", "def view_fits(infile):\n pf = pyfits.open(infile) # Read-only\n\n # Look at available extensions.\n # This is slightly different than IRAF catfits.\n pf.info()\n\n for ext in range(4):\n # Look at all the headers\n print\n print repr(pf[ext].header)\n print\n\n if ext == 0:\n continue\n\n # View all the data, except PRIMARY header\n fig = pylab.figure()\n ax = fig.add_subplot(111)\n cax = ax.imshow(pf[ext].data)\n ax.set_title('Ext {}'.format(ext))\n 
fig.colorbar(cax)\n\n # You can manipulate FITS data like any numpy array.\n # Python starts from 0, IRAF starts from 1.\n # Python indexing is [Y,X], IRAF is [X,Y].\n # Python index range is [inclusive:exclusive],IRAF is [inclusive:inclusive].\n print\n print 'Mean SCI at IRAF region X=10:55 Y=80]:', \\\n pf['SCI',1].data[79,9:55].mean()\n print 'ERR at IRAF coord X=50 Y=10:', pf['ERR',1].data[9,49]\n print\n\n pf.close()", "def load_measurement(file_name):\n return np.array(Image.open(file_name))", "def test_Image_FITS_IO():\n for i in range(ntypes):\n array_type = types[i]\n\n if tchar[i][0] == 'C':\n # Cannot write complex Images to fits. Check for an exception and continue.\n ref_image = galsim.Image(ref_array.astype(array_type))\n test_file = os.path.join(datadir, \"test\"+tchar[i]+\".fits\")\n with assert_raises(ValueError):\n ref_image.write(test_file)\n continue\n\n #\n # Test input from a single external FITS image\n #\n\n # Read the reference image to from an externally-generated fits file\n test_file = os.path.join(datadir, \"test\"+tchar[i]+\".fits\")\n # Check pyfits read for sanity\n with pyfits.open(test_file) as fits:\n test_array = fits[0].data\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_array,\n err_msg=\"PyFITS failing to read reference image.\")\n\n # Then use galsim fits.read function\n # First version: use pyfits HDUList\n with pyfits.open(test_file) as hdu:\n test_image = galsim.fits.read(hdu_list=hdu)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Failed reading from PyFITS PrimaryHDU input.\")\n\n # Second version: use file name\n test_image = galsim.fits.read(test_file)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" read failed reading from filename input.\")\n\n #\n # Test full I/O on a single internally-generated FITS image\n #\n\n # Write the reference image to a fits file\n ref_image = galsim.Image(ref_array.astype(array_type))\n test_file = os.path.join(datadir, \"test\"+tchar[i]+\"_internal.fits\")\n ref_image.write(test_file)\n\n # Check pyfits read for sanity\n with pyfits.open(test_file) as fits:\n test_array = fits[0].data\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_array,\n err_msg=\"Image\"+tchar[i]+\" write failed.\")\n\n # Then use galsim fits.read function\n # First version: use pyfits HDUList\n with pyfits.open(test_file) as hdu:\n test_image = galsim.fits.read(hdu_list=hdu)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Failed reading from PyFITS PrimaryHDU input.\")\n\n # Second version: use file name\n test_image = galsim.fits.read(test_file)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" read failed reading from filename input.\")\n\n assert_raises(ValueError, galsim.fits.read, test_file, compression='invalid')\n assert_raises(ValueError, ref_image.write, test_file, compression='invalid')\n assert_raises(OSError, galsim.fits.read, test_file, compression='rice')\n assert_raises(OSError, galsim.fits.read, 'invalid.fits')\n assert_raises(OSError, galsim.fits.read, 'config_input/catalog.fits', hdu=1)\n\n assert_raises(TypeError, galsim.fits.read)\n assert_raises(TypeError, galsim.fits.read, test_file, hdu_list=hdu)\n assert_raises(TypeError, ref_image.write)\n assert_raises(TypeError, ref_image.write, file_name=test_file, hdu_list=hdu)\n\n # If clobbert = False, then trying to overwrite will raise 
an OSError\n assert_raises(OSError, ref_image.write, test_file, clobber=False)\n\n #\n # Test various compression schemes\n #\n\n # These tests are a bit slow, so we only bother to run them for the first dtype\n # when doing the regular unit tests. When running python test_image.py, all of them\n # will run, so when working on the code, it is a good idea to run the tests that way.\n if i > 0 and __name__ != \"__main__\":\n continue\n\n test_file0 = test_file # Save the name of the uncompressed file.\n\n # Test full-file gzip\n test_file = os.path.join(datadir, \"test\"+tchar[i]+\".fits.gz\")\n test_image = galsim.fits.read(test_file, compression='gzip')\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" read failed for explicit full-file gzip\")\n\n test_image = galsim.fits.read(test_file)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" read failed for auto full-file gzip\")\n\n test_file = os.path.join(datadir, \"test\"+tchar[i]+\"_internal.fits.gz\")\n ref_image.write(test_file, compression='gzip')\n test_image = galsim.fits.read(test_file)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" write failed for explicit full-file gzip\")\n\n ref_image.write(test_file)\n test_image = galsim.fits.read(test_file)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" write failed for auto full-file gzip\")\n\n # With compression = None or 'none', astropy automatically figures it out anyway.\n test_image = galsim.fits.read(test_file, compression=None)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" write failed for auto full-file gzip\")\n\n assert_raises(OSError, galsim.fits.read, test_file0, compression='gzip')\n\n # Test full-file bzip2\n test_file = os.path.join(datadir, \"test\"+tchar[i]+\".fits.bz2\")\n test_image = galsim.fits.read(test_file, compression='bzip2')\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" read failed for explicit full-file bzip2\")\n\n test_image = galsim.fits.read(test_file)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" read failed for auto full-file bzip2\")\n\n test_file = os.path.join(datadir, \"test\"+tchar[i]+\"_internal.fits.bz2\")\n ref_image.write(test_file, compression='bzip2')\n test_image = galsim.fits.read(test_file)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" write failed for explicit full-file bzip2\")\n\n ref_image.write(test_file)\n test_image = galsim.fits.read(test_file)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" write failed for auto full-file bzip2\")\n\n # With compression = None or 'none', astropy automatically figures it out anyway.\n test_image = galsim.fits.read(test_file, compression=None)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" write failed for auto full-file gzip\")\n\n assert_raises(OSError, galsim.fits.read, test_file0, compression='bzip2')\n\n # Test rice\n test_file = os.path.join(datadir, \"test\"+tchar[i]+\".fits.fz\")\n test_image = galsim.fits.read(test_file, compression='rice')\n 
np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" read failed for explicit rice\")\n\n test_image = galsim.fits.read(test_file)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" read failed for auto rice\")\n\n test_file = os.path.join(datadir, \"test\"+tchar[i]+\"_internal.fits.fz\")\n ref_image.write(test_file, compression='rice')\n test_image = galsim.fits.read(test_file)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" write failed for explicit rice\")\n\n ref_image.write(test_file)\n test_image = galsim.fits.read(test_file)\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" write failed for auto rice\")\n\n assert_raises(OSError, galsim.fits.read, test_file0, compression='rice')\n assert_raises(OSError, galsim.fits.read, test_file, compression='none')\n\n # Test gzip_tile\n test_file = os.path.join(datadir, \"test\"+tchar[i]+\"_internal.fits.gzt\")\n ref_image.write(test_file, compression='gzip_tile')\n test_image = galsim.fits.read(test_file, compression='gzip_tile')\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" write failed for gzip_tile\")\n\n assert_raises(OSError, galsim.fits.read, test_file0, compression='gzip_tile')\n assert_raises(OSError, galsim.fits.read, test_file, compression='none')\n\n # Test hcompress\n # Note: hcompress is a lossy algorithm, and starting with astropy 2.0.5,\n # the fidelity of the reconstruction is really quite poor, so only test with\n # rtol=0.1. I'm not sure if this is a bug in astropy, or just the nature\n # of the hcompress algorithm. 
But I'm ignoring it for now, since I don't\n # think too many people use hcompress anyway.\n test_file = os.path.join(datadir, \"test\"+tchar[i]+\"_internal.fits.hc\")\n ref_image.write(test_file, compression='hcompress')\n test_image = galsim.fits.read(test_file, compression='hcompress')\n np.testing.assert_allclose(ref_array.astype(types[i]), test_image.array, rtol=0.1,\n err_msg=\"Image\"+tchar[i]+\" write failed for hcompress\")\n\n assert_raises(OSError, galsim.fits.read, test_file0, compression='hcompress')\n assert_raises(OSError, galsim.fits.read, test_file, compression='none')\n\n # Test plio (only valid on positive integer values)\n if tchar[i] in ['S', 'I']:\n test_file = os.path.join(datadir, \"test\"+tchar[i]+\"_internal.fits.plio\")\n ref_image.write(test_file, compression='plio')\n test_image = galsim.fits.read(test_file, compression='plio')\n np.testing.assert_array_equal(ref_array.astype(types[i]), test_image.array,\n err_msg=\"Image\"+tchar[i]+\" write failed for plio\")\n\n assert_raises(OSError, galsim.fits.read, test_file0, compression='plio')\n assert_raises(OSError, galsim.fits.read, test_file, compression='none')\n\n # Check a file with no WCS information\n nowcs_file = 'fits_files/blankimg.fits'\n im = galsim.fits.read(nowcs_file)\n assert im.wcs == galsim.PixelScale(1.0)\n\n # If desired, can get a warning about this\n with assert_warns(galsim.GalSimWarning):\n im = galsim.fits.read(nowcs_file, suppress_warning=False)\n assert im.wcs == galsim.PixelScale(1.0)", "def open_img(fpath):\n im = fabio.OXDimage.OXDimage()\n \n with open(fpath, \"r\") as fh:\n s = fh.read(10)\n if s==\"get(DATA):\":\n fh2 = io.BytesIO()\n fh.seek(82)\n fh2.write(fh.read()[:-8])\n im.read(fh2)\n else:\n im.read(fh)\n return im", "def read(cls, fname):\n return cls.from_fits(fits.open(fname)['GALAXY'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a pickle file; the expected format is an NxM numpy array
def readArrayPkl(fn):
    fh = open(fn, 'rb')
    im = pickle.load(fh)
    fh.close()
    return im
[ "def dem_file_read(self, file_path):\n with open(file_path, 'rb') as handle:\n dem_array_data = pickle.load(handle)\n handle.close()\n return dem_array_data", "def load_numpy_object_demo(file_name: Path)\\\n -> Union[np.ndarray, np.recarray]:\n return np.load(str(file_name), allow_pickle=True)", "def load_model(self, filename):\n [self.num_layers, self.sizes, self.weights, self.biases] = np.load(\n filename, allow_pickle=True)", "def load_npy(name):\n\twith open(name, \"rb\") as fr:\n\t\treturn np.load(fr)", "def pickle_load(file_path):\n with open(file_path, 'rb') as file_ptr:\n data = pickle.load(file_ptr)\n return data", "def load_mat_from_bin(filename, dtype, shape):\n\n f = open(filename, 'rb')\n byte_array = f.read()\n f.close()\n np_array = np.frombuffer(byte_array, dtype=dtype)\n np_array = np_array.reshape(shape)\n return np_array", "def _read_npy_file(file):\n data = np.load(file, allow_pickle=True)\n labels = []\n\n for sequences, label in data:\n labels.append(str(label))\n\n return labels", "def microarray_exploration(data_path='microarray_data.pickle'):\r\n\r\n # loading the data:\r\n with open(data_path, 'rb') as f:\r\n data = pickle.load(f)\r\n return data", "def poincare_load(filename):\n with open(filename, 'rb') as input:\n data = pickle.load(input)\n return data", "def deserialize(buff: str) -> np.ndarray:\n temp = BytesIO(buff)\n arr = np.load(temp, allow_pickle=False)\n return arr", "def loadnpz(npzfile):\n return np.load(npzfile, allow_pickle=True)", "def load_npy(self, filename):\n self.set_data(np.load(filename))", "def load_data_array(fname):\n data = np.genfromtxt(fname)\n #data = np.load(fname)\n return data", "def _read_binary_matrix(filename):\n with tf.gfile.GFile(filename, \"rb\") as f:\n s = f.read()\n magic = int(np.frombuffer(s, \"int32\", 1))\n ndim = int(np.frombuffer(s, \"int32\", 1, 4))\n eff_dim = max(3, ndim)\n raw_dims = np.frombuffer(s, \"int32\", eff_dim, 8)\n dims = []\n for i in range(0, ndim):\n dims.append(raw_dims[i])\n\n dtype_map = {507333717: \"int8\",\n 507333716: \"int32\",\n 507333713: \"float\",\n 507333715: \"double\"}\n data = np.frombuffer(s, dtype_map[magic], offset=8 + eff_dim * 4)\n data = data.reshape(tuple(dims))\n return data", "def load_ndarray(filename):\r\n if filename.endswith('.npy'):\r\n compress = False\r\n status = 'uncompressed' # Everything OK!\r\n elif filename.endswith('.7z'):\r\n compress = True\r\n status = 'compressed'\r\n else:\r\n file_npy = filename + '.npy'\r\n if file_npy in os.listdir():\r\n filename = file_npy\r\n compress = False\r\n status = 'uncompressed'\r\n else:\r\n file_7z = filename + '.7z'\r\n if file_7z in os.listdir():\r\n filename = file_7z\r\n compress = True\r\n status = 'compressed'\r\n else:\r\n raise FileNotFoundError\r\n\r\n # ---------------------------------\r\n size = os.stat(filename).st_size\r\n print('Loading {0:,} [{1}] bytes from disk... 
File: {2}'\r\n .format(size, status, filename))\r\n\r\n if compress:\r\n if shutil.which('7z') is None:\r\n raise FileNotFoundError('7z not found on the PATH!')\r\n subprocess.Popen('7z e ' + filename + ' -mmt', shell=True).wait()\r\n ndarray = np.load(filename[:-3] + '.npy')\r\n else:\r\n ndarray = np.load(filename)\r\n print('Succesfully loaded {0!s}-array ({1:,} bytes) from {2}!'\r\n .format(ndarray.shape, size, filename))\r\n\r\n return ndarray", "def deserialize(arr):\n return pickle.loads(arr.astype(np.uint8).tobytes())", "def read_pickle(filename):\n if filename.endswith('.gz'):\n f = gzip.open(filename)\n else:\n f = open(filename)\n data = cPickle.load(f)\n f.close()\n return data", "def nploadbz(fname):\n f = bz2.BZ2File(fname, \"r\")\n d = np.load(f)\n f.close()\n return d", "def load_pickle(filename):\n with open(filename, 'rb') as f:\n return pkl.load(f, encoding='latin1')", "def read_multi_dim_data(filename):\n dataset =[]\n\n ##from tutorial\n\n return dataset" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the FITS header for pointing and pixel size information; return the [RA, DEC] position, the pixel resolution, and the reference pixel of [RA, DEC]
def getFITSInfo(fn):
    hdulist = pf.open(fn)
    hdr = hdulist[0].header
    # CTYPE1: RA---[PROJ], projection SIN/TAN/ARC
    # CRVAL1: reference RA position in degrees
    # CRPIX1: location of reference pixel
    # CDELT1: delta RA/pixel size in degrees
    # CTYPE2: DEC--[PROJ], projection SIN/TAN/ARC
    # CRVAL2: reference DEC position in degrees
    # CRPIX2: location of reference pixel
    # CDELT2: delta DEC/pixel size in degrees
    ra = hdr['CRVAL1']
    dra = hdr['CDELT1']
    raPix = hdr['CRPIX1']
    dec = hdr['CRVAL2']
    ddec = hdr['CDELT2']
    decPix = hdr['CRPIX2']
    hdulist.close()
    return {'ra': ra, 'dec': dec, 'dra': dra, 'ddec': ddec, 'raPix': raPix, 'decPix': decPix}
[ "def get_info_from_header(fits):\n hdr = pyfits.getheader(fits, 1)\n odate = hdr['DATE-OBS']\n obsid = hdr['OBS_ID']\n expo = hdr['EXPOSURE']\n expo = float(expo)\n atemp = re.split('-', odate)\n year = float(atemp[0])\n mon = float(atemp[1])\n fyear = year + mon/12\n det = hdr['DETNAM'].lower()\n ra_pnt = hdr['RA_PNT']\n ra_pnt = float(ra_pnt)\n dec_pnt= hdr['DEC_PNT']\n dec_pnt= float(dec_pnt)\n\n return [odate, obsid, expo, fyear, det, ra_pnt, dec_pnt]", "def _det_header(self,):\n from astropy.io import fits\n from astropy import units\n coef = \"\"\"XTENSION= 'IMAGE ' / IMAGE extension \nBITPIX = -32 / number of bits per data pixel \nNAXIS = 2 / number of data axes \nNAXIS1 = 1987 / length of data axis 1 \nNAXIS2 = 2046 / length of data axis 2 \nPCOUNT = 0 / required keyword; must = 0 \nGCOUNT = 1 / required keyword; must = 1 \nCRPIX1S = 1448.000000 \nCRPIX2S = 703.000000 \nCRVAL1S = 136.204166175583 \nCRVAL2S = -32.4930169210235 \nCDELT1S = -0.000156666785871793 \nCDELT2S = 0.000156666785871793 \nPC1_1S = 0.755670245086613 \nPC1_2S = -0.654951085758962 \nPC2_1S = 0.654952042271387 \nPC2_2S = 0.755671475100696 \nCTYPE1S = 'RA---TAN-SIP' \nCTYPE2S = 'DEC--TAN-SIP' \nCUNIT1S = 'deg ' / X coordinate units \nCUNIT2S = 'deg ' / Y coordinate units \nCRPIX1 = 996.5 \nCRPIX2 = 1021.5 \nCRVAL1 = 0. \nCRVAL2 = 0. \nCDELT1 = 0.009075 \nCDELT2 = 0.009075 \nCTYPE1 = 'DETX ' / X coordinate type \nCTYPE2 = 'DETY ' / Y coordinate type \nCUNIT1 = 'mm ' / X coordinate units \nCUNIT2 = 'mm ' / Y coordinate units \nA_ORDER = 3 \nB_ORDER = 3 \nA_1_0 = -0.00125153527908 \nA_2_0 = -1.21308092203E-05 \nA_1_1 = 3.57697489791E-06 \nA_0_2 = -4.98655501953E-06 \nA_3_0 = -2.23440999701E-10 \nA_2_1 = 2.81157465077E-10 \nA_1_2 = 1.07794901513E-09 \nA_0_3 = 1.81850672672E-09 \nB_0_1 = -0.0119355520972 \nB_2_0 = 1.29190114841E-06 \nB_1_1 = -6.22446958796E-06 \nB_0_2 = 6.50166571708E-06 \nB_3_0 = 1.5607230673E-09 \nB_2_1 = 3.10676603198E-09 \nB_1_2 = 1.83793386146E-09 \nB_0_3 = 3.0412214095E-12 \nAP_ORDER= 3 / Polynomial order, axis 1, detector to sky \nBP_ORDER= 3 / Polynomial order, axis 2, detector to sky \nAP_1_0 = 0.00125480395117 \nAP_0_1 = -1.36411236372E-07 \nAP_2_0 = 1.2138698679E-05 \nAP_1_1 = -3.57720222046E-06 \nAP_0_2 = 5.12067402118E-06 \nAP_3_0 = 5.04857662962E-10 \nAP_2_1 = -4.41525720641E-10 \nAP_1_2 = -8.91001063794E-10 \nAP_0_3 = -2.06470726234E-09 \nBP_1_0 = 4.40624953378E-07 \nBP_0_1 = 0.0121093187715 \nBP_2_0 = -1.42450854484E-06 \nBP_1_1 = 6.34534204537E-06 \nBP_0_2 = -6.67738246399E-06 \nBP_3_0 = -1.675660935E-09 \nBP_2_1 = -3.07108005097E-09 \nBP_1_2 = -2.02039013787E-09 \nBP_0_3 = 8.68667185361E-11 \n \"\"\"\n hdr = fits.Header.fromstring(coef,'\\n') \n hdr['CRVAL1S'] = self.pointing.ra.deg\n hdr['CRVAL2S'] = self.pointing.dec.deg\n hdr['CRPIX1S'], hdr['CRPIX2S'] = self.grism_boresight(order=0) # this is in IMG coordinate\n x = self.PA(self.roll.to(units.deg)).to(units.rad).value\n hdr['PC1_1S'] = np.cos(x)\n hdr['PC1_2S'] = np.sin(x)\n hdr['PC2_1S'] = -np.sin(x)\n hdr['PC2_2S'] = np.cos(x)\n return hdr", "def _parse_raster_info(self, prop=RASTER_INFO):\n\n raster_info = {}.fromkeys(COMPLEX_DEFINITIONS[prop], u'')\n\n # Ensure conversion of lists to newlines is in place\n raster_info['dimensions'] = get_default_for_complex_sub(\n prop=prop,\n subprop='dimensions',\n value=parse_property(self._xml_tree, None, self._data_map, '_ri_num_dims'),\n xpath=self._data_map['_ri_num_dims']\n )\n\n xpath_root = self._get_xroot_for(prop)\n xpath_map = self._data_structures[prop]\n\n for dimension in 
parse_complex_list(self._xml_tree, xpath_root, xpath_map, RASTER_DIMS):\n dimension_type = dimension['type'].lower()\n\n if dimension_type == 'vertical':\n raster_info['vertical_count'] = dimension['size']\n\n elif dimension_type == 'column':\n raster_info['column_count'] = dimension['size']\n raster_info['x_resolution'] = u' '.join(dimension[k] for k in ['value', 'units']).strip()\n\n elif dimension_type == 'row':\n raster_info['row_count'] = dimension['size']\n raster_info['y_resolution'] = u' '.join(dimension[k] for k in ['value', 'units']).strip()\n\n return raster_info if any(raster_info[k] for k in raster_info) else {}", "def _pngxy(data):\n ihdr = data.index(b\"IHDR\")\n # next 8 bytes are width/height\n return struct.unpack(\">ii\", data[ihdr + 4 : ihdr + 12])", "def read_in_1d_fits(path):\n data_arr = pf.open(path)\n hdf = data_arr[0].header\n hde = data_arr[0].header\n F = data_arr[0].data\n E = data_arr[1].data\n W = (hdf['CRVAL1'] + (hdf['CRPIX1'] - 1 + np.arange(hdf['NAXIS1']))*hdf['CDELT1'])*10 \n return W, F, E, hdf, hde", "def parse_header(self, header):\n # Should be 8 words long\n head_int = np.fromstring(header, dtype=np.uint32) \n\n hdict = self.header_dict\n\n t_ind = hdict['time']\n frame_ind = hdict['frame']\n stat_ind = hdict['station']\n link_ind = hdict['link']\n slot_ind = hdict['slot']\n eud2_ind = hdict['eud2']\n\n station = self.bit_manip(head_int[stat_ind[0]], stat_ind[1], stat_ind[2])\n link = self.bit_manip(head_int[link_ind[0]], link_ind[1], link_ind[2])\n slot = self.bit_manip(head_int[slot_ind[0]], slot_ind[1], slot_ind[2])\n frame = self.bit_manip(head_int[frame_ind[0]], frame_ind[1], frame_ind[2])\n time = self.bit_manip(head_int[t_ind[0]], t_ind[1], t_ind[2])\n count = self.bit_manip(head_int[eud2_ind[0]], eud2_ind[1], eud2_ind[2])\n\n return station, link, slot, frame, time, count", "def readFITS(fn,hdr=False):\n hdulist=pf.open(fn)\n im=hdulist[0].data\n hdulist.close()\n if hdr:\n return im[0,0],getFITSInfo(fn)\n else: return im[0,0]", "def geo_meta(dicomfile):\n\n # rows and columns\n rows = dicomfile[0x0028, 0x0010]\n rows = rows.value\n cols = dicomfile[0x0028, 0x0011]\n cols = cols.value\n matrix_size = [rows, cols]\n\n # per-frame functional group sequence\n elem = dicomfile[0x5200, 0x9230] # pydicom.dataelem.DataElement\n seq = elem.value # pydicom.sequence.Sequence\n elem3 = seq[0] # first frame\n elem4 = elem3.PixelMeasuresSequence # pydicom.sequence.Sequence\n\n for xx in elem4:\n st = xx.SliceThickness\n pixels_space = xx.PixelSpacing\n\n return matrix_size, st, pixels_space", "def openfits(filename):\n hdulist = fits.open(filename)\n data = hdulist[0].data\n header = hdulist[0].header\n \n return data, header", "def parse_header(self, header, size_unit=None, fits_id=''):\n major_key = f'{fits_id}BMAJ'\n minor_key = f'{fits_id}BMIN'\n angle_key = f'{fits_id}BPA'\n if major_key not in header:\n log.error(f\"FITS header contains no beam description \"\n f\"for type '{fits_id}'.\")\n return\n if minor_key not in header:\n minor_key = major_key\n\n if size_unit is None:\n x_fwhm = get_header_quantity(header, major_key,\n default_unit='degree')\n y_fwhm = get_header_quantity(\n header, minor_key, default_unit='degree').to(x_fwhm.unit)\n else:\n size_unit = units.Unit(size_unit)\n x_fwhm = header[major_key] * size_unit\n y_fwhm = header[minor_key] * size_unit\n\n self.position_angle = get_header_quantity(\n header, angle_key, default=0.0, default_unit='degree')\n self.set_xy_fwhm(x_fwhm, y_fwhm)", "def pixels(header: BitmapInfoHeader, data: 
bytes) -> List[Tuple[int, int, int]]:\n width = header[\"width\"]\n padding = (3 * width) % 4\n pix = []\n offset = 0\n for _ in range(header[\"height\"]):\n buf = data[offset : offset + (3 * width)]\n pix += list(struct.iter_unpack(\"BBB\", buf))\n offset += (3 * width) + padding\n top_to_bottom = list(\n reversed(\n [\n pix[h * header[\"width\"] : (h + 1) * header[\"width\"]]\n for h in range(header[\"height\"])\n ]\n )\n )\n return [\n (int(p[0]), int(p[1]), int(p[2])) for sublist in top_to_bottom for p in sublist\n ]", "def read_header(fits_file):\n\n head = {}\n F = pf.open(fits_file)\n H = F[0].header\n head['Ntot'] = H['N_TOT']\n head['Nmu'] = H['N_MU']\n head['Nsig'] = H['N_SIGMA']\n head['Nv'] = H['N_VOIGT']\n head['Ncoef'] = H['N_COEF']\n head['Nspa'] = H['N_SPARSE']\n head['mu'] = [H['MU1'], H['MU2']]\n head['sig'] = [H['SIGMA1'], H['SIGMA2']]\n head['z'] = F[1].data.field('redshift')\n F.close()\n return head", "def read_file_header(self, fn):\n fo = open(fn)\n\n header = []\n\n k=0\n\n while True:\n data_str = fo.read(self.frame_size)\n\n if len(data_str) == 0:\n break\n \n # Read in first 32 bytes of frame\n header = self.parse_header(data_str[:32])\n\n fin = header[2] + 16 * header[1] + 128 * np.arange(8)\n print fin\n\n if len(header) >= 1:\n header = np.concatenate(header).reshape(-1, 6)\n\n return header, data", "def get_size(self, unit='DEFAULT_UNIT'):\n if unit == 'DEFAULT_UNIT': unit = DEFAULT_UNIT\n for i, line in enumerate(self.header_lines):\n match = self._rx_page_size.match(line)\n if match:\n width = _conv_abs_coord(int(match.group(1)), 'pt', unit)\n height = _conv_abs_coord(int(match.group(2)), 'pt', unit)\n return (width, height)", "def _parse_header(fh):\n headerConverters = {\n b'StartFontMetrics': float,\n b'FontName': _to_str,\n b'FullName': _to_str,\n b'FamilyName': _to_str,\n b'Weight': _to_str,\n b'ItalicAngle': float,\n b'IsFixedPitch': _to_bool,\n b'FontBBox': _to_list_of_ints,\n b'UnderlinePosition': _to_int,\n b'UnderlineThickness': _to_int,\n b'Version': _to_str,\n b'Notice': _to_str,\n b'EncodingScheme': _to_str,\n b'CapHeight': float, # Is the second version a mistake, or\n b'Capheight': float, # do some AFM files contain 'Capheight'? -JKS\n b'XHeight': float,\n b'Ascender': float,\n b'Descender': float,\n b'StdHW': float,\n b'StdVW': float,\n b'StartCharMetrics': _to_int,\n b'CharacterSet': _to_str,\n b'Characters': _to_int,\n }\n d = {}\n while 1:\n line = bytes(fh.readline(), 'ascii')\n if not line: break\n line = line.rstrip()\n if line.startswith(b'Comment'): continue\n lst = line.split(b' ', 1 )\n key = lst[0]\n if len( lst ) == 2:\n val = lst[1]\n else:\n val = b''\n #key, val = line.split(' ', 1)\n try: d[key] = headerConverters[key](val)\n except ValueError:\n continue\n except KeyError:\n continue\n if key==b'StartCharMetrics': return d\n raise RuntimeError('Bad parse')", "def parse_header(hdr_file):\n with open(hdr_file, encoding=\"utf8\", errors='ignore') as f:\n text = f.read()\n\n try:\n lines = [e.split() for e in text.split(\"\\n\") if e != \"\"]\n headers = dict(lines)\n is_dem = True if DATUM in headers or Z_SCALE in headers \\\n or PROJECTION in headers else False\n if is_dem and DATUM not in headers:\n msg = 'No \"DATUM\" parameter in DEM header/resource file'\n raise RoipacException(msg)\n except ValueError:\n msg = \"Unable to parse content of %s. 
Is it a ROIPAC header file?\"\n raise RoipacException(msg % hdr_file)\n\n for k in headers.keys():\n if k in INT_HEADERS:\n headers[k] = int(headers[k])\n elif k in STR_HEADERS:\n headers[k] = str(headers[k])\n elif k in FLOAT_HEADERS:\n headers[k] = float(headers[k])\n elif k in DATE_HEADERS:\n headers[k] = parse_date(headers[k])\n else: # pragma: no cover\n pass # ignore other headers\n\n # grab a subset for GeoTIFF conversion\n subset = {ifc.PYRATE_NCOLS: headers[WIDTH],\n ifc.PYRATE_NROWS: headers[FILE_LENGTH],\n ifc.PYRATE_LAT: headers[Y_FIRST],\n ifc.PYRATE_LONG: headers[X_FIRST],\n ifc.PYRATE_X_STEP: headers[X_STEP],\n ifc.PYRATE_Y_STEP: headers[Y_STEP]}\n\n if is_dem:\n subset[ifc.PYRATE_DATUM] = headers[DATUM]\n else:\n subset[ifc.PYRATE_WAVELENGTH_METRES] = headers[WAVELENGTH]\n\n # grab first/second dates from header, or the filename\n has_dates = True if DATE in headers and DATE12 in headers else False\n dates = headers[DATE12] if has_dates else _parse_dates_from(hdr_file)\n subset[ifc.FIRST_DATE], subset[ifc.SECOND_DATE] = dates\n\n # replace time span as ROIPAC is ~4 hours different to (second minus first)\n timespan = (subset[ifc.SECOND_DATE] - subset[ifc.FIRST_DATE]).days / ifc.DAYS_PER_YEAR\n subset[ifc.PYRATE_TIME_SPAN] = timespan\n\n # Add data units of interferogram\n subset[ifc.DATA_UNITS] = RADIANS\n\n # Add InSAR processor flag\n subset[ifc.PYRATE_INSAR_PROCESSOR] = ROIPAC\n\n # add custom X|Y_LAST for convenience\n subset[X_LAST] = headers[X_FIRST] + (headers[X_STEP] * (headers[WIDTH]))\n subset[Y_LAST] = headers[Y_FIRST] + (headers[Y_STEP] * (headers[FILE_LENGTH]))\n\n return subset", "def import_ascii(file_path=None):\n\n if file_path is None:\n file_path = askopenfilename(title='Select AFM image ASCII file', filetypes=((\"ASCII files\", \"*.asc\"),))\n file_name = file_path.split('/')[-1]\n f = open(file_path, 'r')\n\n # Read each line, discriminate between header line and height value line by checking if the\n # content of the first entry of the line is a digit or not\n img = []\n for line in f:\n try:\n first_entry = line.strip().split()[0][-5:]\n meas_par = line.split()[1]\n\n if first_entry.isdigit() or first_entry[-5:-3] == 'e-' or first_entry[-4:-2] == 'e-':\n line = line.strip()\n floats = [float(x) for x in line.split()]\n img.append(np.asarray(floats))\n\n # Find the required measurement information\n elif meas_par == 'x-pixels':\n x_pixels = float(line.split()[-1])\n\n # Find the required measurement information\n elif meas_par == 'y-pixels':\n y_pixels = float(line.split()[-1])\n\n elif meas_par == 'x-length':\n x_length = float(line.split()[-1])\n\n except IndexError:\n pass\n\n if 'x_pixels' not in locals():\n x_pixels = 'unknown'\n print('The amount of x-pixels was not found in the header')\n\n if 'y_pixels' not in locals():\n y_pixels = 'unknown'\n print('The amount of y-pixels was not found in the header')\n\n if 'x_length' not in locals():\n x_length = 'unknown'\n print('The size of the image was not found in the header')\n\n img = np.asarray(img)\n img_meta_data = {'file_name': file_name,\n 'file_path': file_path,\n 'x_pixels': x_pixels,\n 'x_length': x_length,\n 'y_pixels': y_pixels,\n 'pixel_size': x_length/x_pixels}\n\n return np.asarray(img), img_meta_data", "def getdata(self):\n #log.info('Reading extension {} of image {}'.format(self.ccdnum,self.imfile))\n image = galsim.fits.read(self.imfile,hdu=self.cpimage_hdu) # [ADU]\n invvar = galsim.fits.read(self.ivarfile,hdu=self.cpimage_hdu) # [1/ADU^2]\n \n imhdr = 
galsim.fits.FitsHeader(self.imfile,hdu=self.cpimage_hdu)\n ivarhdr = galsim.fits.FitsHeader(self.ivarfile,hdu=self.cpimage_hdu)\n \n self.width = image.xmax\n self.height = image.ymax\n \n return image, invvar, imhdr, ivarhdr", "def LSIReader(filename): \n\n metadata = {}\n \n dict_args_int32 = ['pixels_per_column','pixels_per_row','channels',\n 'numeric_type_indicator','apodization_type','remap_type',\n 'image_plane_indicator']\n \n dict_args_float32 = ['rf_center_frequency','rf_bandwidth','dwell_angle',\n 'cone_angle','graze_angle','twist_angle','column_sample_spacing',\n 'row_sample_spacing','column_oversampling_factor',\n 'row_oversampling_factor','column_resolution','row_resolution']\n\n file = open(filename, \"rb\")\n file.seek(0, 2)\n file_size = file.tell()\n file.seek(0, 0)\n num = file.read(200)\n text = file.read(200)\n data = file.read(file_size - file.tell())\n file.close()\n \n for i, arg in enumerate(dict_args_int32):\n metadata[arg] = np.int32(struct.unpack('<i', num[4*i:4*i+4]))\n\n N = len(dict_args_int32) * 4\n for i, arg in enumerate(dict_args_float32):\n metadata[arg] = np.float32(struct.unpack('<f', num[N+4*i:4*i+4+N]))\n \n metadata['text_header'] = str(text, 'utf-8')\n \n \n if metadata['numeric_type_indicator'][0] == 1:\n data = np.frombuffer(data, np.float32)\n elif metadata['numeric_type_indicator'][0] == 2:\n data = np.frombuffer(data, np.complex64)\n else:\n err = 'Invalid \"numeric_type_indicator\". Valid range is 1 or 2'\n ValueError(err) \n \n data = data.reshape(metadata['pixels_per_row'][0], \n metadata['pixels_per_column'][0],\n metadata['channels'][0])\n \n return data, metadata", "def info(img):\n if hasattr(aq_base(img), 'meta_type') and img.meta_type == 'Image':\n ct, w, h = img.content_type, img.width, img.height\n # Zope Image object can be buggy (tiff)\n if isinstance(w, int) and isinstance(h, int) and ct.startswith('image/'):\n return ct, w, h\n\n if isinstance(img, File):\n img = ofsFileHandler(img)\n\n # now we are working either on python file-like object or on str\n if hasattr(img, 'seek'):\n img_header = img.read(30)\n img.seek(0)\n elif isinstance(img, str):\n img_header = img[:30]\n img = StringIO(img)\n else:\n raise ValueError(\"%r\" % img)\n\n format, width, height = zopeGetImageInfo(img_header)\n if (width < 0 or height < 0) and PIL_OK:\n try:\n img = PIL.Image.open(img)\n width, height = img.size\n if not format:\n format = PIL.Image.MIME.get(img.format,\n 'application/octet-stream')\n except IOError, e: # TODO a lot\n format = 'application/octet-stream'\n width, height = -1, -1\n\n if width < 0:\n width = None\n if height < 0:\n height = None\n\n return format, width, height" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write Hermite coeffs and metadata to a pickle file
def writeHermiteCoeffs(fn, coeffs, xc, size, beta, norder, pos=[0., 0., 0., 0.], mode='hermite', info=''):
    d = {'coeffs': coeffs,
         'mode': mode,
         'xc': xc,
         'size': size,
         'beta': beta,
         'norder': norder,
         'ra': pos[0],
         'dec': pos[1],
         'dra': pos[2],
         'ddec': pos[3],
         'info': info}
    fh = open(fn, 'wb')
    pickle.dump(d, fh)
    fh.close()
[ "def writeHessian(self):\n\t\tself.makeHessian()\n\t\tnp.savetxt(\"hessian.dat\",self.H,\"%15.7f\",\" \",\"\\n\")", "def writeLageurreCoeffs(fn,coeffs,xc,size,beta,norder,pos=[0.,0.,0.,0.],mode='laguerre',info=''):\n d={ 'coeffs':coeffs,\n 'mode':mode,\n 'xc':xc,\n 'size':size,\n 'beta':beta,\n 'norder':norder,\n 'ra':pos[0],\n 'dec':pos[1],\n 'dra':pos[2],\n 'ddec':pos[2],\n 'info': info }\n fh=open(fn,'wb')\n pickle.dump(d,fh)\n fh.close()", "def save_model(self):\n np.savetxt(\"weighth.csv\", self.wh, delimiter=\",\")\n np.savetxt(\"weighto.csv\", self.wo, delimiter=\",\")", "def save(self):\n weights_filepath = '{}_weights.buf'.format(os.path.splitext(self.weights_hdf5_filepath)[0])\n with open(weights_filepath, mode='wb') as f:\n f.write(self.weights)\n metadata_filepath = '{}_metadata.json'.format(os.path.splitext(self.weights_hdf5_filepath)[0])\n with open(metadata_filepath, mode='w') as f:\n json.dump(self.metadata, f)", "def pickleModel(self):\n print 'Saving model to file...'\n logit = LogisticRegression(C=self.C, penalty='l1')\n logit.fit(self.X_mapped,self.y)\n \n with open('model','w') as myFile:\n pickle.dump({'logit':logit,'degree':self.degree,'useInverse':self.useInverse,'mean':self.mean,'stdev':self.stdev,'n':self.n,'m':self.m},myFile)", "def write_to_file(self, filename):\n # check to see if the map exists from instantiation\n if hasattr(self, 'map'):\n sunpy_meta = self.map.meta\n\n psihdf.wrh5_meta(filename, self.x, self.y, np.array([]),\n self.data, chd_meta=self.info, sunpy_meta=sunpy_meta)", "def save(self, weights):\n numpy.save(self.file, weights)\n with open(self.file, \"rb\") as source:\n with lzma.open(self.compressedFile, \"w\") as compressor:\n compressor.write(source.read())\n remove(self.file)", "def dump(self):\n import pickle as pkl\n \n filename = self.create_output_name(step=self.time_step_count)\n filename = filename.replace('.bin','.pkl')\n file = open(filename, \"wb\")\n pkl.dump(self, file)\n file.close()", "def dump_model_to_file(prompt_string, feature_ext, classifier, text, score, model_path):\n model_file = {'prompt': prompt_string, 'extractor': feature_ext, 'model': classifier, 'text' : text, 'score' : score}\n pickle.dump(model_file, file=open(model_path, \"w\"))", "def save(self, filename):\n if (filename[-5:] != '.hmat'):\n filename += '.hmat'\n h5f = h5py.File(filename, 'w')\n h5f.create_dataset('matrix', data=self.matrix, compression = 'gzip', compression_opts=9)\n h5f.create_dataset('idx', data=self.idx, compression = 'gzip', compression_opts=9)\n h5f.create_dataset('applyedMethods', data=cPickle.dumps(self._applyedMethods))\n if hasattr(self,\"genome\") and hasattr(self,\"resolution\"):\n h5f.create_dataset('genome',data = cPickle.dumps(self.genome))\n h5f.create_dataset('resolution',data = cPickle.dumps(self.resolution))\n else:\n warnings.warn(\"No genome and resolution is specified, attributes are recommended for matrix.\")\n \n h5f.close()", "def _write_linear(name_pfx, state_dict, path):\n with open(path, 'wb') as fout:\n weight = state_dict['{}.weight'.format(name_pfx)]\n for row in weight:\n array('f', row).tofile(fout) # output * input * 4\n bias_name = '{}.bias'.format(name_pfx)\n if bias_name in state_dict:\n bias = state_dict[bias_name]\n array('f', bias).tofile(fout) # output * 4", "def save(self,file):\n assert \".pymodel\" in file\n with open(file,\"w\") as stream:\n pickle.dump(self,stream)", "def save(X, Y, prices, filename):\n with pd.HDFStore(filename, 'w') as store:\n X.to_hdf(store, 'X')\n Y.to_hdf(store, 'Y')\n 
prices.to_hdf(store, 'prices')", "def guardar(self):\n pickle_out = open(\"X.pickle\", \"wb\")\n pickle.dump(self.features, pickle_out)\n pickle_out.close()\n\n pickle_out = open(\"Y.pickle\", \"wb\")\n pickle.dump(self.labels, pickle_out)\n pickle_out.close()", "def save_model_params(args):\n model_info_path = os.path.join(args.model_dir, 'model_info.pth')\n with open(model_info_path, 'wb') as f:\n model_info = {\n 'hidden_dim': args.hidden_dim,\n 'output_dim': args.output_dim\n }\n torch.save(model_info, f)", "def poincare_save(data, filename):\n with open(filename, 'wb') as output:\n pickle.dump(data, output)", "def dump_the_dicts(\r\n dict_bho_mini_t1_post,\r\n dict_bho_mini_t2_post,\r\n dict_bho_mini_t3_post,\r\n dict_parameters_t1,\r\n dict_parameters_t2,\r\n dict_parameters_t3,\r\n dict_parameters_t4,\r\n dict_bho_solver,\r\n pathout='./',\r\n ):\r\n\r\n #dicts of parameters {cotrecho:{params},...}\r\n with open(pathout+'dict_parameters_t1.pickle','wb') as f:\r\n pickle.dump(dict_parameters_t1,f)\r\n\r\n with open(pathout+'dict_parameters_t2.pickle','wb') as f:\r\n pickle.dump(dict_parameters_t2,f)\r\n\r\n with open(pathout+'dict_parameters_t3.pickle','wb') as f:\r\n pickle.dump(dict_parameters_t3,f)\r\n\r\n with open(pathout+'dict_parameters_t4.pickle','wb') as f:\r\n pickle.dump(dict_parameters_t4,f)\r\n\r\n\r\n #dicts of association {cotrecho:{mini},...}\r\n with open(pathout+'dict_bho_mini_t1_post.pickle','wb') as f:\r\n pickle.dump(dict_bho_mini_t1_post,f)\r\n\r\n with open(pathout+'dict_bho_mini_t2_post.pickle','wb') as f:\r\n pickle.dump(dict_bho_mini_t2_post,f) #contains t1 outlets\r\n\r\n with open(pathout+'dict_bho_mini_t3_post.pickle','wb') as f:\r\n pickle.dump(dict_bho_mini_t3_post,f)\r\n\r\n # type 4 does not have an association with mini\r\n #with open('dict_bho_mini_t4_post.pickle','wb') as f:\r\n # pickle.dump(dict_bho_mini_t4_post,f)\r\n\r\n # solver!\r\n with open(pathout+'dict_bho_solver.pickle','wb') as f:\r\n pickle.dump(dict_bho_solver,f)\r\n\r\n print(\" - the dicts were successfully saved!\")\r\n\r\n\r\n return None", "def save_layer_weights(data, list_keys, dims, footer_string, file_name):\n\n data_type = data[list_keys[0]].dtype\n #default precision is FP32\n # The values should be compartible with DataType from Nvinfer.h\n data_prec = 1 if data_type == np.dtype('float16') else 0\n\n meta_data = np.int32([data_prec] + dims)\n meta_count = np.int32(meta_data.shape[0])\n\n out_file = open(file_name, 'wb')\n for key in list_keys:\n out_file.write(data[key].tobytes())\n out_file.write(meta_data.tobytes())\n # write footer\n out_file.write(meta_count.tobytes() + bytearray(footer_string, 'ASCII'))", "def save_to_file(the_experiment, filename):\n #Pickle dumps\n datas = dumps(the_experiment)\n f = open(filename, 'w')\n f.write(datas)\n f.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write Laguerre coeffs and metadata to a pickle file
def writeLageurreCoeffs(fn, coeffs, xc, size, beta, norder, pos=[0., 0., 0., 0.], mode='laguerre', info=''):
    d = {'coeffs': coeffs,
         'mode': mode,
         'xc': xc,
         'size': size,
         'beta': beta,
         'norder': norder,
         'ra': pos[0],
         'dec': pos[1],
         'dra': pos[2],
         'ddec': pos[3],
         'info': info}
    fh = open(fn, 'wb')
    pickle.dump(d, fh)
    fh.close()
[ "def dump(self):\n import pickle as pkl\n \n filename = self.create_output_name(step=self.time_step_count)\n filename = filename.replace('.bin','.pkl')\n file = open(filename, \"wb\")\n pkl.dump(self, file)\n file.close()", "def pickleModel(self):\n print 'Saving model to file...'\n logit = LogisticRegression(C=self.C, penalty='l1')\n logit.fit(self.X_mapped,self.y)\n \n with open('model','w') as myFile:\n pickle.dump({'logit':logit,'degree':self.degree,'useInverse':self.useInverse,'mean':self.mean,'stdev':self.stdev,'n':self.n,'m':self.m},myFile)", "def saveData(fname, grating, params, lines, meta):\r\n pickle.dump((grating, params, lines, meta), open(fname, \"wb\"))", "def poincare_save(data, filename):\n with open(filename, 'wb') as output:\n pickle.dump(data, output)", "def writeHermiteCoeffs(fn,coeffs,xc,size,beta,norder,pos=[0.,0.,0.,0.],mode='hermite',info=''):\n d={ 'coeffs':coeffs,\n 'mode':mode,\n 'xc':xc,\n 'size':size,\n 'beta':beta,\n 'norder':norder,\n 'ra':pos[0],\n 'dec':pos[1],\n 'dra':pos[2],\n 'ddec':pos[2],\n 'info': info }\n fh=open(fn,'wb')\n pickle.dump(d,fh)\n fh.close()", "def saveAsLM(self, path):\n if not path.endswith(\".lm\"):\n path += \".lm\"\n f = open(path, 'w', encoding=self.enc)\n f_lab = open(path+\".lab\", 'w', encoding=self.enc)\n f.write(\"#SpeechMark Landmark File\\n\")\n f.write(\"#SMPRODUCT: TGProcess.py\\n\")\n f.write(\"#SMVERSION: 1\\n\")\n f.write(\"#LMVERSION: 2013-03-26\\n\")\n f.write(\"#WAVEFORM NAME: \"+self.waveformName+\"\\n\")\n f.write(\"#WAVEFORM CHECKSUM: \"+self.waveformChecksum+\"\\n\")\n f.write(\"#FILE CREATED:\"+strftime(\"%m/%d/%Y %H:%M:%S\")+\"\\n\")\n f.write(\"#--------------------------------------------------------------\\n\")\n f.write(\"#\\n\")\n #condense tiers into single list\n items = [(item.mark.replace(\" \",\"_\"), \"%.3f\" % float(item.time)) for tier in self.tiers for item in tier if type(item)==Point]\n items.sort(key=lambda item: item[1])\n last_time = \"0\"\n #write items to both files\n for item in items:\n f.write(item[1]+\" \"+item[0]+\"\\n\")\n f_lab.write(last_time + \" \" + item[1] + \" \" + item[0]+\"\\n\")\n last_time = item[1]", "def guardar(self):\n pickle_out = open(\"X.pickle\", \"wb\")\n pickle.dump(self.features, pickle_out)\n pickle_out.close()\n\n pickle_out = open(\"Y.pickle\", \"wb\")\n pickle.dump(self.labels, pickle_out)\n pickle_out.close()", "def pickle(data, path):\n with open(path, \"wb\") as file_handler:\n pl.dump(data, file_handler)", "def write_pickle(data, filename, protocol=cPickle.HIGHEST_PROTOCOL):\n if filename.endswith('.gz'):\n f = gzip.open(filename, 'wb')\n else:\n f = open(filename, 'wb')\n cPickle.dump(data, f, protocol)\n f.close()", "def write_to_pickle(self, lat_dir):\n filename = os.path.join(lat_dir, 'gspace_%dcons.p' % self.set_n)\n with open(filename, 'wb') as f:\n cPickle.dump(self._lattice, f)", "def save_as_pickle(variable_name, save_name):\n f = open(save_name + '.pckl', 'wb')\n pickle.dump(variable_name, f)\n f.close()", "def save_lattice(lattice, filename):\n np.save(filename, lattice)\n print (\"SOM lattice saved at %s\" %filename)", "def save(self,file):\n assert \".pymodel\" in file\n with open(file,\"w\") as stream:\n pickle.dump(self,stream)", "def _write_linear(name_pfx, state_dict, path):\n with open(path, 'wb') as fout:\n weight = state_dict['{}.weight'.format(name_pfx)]\n for row in weight:\n array('f', row).tofile(fout) # output * input * 4\n bias_name = '{}.bias'.format(name_pfx)\n if bias_name in state_dict:\n bias = state_dict[bias_name]\n 
array('f', bias).tofile(fout) # output * 4", "def saveLabbooktoFile(self, filename):\n \n fd=open(filename,'w')\n import pickle\n pickle.dump(self.meta.labbook,fd)\n fd.close() \n return", "def createPickle(pickleFile, file):\r\n os.chdir(r'D:\\PEFilesIamges\\DataSet')\r\n with open(pickleFile, 'wb') as fileObject:\r\n pkl.dump(file, fileObject)\r\n fileObject.close()", "def storeMoments(filename, data):\n\tfileObject = open(filename, 'wb')\n\tpickle.dump(data, fileObject)\n\tprint ('Data successfully written on disk')", "def export_into_python(self):\n pkl_path = self.model.name + '.pkl'\n with open(pkl_path, 'wb') as fh:\n pickle.dump(self, fh, protocol=2)\n py_str = \"\"\"\n import pickle\n with open('%s', 'rb') as fh:\n model_class = pickle.load(fh)\n \"\"\" % os.path.abspath(pkl_path)\n py_str = textwrap.dedent(py_str)\n py_path = self.model.name + '.py'\n with open(py_path, 'w') as fh:\n fh.write(py_str)", "def save_restart_data(self, filename='restart.pkl'):\r\n t = np.copy(self.physics.engine.t)\r\n X = np.copy(self.physics.engine.X)\r\n arr_n = np.copy(self.physics.engine.op_vals_arr_n)\r\n data = [t, X, arr_n]\r\n with open(filename, \"wb\") as fp:\r\n pickle.dump(data, fp, 4)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
At present, readLageurreCoeffs and readHermiteCoeffs perform the same operations
def readCoeffs(fn):
    return readHermiteCoeffs(fn)
[ "def _read_coefficients(self):\n coeff = self._read_register(_BME280_REGISTER_DIG_T1, end=24)\n coeff = list(unpack('<HhhHhhhhhhhh', bytearray(coeff)))\n coeff = [float(i) for i in coeff]\n self._temp_calib = coeff[:3]\n self._pressure_calib = coeff[3:]\n\n self._humidity_calib = [0]*6\n self._humidity_calib[0] = self._read_byte(_BME280_REGISTER_DIG_H1)\n coeff = self._read_register(_BME280_REGISTER_DIG_H2, end=7)\n coeff = list(unpack('<hBbBbb', bytearray(coeff)))\n self._humidity_calib[1] = float(coeff[0])\n self._humidity_calib[2] = float(coeff[1])\n self._humidity_calib[3] = float((coeff[2] << 4) | (coeff[3] & 0xF))\n self._humidity_calib[4] = float((coeff[4] << 4) | (coeff[3] >> 4))\n self._humidity_calib[5] = float(coeff[5])", "def readCoeffPkl(fn):\n fh = open(fn,'rb')\n coeffDict = pkl.load(fh)\n fh.close()\n return coeffDict", "def LSIReader(filename): \n\n metadata = {}\n \n dict_args_int32 = ['pixels_per_column','pixels_per_row','channels',\n 'numeric_type_indicator','apodization_type','remap_type',\n 'image_plane_indicator']\n \n dict_args_float32 = ['rf_center_frequency','rf_bandwidth','dwell_angle',\n 'cone_angle','graze_angle','twist_angle','column_sample_spacing',\n 'row_sample_spacing','column_oversampling_factor',\n 'row_oversampling_factor','column_resolution','row_resolution']\n\n file = open(filename, \"rb\")\n file.seek(0, 2)\n file_size = file.tell()\n file.seek(0, 0)\n num = file.read(200)\n text = file.read(200)\n data = file.read(file_size - file.tell())\n file.close()\n \n for i, arg in enumerate(dict_args_int32):\n metadata[arg] = np.int32(struct.unpack('<i', num[4*i:4*i+4]))\n\n N = len(dict_args_int32) * 4\n for i, arg in enumerate(dict_args_float32):\n metadata[arg] = np.float32(struct.unpack('<f', num[N+4*i:4*i+4+N]))\n \n metadata['text_header'] = str(text, 'utf-8')\n \n \n if metadata['numeric_type_indicator'][0] == 1:\n data = np.frombuffer(data, np.float32)\n elif metadata['numeric_type_indicator'][0] == 2:\n data = np.frombuffer(data, np.complex64)\n else:\n err = 'Invalid \"numeric_type_indicator\". Valid range is 1 or 2'\n ValueError(err) \n \n data = data.reshape(metadata['pixels_per_row'][0], \n metadata['pixels_per_column'][0],\n metadata['channels'][0])\n \n return data, metadata", "def loadCoefficients(path):\n # FILE_STORAGE_READ\n cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_READ)\n\n # note we also have to specify the type to retrieve other wise we only get a\n # FileNode object back instead of a matrix\n camera_matrix = cv_file.getNode(\"camera_matrix_plex\").mat()\n dist_matrix = cv_file.getNode(\"dist_coeff_plex\").mat()\n\n # Debug: print the values\n # print(\"camera_matrix : \", camera_matrix.tolist())\n # print(\"dist_matrix : \", dist_matrix.tolist())\n\n cv_file.release()\n return [camera_matrix, dist_matrix]", "def readEGM96Coefficients():\n \" Read the coefficients. \"\n degrees = []; orders = []; CcoeffsTemp = []; ScoeffsTemp = [];\n with open(\"EGM96coefficients\", \"r\") as egm96file:\n reader = csv.reader(egm96file, delimiter=\" \")\n for row in reader:\n degrees.append( row[1] ) # There will be some \" \" in row, the delimiter isn't always \" \", sometimes it's \" \"...\n orders.append( row[2] )\n CcoeffsTemp.append( row[3] )\n ScoeffsTemp.append( row[4] )\n \n # Change to numbers from str.\n degrees = [int(x) for x in degrees]\n orders = [int(x) for x in orders]\n CcoeffsTemp = [float(x) for x in CcoeffsTemp]\n ScoeffsTemp = [float(x) for x in ScoeffsTemp]\n \n \" Parse C and S coefficients to an easily usable format. 
\"\n # Store a list of coefficients corresponding to the given degree of len( no. orders corresponding to this degree ).\n Ccoeffs = {0:[1],1:[0,0]}; Scoeffs ={0:[0],1:[0,0]}; # Initial coefficients for spherical Earth. C_10, C_11, and S_11 are 0 if the origin is at the geocentre.\n for i in range(len(degrees)): # Initialise emoty lists.\n Ccoeffs[degrees[i]] = []\n Scoeffs[degrees[i]] = []\n \n for i in range(len(degrees)): # Store the coefficients.\n Ccoeffs[degrees[i]].append( CcoeffsTemp[i] )\n Scoeffs[degrees[i]].append( ScoeffsTemp[i] )\n \n return Ccoeffs, Scoeffs", "def cireclvlRead(ions, filename=None, filetype='cilvl'):\n if filename:\n fname = filename\n else:\n fname = util.ion2filename(ions)\n\n paramname = fname + '.' + filetype\n\n #print('paramname %s'%(paramname))\n\n input = open(paramname,'r')\n lines = input.readlines()\n input.close()\n\n iline = 0\n idx = -1\n while idx < 0:\n aline = lines[iline][0:5]\n idx = aline.find('-1')\n iline += 1\n ndata = iline - 1\n ntrans = ndata//2\n #\n #\n # need to find the maximum number of temperatures, not all lines are the same\n #\n ntemp = np.zeros(ntrans, 'int32')\n iline = 0\n for jline in range(0, ndata, 2):\n dummy = lines[jline].replace(os.linesep, '').split()\n ntemp[iline] = len(dummy[4:])\n iline += 1\n maxNtemp = ntemp.max()\n# print ' maxNtemp = ', maxNtemp\n temp = np.zeros((ntrans,maxNtemp), 'float64')\n iline = 0\n for jline in range(0, ndata, 2):\n recdat = lines[jline].replace(os.linesep, '').split()\n shortT = np.asarray(recdat[4:], 'float64')\n # the result of the next statement is to continue to replicate t\n t = np.resize(shortT, maxNtemp)\n if filetype == 'rrlvl':\n temp[iline] = t\n else:\n temp[iline] = 10.**t\n iline += 1\n #\n lvl1 = np.zeros(ntrans, 'int64')\n lvl2 = np.zeros(ntrans, 'int64')\n ci = np.zeros((ntrans, maxNtemp), 'float64')\n #\n idat = 0\n for jline in range(1, ndata, 2):\n cidat = lines[jline].replace(os.linesep, '').split()\n shortCi = np.asarray(cidat[4:], 'float64')\n lvl1[idat] = int(cidat[2])\n lvl2[idat] = int(cidat[3])\n ci[idat] = np.resize(shortCi, maxNtemp)\n idat += 1\n return {'temperature':temp, 'ntemp':ntemp,'lvl1':lvl1, 'lvl2':lvl2, 'rate':ci,'ref':lines[ndata+1:], 'ionS':ions}", "def read_params():\n import numpy as np\n import os.path\n\n for file_name in 'mfa_input', 'system_input':\n if os.path.isfile(file_name) == False:\n print \"Fatal Error: File \"+ file_name +\" is missing\"\n print \"Finishing script here\"\n exit()\n\n j=0\n with open('mfa_input') as f:\n for line in f:\n j += 1\n if(j==3):\n n_steps = float(line.split()[0])\n if(j==4):\n n_obs = float(line.split()[0]) \n if(j==5):\n dt = float(line.split()[0]) \n if(j==12):\n Lx = float(line.split()[0])\n if(j==13):\n Ly = float(line.split()[0])\n if(j==14):\n Lz = float(line.split()[0])\n delta_t = dt * float(n_obs) \n n_frames = int(n_steps / n_obs) \n\n\n\n #Now read system_input\n j=0\n with open('system_input') as f:\n for line in f:\n j += 1\n if(j==5):\n Lx = Lx * float(line.split()[2])\n Ly = Ly * float(line.split()[3])\n n_mon = float(line.split()[0])\n n_chain = float(line.split()[1])\n if(j==7):\n n_mon_d = float(line.split()[0])\n n_chain_d = float(line.split()[1])\n\n\n\n #j=0\n #with open('conf_old') as f:\n # for line in f:\n # j += 1\n # if(j==3):\n # Lx = Lx * float(line.split()[0])\n # Ly = Ly * float(line.split()[1])\n\n # if(j==2): \n # n_mon = float(line.split()[0])\n # n_chain = float(line.split()[1])\n # n_mon_d = float(line.split()[2])\n # n_chain_d = float(line.split()[3])\n\n return 
Lx,Ly,Lz,int(n_frames),int(n_chain_d*n_mon_d),int(n_mon*n_chain), delta_t", "def writeLageurreCoeffs(fn,coeffs,xc,size,beta,norder,pos=[0.,0.,0.,0.],mode='laguerre',info=''):\n d={ 'coeffs':coeffs,\n 'mode':mode,\n 'xc':xc,\n 'size':size,\n 'beta':beta,\n 'norder':norder,\n 'ra':pos[0],\n 'dec':pos[1],\n 'dra':pos[2],\n 'ddec':pos[2],\n 'info': info }\n fh=open(fn,'wb')\n pickle.dump(d,fh)\n fh.close()", "def read_data( filename ):\n\n # read first word at first line\n with open( filename, 'r' ) as f:\n lattice = f.readline().split()[0] \n\n\n # read volumen and energy results \n data = np.loadtxt(filename, skiprows=1) \n\n return lattice, factor[lattice]*data[:,0]**3, data[:,1]", "def load_fluctuations_3D_fluc_only(self):\n #similar to the 2D case, we first read one file to determine the total toroidal plane number in the simulation\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[0]).zfill(5)+'.h5'\n fluc_mesh = h5.File(flucf,'r')\n\n self.n_plane = fluc_mesh['dpot'].shape[1]\n dn = int(self.n_plane/self.n_cross_section)\n self.center_planes = np.arange(self.n_cross_section)*dn\n\n self.planes = np.unique(np.array([np.unique(self.prevplane),np.unique(self.nextplane)]))\n self.planeID = {self.planes[i]:i for i in range(len(self.planes))} #the dictionary contains the positions of each chosen plane, useful when we want to get the data on a given plane known only its plane number in xgc file.\n\n #initialize the arrays\n if(self.HaveElectron):\n self.nane = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n nane_all = np.zeros((self.n_plane,len(self.time_steps),len(self.mesh['R'])))\n if(self.load_ions):\n self.dni = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n dni_all = np.zeros((self.n_plane,len(self.time_steps),len(self.mesh['R'])))\n self.phi = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n phi_all = np.zeros((self.n_plane,len(self.time_steps),len(self.mesh['R'])))\n\n #load all the rest of the files\n for i in range(1,len(self.time_steps)):\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[i]).zfill(5)+'.h5'\n fluc_mesh = h5.File(flucf,'r')\n for j in range(self.n_plane):\n phi_all[j,i] += np.swapaxes(fluc_mesh['dpot'][...][:,j],0,1)\n if(self.HaveElectron):\n nane_all[j,i] += np.swapaxes(fluc_mesh['eden'][...][:,j],0,1)\n if(self.load_ions):\n dni_all[j,i] += np.swapaxes(fluc_mesh['iden'][...][:,j],0,1)\n fluc_mesh.close()\n\n\n #similar to the 2D case, we take care of the equilibrium relaxation contribution. See details in the comments in 2D loading function.\n\n phi_avg_tor = np.average(phi_all,axis = 0)\n if self.HaveElectron:\n nane_avg_tor = np.average(nane_all,axis=0)\n if self.load_ions:\n dni_avg_tor = np.average(dni_all,axis=0)\n\n for j in range(self.n_cross_section):\n self.phi[j,...] = np.swapaxes(phi_all[(self.center_planes[j] + self.planes)%self.n_plane,:,:],0,1) - phi_avg_tor[:,np.newaxis,:]\n if self.HaveElectron:\n self.nane[j,...] = np.swapaxes(nane_all[(self.center_planes[j] + self.planes)%self.n_plane,:,:],0,1) - nane_avg_tor[:,np.newaxis,:]\n if self.load_ions:\n self.dni[j,...] 
= np.swapaxes(dni_all[(self.center_planes[j] + self.planes)%self.n_plane,:,:],0,1) - dni_avg_tor[:,np.newaxis,:]\n\n self.ne0[:] += np.average(phi_avg_tor,axis=0)\n if self.HaveElectron:\n self.ne0[:] += np.average(nane_avg_tor,axis=0)\n self.ni0[:] += np.average(phi_avg_tor,axis=0)\n if self.load_ions:\n self.ni0[:] += np.average(dni_avg_tor,axis=0)\n\n return 0", "def _load_calibration_data():\n\tpickle_dict = pickle.load(open('./calibration.p', 'rb'))\n\tmtx = pickle_dict['mtx']\n\tdist = pickle_dict['dist']\n\treturn mtx, dist", "def get_properties(filename):\n # define dictionairies and constants\n properties = dict()\n path = \"log/\"\n # to convert Hartree/Bohr to (kcal/mol)/Angstrom\n conv = 27.2114/0.529\n\n # open file with names and energies\n f = open(filename, \"r\")\n lines = f.readlines()\n f.close()\n\n # loop through compounds\n for line in lines:\n forces\t\t= np.array([])\n xyz\t\t\t\t= np.array([])\n tokens\t\t= line.split()\n\n name\t\t\t= tokens[0]\n energy\t\t= float(tokens[1])*27.2114\n #print(name, energy)\n\n # get xyz coordinates\n f_xyz\t\t\t= open(\"xyz/\" + name + \".xyz\")\n ls\t\t\t= f_xyz.readlines()\n f_xyz.close()\n\n for i,l in enumerate(ls):\n if i == 0:\n nAtoms = int(l)\n #if i == 1: continue\n #tokens\t= l.split()\n #xyz\t\t\t= np.append(xyz, [[tokens[1], tokens[2], tokens[3]]])\n #xyz\t\t\t\t= xyz.reshape(nAtoms,3)\n\n # open orca output file to get the forces\n f_log\t\t\t= open(path + name + \".log\", \"r\")\n lines\t\t\t= f_log.readlines()\n f_log.close()\n\n # find line with the final forces\n #index\t\t\t= lines.index('The cartesian gradient:\\n')\n # index\t\t\t= lines.index('The final MP2 gradient\\n')\n index\t\t\t= lines.index('CARTESIAN GRADIENT\\n')\n\n # store forces in a 14x3 np.array\n# for line in lines[index+1:index+nAtoms+1]:\n for line in lines[index+3:index+nAtoms+3]:\n tokens\t= line.split()\n# forces\t= np.append(forces, [[float(tokens[1])*conv, float(tokens[2])*conv, float(tokens[3])*conv]])\n forces\t= np.append(forces, [[float(tokens[3])*conv, float(tokens[4])*conv, float(tokens[5])*conv]])\n forces\t\t= forces.reshape(nAtoms,3)\n\n # store name, energy and forces in a dictionairy\n xyz = []\n properties[name] = [energy, forces, xyz]\n\n return properties", "def load_fluctuations_2D_all(self):\n if(self.HaveElectron):\n self.nane = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.mesh['R'])) )\n self.nane_bar = np.zeros((len(self.time_steps)))\n if(self.load_ions):\n self.dni = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.mesh['R'])) )\n self.dni_bar = np.zeros((len(self.time_steps)))\n\n self.phi = np.zeros((self.n_cross_section,len(self.time_steps),len(self.mesh['R'])))\n self.phi_bar = np.zeros((len(self.time_steps)))\n for i in range(len(self.time_steps)):\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[i]).zfill(5)+'.h5'\n fluc_mesh = h5.File(flucf,'r')\n if (i == 0):\n self.n_plane = fluc_mesh['dpot'].shape[1]\n dn = int(self.n_plane/self.n_cross_section)\n self.planes = np.arange(self.n_cross_section) * dn\n\n self.phi_bar[i] = np.mean(fluc_mesh['dpot'][...])\n if(self.HaveElectron):\n self.nane_bar[i] = np.mean(fluc_mesh['eden'][...])\n if(self.load_ions):\n self.dni_bar[i] = np.mean(fluc_mesh['iden'][...])\n for j in range(self.n_cross_section):\n self.phi[j,i] += np.swapaxes(fluc_mesh['dpot'][...][:,self.planes[j]],0,1)\n self.phi[j,i] -= self.phi_bar[i]\n\n if(self.HaveElectron):\n self.nane[j,i] += np.swapaxes(fluc_mesh['eden'][...][:,self.planes[j]],0,1)\n self.nane[j,i] -= 
self.nane_bar[i]\n if(self.load_ions):\n self.dni[j,i] += np.swapaxes(fluc_mesh['iden'][...][:,self.planes[j]],0,1)\n self.dni[j,i] -= self.dni_bar[i]\n fluc_mesh.close()\n\n\n\n\n return 0", "def Flux_init(self):\n f = open(self.fln,'r')\n lines = f.readlines()\n # We read the header line containing the number of temperatures (n_temp), logg (n_logg) and mu=cos(angle) (n_mu)\n n_temp, n_logg, n_mu = lines[1].split()[:3]\n n_temp = int(n_temp)\n n_logg = int(n_logg)\n n_mu = int(n_mu)\n # There should be 3 lines per grid point (temp,logg,mu): the info line and two flux lines\n # To that, we must subtract the comment line, the header line and two lines for the mu values\n if (n_temp*abs(n_logg)*3) != len(lines)-4:\n print('It appears that the number of lines in the file is weird')\n return None\n # Read the mu values\n mu = np.array(lines[2].split()+lines[3].split(),dtype=float)\n # Read the info line for each grid point\n hdr = []\n grid = []\n for i in np.arange(4,len(lines),3):\n hdr.append(lines[i].split())\n grid.append(lines[i+1].split()+lines[i+2].split())\n hdr = np.array(hdr,dtype=float)\n grid = np.log(np.array(grid,dtype=float)/(cts.c*100)*self.wav**2)\n hdr.shape = (n_temp,abs(n_logg),hdr.shape[1])\n grid.shape = (n_temp,abs(n_logg),n_mu)\n logtemp = np.log(hdr[:,0,0])\n logg = hdr[0,:,1]\n leff = hdr[0,0,2]\n #jl = hdr[:,:,3]\n h = hdr[:,:,4]\n #bl = hdr[:,:,5]\n #self.hdr = hdr\n self.grid = grid\n self.logtemp = logtemp\n self.logg = logg\n self.mu = mu\n self.leff = leff\n self.h = h\n return", "def read_cp2k_wfn(self,filename):\n with open(filename, mode='rb') as file:\n self.fileContent = file.read()\n line = self.readline()\n self.natom_read, \\\n self.nspin_read, \\\n self.nao_read, \\\n self.nset_max, \\\n self.nshell_max \\\n = struct.unpack(\"IIIII\",line)\n line = self.readline()\n self.nset_info = np.array(struct.unpack( \\\n \"I\"*self.natom_read,line))\n line = self.readline()\n self.nshell_info = np.array(struct.unpack( \\\n \"I\"*self.natom_read*self.nset_max,line))\n line = self.readline()\n self.nso_info = np.array(struct.unpack( \\\n \"I\"*self.natom_read*self.nset_max*self.nshell_max,line))\n self.vecs_all = []\n self.nmo_all = []\n self.homo_all = []\n self.lfomo_all = []\n self.nelectron_all = []\n self.evals_all = []\n self.occups_all = []\n for i in range(self.nspin_read):\n vecs_spin = []\n line = self.readline()\n if not line:\n break\n nmo,homo,lfomo,nelectron = \\\n struct.unpack(\"IIII\",line)\n self.nmo_all.append(nmo)\n self.homo_all.append(homo)\n self.lfomo_all.append(lfomo)\n self.nelectron_all.append(nelectron)\n line = self.readline()\n evals = np.array(struct.unpack(\"d\"*nmo,line[:8*nmo]))\n occups = np.array(struct.unpack(\"d\"*nmo,line[8*nmo:]))\n self.evals_all.append(evals)\n self.occups_all.append(occups)\n for i in range(nmo):\n line = self.readline()\n vec = np.array(struct.unpack(\"d\"*self.nao_read,line))\n vecs_spin.append(vec)\n self.vecs_all.append(vecs_spin)", "def updateCoeffs(self, coeffs):\n\t\traise NotImplementedError(\"\")", "def _calculate_coeffs(self):\n for joint in self._joint_names:\n self._ovrl_disp[joint] = self._start_pos[joint] - self._end_pos[joint]\n self._coeffs[joint] = [(2*self._ovrl_disp[joint])/(self._motion_time ** 3), (3*-self._ovrl_disp[joint])/(self._motion_time ** 2)]", "def calc_lds(name, response_function, model, s_met, s_grav, s_teff,\n s_vturb, min_w=None, max_w=None, atlas_correction=True,\n photon_correction=True, interpolation_order=1, fout=None):\n print('\\n\\t Reading response functions\\n\\t 
--------------------------')\n\n # Get the response file minimum and maximum wavelengths and all the\n # wavelengths and values:\n min_w, max_w, S_wav, S_res = get_response(min_w, max_w, response_function)\n\n ######################################################################\n # IF USING ATLAS MODELS....\n ######################################################################\n if 'A' in model:\n # Search for best-match ATLAS9 model for the input stellar parameters:\n print('\\n\\t ATLAS modelling\\n\\t ---------------\\n'\n '\\t > Searching for best-match Kurucz model ...')\n chosen_filename, chosen_teff, chosen_grav, chosen_met, \\\n chosen_vturb = ATLAS_model_search(s_met, s_grav, s_teff, s_vturb)\n\n # Read wavelengths and intensities (I) from ATLAS models.\n # If model is \"A100\", it also returns the interpolated\n # intensities (I100) and the associated mu values (mu100).\n # If not, those arrays are empty:\n wavelengths, I, mu = read_ATLAS(chosen_filename, model)\n\n # Now use these intensities to obtain the (normalized) integrated\n # intensities with the response function:\n I0 = integrate_response_ATLAS(wavelengths, I, mu, S_res,\n S_wav, atlas_correction, photon_correction,\n interpolation_order, model)\n\n # Finally, obtain the limb-darkening coefficients:\n if model == \"AS\":\n idx = mu >= 0.05 # Select indices as in Sing (2010)\n else:\n idx = mu >= 0.0 # Select all\n\n ######################################################################\n # IF USING PHOENIX MODELS....\n ######################################################################\n elif 'P' in model:\n # Search for best-match PHOENIX model for the input stellar parameters:\n print('\\n\\t PHOENIX modelling\\n\\t -----------------\\n'\n '\\t > Searching for best-match PHOENIX model ...')\n chosen_path, chosen_teff, chosen_grav, chosen_met, \\\n chosen_vturb = PHOENIX_model_search(s_met, s_grav, s_teff, s_vturb)\n\n # Read PHOENIX model wavelenghts, intensities and mus:\n wavelengths, I, mu = read_PHOENIX(chosen_path)\n\n # Now use these intensities to obtain the (normalized) integrated\n # intensities with the response function:\n I0 = integrate_response_PHOENIX(wavelengths, I, mu, S_res, S_wav,\n photon_correction, interpolation_order)\n\n # Obtain correction due to spherical extension. First, get r_max:\n r, fine_r_max = get_rmax(mu, I0)\n\n # Now get r for each intensity point and leave out those that have r>1:\n new_r = r/fine_r_max\n idx_new = new_r <= 1.0\n new_r = new_r[idx_new]\n # Reuse variable names:\n mu = np.sqrt(1.0-(new_r**2))\n I0 = I0[idx_new]\n\n # Now, if the model requires it, obtain 100-mu points interpolated\n # in this final range of \"usable\" intensities:\n if model == 'P100':\n mu, I100 = get100_PHOENIX(wavelengths, I, mu, idx_new)\n I0 = integrate_response_PHOENIX(wavelengths, I100, mu,\n S_res, S_wav, photon_correction, interpolation_order)\n\n # Now define each possible model and fit LDs:\n if model == 'PQS': # Quasi-spherical model (Claret et al. 
2012)\n idx = mu >= 0.1\n elif model == 'PS': # Sing method\n idx = mu >= 0.05\n else:\n idx = mu >= 0.0\n\n # Now compute each LD law:\n c1, c2, c3, c4 = fit_non_linear(mu, I0)\n a = fit_linear(mu[idx], I0[idx])\n u1, u2 = fit_quadratic(mu[idx], I0[idx])\n b1, b2, b3 = fit_three_parameter(mu[idx], I0[idx])\n l1, l2 = fit_logarithmic(mu[idx], I0[idx])\n e1, e2 = fit_exponential(mu[idx], I0[idx])\n s1, s2 = fit_square_root(mu[idx], I0[idx])\n # Make this correction:\n if model == 'PQS':\n c1, c2, c3, c4 = fit_non_linear(mu[idx], I0[idx])\n\n # Stack all LD coefficients into one single tuple:\n LDC = a, u1, u2, b1, b2, b3, c1, c2, c3, c4, l1, l2, e1, e2, s1, s2\n\n # Save to the file:\n if fout is not None:\n fout.write(70*\"#\" + \"\\n\")\n fout.write(\"{:s} {:s} {:s}\\nTeff={:.1f}K log(g)={:.1f} \"\n \"[M/H]={:.1f} vturb={:.1f}\\n\\n\".format(name, model,\n response_function, chosen_teff, chosen_grav,\n chosen_met, chosen_vturb))\n fout.write(\"a = {:12.8f}\\n\"\n \"u1, u2 = {:11.8f}, {:11.8f}\\n\"\n \"b1, b2, b3 = {:11.8f}, {:11.8f}, {:11.8f}\\n\"\n \"c1, c2, c3, c4 = {:11.8f}, {:11.8f}, {:11.8f}, {:11.8f}\\n\"\n \"l1, l2 = {:11.8f}, {:11.8f}\\n\"\n \"e1, e2 = {:11.8f}, {:11.8f}\\n\"\n \"s1, s2 = {:11.8f}, {:11.8f}\\n\\n\".format(*LDC))\n\n print('\\t > Done! \\n\\t {:s}\\n'.format(70*'#'))\n return LDC", "def photoxRead(ions):\n #\n zion = util.convertName(ions)\n if zion['Z'] < zion['Ion']:\n print((' this is a bare nucleus that has no ionization rate'))\n return\n #\n fname = util.ion2filename(ions)\n paramname = fname+'.photox'\n input = open(paramname,'r')\n lines = input.readlines()\n input.close\n # get number of energies\n# neng = int(lines[0][0:6])\n dataEnd = 0\n lvl1 = []\n lvl2 = []\n energy = []\n cross = []\n icounter = 0\n while not dataEnd:\n lvl11 = int(lines[icounter][:8])\n lvl21 = int(lines[icounter][8:15])\n ener = lines[icounter][15:].split()\n energy1 = np.asarray(ener, 'float64')\n #\n icounter += 1\n irsl = int(lines[icounter][:8])\n ind0 = int(lines[icounter][8:15])\n if irsl != lvl11 or ind0 != lvl21:\n # this only happens if the file was written incorrectly\n print((' lvl1, lvl2 = %7i %7i'%(lvl11, lvl21)))\n print((' irsl, indo = %7i %7i'%(irsl, ind0)))\n return\n crs = lines[icounter][15:].split()\n cross1 = np.asarray(crs, 'float64')\n lvl1.append(lvl11)\n lvl2.append(lvl21)\n energy.append(energy1)\n cross.append(cross1)\n icounter += 1\n dataEnd = lines[icounter].count('-1')\n ref = lines[icounter+1:-1]\n cross = np.asarray(cross, 'float64')\n energy = np.asarray(energy, 'float64')\n return {'lvl1':lvl1, 'lvl2':lvl2,'energy':energy, 'cross':cross, 'ref':ref}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that the predicted binding value falls within the specified (min, max) range or raise an assertion error.
def check_binding_value(self, value):
    assert isinstance(value, (int, float)), \
        "Expected float for binding value, got %s : %s" % (
            value, type(value))
    if self.min_inclusive:
        assert value >= self.min_value, \
            "Given value (%s) too low (min_value=%s)" % (
                value, self.min_value)
    else:
        assert value > self.min_value, \
            "Given value (%s) too low (min_value=%s)" % (
                value, self.min_value)
    if self.max_inclusive:
        assert value <= self.max_value, \
            "Given value (%s) too high (max_value=%s)" % (
                value, self.max_value)
    else:
        assert value < self.max_value, \
            "Given value (%s) too high (max_value=%s)" % (
                value, self.max_value)
[ "def _check_bounds(value, varname, minmax=(0, 1)):\n if value < minmax[0] or value > minmax[1]:\n raise AssertionError(f\"{varname} is not in {minmax}\")", "def _check_bounds(lower_bound: float, upper_bound: float) -> None:\n if lower_bound > upper_bound:\n raise ValueError(\n f\"Lower bound'{str(lower_bound)}' cannot be bigger then the upper bound '{str(upper_bound)}'.\"\n )", "def test_predict_bounds(self):\n self._fit()\n self.instance.set_min_max(x=self.x, y=self.y, is_predicted=False)\n lower, upper = self.instance.predict_bounds(x=self.x, is_predicted=False, agg_point=False)\n\n # verify shape\n assert lower.shape == self.y.shape\n assert upper.shape == self.y.shape\n\n # verify data type\n assert lower.dtype == numpy.float32\n assert upper.dtype == numpy.float32\n\n # verify value range\n assert (lower <= upper).all()\n assert ((lower - EPSILON) <= self.y).all()\n assert (self.y <= (upper + EPSILON)).all()", "def assertBetweenInclusive(a, min_value, max_value):\n assertGreaterEqual(a, min_value)\n assertLessEqual(a, max_value)", "def check_valid_bound(value, name):\n if value is None:\n return\n if math.isnan(value):\n raise InvalidArgument(u'Invalid end point %s %r' % (value, name))", "def check_valid_interval(lower_bound, upper_bound, lower_name, upper_name):\n if lower_bound is None or upper_bound is None:\n return\n if upper_bound < lower_bound:\n raise InvalidArgument(\n 'Cannot have %s=%r < %s=%r' % (\n upper_name, upper_bound, lower_name, lower_bound\n ))", "def assert_numeric_range_limit(self, field, label, lower, upper):\n ##in range\n self.assert_cmd('{{\"limit\": {{\"{0}\": {{\"range\": {{\"upper\": {1}, \"lower\": {2}}}}}}}, \"spec\": {{\"{0}\": {{\"upper\": {1}, \"lower\": {2}}}}}}}'.format(field, upper, lower))\n self.assert_cmd('{{\"limit\": {{\"{0}\": {{\"range\": {{\"upper\": {1}, \"lower\": {2}}}}}}}, \"spec\": {{\"{0}\": {{\"upper\": {3}, \"lower\": {4}}}}}}}'.format(field, upper, lower, upper-1, lower+1))\n ##out of range\n expected_error_lower = [\"{0} (lower bound) not in {1}..{2}\".format(label, lower, upper)]\n expected_error_upper = [\"{0} (upper bound) not in {1}..{2}\".format(label, lower, upper)]\n self.assert_cmd('{{\"limit\": {{\"{0}\": {{\"range\": {{\"upper\": {1}, \"lower\": {2}}}}}}}, \"spec\": {{\"{0}\": {{\"upper\": {2}, \"lower\": {3}}}}}}}'.format(field, upper, lower, lower-1), expected_valid=False, expected_errors=expected_error_lower)\n self.assert_cmd('{{\"limit\": {{\"{0}\": {{\"range\": {{\"upper\": {1}, \"lower\": {2}}}}}}}, \"spec\": {{\"{0}\": {{\"upper\": {3}, \"lower\": {1}}}}}}}'.format(field, upper, lower, upper+1), expected_valid=False, expected_errors=expected_error_upper)\n self.assert_cmd('{{\"limit\": {{\"{0}\": {{\"range\": {{\"upper\": {1}, \"lower\": {2}}}}}}}, \"spec\": {{\"{0}\": {{\"upper\": {3}, \"lower\": {4}}}}}}}'.format(field, upper, lower, upper+2, upper+1), expected_valid=False, expected_errors=expected_error_upper+expected_error_lower)\n self.assert_cmd('{{\"limit\": {{\"{0}\": {{\"range\": {{\"upper\": {1}, \"lower\": {2}}}}}}}, \"spec\": {{\"{0}\": {{\"upper\": {3}, \"lower\": {4}}}}}}}'.format(field, upper, lower, lower-1, lower-2), expected_valid=False, expected_errors=expected_error_upper+expected_error_lower)\n ##not specified\n self.assert_cmd('{{\"limit\": {{\"{0}\": {{\"range\": {{\"upper\": {1}, \"lower\": {2}}}}}}}, \"spec\": {{}}}}'.format(field, upper, lower))\n ##invert\n self.assert_cmd('{{\"limit\": {{\"{0}\": {{\"invert\": true,\"range\": {{\"upper\": {1}, \"lower\": {2}}}}}}}, \"spec\": 
{{\"{0}\": {{\"upper\": {3}, \"lower\": {4}}}}}}}'.format(field, upper, lower, upper-1, lower+1), expected_valid=False)\n self.assert_cmd('{{\"limit\": {{\"{0}\": {{\"invert\": true,\"range\": {{\"upper\": {1}, \"lower\": {2}}}}}}}, \"spec\": {{\"{0}\": {{\"upper\": {2}, \"lower\": {3}}}}}}}'.format(field, upper, lower, lower-1), expected_valid=False)\n self.assert_cmd('{{\"limit\": {{\"{0}\": {{\"invert\": true,\"range\": {{\"upper\": {1}, \"lower\": {2}}}}}}}, \"spec\": {{\"{0}\": {{\"upper\": {3}, \"lower\": {1}}}}}}}'.format(field, upper, lower, upper+1), expected_valid=False)\n self.assert_cmd('{{\"limit\": {{\"{0}\": {{\"invert\": true,\"range\": {{\"upper\": {1}, \"lower\": {2}}}}}}}, \"spec\": {{\"{0}\": {{\"upper\": {3}, \"lower\": {4}}}}}}}'.format(field, upper, lower, upper+2, upper+1))\n self.assert_cmd('{{\"limit\": {{\"{0}\": {{\"invert\": true,\"range\": {{\"upper\": {1}, \"lower\": {2}}}}}}}, \"spec\": {{\"{0}\": {{\"upper\": {3}, \"lower\": {4}}}}}}}'.format(field, upper, lower, lower-1, lower-2))", "def hasValidRange(*args, **kwargs):\n \n pass", "def test_invalid_range():\n with pytest.raises(ValueError):\n # Test with too-low value\n assert calculate_E_min(B_degrees=-10)\n with pytest.raises(ValueError):\n # Test with too-high value\n assert calculate_E_min(B_degrees=1_000)", "def assert_between(self, value, low, high, msg=None):\n if msg is None:\n self.longMessage = False\n msg = '{!r} is not between {!r} and {!r}'.format(value, low, high)\n self.assertGreaterEqual(value, low, msg=msg)\n self.assertLessEqual(value, high, msg=msg)", "def verifyRanges(obj, data, ranges):\n ((minLower, minUpper), (maxLower, maxUpper)) = ranges\n obj.assertGreaterEqual(min(data), minLower)\n obj.assertLess(min(data), minUpper)\n obj.assertGreaterEqual(max(data), maxLower)\n obj.assertLess(max(data), maxUpper)", "def check_bounds(self):\n if np.isnan(self.value).all():\n return\n if np.isnan(self.bounds).all():\n return\n if np.bitwise_or(self.value < self.bounds[0], self.value > self.bounds[-1]).any(): #pylint: disable=unsubscriptable-object\n raise ValueError(\"Value outside bounds: %.s [%s,%s]\" % (self.value, self.bounds[0], self.bounds[-1])) #pylint: disable=unsubscriptable-object", "def assert_that_pv_is_within_range(self, pv, min_value, max_value, timeout=None):\n def _condition(val):\n return min_value <= float(val) <= max_value\n\n message = \"Expected PV value to between {} and {}\".format(min_value, max_value)\n return self.assert_that_pv_value_causes_func_to_return_true(pv, _condition, timeout, message)", "def IsInBounds( value, min_, max_ ):\n \n return min_ <= value <= max_", "def bounded(self):\n return self.lower > -np.inf or self.upper < np.inf", "def test_no_prediction_time_outside_min_and_max_date(\n self, sampler: BinnedUniformSampler, raw_data: pd.DataFrame\n ):\n sampled = sampler.generate_samples(raw_data)\n max_date = sampler.max_date\n min_date = sampler.min_date\n assert np.all(sampled.prediction_time > min_date)\n assert np.all(sampled.prediction_time < max_date)", "def CheckBounds(self, ):\n ...", "def _check_user_metric_value_range(value_range: UserMetricValueRange):\n py_typecheck.check_type(value_range, tuple, 'range')\n value_range = typing.cast(\n Union[tuple[float, float], tuple[int, int]], value_range\n )\n if len(value_range) != 2:\n raise ValueError(\n 'Ranges must be defined as a 2-tuple, got a tuple of '\n f'length {len(value_range)}.'\n )\n\n lower, upper = value_range\n py_typecheck.check_type(lower, (int, float), 'lower bound')\n if type(upper) is 
not type(lower):\n raise TypeError(\n 'The lower bound threshold should have the same type as '\n 'the upper bound threshold, but found the lower bound '\n f'threshold type {type(lower)} and the upper bound '\n f'threshold type {type(upper)}.'\n )", "def test_bounds_decorator_simple():\n # val_a must be between [0,1]\n\n @check_bound_at_run\n def _check_simple(val_a: (0, 1)):\n return val_a + 0\n\n for val in [-10, 1000, -100.0, 1000.2, \"\", (1,), None, [], {}, float(\"nan\")]:\n # these should fail\n try:\n print(val)\n _check_simple(val)\n raise EnvironmentError(\"Error: {} should not be valid\".format(val))\n except TypeError:\n pass\n except ValueError:\n pass\n\n print()\n for val in [0, 0.0, 0.5, 1, 1.0]:\n print(val)\n _check_simple(val)", "def validateMinMax(self, value, min_value, max_value):\n for val in (value, min_value, max_value):\n self.validateType(val)\n if not (value >= min_value and value <= max_value):\n raise ValueError(\n f'value should be between {min_value} and {max_value}, {value} given')\n return value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Is the predicted binding value stronger than the given cutoff?
def value_is_binder(self, value, cutoff):
    self.check_binding_value(value)
    if self.cutoff_is_upper_bound:
        return value <= cutoff
    else:
        return value >= cutoff
[ "def test_broker_weight_cv(self):\n assert abs(self.state.broker_weight_cv - 0.4040) < 1e-4", "def test_broker_leader_weight_cv(self):\n assert abs(self.state.broker_leader_weight_cv - 1.3030) < 1e-4", "def spread_to_binary(spread, cutoff):\n return spread > cutoff", "def has_weight(self) -> bool:\n return self.weight > 0.0", "def passes_bayes_filter(self, cutoff=100.0):\n return True if self._bayes > float(cutoff) else False", "def test_annual_weight_loss_decreases_weight(self):\n loc = (2,7)\n i = Island()\n a_sim = Herbivore(i, loc)\n initial_weight = a_sim.weight\n a_sim.annual_weight_loss()\n new_weight = a_sim.weight\n\n assert initial_weight > new_weight", "def loss(min_offer, predicted):\n return MAX_GAIN-min_offer if predicted < min_offer else predicted - min_offer", "def IsStable(pass_rate, lower_flake_threshold, upper_flake_threshold):\n assert upper_flake_threshold > lower_flake_threshold\n return (TestDoesNotExist(pass_rate) or\n pass_rate < lower_flake_threshold + flake_constants.EPSILON or\n pass_rate > upper_flake_threshold - flake_constants.EPSILON)", "def is_to_close(self):\n return self.current_dist_human <= self.human_threshold", "def closest_obs_blender(low_value: Any, high_value: Any, coeff: float):\n return low_value if coeff < 0.5 else high_value", "def is_threshold_reach(self):\n return self.accumulate > self.threshold", "def is_better(self, model, other):\n if model is None:\n return False\n if other is None:\n return True\n return model.META[\"loss\"] < other.META[\"loss\"]", "def check_backtrack(self):\n differential = self.character.stats[4] - self.dungeonlevel\n if differential < 0:\n cutoff = float(3 - differential) / float(6 - 6 * differential)\n else:\n cutoff = float(3 + 5 * differential) / float(6 + 6 * differential)\n return random.random() < cutoff", "def get_strenght(self):\n return 10 - self.get_agility()", "def low_AGWRC(self, value = 0.88):\n\n if any([p.AGWRC < value for p in self.postprocessor.hspfmodel.perlnds]):\n print('Some of the PERLNDs have an AGWRC less than 0.88, ' +\n ' which is very low. Consider increasing these values.\\n')\n return True\n\n return False", "def rem_predicted(self, value):\r\n for bin_ in sorted(self.bins):\r\n if value <= bin_:\r\n self.bins[bin_]['predicted'] -= value/100.0\r\n\r\n break", "def get_bet_on(x, p_cutoff=0.7):\n\n if x['F1_P_Win'] > p_cutoff:\n return 'F1'\n elif x['F2_P_Win'] > p_cutoff:\n return 'F2'\n else:\n return np.nan", "def bounded(self):\n return self.lower > -np.inf or self.upper < np.inf", "def f5(_, target) -> bool:\n return target.get_hp() < 30" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets current seating chart from the Google Sheet.
def get_seating_chart(self):
    seating = {}  # keys = periods, values = 2D arrays
    service = build('sheets', 'v4', credentials=self.credentials)  # Call the Sheets API
    sheet = service.spreadsheets()

    for period in self.schedules[self.year].keys():
        array = []  # Array to hold the names
        ss_range = 'Period {}!B2:G5'.format(period)  # Spreadsheet range
        try:
            result = sheet.values().get(spreadsheetId=self.seating_id, range=ss_range).execute()
            values = result.get('values', [])
        except Exception as e:
            print('Period {}: Failed to read.'.format(period))
            print(e)
        else:
            if not values:
                print('Period {}: No data found.'.format(period))
            else:
                for row in values:
                    array.append(row)
            seating[period] = array
    return seating  # keys = periods, values = 2D arrays
[ "def _workbook(self):\n return self._chart_part.chart_workbook", "def spreadsheets(self):\n # Get the list of spreadsheets\n feed = self.gd_client.GetSpreadsheetsFeed()\n self._PrintFeed(feed)\n return feed", "def get_today_on_duty_name_n_team():\n # print(f'gcp_account_info {json.loads(gcp_account_info)}')\n gcp_account_info = os.getenv('GCP_SERVICE_ACCOUNT_INFO')\n sheet_id = os.getenv('SPREADSHEET_ID')\n sheet_range = os.getenv('SPREADSHEET_RANGE_NAME')\n if not gcp_account_info:\n raise ValueError('No GCP_SERVICE_ACCOUNT_INFO')\n\n if os.path.isfile(gcp_account_info):\n with open(gcp_account_info) as fh:\n gcp_account_info = fh.read()\n\n credentials = service_account.Credentials.from_service_account_info(\n json.loads(gcp_account_info)\n )\n service = googleapiclient.discovery.build('sheets', 'v4', credentials=credentials)\n\n request = service.spreadsheets().values().get(\n spreadsheetId=sheet_id,\n range=sheet_range)\n response = request.execute()\n values = response.get('values', None)\n if values:\n return values[0][0]\n else:\n return None", "def get_series(self):\n return self.series", "def get_charts(self, title=None):\n matched_charts = []\n chart_data = self.client.sheet.get(self.spreadsheet.id, fields='sheets(charts,properties/sheetId)')\n sheet_list = chart_data.get('sheets')\n sheet = [x for x in sheet_list if x.get('properties', {}).get('sheetId') == self.id][0]\n chart_list = sheet.get('charts', [])\n for chart in chart_list:\n if not title or chart.get('spec', {}).get('title', '') == title:\n matched_charts.append(Chart(worksheet=self, json_obj=chart))\n return matched_charts", "def get_google_order_sheet():\n return get_google_sheet(GOOGLE_SHEETS['orders'], 'orders')", "def get_authorized_pygsheets_client():\n credentials = get_credentials()\n pygsheets_client = pygsheets.authorize(custom_credentials=credentials)\n if settings.DRIVE_SHARED_ID:\n pygsheets_client.drive.enable_team_drive(team_drive_id=settings.DRIVE_SHARED_ID)\n return pygsheets_client", "def ActiveSheet(self): \n return self._activeSheet", "def render_chart(self):\n seatings = Seating.objects.filter(dinner=self).order_by('seat_number')\n head = seatings[0]\n foot = seatings[len(seatings)/2]\n second_half = seatings[len(seatings):len(seatings)/2:-1]\n first_half = seatings[1:(len(seatings)/2)]\n if len(first_half) < len(second_half):\n first_half += [{\"person\": \"Empty seat\"}]\n sides = zip(second_half, first_half)\n return {\"head\": head, \"sides\": sides, \"foot\": foot}", "def _get_worksheet(\n key:str,\n worksheet_name:str,\n creds:\"/Users/dzidzi_quist/Desktop/project\"=\"credential.json\",\n ) -> gspread.Worksheet:\n scope = [\"https://spreadsheets.google.com/feeds\",\n \"https://www.googleapis.com/auth/drive\"]\n credentials = ServiceAccountCredentials.from_json_keyfile_name(creds, scope)\n gc = gspread.authorize(credentials)\n wb = gc.open_by_key(key)\n sheet = wb.worksheet(worksheet_name)\n return sheet", "def get_chart_data(cls, chart_id, date=None):\n return billboard.ChartData(chart_id, date)", "def login_to_sheets():\n # use creds to create a client to interact with the Google Drive API\n scope = ['https://spreadsheets.google.com/feeds']\n creds = ServiceAccountCredentials.from_json_keyfile_name('/Users/{}/Downloads/secret_key.json'.format(os.environ[\"USER\"]), scope)\n client = gspread.authorize(creds)\n # Find a workbook by name and open the first sheet\n # Make sure you use the right name here.\n sheet = client.open(\"water changes Fall 2017\").sheet1\n return sheet", "def 
series(self):\n return SeriesCollection(self._chartSpace)", "def get_chart(symbol: str, chart_range: str) -> dict:\n logging.info(\n \"Retrieving /chart data for symbol '%s' and range '%s'.\", symbol, chart_range\n )\n\n response = requests.get(\n url=f\"{IEX_ENDPOINT}/stock/{symbol}/chart/{chart_range}\",\n params={\n \"token\": IEX_TOKEN,\n \"chartCloseOnly\": True,\n \"changeFromClose\": True,\n },\n )\n\n if not response.ok:\n logging.error(\n \"IEX /chart API call failed with status code %d.\", response.status_code\n )\n\n return response.json()", "def get_chart_data(self):\n constructor = HighchartsConstructor(self.graph_document)\n constructor.process()\n return constructor.render_as_json()", "def url(self):\n return 'https://docs.google.com/spreadsheets/d/{0}'.format(self.spread.id)", "def get_data(self, g, s):\n return self.get_dataset(g,s).get_data()", "def get_series():\n\n return Series.query.all()", "def update(self):\n print('Updating seating chart...')\n for period in self.periods:\n if period in self.class_lists:\n new_seating, version = self.new_tables(period)\n self.seating_chart[period] = new_seating\n\n # Verify success:\n if new_seating:\n print('Period {}'.format(period))\n for i in range(len(new_seating)):\n print('Table {}: {}'.format(i + 1, new_seating[i]))\n print('Version = {}'.format(version))\n else:\n print('Period {}: Failed to update seating.'.format(period))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets class list for each requested period.
def get_class_lists(self):
    print('Getting class lists...')
    students = {}  # key = periods, values = list of names
    ss_range = 'Summary!B3:H40'  # Spreadsheet range for source sheet.
    service = build('sheets', 'v4', credentials=self.credentials)  # Call the Sheets API
    sheet = service.spreadsheets()

    for period in self.periods:
        class_list = []  # Array to hold the names
        ss_id = self.schedules[self.year][period]['gradebook_id']  # Source spreadsheet ID
        try:
            result = sheet.values().get(spreadsheetId=ss_id, range=ss_range).execute()
            values = result.get('values', [])
        except Exception as e:
            print('Period {}: Failed to read.'.format(period))
            print(e)
        else:
            if not values:
                print('Period {}: No data found.'.format(period))  # Unlikely error.
            else:
                for row in values:
                    if int(row[-1]) == period:
                        class_list.append(row[0].strip() + ' ' + row[1][0].strip() + '.')
                students[period] = class_list
                print('Period {}: {}'.format(period, students[period]))  # Success.
    return students  # keys = periods, values = list of names
[ "def scrap_classes():\n\n config = load_config()\n session = requests.session()\n\n with session.post('https://myclass.apps.binus.ac.id/Auth/Login', data={\n 'Username': config['login']['username'],\n 'Password': config['login']['password'],\n 'btnSubmit': True\n }) as response:\n try:\n assert response.json()['Status']\n except:\n return print('Error: Failed to login to BINUS Classes site!')\n\n with session.get('https://myclass.apps.binus.ac.id/Home/GetViconSchedule') as response:\n result = response.json()\n\n for class_data in result:\n date = class_data['DisplayStartDate']\n time = class_data['StartTime'] + ' - ' + class_data['EndTime']\n\n code = class_data['ClassCode']\n delivery = class_data['DeliveryMode'] + ' - ' + class_data['SsrComponentDescription']\n course = class_data['CourseCode'] + ' - ' + class_data['CourseTitleEn']\n\n week = class_data['WeekSession']\n session = class_data['CourseSessionNumber']\n\n meeting_url = class_data['MeetingUrl']\n meeting_id = class_data['MeetingId']\n meeting_password = class_data['MeetingPassword']\n\n student_class = StudentClass(date, time, code, delivery, course, week, session)\n if meeting_url != '-':\n meeting = MeetingInfo(meeting_id, meeting_password, meeting_url)\n student_class.meeting = meeting\n\n student_classes.append(student_class)", "def __get_class_resources(self):\r\n resources = []\r\n preload_manager = servers.get_preload_manager()\r\n for i in range( preload_manager.getnumclassestopreload() ):\r\n resources.append( preload_manager.getclasstopreload(i) )\r\n return resources", "def getMonthClasses(curs, sid, month, year):\n curs.execute('select * from schedule ' + \n 'inner join classes using (cid) ' + \n 'where sid=%s and monthname(classDate)=%s and year(classDate)=%s group by classDate', (sid, month, year))\n return curs.fetchall()", "def get_results_for_class_session(class_session,\n grading_period_key = None):\n result_list = []\n if class_session:\n result_list = \\\n class_session.gradingperiodresult_set.fetch(500)\n if grading_period_key:\n single_period_results = []\n for res in result_list:\n if (res.grading_period.key() == grading_period_key):\n single_period_results.append(res)\n result_list = single_period_results\n return result_list", "def _list(self, resource_class, zone_name=None, **args):\n\n resources = []\n resource = resource_class()\n resource.gce_project = self\n\n request = self._list_request(resource, zone_name=zone_name, **args)\n while request:\n results = {}\n try:\n results = self._run_request(request)\n except error.GceError:\n raise\n except error.GceTokenError:\n raise\n\n for result in results.get('items', []):\n new_resource = resource_class()\n new_resource.from_json(result)\n resources.append(new_resource)\n\n request = resource.service_resource().list_next(\n self._list_request(resource, zone_name=zone_name, **args),\n results)\n\n return resources", "def classList():\n module_dir = dir(module)\n module_class = []\n for each in module_dir:\n if type(getattr(module, each)) == types.ClassType:\n module_class.append(each)\n return module_class", "def get_classes():\n session = Session()\n classes = session.query(Class)\n session.commit()\n\n return classes", "def get_class_names(self):\n return self.request32('get_class_names')", "def _allInstances(cls):\n return pyalaocl.asSet(_theSession().findByClass(cls))", "def get_classes(path):\n subjects_data = json_to_data(path)\n subjects = subjects_data[\"subjects\"]\n for subject in subjects:\n url = base_url + subject[\"semester\"][\"year\"] 
+ '/' + subject[\n \"semester\"][\"semester\"] + '/' + 'S' + '/' + subject[\n \"degree\"] + '/' + subject[\"institute\"] + '/' + subject[\n \"initials\"]\n driver.get(url)\n container = driver.find_element_by_id('conteudo')\n subject_classes_elements = container.find_elements_by_xpath(\n \"//div[contains(@class, 'turma')]\")\n for subject_class in subject_classes_elements:\n class_obj = {}\n class_obj[\"subject\"] = subject[\"initials\"]\n class_obj[\"class\"] = subject_class.find_element_by_xpath(\n \"*//h3[contains(text(), 'Turma')]//following-sibling::span\"\n ).text\n class_obj[\"positions\"] = subject_class.find_element_by_xpath(\n \"*//span[contains(text(), 'Vagas')]//following-sibling::span\"\n ).text\n try:\n class_obj[\"enrolled\"] = subject_class.find_element_by_xpath(\n \"*//p[contains(text(), 'Número de alunos matriculados')]//following-sibling::span\"\n ).text\n except:\n class_obj[\"enrolled\"] = \"\"\n try:\n class_schedule_list_element = subject_class.find_element_by_class_name(\n 'horariosFormatado')\n class_schedule_elements = class_schedule_list_element.find_elements_by_tag_name(\n 'li')\n class_obj[\"schedule\"] = []\n for li in class_schedule_elements:\n schedule_obj = {}\n schedule_obj[\"day\"] = li.find_element_by_class_name(\n 'diaSemana').text\n schedule_obj[\"time_start\"] = li.find_element_by_class_name(\n 'horarios').text.split('-')[0].strip()\n schedule_obj[\"time_end\"] = li.find_element_by_class_name(\n 'horarios').text.split('-')[-1].strip()\n schedule_obj[\"place\"] = li.find_element_by_class_name(\n 'salaAula').text\n class_obj[\"schedule\"].append(schedule_obj)\n except:\n class_obj[\"schedule\"] = []\n\n try:\n professors_list_element = subject_class.find_element_by_class_name(\n 'docentes')\n professors_elements = professors_list_element.find_elements_by_tag_name(\n 'li')\n class_obj[\"professors\"] = []\n for li in professors_elements:\n class_obj[\"professors\"].append(li.text)\n except:\n class_obj[\"professors\"] = []\n\n try:\n courses_list_element = subject_class.find_element_by_class_name(\n 'reservas')\n courses_elements = courses_list_element.find_elements_by_tag_name(\n 'li')\n class_obj[\"course_reservation\"] = []\n for li in courses_elements:\n course = {}\n course[\"course_number\"] = li.text.split('-')[0].strip()\n course[\"course_name\"] = li.text.split('-')[-1].strip()\n class_obj[\"course_reservation\"].append(course)\n except:\n class_obj[\"course_reservation\"] = []\n data[\"classes\"].append(class_obj)", "def iter_all(class_name):\n ...", "def api_get_classes_for_all_students():\n query = \"SELECT STD.firstname || ' '|| STD.lastname NAME, CLS.course_name CLASSES FROM STUDENT STD\" \\\n \" INNER JOIN SCHEDULE_TABLE SCD\" \\\n \" ON STD.student_id = SCD.student_id\" \\\n \" INNER JOIN CLASS CLS\" \\\n \" ON CLS.crn = SCD.crn;\"\n conn = sqlite3.connect(app.config['DATABASE'], detect_types=sqlite3.PARSE_DECLTYPES)\n conn.row_factory = dict_factory\n cur = conn.cursor()\n results = cur.execute(query).fetchall()\n return jsonify(convert_to_list(results, 'CLASSES'))", "def get_all_classrooms() -> List[classroom_config_domain.Classroom]:\n backend_classroom_models = classroom_models.ClassroomModel.get_all()\n classrooms: List[classroom_config_domain.Classroom] = [\n get_classroom_from_classroom_model(model)\n for model in backend_classroom_models\n ]\n return classrooms", "def get_user_classes(self):\n # Previously getting classes from profile (such a list is incomplete)\n # raw_classes = 
self.get_user_profile().get('all_classes').values()\n\n # Get classes from the user status (includes all classes)\n status = self.get_user_status()\n uid = status['id']\n raw_classes = status.get('networks', [])\n\n classes = []\n for rawc in raw_classes:\n c = {k: rawc[k] for k in ['name', 'term']}\n c['num'] = rawc.get('course_number', '')\n c['nid'] = rawc['id']\n c['is_ta'] = uid in rawc['prof_hash']\n classes.append(c)\n\n return classes", "def getPeriods(cls):\n return cls.__periods", "def get_active_class_sessions(self):\n query = ClassSession.all(keys_only=True)\n query.filter(\"end_date >\", date.today())\n query.filter(\"section =\", self.key())\n keys = query.fetch(20)\n return db.get(keys)", "def get_classes_on_time(day, time):\n out = []\n for unit in units:\n for c in unit.classes:\n if c.multiple:\n for c in c.times:\n if not days.index(c.day_full) == day: continue\n if c.time <= time < c.time + c.hours:\n out.append(unit.name + ' ' + c.code)\n if not days.index(c.day_full) == day: continue\n if c.time <= time < c.time + c.hours:\n out.append(unit.name + ' ' + c.code)\n return out", "def getClassDays():\n conn = cursor()\n conn.execute('select distinct day from classes')\n return conn.fetchall()", "def __set_decision_classes__(self):\n decisions = []\n for decision_object in self.objects:\n decisions.append(decision_object.decision)\n return universal_tools.get_unique_and_frequency(decisions)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates seating for the requested periods.
def update(self):
    print('Updating seating chart...')
    for period in self.periods:
        if period in self.class_lists:
            new_seating, version = self.new_tables(period)
            self.seating_chart[period] = new_seating

            # Verify success:
            if new_seating:
                print('Period {}'.format(period))
                for i in range(len(new_seating)):
                    print('Table {}: {}'.format(i + 1, new_seating[i]))
                print('Version = {}'.format(version))
            else:
                print('Period {}: Failed to update seating.'.format(period))
[ "def update_time_period(self, time_period_form):\n pass", "def write_names(self):\n print('Writing to spreadsheet...')\n service = build('sheets', 'v4', credentials=self.credentials) # Call Google Sheets API.\n\n for period in self.periods:\n if period in self.class_lists:\n seating_update = self.extend_array(copy.deepcopy(self.seating_chart[period]))\n ss_range = 'Period {}!B2:G5'.format(period)\n body = {'values': seating_update, 'majorDimension': 'rows'}\n try:\n result = service.spreadsheets().values().update(spreadsheetId=self.seating_id,\n valueInputOption='RAW',\n range=ss_range,\n body=body).execute()\n except Exception as e:\n print('Period {}: Failed to record names.'.format(period))\n print(e)\n else:\n print(result) # Verify success", "async def _update_prices(self):\n async with self._pg.transaction() as db_conn:\n price_update_id = await self._create_price_update_record(db_conn)\n flights = await self._updater.get_cheapest_flights()\n flights_saved = await self._save_flights(db_conn, flights, price_update_id)\n if flights_saved > 0:\n await self._confirm_successful_update(db_conn, price_update_id)\n else:\n await self._mark_update_failed(db_conn, price_update_id)\n\n # Schedule next update soon if retrieved less than 2/3 of expected number of flights\n next_update_soon = flights_saved < len(self._directions) * self._number_of_days * 2 / 3\n self._schedule_next_update(soon=next_update_soon)", "def get_seating_chart(self):\n seating = {} # keys = periods, values = 2D arrays\n service = build('sheets', 'v4', credentials=self.credentials) # Call the Sheets API\n sheet = service.spreadsheets()\n\n for period in self.schedules[self.year].keys():\n array = [] # Array to hold the names\n ss_range = 'Period {}!B2:G5'.format(period) # Spreadsheet range\n try:\n result = sheet.values().get(spreadsheetId=self.seating_id, range=ss_range).execute()\n values = result.get('values', [])\n except Exception as e:\n print('Period {}: Failed to read.'.format(period))\n print(e)\n else:\n if not values:\n print('Period {}: No data found.'.format(period))\n else:\n for row in values:\n array.append(row)\n seating[period] = array\n return seating # keys = periods, values = 2D arrays", "def update_seattle_lots():\n CONFIG = create_app().config\n db = PostgresWrapper(\n \"host='{PG_HOST}' port={PG_PORT} dbname={PG_DATABASE} \"\n \"user={PG_USERNAME} password={PG_PASSWORD} \".format(**CONFIG))\n\n # grab data from city of seattle DOT\n data = requests.get(\"http://web6.seattle.gov/sdot/wsvcEparkGarageOccupancy/Occupancy.asmx/GetGarageList\",\n params={\"prmGarageID\": \"G\", \"prmMyCallbackFunctionName\": \"\"})\n data = json.loads(data.text.lstrip(\"(\").rstrip(\");\"))\n\n if data:\n db.query(\"\"\"\n UPDATE parking_lots l SET available = d.available\n FROM (VALUES {}) AS d(pid, available)\n WHERE l.partner_name = 'Seattle ePark'\n AND l.partner_id = d.pid\n \"\"\".format(\",\".join([\"('{}',{})\".format(x[\"Id\"], x[\"VacantSpaces\"]) for x in data])))", "def test_api_meetings_update_authenticated(self):\n user = UserFactory()\n meeting = MeetingFactory(name=\"Old name\")\n jwt_token = AccessToken.for_user(user)\n\n response = self.client.put(\n f\"/api/meetings/{meeting.id!s}/\",\n {\n \"name\": \"New name\",\n \"start\": \"2022-07-07T09:00:00Z\",\n \"end\": \"2022-07-07T10:00:00Z\",\n },\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)\n meeting.refresh_from_db()\n self.assertEqual(meeting.name, \"Old name\")", "def update(self, policy):", "async 
def updateratings(self, ctx):\n await ctx.channel.send(embed=self.embed(\"Updating ratings... Please wait.\"))\n await self.update_ratings(ctx)", "def _update(self):\n self._update_assets()\n self._update_funds()", "def update(s):\n s.getPlaneState()\n s.horizon()\n s.FPM()\n s.instruments()", "def update_duration(self):\n for slot in self.event.wip_schedule.talks.filter(\n submission=self, start__isnull=False\n ):\n slot.end = slot.start + dt.timedelta(minutes=self.get_duration())\n slot.save()", "def set_indexed_current_price_and_period(self, index: int):\n self.currentPeriod = self.data[index]\n self.currentPrice = self.data[index]['open']", "def update(self, Name=None, NumberOfSegmentsV6=None):\n # type: (str, int) -> BgpSRTEPoliciesSegmentListV6\n return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))", "def update(self, instance, validated_data):\n\n instance.byweekday = validated_data.get('byweekday', instance.byweekday)\n instance.dstart = validated_data.get('dstart', instance.dstart)\n instance.tstart = validated_data.get('tstart', instance.tstart)\n instance.tend = validated_data.get('tend', instance.tend)\n instance.until = validated_data.get('until', instance.until)\n instance.is_repetition = validated_data.get('is_repetition', instance.is_repetition)\n instance.fallback_id = validated_data.get('fallback_id', instance.fallback_id)\n instance.automation_id = validated_data.get('automation_id', instance.automation_id)\n instance.rrule = validated_data.get('rrule', instance.rrule)\n instance.show = validated_data.get('show', instance.show)\n\n instance.save()\n return instance", "def _set_update_period(self, update_period):\n self._parent._write(self, DacBase._COMMAND_SET_UPDATE_PERIOD.format(update_period))\n self._update_period = update_period", "def update_federation(self, federation_id, values):", "def perform_update(self, serializer):\n new_date = timezone.now() + relativedelta(months=1)\n serializer.save(pay_day=new_date, partial=True)", "def update(self):\n self.meat = self.get_live_meat()\n self.verification_status = self.STATUS_NOT_CHANGED\n self.verified_at = datetime.datetime.now()\n self.save()", "def steam_game_timeperiod_setter(\n\tsteamgame: steam_game_db.SteamGame,\n\tperiod_id: str,\n\tstatus: str,\n\tstart: datetime.date,\n\tend: datetime.date,\n\tlogger: logging.Logger,\n\t**kwargs\n\t) -> None:\n\t##Documentation\n\n\t##Checking for repeated ids\n\tIdList=[Period.period_id for Period in steamgame.periods]\n\tif period_id not in IdList:\n\t\tlogger.info('New TimePeriod: {}'.format(period_id))\n\t\tTimePeriod=steam_game_db.TimePeriod()\n\t\tModFlag=False\n\telse:\n\t\tlogger.info('Modifying TimePeriod: {}'.format(period_id))\n\t\tfor i,period in enumerate(steamgame.periods):\n\t\t\tif period.period_id==period_id:\n\t\t\t\tTimePeriod=period\n\t\t\t\tsteamgame.periods.pop(i)\n\t\t\t\tbreak\n\t\tModFlag=True\n\n\t##Checking for wrong ending dates\n\tif start>end:\n\t\traise ValueError('End is earlier than start')\n\n\t##Checking for unkwown status\n\tSTATUS_DICT={\n\t\t'i':'inactive',\n\t\t's':'stagnant',\n\t\t'a':'active (unknown cause)',\n\t\t'al':'active (launch)',\n\t\t'as':'active (sales)',\n\t\t'aw':'active (free weekend)'\n\t\t}\n\tif status not in STATUS_DICT.keys():\n\t\traise ValueError('Unknown status')\n\n\t##Setting properties\n\tTimePeriod.period_id=period_id\n\tTimePeriod.status=status\n\tTimePeriod.start=start\n\tTimePeriod.end=end\n\tif 'av' in kwargs.keys():\n\t\tTimePeriod.average_players=kwargs['av']\n\tif 'dev' in 
kwargs.keys():\n\t\tTimePeriod.deviation_players=kwargs['dev']\n\n\t##Appending to period list and saving in db\n\tsteamgame.periods.append(TimePeriod)\n\tlogger.info(\n\t\t\"\"\"\n\t\tPeriod {} for game {} correctly saved\n\t\t\"\"\".format(period_id,steamgame.appid)\n\t\t)\n\treturn steamgame" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes updated seating to Google Sheet.
def write_names(self):
    print('Writing to spreadsheet...')
    service = build('sheets', 'v4', credentials=self.credentials)  # Call Google Sheets API.

    for period in self.periods:
        if period in self.class_lists:
            seating_update = self.extend_array(copy.deepcopy(self.seating_chart[period]))
            ss_range = 'Period {}!B2:G5'.format(period)
            body = {'values': seating_update, 'majorDimension': 'rows'}
            try:
                result = service.spreadsheets().values().update(spreadsheetId=self.seating_id,
                                                                valueInputOption='RAW',
                                                                range=ss_range,
                                                                body=body).execute()
            except Exception as e:
                print('Period {}: Failed to record names.'.format(period))
                print(e)
            else:
                print(result)  # Verify success
[ "def update_google(season_data, sheet):\n pass", "def editGoogleSheet(client, data, timeStamp):\r\n\r\n #get the current worksheet\r\n worksheet_feed = client.GetWorksheetsFeed(config.speedsheet_id)\r\n\r\n for d in data:\r\n\r\n #find the sheet name we care about\r\n for entry in worksheet_feed.entry:\r\n if entry.title.text == d.sheet_name:\r\n worksheet_entry = entry\r\n break\r\n else: # no-break\r\n print \"finding worksheet\"\r\n\r\n worksheet_key = worksheet_entry.id.text.split('/')[-1]\r\n\r\n #print str(d.sheet_name)\r\n #print str(d.location[0])\r\n #print str(d.location[1])\r\n #print str(d.value)\r\n\r\n row = d.location[0]\r\n col = d.location[1]\r\n value = d.value\r\n\r\n client.UpdateCell(row, col, value, config.speedsheet_id, worksheet_key)\r\n\r\n\r\n date_row = config.cell_for_date[0]\r\n date_col = config.cell_for_date[1]\r\n\r\n #find the sheet name we care about for date\r\n for entry in worksheet_feed.entry:\r\n if entry.title.text == config.cell_for_date_worksheet:\r\n worksheet_key = entry.id.text.split('/')[-1]\r\n\r\n #time stamp a cell plz\r\n client.UpdateCell(date_row, date_col, timeStamp, config.speedsheet_id, worksheet_key)", "def update(self):\n print('Updating seating chart...')\n for period in self.periods:\n if period in self.class_lists:\n new_seating, version = self.new_tables(period)\n self.seating_chart[period] = new_seating\n\n # Verify success:\n if new_seating:\n print('Period {}'.format(period))\n for i in range(len(new_seating)):\n print('Table {}: {}'.format(i + 1, new_seating[i]))\n print('Version = {}'.format(version))\n else:\n print('Period {}: Failed to update seating.'.format(period))", "def synchronize_gsheet():\n # initialize google client\n gclient = gspread.authorize(credentials)\n # open spreadsheet using google client\n spreadsheet = gclient.open_by_key(docid)\n for i, worksheet in enumerate(spreadsheet.worksheets()):\n # we want the first sheet out of the workbook. Stops after the first (0)\n if i == 0:\n filename = docid + '-worksheet' + str(i) + '.csv'\n # encode as utf-8\n with open(filename, 'w', encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerows(worksheet.get_all_values())\n # convert to spreadsheet pandas dataframe\n df = pd.read_csv(filename)\n\n columns = list(df)\n for c in columns:\n if c not in schema_list and c[:15] != 'FY19 ADP Status':\n df.drop(c, axis=1, inplace=True)\n else:\n continue\n\n df.columns = ['Property ID', 'Building Area (sqft)', 'Type I', 'Type II', 'UNITS (RP)',\n 'Year First Affected ; (Oct. 1, 201X)', 'Property Name', 'Street Address (TCAD) DO NOT EDIT',\n 'Situs Zip', 'Owner Name', 'Owner Address', 'Owner Address Line 2', 'Owner Address Line 3',\n 'Owner City', 'Owner State', 'Owner Zip+4', 'FY19 ADP Status']\n # fill NaN (not a number) in dataframe with ''\n df.fillna('', inplace=True)\n # convert to dictionary for requests payload\n data = df.to_dict('records')\n # save to csv for initial manual upload of data to socrata. Manually uploading to set columns/schema of asset\n df.to_csv('master-recycling-list.csv')\n\n # perform replace using a put request. 
Authentication is socrata username and password with ownership of asset in question\n r = requests.put(socrata_asset, json=data, auth=(os.environ['socrata_user'], os.environ['socrata_pass']), headers=headers)\n return r.json()", "def save(self, worksheet):\n pass", "def write_data(data):\n\n spreadsheet = SpreadSheet()\n sheet_name = os.getenv('SPREADSHEET_NAME')\n if not sheet_name:\n sheet_name = 'New spreadsheet'\n spreadsheet.write(sheet_name, data)", "def update_sales_worksheet(data):\n print('Updating sales worksheet...\\n')\n sales_worksheet = SHEET.worksheet('sales')\n sales_worksheet.append_row(data)\n pprint('Sales worksheet updated succesfully.\\n')", "def writer(data, sheet_name, share_email):\n\n # Grabbing Parameters for looping\n n_rows = data.shape[0]\n n_cols = data.shape[1]\n\n # load sheet if it exists or create and share sheet if it does not\n try:\n sheet = client.open(sheet_name)\n except gspread.exceptions.SpreadsheetNotFound:\n # creating sheets\n # Now will can access our google sheets we call\n # client.open on StartupName\n sheet = client.create(sheet_name)\n sheet.share(share_email, perm_type='user',\n role='writer') # sharing my email\n\n # getting cell list to batch update\n import string\n end_col = string.ascii_uppercase[n_cols - 1]\n end_row = n_rows + 1\n\n sheet_range = 'A1:' + end_col + str(end_row)\n\n # turning df to one long list\n df_as_list = data.stack().tolist()\n df_as_list = data.columns.tolist() + df_as_list\n\n # getting the target sheet\n ws = sheet.get_worksheet(0)\n cell_list = ws.range(sheet_range)\n\n # writing df list to cell range list\n for i in range(0, len(cell_list)):\n cell_list[i].value = df_as_list[i]\n\n # batch updating\n ws.update_cells(cell_list)", "def save_to_worksheet(spreadsheet_id: str,\n ws_title: str,\n data: List[List[str]],\n keep_header_row: bool) -> Dict[str, Union[str, int]]:\n\n gc = gspread.service_account(filename=constants.FILEPATH_GSHEET_CREDS)\n sheet = gc.open_by_key(spreadsheet_id)\n ws = sheet.worksheet(ws_title)\n\n start_row_idx = 2 if keep_header_row else 1 \n\n # 1. Add a new row to the end.\n ws.add_rows(1)\n # 2. Delete rows from start_row_idx till the 2nd-last row.\n ws.delete_rows(start_row_idx, ws.row_count - 1)\n # 3. 
Insert new data from start_row_idx onwards.\n resp = ws.insert_rows(data, start_row_idx)\n\n return {\n constants.UPDATED_RANGE: resp[constants.UPDATES][constants.UPDATED_RANGE],\n constants.UPDATED_ROWS: resp[constants.UPDATES][constants.UPDATED_ROWS],\n }", "def update_worksheet(data, worksheet):\n print(f'Updating {worksheet} worksheet...\\n')\n worksheet_to_update = SHEET.worksheet(worksheet)\n worksheet_to_update.append_row(data)\n print(f'{worksheet} worksheet update successfully\\n')", "def update_surplus_worksheet(data):\n print(\"Updating surplus worksheet...\\n\")\n surplus_worksheet = SHEET.worksheet(\"surplus\")\n surplus_worksheet.append_row(data)\n print(\"Surplus worksheet updated successfully.\\n\")", "def write_book(self):\n # self.writer.save()\n self.workbook.close()", "def update_worksheet_sales(sales_data):\n\n print(\"Updating sales worksheet...\\n\")\n sales_worksheet = SHEET.worksheet(\"sales\")\n sales_worksheet.append_row(sales_data)\n print(\"Sales worksheet updated successfully.\\n\")\n back_to_menu()", "def write(self, x, y, data, format=None):\n if format:\n self.sheet.write(y, x, data, self._formats[format])\n else:\n self.sheet.write(y, x, data)", "def update_google_spreadsheet(populaire_pk):\n populaire = Populaire.objects.get(pk=populaire_pk)\n client = google_docs_login(SpreadsheetsService)\n key = populaire.google_doc_id.split(':')[1]\n spreadsheet_list = client.GetListFeed(key)\n spreadsheet_rows = len(spreadsheet_list.entry)\n rider_list = Rider.objects.filter(\n populaire__short_name=populaire.short_name,\n populaire__date=populaire.date)\n # Update the rows already in the spreadsheet\n for row, rider in enumerate(rider_list[:spreadsheet_rows]):\n rider_number = row + 1\n new_row_data = _make_spreadsheet_row_dict(rider_number, rider)\n client.UpdateRow(spreadsheet_list.entry[row], new_row_data)\n # Add remaining rows\n for row, rider in enumerate(rider_list[spreadsheet_rows:]):\n rider_number = spreadsheet_rows + row + 1\n row_data = _make_spreadsheet_row_dict(rider_number, rider)\n client.InsertRow(row_data, key)", "def append_to_sheet(req):\n scopes = ['https://www.googleapis.com/auth/spreadsheets']\n sheet_id = app.config['OUTPUT_SHEET_ID']\n creds = service_account.ServiceAccountCredentials.from_json_keyfile_dict(app.config['GOOGLE_SERVICE_CREDENTIALS'],\n scopes=scopes)\n\n sheet_title = '{0} {1}'.format(req.request_date.month, req.request_date.year)\n gc = gspread.authorize(creds)\n sheets = gc.open_by_key(sheet_id)\n\n try:\n sheet = sheets.worksheet(sheet_title)\n sheet.append_row([str(req.request_date), req.rs_username])\n except WorksheetNotFound:\n sheet = sheets.add_worksheet(title=sheet_title, rows='400', cols='2')\n sheet.append_row([str(req.request_date), req.rs_username])\n return True", "def append2sheet(creds, sheet_id, tab_name, data):\n\n service = discovery.build('sheets', 'v4', credentials=creds)\n value_input_option = 'USER_ENTERED'\n body = {\n 'values': data,\n }\n result = service.spreadsheets().values().append(\n spreadsheetId=sheet_id, range=tab_name,\n valueInputOption=value_input_option, body=body).execute()\n\n return result", "def upload_information(sheet, page, row):\n cell_selection = \"A\" + str(row + 1) + \":G\" + str(row + 1)\n cell_list = sheet.range(cell_selection)\n cell_data = [page.page_url, page.title_text, page.title_length, page.anchors, page.alt_text['Without Alt'],\n page.h_tags['h1'], page.reading_score]\n\n for enum, data in enumerate(cell_data):\n cell_list[enum].value = data\n\n 
sheet.update_cells(cell_list)", "def uploadToGSheets():\n # Set credentials\n scope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'authentication/service_key.json', scope)\n gc = gspread.authorize(credentials)\n\n # Load pandas dataframe from CSV file\n df = pd.read_csv('output/OLM_weekly_prometheus.csv', index_col=False)\n \n # Upload data to Google Sheets\n spreadsheet_key = \"XXXXXXXXXXXXXXXXXXXXXXXX\"\n wks_name = \"Sheet1\"\n \n workbook = gc.open_by_key(spreadsheet_key)\n sheet = workbook.worksheet(wks_name)\n\n sheet.clear()\n (row,col) = df.shape\n\n cells = sheet.range(\"A1:{}\".format(gspread.utils.rowcol_to_a1(row + 1, col)))\n for cell, val in zip(cells, iter_pd(df)):\n cell.value = val\n sheet.update_cells(cells)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Processes user input to determine which periods need new seating arrangements.
def get_periods(self, user_input):
    active_periods = list(self.schedules[self.year].keys())
    if user_input == 'all':
        periods = active_periods.copy()
        print('New Seats: {}'.format(periods))
        return periods
    choices = list(user_input)
    periods = []
    for choice in choices:
        try:
            p = int(choice)
        except ValueError:
            pass
        else:
            if p in active_periods:
                periods.append(p)
    if periods:
        print('New Seats: {}'.format(periods))
        return periods
    else:
        print('Invalid input.')
        print('***** Finished *****')
        exit()
[ "def main():\n print('This program takes dates from a file, or user input, and validates that it fits the format:\\n[d/dd]/[0m/m/mm/first 3 characters of month]/[yy/yyy] seperated by /,-, or <space>.\\n')\n op = input('Would you like to read(r) dates file or manually(m) input a date? [r|m|exit]\\n')\n while(True):\n if(op == 'exit'): break\n elif(op == 'm'):\n print('Enter date(type \"exit\" to escape):')\n while(True):\n date = input()\n if(date == 'exit'): break\n print(vd.validate(date))\n break\n elif(op == 'r'):\n filename = input('What is the name of the file including extension(.txt recommended)? eg. \"dates.txt\"\\n')\n while not (os.path.isfile(filename)):\n filename = input('Invalid input, try again:\\n')\n with open(filename, 'r') as f:\n for line in f:\n print(vd.validate(line))\n print('type \"exit\" to escape:')\n while(True):\n if(input()=='exit'): break\n break\n else:\n op = input('Invalid input, try again:[r|m|exit]\\n')", "def user_entered_positions():\n while True:\n try:\n print(\"*****PLEASE ENTER A LIST OF POSITIONS, SEPARATED BY COMMAS, NO BRACKETS *****\")\n input_number_of_shares = str(input())\n \n # Clean the positions input and check for basic mistakes\n no_white_space = input_number_of_shares.replace(\" \", \"\")\n no_commas = no_white_space.replace(\",\", \"\")\n no_dots = no_commas.replace(\"\\.\", \"\")\n \n two_consecutive_commas = re.findall(',,', no_white_space)\n contains_dots = re.findall(\"\\.\", input_number_of_shares) \n \n split_input = no_white_space.split(',')\n numbers_in_input = []\n for i in split_input:\n numbers_in_input.append(i)\n \n #positions_input will be used as an input in investment_instrument.py (define it outside too)\n positions_input = []\n \n if input_number_of_shares.lower() == 'quit' or input_number_of_shares.lower() == 'exit':\n raise Quit\n elif len(input_number_of_shares) == 0:\n raise NoInput\n elif len(two_consecutive_commas) > 0:\n raise TWoconsecutiveCommas\n elif len(contains_dots) > 0:\n raise ContainsPeriod\n elif no_dots.isdigit() is False and input_number_of_shares.lower() != 'quit' and input_number_of_shares.lower() != 'exit':\n raise ElementsOtherThanDigits\n elif no_white_space[-1] == \",\":\n raise LastElementIsComma\n elif len(numbers_in_input)>0:\n for i in numbers_in_input:\n if int(i)!=1 and int(i)!=10 and int(i) != 100 and int(i) !=1000:\n raise NotCorrectInput\n else:\n positions_input.append(int(i))\n return positions_input\n break \n\n except NoInput:\n print(\"WARNING: THE INPUT IS EMPTY. PLEASE TRY AGAIN.\")\n print()\n except TWoconsecutiveCommas:\n print(\"WARNING: THE INPUT CONTAINS AT LEAST 2 CONSECUTIVE COMMAS. PLEASE TRY AGAIN.\")\n print()\n except ContainsPeriod:\n print(\"WARNING: THE INPUT CONTAINS PERIODS OR DECIMAL POINTS. PLEASE TRY AGAIN.\")\n print()\n except ElementsOtherThanDigits:\n print(\"WARNING: THE INPUT CONTAINS ELEMENTS OTHER THAN DIGITS (EX: LETTERS, #,-}). PLEASE TRY AGAIN.\")\n print()\n except LastElementIsComma:\n print(\"WARNING: THE LAST ELEMENT OF THE INPUT IS A COMMA. PLEASE TRY AGAIN.\")\n print()\n except NotCorrectInput:\n print(\"WARNING: ONLY 1, 10, 100, and 1000 ARE VALID POSITION INPUTS. PLEASE TRY AGAIN.\")\n print()\n except Quit:\n print(\"END\")\n sys.exit()\n except (KeyboardInterrupt, SystemExit):\n print(\"END\")\n os._exit(1)", "def create_sems(sig):\r\n\r\n\r\n\r\n\r\n\r\n def create_new_sem(sem_type):\r\n \"\"\" Gets parameter sem_type in ('Instruction', 'Schedule', 'AdminEvent'). 
Returns updated tuple (current_inst, current_sched, current_evt)\r\n \"\"\"\r\n\r\n current_inst = current_instruction\r\n current_sched = current_schedule\r\n current_evt = current_event\r\n\r\n if sem_type == 'Instruction':\r\n current_inst = parse.add_new_DrugAdmin()\r\n current_sched = None\r\n current_evt = None\r\n elif sem_type == 'Schedule':\r\n if not current_inst:\r\n # E.g., we are at the start of the sig.\r\n current_inst = parse.add_new_DrugAdmin()\r\n current_sched = current_inst.add_new_schedule()\r\n current_evt = None\r\n elif sem_type == 'AdminEvent':\r\n if not current_inst:\r\n current_inst = parse.add_new_DrugAdmin()\r\n if not current_sched:\r\n current_sched = current_inst.add_new_schedule()\r\n current_evt = current_sched.add_new_AdminEvent()\r\n\r\n return (current_inst, current_sched, current_evt)\r\n\r\n\r\n\r\n\r\n\r\n for parse in sig.parses:\r\n\r\n sem_scope_data = SemScopeData(parse.strucs)\r\n #print sem_scope_data.pprint()\r\n\r\n current_instruction = None\r\n current_schedule = None\r\n current_event = None\r\n\r\n for (struc_index, struc) in enumerate(parse.strucs):\r\n duration_on_the_left = sem_scope_data.get_list_member_with_largest_index_less_than_num(struc_index, sem_scope_data.duration_like_strucs)\r\n duration_on_the_right = sem_scope_data.get_list_member_with_smallest_index_greater_than_num(struc_index, sem_scope_data.duration_like_strucs)\r\n timing_on_the_right = sem_scope_data.get_list_member_with_smallest_index_greater_than_num(struc_index, sem_scope_data.timing_like_strucs)\r\n\r\n if struc.label == 'THEN_CHRONO':\r\n # \"Then\" signals the start of a new Schedule or a new AdminEvent.\r\n # It signals new Schedule if it is followed by a new duration (or by most types of calendar_event). So we use a lookahead loop to verify this.\r\n # E.g., \"2 tabs now then 1 tab every morning for the next 4 days\", or \" then 1 tab tomorrow\" or \"then 1 tab on Monday\"\r\n # However, \"then\" can signal merely a new AdminEvent if it consists in change of timing from the previous AdminEvent of the same schedule.\r\n # E.g. �take 2 tabs in the morning then 1 in the evening then 1 at bedtime for pain as needed�\r\n\r\n\r\n # To determine if we need to start a new Schedule:\r\n # Check if there is Duration to the right with unassigned start.\r\n # If yes, create a new Schedule and assign this struc as the start of the schedule in schedule_start_dict.\r\n # If schedule_start_dict has a positive index assigned to the Schedule, do not create a new schedule. Proceed to check if it is the start of a new AdminEvent\r\n # If there are no Duration strucs to the right or if the next duration_on_the_right has None as it's left_scope_start, then:\r\n # a. Check if we are at the start of a left-scoped Duration, i.e. if we are at \"Then, Duration:\". If so, start a new Schedule.\r\n # b. Else check if there is a Duration to the right with left_scope_start assigned to it. If so, do not create a new schedule.\r\n # Proceed to check if it is the start of a new AdminEvent\r\n # c. If there is no Duration on the right but the current Schedule has a Duration filled we assume that this\r\n # is an implied Schedule to do something forever. E.g. \"take 2 tabs daily for 10 days, then 1 tab daily.\" So we start a new Schedule with semantics \"after that:\".\r\n\r\n if duration_on_the_right and duration_on_the_right.is_right_scoped and duration_on_the_right.struc_index == struc_index + 2:\r\n # I.e. 
we are the start of \"Then for the next n days: ...\"\r\n (current_instruction, current_schedule, current_event) = create_new_sem('Schedule')\r\n struc.accounted_for_by_sem = current_schedule\r\n elif duration_on_the_right and duration_on_the_right.is_left_scoped and duration_on_the_right.left_scope_start is None:\r\n duration_on_the_right.left_scope_start = struc_index\r\n (current_instruction, current_schedule, current_event) = create_new_sem('Schedule')\r\n struc.accounted_for_by_sem = current_schedule\r\n elif duration_on_the_right is None and duration_on_the_left:\r\n # If there is no Duration on the right but the current Schedule has a Duration filled we assume that this\r\n # is an implied Schedule to do something forever. E.g. \"take 2 tabs daily for 10 days, then 1 tab daily.\" So we start a new Schedule with semantics \"after that:\".\r\n (current_instruction, current_schedule, current_event) = create_new_sem('Schedule')\r\n struc.accounted_for_by_sem = current_schedule\r\n elif current_schedule is None:\r\n # A new AdminEvent can only be started by a Then_Chrono if it is in the middle of a Schedule, i.e. a prior AdminEvent has well been started.\r\n # So we are probably at the beginning of a new Instruction. Start a new Schedule then.\r\n (current_instruction, current_schedule, current_event) = create_new_sem('Schedule')\r\n struc.accounted_for_by_sem = current_schedule\r\n elif timing_on_the_right:\r\n # We didn't start a new Schedule, so Then_Chrono has to signify a start of a new Admin_Event\r\n # Since we are starting a new AdminEvent, the scope of Timing on the right has to start here.\r\n timing_on_the_right.left_scope_start = struc_index\r\n (current_instruction, current_schedule, current_event) = create_new_sem('AdminEvent')\r\n struc.accounted_for_by_sem = current_event\r\n elif current_event is None:\r\n (current_instruction, current_schedule, current_event) = create_new_sem('AdminEvent')\r\n struc.accounted_for_by_sem = current_event\r\n\r\n elif struc.label == 'AND_CONJ':\r\n # \"AND\" could signal the start of a new Schedule or a new AdminEvent.\r\n # It signals new Schedule if it is followed by a new duration or calendar_event. So we use a lookahead loop to verify this.\r\n # E.g., \"2 tabs day 1 and 3 tabs days 2-5\", or \"2 tabs today and 1 tab tomorrow\" or \"2 tabs now and 1 tab on Monday\"\r\n # However, \"and\" can signal merely a new AdminEvent if it consists in change of timing from the previous AdminEvent of the same schedule.\r\n # E.g. �take 2 tabs in the morning and 1 in the evening�\r\n\r\n # EXCEPT you don't need incompatible Timing to start a new AdminEvent! All you need is \"AND_Chrono\" before Timing.\r\n # E.g. \"Take 2 tabs every 3 hours and at bedtime.\" AND_Chrono there starts a new AdminEvent.\r\n\r\n if duration_on_the_right and duration_on_the_right.is_left_scoped and duration_on_the_right.left_scope_start is None:\r\n duration_on_the_right.left_scope_start = struc_index\r\n (current_instruction, current_schedule, current_event) = create_new_sem('Schedule')\r\n struc.accounted_for_by_sem = current_schedule\r\n elif current_schedule is None:\r\n # A new AdminEvent can only be started by a Then_Chrono if it is in the middle of a Schedule, i.e. 
a prior AdminEvent has well been started.\r\n (current_instruction, current_schedule, current_event) = create_new_sem('Schedule')\r\n struc.accounted_for_by_sem = current_schedule\r\n elif timing_on_the_right:\r\n # We are starting a new AdminEvent, so break the scope of timing_on_the_right to here.\r\n timing_on_the_right.left_scope_start = struc_index\r\n (current_instruction, current_schedule, current_event) = create_new_sem('AdminEvent')\r\n struc.accounted_for_by_sem = current_event\r\n elif current_event is None:\r\n (current_instruction, current_schedule, current_event) = create_new_sem('AdminEvent')\r\n struc.accounted_for_by_sem = current_event\r\n\r\n elif DrugAdmin.is_valid_property(struc.label):\r\n if current_instruction and not struc.is_semantically_incompatible_with_given_sem(current_instruction):\r\n # If the struc is compatible with an existing current_instruction, just add this struc to that current_instruction.\r\n current_instruction.add_property_value(struc.label, struc)\r\n elif not current_instruction:\r\n # Need to create the first Instruction and assign the struc to it.\r\n (current_instruction, current_schedule, current_event) = create_new_sem('Instruction')\r\n current_instruction.add_property_value(struc.label, struc)\r\n #else:\r\n # We don't create a new instruction because of incompatibility, because all the cases we have seen (e.g. multiple AS_NEEDED) are just stupid repetitions\r\n # which don't intend to signal new instruction.\r\n\r\n\r\n elif Schedule.is_valid_property(struc.label):\r\n if duration_on_the_left and duration_on_the_left.is_right_scoped and duration_on_the_left.struc_index == struc_index:\r\n # I.e. we are the start of a right-scoped Duration: \"For the next n days: ...\". Need to start a new Schedule.\r\n (current_instruction, current_schedule, current_event) = create_new_sem('Schedule')\r\n elif duration_on_the_left and duration_on_the_left.is_right_scoped and duration_on_the_left.right_scope_end >= struc_index:\r\n # We are in the scope of an already-started right-scoped Duration Schedule, so don't start anything new.\r\n pass\r\n elif duration_on_the_right and duration_on_the_right.is_left_scoped and duration_on_the_right.left_scope_start is None:\r\n duration_on_the_right.left_scope_start = struc_index\r\n (current_instruction, current_schedule, current_event) = create_new_sem('Schedule')\r\n elif current_schedule is None:\r\n (current_instruction, current_schedule, current_event) = create_new_sem('Schedule')\r\n elif struc.label == 'TAPER':\r\n # Taper always starts a new schedule. E.g. \"take one capsule at bedtime increase by 1 capsule every day\"\r\n duration_on_the_right.left_scope_start = struc_index\r\n (current_instruction, current_schedule, current_event) = create_new_sem('Schedule')\r\n current_schedule.add_property_value(struc.label, struc)\r\n\r\n elif AdminEvent.is_valid_property(struc.label):\r\n if current_event and not struc.is_semantically_incompatible_with_given_sem(current_event):\r\n # If the struc is compatible with an existing current_event, just add this struc to that current_event.\r\n current_event.add_property_value(struc.label, struc)\r\n else:\r\n start_new_event = True\r\n if duration_on_the_left and duration_on_the_right and duration_on_the_right.is_left_scoped:\r\n # This may be a multi_schedule instruction which doesn't have Timing in the first schedule's events.\r\n # E.g. 
\"take 7 tab twice daily for 5 days, 2 in am & 3 in pm for 4 days,then one tab once daily\"\r\n start_new_event = True\r\n elif current_event and not current_event.timing:\r\n # We are in a bind. We have an incompatible structure to the current_event, so on the one hand we need to close that event and\r\n # start a new one. But on the other hand, current_event has not Timing, so it is not really an Event.\r\n # The situation is typically due to repeated directive, e.g. \"use 1 apply vaginally every night at bedtime\"\r\n start_new_event = False\r\n if struc.label == 'DIRECTIVE' and current_event.directive:\r\n if struc.value == 'remove':\r\n # \"Remove\" should always start a separate Event for it represents a separate action with\r\n # it's own properties. The previous event doesn't have to have Timing. E.g. \"apply 1 patch for 10 hours, then remove at bedtime\"\r\n # For the frequent case of Duration on/off (e.g. \"apply 1 patch every day on 12 hours off for 12 hours\") we DO want to create a new\r\n # Schedule (and new Event to start it) because we have 2 Durations to deal with. We finagle the problem of this being really an intra-day duration in\r\n # process_special_duration_cases()\r\n # e.g. \"apply one patch once weekly as directed for three weeks. leave off for one week then repeat cycle\". \"remove\" with Duration should start a new event.\r\n start_new_event = True\r\n elif (\r\n (current_event.directive.value == 'use' and struc.value in ('inject', 'instill', 'mix', 'dissolve', 'chew', 'inject', 'take')) or\r\n (current_event.directive.value == struc.value) or\r\n (current_event.directive.value == 'use' and struc.value == 'rinse' and (not (current_event.dose) or current_event.dose.form.value in ('capful', 'ounce', 'ml', 'teaspoon', 'tablespoon')))\r\n ):\r\n # just make this directive be the event directive.\r\n # The case of rinse is this: we want to cover cases such as \"use 1 capful to rinse..\" but we want to avoid \"use 1 puff twice a day, rinse mouth after\"\r\n current_event.directive.rules_used.append('*removed_from_sem_in_create_new_sem*')\r\n current_event.remove_property('directive', current_event.directive)\r\n current_event.add_property_value(struc.label, struc)\r\n start_new_event = False\r\n elif current_event.directive.value in ('inject', 'instill', 'mix', 'dissolve', 'chew', 'inject', 'take') and struc.value in ('use', 'take'):\r\n # remove this directive. E.g. \"chew one tablet take one tablet three times a day before meals \"\r\n struc.rules_used.append('*removed_from_sem_in_create_new_sem*')\r\n start_new_event = False\r\n elif current_event.directive.value in ('mix', 'dissolve') and struc.value in ('drink', 'inject'):\r\n # This is a multi-event situation, e.g. \"mix with 1 cc diluent and inject intramuscularly\"\r\n # We will remove \"ALSO:\" from the transduction in process_schedule().\r\n start_new_event = True\r\n elif struc.value in ('stop'):\r\n # This is a multi-schedule situation, e.g. \"take 1 tablet by mouth once daily for 21 days, skip 7 days and repeat cycle\"\r\n start_new_event = True\r\n elif struc.label == 'DOSE' and current_event.dose:\r\n if current_event.dose.form.value != struc.form.value:\r\n # E.g. \"take 1 tablet and 1 gelcap by mouth daily\". So even though there is no timing difference, we can pretend that we first take the tablet then take the capsule.\r\n start_new_event = True\r\n elif current_event.specific_day:\r\n # we should start a new event because this is likely to be a switch of doses b/c of change in days:\r\n # eg. 
\"take one tablet by mouth on monday thru friday one and half tablet saturday and sunday\"\r\n start_new_event = True\r\n else:\r\n # probably repeat \"take 1 tablet 1 tablet\", so ignore the second Dose\r\n start_new_event = False\r\n elif struc.label == 'SPECIFIC_DAY' and current_event.specific_day:\r\n start_new_event = True\r\n\r\n if start_new_event:\r\n # Need to create a new event and assign the struc to it.\r\n # But first check if you need to start a new Schedule.\r\n if duration_on_the_right and duration_on_the_right.is_left_scoped and duration_on_the_right.left_scope_start is None:\r\n duration_on_the_right.left_scope_start = struc_index\r\n (current_instruction, current_schedule, current_event) = create_new_sem('Schedule')\r\n elif current_schedule is None:\r\n (current_instruction, current_schedule, current_event) = create_new_sem('Schedule')\r\n (current_instruction, current_schedule, current_event) = create_new_sem('AdminEvent')\r\n if timing_on_the_right:\r\n # We are starting a new AdminEvent, so the scope of Timing on the right has to start here.\r\n timing_on_the_right.left_scope_start = struc_index\r\n current_event.add_property_value(struc.label, struc)\r\n\r\n\r\n parse.sem_scope_data = sem_scope_data\r\n\r\n #print sem_scope_data.pprint()\r\n #s = parse.show_struc_assignment_to_sems(include_struc_details = True, omit_coords = False)\r\n #print s\r", "def setrange():\n app.logger.debug(\"Entering setrange\") \n flask.flash(\"Setrange gave us '{}'\".format(\n request.form.get('daterange')))\n daterange = request.form.get('daterange')\n starttime = request.form.get('starttime')\n endtime = request.form.get('endtime')\n name = request.form.get('name')\n flask.session['name'] = name\n flask.session['daterange'] = daterange\n flask.session['text_beg_time'] = starttime\n flask.session['text_end_time'] = endtime\n daterange_parts = daterange.split()\n flask.session['begin_date'] = interpret_date(daterange_parts[0])\n \n app.logger.debug(flask.session['begin_date'])\n \n flask.session['end_date'] = interpret_date(daterange_parts[2])\n flask.session['begin_time'] = interpret_time(starttime)\n flask.session['end_time'] = interpret_time(endtime)\n flask.session['is_participant'] = \"False\"\n \n app.logger.debug(\"Setrange parsed {} - {} dates as {} - {}\".format(\n daterange_parts[0], daterange_parts[1], \n flask.session['begin_date'], flask.session['end_date']))\n app.logger.debug(\"Set time range from {} - {} TO {} - {}\".format(starttime, endtime, flask.session['begin_time'], flask.session['end_time']))\n return flask.redirect(flask.url_for(\"choose\"))", "def update(self):\n print('Updating seating chart...')\n for period in self.periods:\n if period in self.class_lists:\n new_seating, version = self.new_tables(period)\n self.seating_chart[period] = new_seating\n\n # Verify success:\n if new_seating:\n print('Period {}'.format(period))\n for i in range(len(new_seating)):\n print('Table {}: {}'.format(i + 1, new_seating[i]))\n print('Version = {}'.format(version))\n else:\n print('Period {}: Failed to update seating.'.format(period))", "def process_special_duration_cases(instruction):\r\n\r\n def find_pair_on_off_duration_schedules(instruction):\r\n for sched_0_index in range(len(instruction.schedules) - 1):\r\n (schedule_0, schedule_1) = instruction.schedules[sched_0_index : sched_0_index + 2]\r\n duration_0 = schedule_0.duration\r\n duration_1 = schedule_1.duration\r\n if (duration_0 and duration_1 and duration_0.on_off == 'on' and duration_1.on_off == 'off' and\r\n 
duration_0.time_interval.time_unit.value == duration_1.time_interval.time_unit.value and duration_0.time_interval.time_unit.value == 'hour'):\r\n return (schedule_0, schedule_1)\r\n return None\r\n\r\n if instruction.schedules and len(instruction.schedules) > 1 and instruction.primary_directive.value == 'apply' and instruction.primary_form.value == 'patch':\r\n on_off_duration_schedules = find_pair_on_off_duration_schedules(instruction)\r\n if not on_off_duration_schedules:\r\n return []\r\n (schedule_0, schedule_1) = on_off_duration_schedules\r\n duration_0 = schedule_0.duration\r\n duration_1 = schedule_1.duration\r\n # we are dealing with \"apply (patch) 12 hours on then 12 hours off\"\r\n directive = instruction.primary_directive\r\n if schedule_1.events:\r\n directive_remove = schedule_1.events[0].directive\r\n else: # should never happen, since schedule_1 should have 1 event with directive = remove\r\n return []\r\n then_chrono = ThenChrono([])\r\n struc_list = [directive, duration_0, then_chrono, directive_remove, duration_1]\r\n dose_included_event = None\r\n if schedule_0.events and schedule_0.events[0].dose:\r\n dose = schedule_0.events[0].dose\r\n struc_list.insert(1, dose)\r\n dose_included_event = schedule_0.events[0]\r\n elif schedule_1.events and schedule_1.events[0].dose:\r\n # case such as \"apply 12 hours on and 12 hours off daily 1 patch\"\r\n dose = schedule_1.events[0].dose\r\n struc_list.insert(1, dose)\r\n dose_included_event = schedule_1.events[0]\r\n elif instruction.schedules[0].events and instruction.schedules[0].events[0].dose:\r\n # case where the on/off duration are not in the first schedule, e.g. \"apply 1-3 patches topically once daily for 30 days 12 hours on and 12 hours off daily\"\r\n dose = instruction.schedules[0].events[0].dose\r\n struc_list.insert(1, dose)\r\n dose_included_event = instruction.schedules[0].events[0]\r\n empty_version = Version(parse)\r\n\r\n candidate_atoms = get_structurally_similar_list_of_atoms_in_dict(struc_list)\r\n versions = try_adding_struc_list_to_versions_list(struc_list, [empty_version], restrict_to_these_atoms_list = candidate_atoms)\r\n if versions:\r\n # Check if the best new version now contains Duration. 
If it does, we then change the Sem by consolidating the 2 schedules into 1 and removing\r\n # both Duration\r\n uncovered_label2struc_list = find_strucs_not_covered_by_top_versions(struc_list, versions, max_number_of_versions = 1)\r\n if 'DURATION' not in uncovered_label2struc_list:\r\n # The top new version covers Duration\r\n schedule_1_properties = [prop for prop in schedule_1.substantive_properties if schedule_1.get_property_values(prop)]\r\n for prop in schedule_1_properties:\r\n # for every substantive component of schedule_1 proper, reassign it to schedule_0.\r\n value = schedule_1.get_property_values(prop)\r\n if type(value) == list:\r\n for struc in value:\r\n schedule_1.remove_property(prop, struc)\r\n if not schedule_0.get_property_values(prop):\r\n schedule_0.add_property_value(prop, struc)\r\n struc.rules_used.append('*reassigned_sem_by_process_special_duration_cases()*')\r\n else:\r\n struc.rules_used.append('*removed_from_sem_by_process_special_duration_cases()*')\r\n else:\r\n schedule_1.remove_property(prop, value)\r\n if not schedule_0.get_property_values(prop):\r\n schedule_0.add_property_value(prop, value)\r\n value.rules_used.append('*reassigned_sem_by_process_special_duration_cases()*')\r\n else:\r\n value.rules_used.append('*removed_from_sem_by_process_special_duration_cases()*')\r\n schedule_0.remove_property('duration', duration_0)\r\n duration_0.rules_used.append('*removed_from_sem_by_process_special_duration_cases()*')\r\n index_schedule_1 = instruction.schedules.index(schedule_1)\r\n instruction.schedules[index_schedule_1:index_schedule_1 + 1] = []\r\n if dose_included_event:\r\n dose_included_event.dose.rules_used.append('*removed_from_sem_by_process_special_duration_cases()*')\r\n dose_included_event.remove_property('dose')\r\n # minor cleanup: there may still be loose strucs that are assigned Schedule_1 or one of its events as\r\n # their Sems (e.g. Then_Chrono). So reassign these to None.\r\n for struc in instruction.parse.strucs:\r\n if struc.accounted_for_by_sem == schedule_1 or struc.accounted_for_by_sem in schedule_1.events:\r\n struc.accounted_for_by_sem = None\r\n struc.rules_used.append('*removed_from_sem_by_process_special_duration_cases()*')\r\n schedule_1_events = schedule_1.events[:]\r\n for event in schedule_1_events:\r\n schedule_1.remove_event(event)\r\n return versions\r\n return []", "def choose_time(customer_name, day, duration):\n print(\"\\nChoose a time for %s's haircut on %s:\" %(customer_name,\n day.date_text))\n # Creates a list of numbers and times, parses them, and prints with buffer\n # This allows for three columns of times and cleaner presentation.\n column = []\n for i in range(32):\n column.append(\"%i. %s\" %(i + 1, day.times[i]))\n # Blank space added to improve formatting. In future builds, find more\n # efficient way to format.\n column.append(\" \")\n for a, b, c in zip(column[:11], column[11:22], column[22:33]):\n print('{:<30}{:<30}{:<}'.format(a, b, c))\n # While loop repeats until user enters valid, available input\n while True:\n time_choice = input(\"\\nType the number of your choice \"\n \"and press 'Enter': \")\n if time_choice.isdigit():\n if int(time_choice) <= 32 and int(time_choice) > 0:\n time_index = int(time_choice) - 1\n time = day.times[time_index]\n # If duration is 30 minutes check two 15 minute blocks\n if duration == '1':\n if \"BOOKED\" in day.times[time_index:time_index + 2]:\n print(\"That time is unavailable. 
Select another time.\")\n else:\n # Book two 15 minute blocks and break while loop.\n for booked in range(time_index, time_index + 2):\n day.times[booked] = \"BOOKED\"\n break\n # If duration is 60 minutes check four 15 minute blocks\n elif duration == '2':\n if \"BOOKED\" in day.times[time_index:time_index + 4]:\n print(\"That time is unavailable. Select another time.\")\n else:\n # Book four 15 minute blocks and break while loop.\n for booked in range(time_index, time_index + 4):\n day.times[booked] = \"BOOKED\"\n break\n else:\n print(\"Invalid input. Input is outside menu scope.\")\n else:\n print(\"Invalid input. Input must be a positive integer.\")\n return (time, time_index)", "def _process_date_price_list(self, dp_list):\n dp_list.sort(key=lambda x: x[0]) # Sort the list by date\n dp_list_to_add = [] # date, price combos that need to be added\n DAY = 86400 # (60 * 60 * 24) # Number of seconds in a day: 86400\n\n # If the list is longer than this, it returned a query with too many get_prices, only one at a time.\n # May consider expanding this to multiple types at a time\n assert len(dp_list[0]) == 2\n for idx, dp in enumerate(dp_list):\n if idx == 0:\n continue\n days_between = abs(syt.get_days_between(dp_list[idx][0], dp_list[idx - 1][0]))\n if days_between == 1:\n continue\n elif days_between == 0:\n syt.log_error(\"Days between two dates is zero.\")\n syt.log.error(\" {} – {}\".format(arrow.get(dp_list[idx][0]).date(), arrow.get(dp_list[idx - 1][0]).date()))\n delete_sql = self._convert_select_to_delete(dp_list[idx][0])\n syt.log_info(\"SQL string: \" + delete_sql)\n # delete_dup = input(\"Delete the duplicate date? (y/n) \")\n db.run_sql(delete_sql)\n return [[\"rerun\", None]]\n # if delete_dup is \"y\":\n # db.run_sql(delete_sql)\n # return [[\"rerun\", None]]\n # else:\n # raise ZeroDivisionError\n else:\n current_price = dp_list[idx][1]\n previous_price = dp_list[idx - 1][1]\n increment = 0\n # If the start and end prices are None, make the inbetween prices None\n # print(days_between)\n price_is_none = False\n if current_price is None and previous_price is None:\n price_is_none = True\n\n # If a previous price exists but a current price doesn't, then guess prices\n # If a current price exists but a previous price doesn't, don't guess if more than 35 days\n elif current_price is None:\n pass # current price isn't used so no need to do anything since increment will still be zero\n elif previous_price is None:\n if days_between > 35:\n price_is_none = True\n # print(\"None\")\n else:\n previous_price = current_price\n\n else:\n #Get the number of days between (price delta)\n increment = round((syt.float_zero(current_price) - syt.float_zero(previous_price)) / days_between,\n ndigits=2)\n\n for n in range(1, days_between):\n if price_is_none is False:\n dp_list_to_add.append([arrow.get(dp_list[idx - 1][0]).replace(days=+n).timestamp,\n round((syt.float_zero(previous_price) + (increment * n)), ndigits=2)])\n else:\n dp_list_to_add.append([arrow.get(dp_list[idx - 1][0]).replace(days=+n).timestamp, None])\n\n dp_list.extend(dp_list_to_add)\n return dp_list", "def add(date, until, approved):\n if until:\n leave = LeaveRange(date, until)\n else:\n leave = Leave(date)\n click.echo(leave.store())", "def available_days(self):\n self.input_available_days = input(\"Please enter the number of available days\\\n/rounds you want to play. 
(5 to 40)\")\n if self.input_available_days.isdigit():\n if int(self.input_available_days) in range(5, 41):\n print(\"You chose to play with \", self.input_available_days, \" days.\")\n self.input_available_days = int(self.input_available_days)\n else:\n print(\"Unexpected input. Please enter a number between 5 and 40.\")\n self.available_days()\n else:\n print(\"Unexpected input. Please enter a number between 5 and 40.\")\n self.available_days()", "def edit_availability(self) -> None:\n while True:\n Parser.print_clean()\n option_selection = Parser.selection_parser(\n options={\"A\": \"View all your current availability\", \"D\": \"Edit availability by date\",\n \"--back\": \"to go back\"})\n if option_selection == \"--back\":\n Parser.print_clean()\n return\n elif option_selection == \"A\":\n today = datetime.datetime.combine(datetime.date.today(), datetime.time(0, 0, 0))\n availability_result = Paging.give_pointer(SQLQuery(\"SELECT Timeslot FROM available_time WHERE StaffId \"\n \"= ? AND Timeslot >= ?\")\n .fetch_all(parameters=(self.ID, today)))\n if len(availability_result) == 0:\n print(\"You have no current availability recorded in the system.\")\n else:\n print(f\"Viewing current availability for GP {self.username}\")\n Paging.show_page(1, availability_result, 10, 2, [\"Pointer\", \"Timeslot\"])\n # input(\"Press Enter to continue...\")\n Parser.handle_input()\n continue\n selected_date = Parser.date_parser(f\"Editing availability for GP {self.username}.\\n\"\n \"Select a Date:\")\n if selected_date == \"--back\":\n # --back returns the user to the main GP menu.\n Parser.print_clean()\n return\n Parser.print_clean()\n # Retrieving availability from the database\n availability_result = SQLQuery(\n \"SELECT Timeslot FROM available_time WHERE StaffID = ? AND Timeslot >= ? AND Timeslot <= ? 
\"\n \"ORDER BY Timeslot\",\n ).fetch_all(parameters=(self.ID, selected_date, selected_date + datetime.timedelta(days=1)))\n # Creating two corresponding tables for the fetched data - one for SQL manipulation, one for display\n availability_table = Paging.give_pointer(availability_result)\n Parser.print_clean(f\"You are viewing your schedule for: {selected_date}\")\n options = {\"A\": \"add availability\"}\n if len(availability_table) == 0:\n Parser.print_clean(f\"You have no availability for this day yet.\\n\")\n else:\n Paging.show_page(1, availability_table, 10, 2, [\"Pointer\", \"Timeslot\"])\n options[\"R\"] = \"remove availability\"\n options[\"--back\"] = \"back to previous page\"\n option_selection = Parser.selection_parser(options=options)\n if option_selection == \"A\":\n # selected_date is passed as argument rather than an instance variable for safety\n # (selected_date is used as a variable name across many methods)\n self.add_availability(selected_date)\n elif (option_selection == \"R\") and (len(availability_table) >= 1):\n # the same applies to the availability table\n self.remove_availability(availability_table)", "def _set_periods(self):\n\n pint, pamp, popt = [], [], []\n # Remove first and last intervals if shut\n if self.rampl[0] == 0:\n self.rtint = self.rtint[1:]\n self.rampl = self.rampl[1:]\n self.rprop = self.rprop[1:]\n if self.rtint[-1] < 0:\n self.rtint = self.rtint[:-1]\n self.rampl = self.rampl[:-1]\n self.rprop = self.rprop[:-1]\n while self.rampl[-1] == 0:\n self.rtint = self.rtint[:-1]\n self.rampl = self.rampl[:-1]\n self.rprop = self.rprop[:-1]\n\n oint, oamp, oopt = self.rtint[0], self.rampl[0] * self.rtint[0], self.rprop[0]\n n = 1\n while n < len(self.rtint):\n if self.rampl[n] != 0:\n oint += self.rtint[n]\n oamp += self.rampl[n] * self.rtint[n]\n if self.rprop[n] >= 8: oopt = 8\n\n if n == (len(self.rtint) - 1):\n pamp.append(oamp/oint)\n pint.append(oint)\n popt.append(oopt)\n else:\n # found two consequent gaps\n if oamp == 0 and self.rampl[n] == 0 and oopt < 8:\n pint[-1] += self.rtint[n]\n # skip bad opening\n #elif (self.badopen > 0 and oint > self.badopen) or (oopt >= 8):\n elif (oopt >= 8):\n popt[-1] = 8\n oint, oamp, oopt = 0.0, 0.0, 0\n# if n != (len(self.rint) - 2):\n# n += 1\n else: # shutting terminates good opening\n pamp.append(oamp/oint)\n pint.append(oint)\n popt.append(oopt)\n oint, oamp, oopt = 0.0, 0.0, 0\n pamp.append(0.0)\n pint.append(self.rtint[n])\n popt.append(self.rprop[n])\n n += 1\n\n self.ptint, self.pampl, self.pprop = pint, pamp, popt\n self.opint = self.ptint[0::2]\n self.opamp = self.pampl[0::2]\n self.oppro = self.pprop[0::2]\n self.shint = self.ptint[1::2]\n self.shamp = self.pampl[1::2]\n self.shpro = self.pprop[1::2]", "def manage_bookings(self) -> None:\n stage = 0\n while True:\n while stage == 0:\n Parser.print_clean(f\"Managing bookings for GP {self.username}.\")\n option_selection = Parser.selection_parser(\n options={\"P\": \"View and edit your pending bookings\", \"D\": \"View and edit bookings by date\",\n \"--back\": \"to go back\"})\n if option_selection == \"--back\":\n Parser.print_clean()\n return\n elif option_selection == \"P\":\n bookings_result = SQLQuery(\"SELECT visit.BookingNo, visit.Timeslot, visit.NHSNo, users.firstName, \"\n \"users.lastName, visit.Confirmed FROM visit INNER JOIN users ON \"\n \"visit.NHSNo = users.ID WHERE visit.StaffID = ? 
AND visit.Confirmed = \"\n \"'P' ORDER BY visit.Timeslot ASC\").fetch_all(EncryptionHelper(),\n parameters=(self.ID,))\n message = \"with status 'pending'.\"\n stage = 1\n elif option_selection == \"D\":\n selected_date = Parser.date_parser(question=f\"Accessing bookings for GP {self.username}\\n\"\n f\"Select a Date:\")\n if selected_date == \"--back\":\n return\n else:\n bookings_result = SQLQuery(\n \"SELECT visit.BookingNo, visit.Timeslot, visit.NHSNo, users.firstName, \"\n \"users.lastName, visit.Confirmed FROM visit INNER JOIN users ON \"\n \"visit.NHSNo = users.ID WHERE visit.StaffID = ? AND visit.Timeslot >= ?\"\n \" AND visit.Timeslot <= ? ORDER BY visit.Timeslot ASC\"\n ).fetch_all(EncryptionHelper(), (self.ID, selected_date,\n selected_date + datetime.timedelta(\n days=1)))\n message = f\"for: {selected_date.strftime('%Y-%m-%d')}\"\n stage = 1\n while stage == 1:\n if option_selection == \"P\":\n bookings_result = SQLQuery(\"SELECT visit.BookingNo, visit.Timeslot, visit.NHSNo, users.firstName, \"\n \"users.lastName, visit.Confirmed FROM visit INNER JOIN users ON \"\n \"visit.NHSNo = users.ID WHERE visit.StaffID = ? AND visit.Confirmed = \"\n \"'P' ORDER BY visit.Timeslot ASC\").fetch_all(EncryptionHelper(),\n parameters=(self.ID,))\n elif option_selection == \"D\":\n bookings_result = SQLQuery(\n \"SELECT visit.BookingNo, visit.Timeslot, visit.NHSNo, users.firstName, \"\n \"users.lastName, visit.Confirmed FROM visit INNER JOIN users ON \"\n \"visit.NHSNo = users.ID WHERE visit.StaffID = ? AND visit.Timeslot >= ?\"\n \" AND visit.Timeslot <= ? ORDER BY visit.Timeslot ASC\"\n ).fetch_all(EncryptionHelper(), (self.ID, selected_date,\n selected_date + datetime.timedelta(\n days=1)))\n row = GP.print_select_bookings(bookings_result, message)\n if not row:\n stage = 0\n else:\n self.booking_transaction(row)\n stage = 0", "def add_availability(self, selected_date) -> bool:\n stage = 0\n slots_to_add = []\n while True:\n while stage == 0:\n start_time = Parser.time_parser(f\"GP {self.username}: you're adding availability for \"\n f\"{selected_date}. Each timeslot is 15 minutes long. \\nEnter \"\n f\"the hour you wish to start taking appointments:\")\n if start_time == \"--back\":\n return False\n else:\n selected_start = datetime.datetime.combine(selected_date, start_time)\n stage = 1\n while stage == 1:\n end_time = Parser.time_parser(f\"GP {self.username}: Each timeslot is 15 minutes long. You have \"\n f\"chosen to start from {str(selected_start)}. 
\\nEnter the end\"\n \" of your last available appointment:\")\n if end_time <= start_time:\n print(\"The end time cannot be earlier than the start time!\")\n continue\n if end_time == \"--back\":\n stage = 0\n else:\n selected_end = datetime.datetime.combine(selected_date, end_time)\n stage = 2\n while stage == 2:\n temporary_time = selected_start\n while temporary_time < selected_end:\n slots_to_add.append(temporary_time)\n temporary_time = temporary_time + datetime.timedelta(minutes=15)\n slots_to_add = Paging.give_pointer(slots_to_add)\n print(\"You have chosen to add the following slots: \")\n Paging.show_page(1, slots_to_add, 10, 2, [\"Pointer\", \"Timeslot\"])\n confirm = Parser.selection_parser(options={\"Y\": \"Confirm\", \"N\": \"Go back and select again\"})\n if confirm == \"Y\":\n try:\n for slot in slots_to_add:\n SQLQuery(\"INSERT INTO available_time VALUES (?, ?)\").commit((self.ID, slot[1]))\n print(\"Your slots have been successfully added!\")\n logger.info(\"Added timeslot, DB transaction completed\")\n # input(\"Press Enter to continue...\")\n Parser.handle_input()\n return True\n # temporary exception\n except DBRecordError:\n print(\"Invalid selection. Some of the entries may already be in the database. \"\n \"Please Retry\")\n stage = 0\n slots_to_add = []\n logger.warning(\"Error in DB, add action failed\")\n Parser.string_parser(\"Press Enter to continue...\")\n if confirm == \"N\":\n stage = 0\n slots_to_add = []\n print(\"Starting over...\")\n time.sleep(2)", "def season_choice():\r\n\r\n SPRING = \"Spring\"\r\n SUMMER = \"Summer\"\r\n AUTUMN = \"Autumn\"\r\n WINTER = \"Winter\"\r\n\r\n while True:\r\n list_season = []\r\n season_status = True\r\n\r\n print(\"Which seasons do you plan to travel in?\")\r\n print(\" 1) \" + SPRING)\r\n print(\" 2) \" + SUMMER)\r\n print(\" 3) \" + AUTUMN)\r\n print(\" 4) \" + WINTER)\r\n season = input(\"> \")\r\n\r\n list_season_int = change_multivalue_input(season)\r\n\r\n for number in list_season_int:\r\n if number > 0 and number < 5:\r\n if number == 1:\r\n list_season.append(SPRING.lower())\r\n elif number == 2:\r\n list_season.append(SUMMER.lower())\r\n elif number == 3:\r\n list_season.append(AUTUMN.lower())\r\n elif number == 4:\r\n list_season.append(WINTER.lower())\r\n\r\n else:\r\n print()\r\n print(\"I'm sorry, but \" + season + \" is not a valid choice. 
Please try again.\")\r\n print()\r\n season_status = False\r\n break\r\n if season_status == False:\r\n continue\r\n else:\r\n break\r\n\r\n return list_season", "def adjust(self, *, day_off: [int] = ()):\n if len(day_off) == 0:\n return\n\n dayoff = sorted(set(filter(lambda d: d > 0, day_off)))\n onduty = sorted(set(abs(x) for x in filter(lambda d: d < 0, day_off)))\n\n invalid_dates = sorted(set(filter(lambda d: abs(d) > len(self.month.days) or d == 0, day_off)))\n if len(invalid_dates) > 0:\n print(\"Parameter out of range of month: {0}\".format(invalid_dates))\n return\n\n conflict_dates = sorted(set(filter(lambda d: d in onduty, dayoff)))\n conflict_dates = [(d, -d) for d in conflict_dates]\n if len(conflict_dates) > 0:\n print(\"Parameter conflicts at: {0}\".format(conflict_dates))\n return\n\n dayoff_days = {date: self.month.days[date - 1] for date in dayoff}\n onduty_days = {date: self.month.days[date - 1] for date in onduty}\n\n past_day = dict(filter(lambda item: item[1].is_past, dayoff_days.items()))\n past_day.update(dict(filter(lambda item: item[1].is_past, onduty_days.items())))\n if len(past_day) > 0:\n print(\"Illegal past dates: {0}\".format(list(past_day.keys())))\n return\n\n dayoff_days = dayoff_days.values()\n onduty_days = onduty_days.values()\n\n for day in dayoff_days:\n day.dayoff()\n for day in onduty_days:\n day.is_dayoff = False", "def validate_full_sem_segmentation(self):\r\n\r\n strucs = self.strucs\r\n errors = []\r\n last_admin_event_pos = 0\r\n last_sched_pos = 0\r\n last_inst_pos = 0\r\n sched_pos = None\r\n admin_event_pos = None\r\n\r\n for i, struc in enumerate(strucs):\r\n sem = struc.accounted_for_by_sem\r\n if struc.is_unparsed_non_punctuation_txt():\r\n #error = 'Struc #%d (%s: %s) is unlabeled text' % (i, struc.label, struc)\r\n #errors.append(error)\r\n continue\r\n elif struc.is_space_or_punctuation_only():\r\n continue\r\n if not sem:\r\n rules_used = ''.join(struc.rules_used)\r\n if 'removed_from_sem' not in rules_used and 'assigned_to_dose' not in rules_used:\r\n error = 'Struc #%d (%s: %s) is not accounted by a sem' % (i, struc.label, struc)\r\n errors.append(error)\r\n continue\r\n coords = sem.get_coordinate_in_sem() # returns a tuple of length 1 to 3. E.g. for AE: (n, m, k) where\r\n # n is pos of AE in sched.events, M position of sched in instructions, k pos of inst in parse.instructions\r\n inst_pos = coords[-1]\r\n if inst_pos < last_inst_pos:\r\n error = ('Struc #%d (%s: %s) belongs to instruc #%d, though previously saw inst #%d' %\r\n (i, struc.label, struc, inst_pos, last_inst_pos))\r\n errors.append(error)\r\n elif inst_pos == last_inst_pos + 1:\r\n # we just started a new instruction, so reset the last_schedule and last_admin_event counters\r\n last_sched_pos = sched_pos = 0\r\n last_admin_event_pos = admin_event_pos = 0\r\n elif inst_pos > last_inst_pos + 1:\r\n error = ('Struc #%d (%s: %s) belongs to instruc #%d, though previously saw inst more than 1 removed: #%d' %\r\n (i, struc.label, struc, inst_pos, last_inst_pos))\r\n errors.append(error)\r\n\r\n if len(coords) > 1:\r\n sched_pos = coords[-2]\r\n if inst_pos > last_inst_pos and sched_pos != 0:\r\n # We started a new instruction and this is the first schedule we saw of that inst. 
Better be schedule #0\r\n error = ('Struc #%d (%s: %s) belongs to instruc #%d/schedule #%d, though we just started a new instruction' %\r\n (i, struc.label, struc, inst_pos, sched_pos))\r\n errors.append(error)\r\n elif inst_pos == last_inst_pos and sched_pos < last_sched_pos:\r\n # We are on the same instruction, so schedule positions should be non-decreasing\r\n error = ('Struc #%d (%s: %s) belongs to instruc #%d/schedule #%d, though previously saw schedule #%d' %\r\n (i, struc.label, struc, inst_pos, sched_pos, last_sched_pos))\r\n errors.append(error)\r\n elif inst_pos == last_inst_pos and sched_pos == last_sched_pos + 1:\r\n # we just started a new schedule, so reset the last_admin_event counter\r\n last_admin_event_pos = admin_event_pos = 0\r\n elif sched_pos > last_sched_pos + 1:\r\n error = ('Struc #%d (%s: %s) belongs to instruc #%d/schedule #%d, though previously saw schedule more than 1 removed: #%d' %\r\n (i, struc.label, struc, inst_pos, sched_pos, last_sched_pos))\r\n errors.append(error)\r\n\r\n if len(coords) > 2:\r\n admin_event_pos = coords[-3]\r\n if sched_pos > last_sched_pos and admin_event_pos != 0:\r\n # We started a new schedule and this is the first admin_event we saw of that inst. Better be schedule #0\r\n error = ('Struc #%d (%s: %s) belongs to instruc #%d/schedule #%d/admin_event #%d though we just started a new schedule' %\r\n (i, struc.label, struc, inst_pos, sched_pos, admin_event_pos))\r\n errors.append(error)\r\n elif sched_pos == last_sched_pos and admin_event_pos < last_admin_event_pos:\r\n # We are on the same schedule, so admin_event positions should be non-decreasing\r\n error = ('Struc #%d (%s: %s) belongs to instruc #%d/schedule #%d/admin_event #%d, though previously saw admin_event #%d' %\r\n (i, struc.label, struc, inst_pos, sched_pos, admin_event_pos, last_admin_event_pos))\r\n errors.append(error)\r\n elif admin_event_pos > last_admin_event_pos + 1:\r\n error = ('Struc #%d (%s: %s) belongs to instruc #%d/schedule #%d/admin_event #%d, though previously saw admin_event more than 1 removed: #%d' %\r\n (i, struc.label, struc, inst_pos, sched_pos, admin_event_pos, last_admin_event_pos))\r\n errors.append(error)\r\n\r\n last_inst_pos = inst_pos\r\n if sched_pos is not None:\r\n last_sched_pos = sched_pos\r\n if admin_event_pos is not None:\r\n last_admin_event_pos = admin_event_pos\r\n\r\n return errors", "def process_not_submitted_pt(period):\n warnings = ['\\nProcessing Not Submitted data Warnings:\\n']\n warnings_to_process = False\n print('\\nNot Submitted data.')\n # Confirm the required files are in place\n required_files = ['Submissions Made report', 'Student File',\n 'Active Students File']\n ad.confirm_files('Not Submitted Report', required_files)\n # Get name for Submissions Made Report data file and then load\n report_data, to_add, warnings_to_add = load_data('Submissions_Made_')\n # print('Check loaded data:')\n # ad.debug_list(report_data)\n if to_add:\n warnings_to_process = True\n for line in warnings_to_add:\n warnings.append(line)\n # Get the name for the Students File\n student_data, to_add, warnings_to_add = load_data('Students_File_')\n if to_add:\n warnings_to_process = True\n for line in warnings_to_add:\n warnings.append(line)\n # Get the name for the Active Students File\n active_students, to_add, warnings_to_add = load_data('Active_Students_File_')\n if to_add:\n warnings_to_process = True\n for line in warnings_to_add:\n warnings.append(line)\n # Create a dataframe for Submissions Made report data\n headings = ['Student ID', 
'Student', 'Course', 'Tutor', 'Assignment name',\n 'Last submission date']\n subs = pd.DataFrame(data = report_data, columns = headings)\n # Change value in Course column to 'Skip' if not a Part-time course\n subs['Course'] = subs['Course'].apply(list_non_pt)\n # Remove courses that are not Part-time ('Skip' in 'Course')\n subs = subs.drop(subs.index[subs['Course'] == 'Skip'])\n # Clean the Last submission date\n last_col = 'Last submission date'\n subs[last_col] = subs[last_col].apply(extract_last_submission_date)\n # Replace 01-01-1970 with an empty string in date column Last Submission\n subs[last_col] = subs[last_col].apply(da.replace_nil_date)\n # Create a dataframe for the students in the course\n headings = ['Course', 'Tutor', 'Student ID', 'Student']\n students = pd.DataFrame(data = student_data, columns = headings)\n # Change value in Course column to 'Skip' if not a Part-time course\n students['Course'] = students['Course'].apply(list_non_pt)\n # Remove courses that are not Part-time ('Skip' in 'Course')\n students = students.drop(students.index[students['Course'] == 'Skip'])\n # Create a dataframe for active students\n headings = ['Student ID', 'Student', 'Course']\n active = pd.DataFrame(data = active_students, columns = headings)\n # Change value in Course column to 'Skip' if not a Part-time course\n active['Course'] = active['Course'].apply(list_non_pt)\n # Remove courses that are not Part-time ('Skip' in 'Course')\n active = active.drop(active.index[active['Course'] == 'Skip'])\n # Remove students that aren't active in the course from students dataframe\n active_students = []\n # Get the Student ID for each student in active\n for index, row in active.iterrows():\n active_students.append(row['Student ID'])\n # Remove inactive students\n students = students[students['Student ID'].isin(active_students)]\n # Find students that have not submitted\n submitted_students = []\n # Get the Student ID for each student that has submitted\n for index, row in subs.iterrows():\n submitted_students.append(row['Student ID'])\n # Remove from Students those that have submitted\n sid_col = 'Student ID'\n non_sub_students = students[~students[sid_col].isin(submitted_students)]\n # Sort on the Student ID column\n non_sub_students = non_sub_students.sort_values(['Tutor', 'Student ID'])\n # Save a master file ordered by Tutor and Student ID\n f_name = 'Not_Submitted_All_{}{}.xls'.format(period,\n ft.generate_time_string())\n non_sub_students.to_excel(f_name, index=False)\n print('\\nNot_Submitted_All_ has been saved to {}'.format(f_name))\n ft.process_warning_log(warnings, warnings_to_process)", "def search_periods(self):\n # Busco la cantidad de meses entre esas dos fechas\n period_qty = relativedelta(datetime.strptime(self.date_to + ' 00:00:00', '%Y-%m-%d %H:%M:%S'),\n datetime.strptime(self.date_from, '%Y-%m-%d'))\n # Primer dia del primer mes es el seteado\n first_day = datetime.strptime(self.date_from, '%Y-%m-%d')\n # Busco el ultimo dia del primer mes\n last_day = datetime.strptime(self.date_from, '%Y-%m-%d') + relativedelta(day=1) + relativedelta(\n months=1) - relativedelta(days=1)\n months = period_qty.months + period_qty.years * 12\n # Chequeo si el rango seteado es del mismo mes\n if datetime.strptime(self.date_to, '%Y-%m-%d').strftime('%m/%Y') \\\n == datetime.strptime(self.date_from, '%Y-%m-%d').strftime('%m/%Y'):\n periods = [(datetime.strptime(self.date_to, '%Y-%m-%d') + relativedelta(day=1),\n datetime.strptime(self.date_to, '%Y-%m-%d'))]\n else:\n # Agrego las dos fechas para luego 
comparar\n periods = [(first_day, last_day)]\n # Itero por la cantidad de meses que hay entre fechas\n new_first_day = datetime.strptime(self.date_from, '%Y-%m-%d')\n for p in range(months - 1):\n new_first_day += relativedelta(day=1) + relativedelta(months=1)\n new_last_day = new_first_day + relativedelta(months=1) - relativedelta(days=1)\n periods.append((new_first_day, new_last_day))\n # Agrego el ultimo mes\n periods.append((datetime.strptime(self.date_to, '%Y-%m-%d') + relativedelta(day=1), datetime.strptime(self.date_to, '%Y-%m-%d')))\n return periods" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes seating update to storage.json.
def update_storage(self):
    print('Updating storage...')
    update = self.create_update()
    try:
        with open('storage.json', 'r+') as storage:
            storage_data = json.load(storage)
            storage_data["Updates"].append(update)
            storage.seek(0)
            json.dump(storage_data, storage, ensure_ascii=False, indent=4)
    except Exception as e:
        print('Update failed: {}'.format(e))
    else:
        print(update)
[ "def __write_store__(self):\n store_file = open(os.path.join(os.path.expanduser('~'), Dhop.DHOP_STORE), 'w')\n json_str = json.JSONEncoder().encode(self.store)\n store_file.write(json_str)\n store_file.close()\n return", "def save(self):\r\n try:\r\n with open(self.json_name(), \"w\") as json_file:\r\n json_str = dumps(self.values)\r\n json_file.write(json_str)\r\n except:\r\n print(\"Error: Writing data to file failed\")", "def update(self):\n print('Updating seating chart...')\n for period in self.periods:\n if period in self.class_lists:\n new_seating, version = self.new_tables(period)\n self.seating_chart[period] = new_seating\n\n # Verify success:\n if new_seating:\n print('Period {}'.format(period))\n for i in range(len(new_seating)):\n print('Table {}: {}'.format(i + 1, new_seating[i]))\n print('Version = {}'.format(version))\n else:\n print('Period {}: Failed to update seating.'.format(period))", "def save(self):\n print(\"Saving device information...\")\n\n data = {\n \"name\": self.name\n }\n\n with open(settings.DATA_STORAGE_PATH, 'w') as f:\n json.dump(data, f, ensure_ascii=False)", "def add_entry(skey, data):\n shelf = get_shelf()\n print(\"Added: %s\" % skey)\n shelf[skey] = data\n shelf.sync()\n with open(os.path.join(\"backup/\", '%s.json' % skey), \"w\") as f:\n f.write(json.dumps(data))", "def mark_as_write(response):\r\n response._db_write = True\r\n return response", "def write(self, creds): \n if not os.path.isdir(self.aws_shared_cache_path):\n os.makedirs(self.aws_shared_cache_path)\n\n with open(self.bastion_sts_cache_path, 'w+') as f:\n creds[\"Version\"] = 1\n json.dump(creds, f, indent=4)", "def _save(self):\n with open(self.metadata_file, 'w') as f:\n f.write(json.dumps(self._metadata, indent=2))", "def _write(self, content):\n val = json.dumps(content)\n P4Key.set(self.p4, self.owners_key, val)", "def writejson(self):\t\t\n\t\twith open(self.filename, 'w+') as outfile:\n\t\t\tjson.dump(self.cache, outfile, sort_keys=True, indent=4)", "def save_guests_data(guests):\n with open('guests.json', 'w') as datafile:\n json.dump(guests, datafile)", "def write_to_json(self):\n ostream = open(self.name + '.json', 'w')\n jsondata = self.to_json()\n ostream.write(str(jsondata))\n ostream.close()", "def renew_ski_json():\r\n filename_json = \"../ski.json\"\r\n with open(filename_json, \"w\") as f_json:\r\n json.dump(dict_ski(), f_json)", "def write_json(self):\n print \"writing json file...\",\n f = open(\"absorptionspectra.json\",\"w\")\n json.dump(self.data,f)\n f.close()\n print \"done!\"", "def save(data, count):\n\n # connect to AWS DB service\n db = boto.dynamodb.connect_to_region(\"eu-west-1\",\n aws_access_key_id=conf.ACESS_KEY,\n aws_secret_access_key=conf.SECRET_KEY\n )\n db.use_decimals()\n\n for obj in data:\n name = obj[\"name\"]\n address = obj[\"address\"]\n lat = decimal.Decimal(str(obj[\"position\"][\"lat\"]))\n time_stamp = obj['last_update']\n lng = decimal.Decimal(str(obj[\"position\"][\"lng\"]))\n free = obj['available_bikes']\n number = obj[\"number\"]\n bike_stands = obj[\"bike_stands\"]\n available_bike_stands = obj['available_bike_stands']\n count += 1\n\n item_data = {\n \"name\": name,\n \"address\": address,\n \"lat\": lat,\n \"lna\": lng,\n \"time_stamp\": time_stamp,\n \"free\": free,\n \"number\": number,\n \"bike_stands\": bike_stands,\n \"available_bike_stands\": available_bike_stands,\n \"count\": count\n }\n table = db.get_table('DublinBikes')\n item = table.new_item(\n # primary key\n hash_key=name,\n # range key\n range_key=time_stamp,\n 
# attributes\n attrs=item_data\n )\n item.put()\n print(\"Adding bike occupancy data from:\", name,\n \"free bikes at the moment: \", free, \"from\", number)\n\n print(\"Put Items succeeded. Last update at:\", datetime.fromtimestamp(int(\n data[0][\"last_update\"]) / 1000).strftime('%Y.%m.%d %H:%M:%S'))", "def _save(self, s3_prefix):\n bucket_name, prefix = split_s3_path(s3_prefix)\n bucket = self.s3_conn.get_bucket(bucket_name)\n self._compute_percentages()\n self.stats['last_updated'] = datetime.now().isoformat()\n key = boto.s3.key.Key(\n bucket=bucket,\n name='{}/{}/{}.json'.format(\n prefix,\n self.directory,\n self.dataset_id\n )\n )\n key.set_contents_from_string(json.dumps(self.stats))", "def save(self, data, single_pass=False):\n\n f = _JSONWriter(data, single_pass=single_pass,\n pathname=self.pathname, sign=self.sign)\n f.save()\n\n # Update in-memory copy to reflect stored data.\n self.signatures = f.signatures()\n\n # Ensure the permissions on the new file are correct.\n try:\n os.chmod(self.pathname, self.__file_mode)\n except EnvironmentError as e:\n if e.errno == errno.EACCES:\n raise api_errors.PermissionsException(\n e.filename)\n if e.errno == errno.EROFS:\n raise api_errors.ReadOnlyFileSystemException(\n e.filename)\n raise\n\n # Finally, set the file times to match the last catalog change.\n if self.last_modified:\n mtime = calendar.timegm(\n self.last_modified.utctimetuple())\n os.utime(self.pathname, (mtime, mtime))", "def save(self):\n json_dict = {}\n for key_id in self.__objects.keys():\n json_dict[key_id] = self.__objects[key_id].to_dict()\n with open(self.__file_path, \"w\") as f:\n f.write(json.dumps(json_dict))", "def update(self, entitate: Entitate):\n if self.find_by_id(entitate.id_entitate) is None:\n raise KeyError(f'Nu exista o entitate cu id-ul {entitate.id_entitate} pe care sa il actualizam!')\n self.__storage[entitate.id_entitate] = entitate\n self.__write_file()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines appropriate number of tables for a given class size.
def number_of_tables(class_size):
    if class_size in [1, 2, 3]:
        return 1
    if class_size in [4, 5, 6]:
        return 2
    if class_size in [7, 9]:
        return 3
    return 4
[ "def n_tables(self) -> int:\n return self._d", "def get_table_size(self) -> int:\r\n return self.table_size", "def db_table_size(self, cls):\r\n with self.__conn.cursor() as cursor:\r\n cursor.execute(\r\n \"select round(bytes/1024/1024,3)|| 'MB'\\n\"\\\r\n \"from user_segments\\n\"\\\r\n \"where segment_name=:tb\",\r\n tb=get_table_name(cls))\r\n\r\n row = cursor.fetchone()\r\n return row[0] if row else 0", "def total_table_size():\n query_total_table_size(current_app.extensions['sqlalchemy'].db)", "def num_elements_per_table(self):\n for table in self.show_tables():\n print(\"{0}: {1}\".format(table,\n self.query(f\"SELECT COUNT(*) FROM {table}\")[0][0]))", "def get_book_table_size(connect, cursor):\n cursor.execute(\"\"\"select * from book\"\"\")\n results = cursor.fetchall()\n connect.commit()\n return len(results)", "def table_index_size():\n query_table_index_size(current_app.extensions['sqlalchemy'].db)", "def _batch_size(docsize):\n table = [(2e3, 2000), (1e4, 1000), (1e5, 200), (2e6, 10)]\n for (limit, bsize) in table:\n if docsize < limit:\n return bsize\n return 1", "def _get_sizes(self) -> int:\n pass", "def table_load(self):\r\n # FIXME: Write this function\r\n\r\n return self.size / self.capacity # divide size by capacity\r", "def calculate_graphsize(args, graphtype, multiplier=1.0):\n if graphtype not in khmer._buckets_per_byte:\n raise ValueError('unknown graph type: ' + graphtype)\n\n if args.max_memory_usage:\n tablesize = float(multiplier) * (khmer._buckets_per_byte[graphtype] *\n args.max_memory_usage / args.n_tables)\n else:\n tablesize = args.max_tablesize\n\n return tablesize", "def get_load_factor(self):\r\n load = self.num_items / self.table_size\r\n return load", "def __getclassesnum__(self):\n return len(LETTERS_)", "def tableLen(table_name):\n\t\treturn DBconnect.utils.tableLen(table_name,db_name=DATABASE, user_name=USER, password=PASSWORD)", "def validate_class_size_dataset(self, ds):\n self.assertEqual(len(ds.columns), 3)\n self.assertEqual(ds.column_by_name('A').identifier, 0)\n self.assertEqual(ds.column_by_name('B').identifier, 1)\n self.assertEqual(ds.row_count, 7)\n # Get the first row\n row = ds.fetch_rows(offset=0, limit=1)[0]\n self.assertTrue(isinstance(row.values[0], int))", "def n_classes(self):\n return self.hypnogram.n_classes", "def _estimate_cache_batch_count(self, columns, table_size, batch_size):\n\n sample_size = 10\n max_cache_batch_count = 50\n upper_bound = 20 * 1000000\n\n if table_size < sample_size:\n return 1\n\n batch = self.read_batch(start=0, end=sample_size, columns=columns)\n\n size_sample = _nested_list_size(batch)\n size_per_batch = size_sample * batch_size / sample_size\n\n # `size_per_batch * cache_batch_count` will\n # not exceed upper bound but will always greater than 0\n cache_batch_count_estimate = max(int(upper_bound / size_per_batch), 1)\n\n return min(cache_batch_count_estimate, max_cache_batch_count)", "def size(self):\n\t\treturn len(self._table.keys())", "def size(self):\n return super(RoutingPacket, self).size() + 2 * 8 * len(self.costTable)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extends array to 4X6.
def extend_array(array):
    for row in array:
        while len(row) < 6:
            row.append('')
    while len(array) < 4:
        array.append(['', '', '', '', '', ''])
    return array
[ "def extend(self):\n new_size = self.size * self.extension_factor\n new_array = [None] * new_size\n new_array[:self.size] = self.array\n self.array = new_array\n self.size = new_size\n # Don't touch self.index", "def extend_array(array, max_row):\n\n shape = array.shape\n diff = max_row - shape[0]\n\n if diff != 0:\n new_array = np.row_stack([array, np.full((diff, shape[1]), -1, dtype=np.int)])\n else:\n new_array = array\n\n return(new_array)", "def _grow_array(self):\n\n # Copy the current array\n olddata = copy.deepcopy(self.data)\n self.occupied = 0\n\n # Double the size of the array\n self.size = 2*self.size\n\n self.data = [None]*self.size\n\n # Re-write the data into the new array\n for contents in olddata:\n\n if contents is None:\n continue\n elif contents is (None, None, None):\n continue\n else:\n self.add_elem(contents[1], contents[2])", "def diminish(self):\n if self.size <= 1:\n # Won't diminish further\n return\n\n new_size = int(self.size / self.extension_factor)\n new_array = self.array[:new_size]\n self.array = new_array\n self.size = new_size\n # Make sure this is pointing inside the current size\n self.index = self.size", "def _array_elements_sweep():\n\n # array_elements <= 8 noisy\n # larger array_elements extremely slow to synthesize (especially w/ nesting)\n for dim_size in range(12, 37, 12):\n yield dim_size", "def dup_array(self): # real signature unknown; restored from __doc__\n pass", "def growArray(*args, **kwargs):\n \n pass", "def sweep_expand(self, arr, dtype=\"float32\"):\n return np.repeat(arr, self.rays_per_sweep).astype(dtype)", "def fill_array(self, v):\n self.__array[:] = v", "def create_and_fill(old_data, entries):\n final_array = np.arange(entries * 5, dtype=int)\n final_array.shape = (entries,5)\n final_array = get_date(final_array, old_data, entries)\t# contains no header/footer line\n final_array = get_time(final_array, old_data, entries)\n final_array = get_temp_dewpt(final_array, old_data, entries)\n final_array = calculate_difference(final_array, old_data, entries)\n return final_array", "def inherit(self, new_array):\n cast_array = new_array.view(type(self))\n cast_array._dt = self.dt\n cast_array._f0 = self.f0\n cast_array._te = self.te\n cast_array._tr = self.tr\n cast_array.ppm0 = self.ppm0\n cast_array.voxel_dimensions = self.voxel_dimensions\n cast_array.transform = self.transform\n cast_array.metadata = self.metadata\n return cast_array", "def redim_row_array(T_array, T_, T0_):\r\n T, T0 = int(T_), int(T0_)\r\n return np.hstack((\r\n T_array,\r\n np.zeros((1, T0 - T)),\r\n )) if T0>T else T_array[:, :T0]", "def num_37():\n nums = np.arange(24) # whatever, just shape appropriately\n a = nums.reshape(2, 3, 4) # the base 3D array shaped as (z, y, x) \n a0 = nums.reshape(2, 4, 3) # y, x axes, swapped\n a1 = nums.reshape(3, 2, 4) # add to z, reshape y, x accordingly to main size\n a2 = nums.reshape(3, 4, 2) # swap y, x\n a3 = nums.reshape(4, 2, 3) # add to z again, resize as before\n a4 = nums.reshape(4, 3, 2) # swap y, x\n frmt = \"\"\"\n Array ... 
{} :..shape {}\n {}\n \"\"\"\n args = [['nums', nums.shape, nums],\n ['a', a.shape, a], ['a0', a0.shape, a0],\n ['a1', a1.shape, a1], ['a2', a2.shape, a2],\n ['a3', a3.shape, a3], ['a4', a4.shape, a4],\n ]\n for i in args:\n print(dedent(frmt).format(*i))\n #return a", "def reshape_for_polyline(self, array):\n return np.array(array, np.int32).reshape((-1, 1, 2))", "def reset_array(self):\n self.array = np.zeros((self.size[0], self.size[1]))", "def make_array_2d(arr):\n if arr.ndim == 1:\n arr.shape = (arr.shape[0], 1)", "def _2d(a):\n\tif len(a.shape) == 1:\n\t\ta.shape = (len(a), 1)\n\treturn a", "def _resize(self, new_cap):\n \n B = self.make_array(new_cap) # New bigger array \n for k in range(self.n): # Reference all existing values \n B[k] = self.A[k] \n \n self.A = B # Call A the new bigger array \n self.capacity = new_cap # Reset the capacity ", "def _resize(self, new_capacity):\n B = self.make_array(new_capacity) # make a new bigger array\n\n for k in range(self.n): # Reference all existing values\n B[k] = self.A[k]\n\n self.A = B # A is now a new bigger array\n self.capacity = new_capacity # reset the capacity" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return True if btw is between a and b
def between(s, btw, a, b):
    s = s.replace('::', '')  # ignore std::, etc.
    ai = s.rfind(a)
    bi = s.rfind(b)
    btwi = s.rfind(btw)
    return True if btwi < bi and btwi > ai else False
[ "def dominates(a, b):\n return np.all(a <= b)", "def is_between(thetas, lower, upper):\n lower, upper = correct_angle(np.array([lower, upper]))\n if lower <= upper:\n return (lower <= thetas) & (thetas <= upper)\n else: # if upper loops around\n return (upper <= thetas) & (thetas <= lower)", "def in_interval(number, start, stop):\r\n ok = False\r\n if number >= start and number <= stop:\r\n ok = True\r\n return ok", "def __is_between(self, val, sta, sto): \n if (val>=sta) and (val<=sto):\n return True\n else:\n return False", "def love6(a, b):\n return a == 6 or b == 6 or a + b == 6 or abs( a - b ) == 6 # if any option is true condition met", "def approx_equals(a, b):\n return (a - b) < 1.5e-16", "def test_assertIsBetween_numbers_true(self):\n self.assertIsBetween(5,3,7)", "def _between(self, a, b, c):\n\t\treturn a < c < b or a > c > b", "def compare(a, b, scores, count, Budget):\r\n if(count < Budget):\r\n if(random.uniform(0, scores[a-1]+scores[b-1]) < scores[a-1]):\r\n return False\r\n else:\r\n return True\r\n else:\r\n if(random.uniform(0, 1) < 0.5):\r\n return False\r\n else:\r\n return True", "def validate_in_range(x, a, b):\n return a < x < b", "def is_between(x, begin, end):\n return begin <= x < end or end < begin <= x or x < end < begin", "def in_bounds(t0, t1):\n assert t0 <= t1\n ends_before_bounds = t1 < start_time\n starts_after_bounds = t0 >= end_time\n return not (ends_before_bounds or starts_after_bounds)", "def nextto(p1, p2):\n return abs(p1-p2) == 1", "def overlap(a1, a2, b1, b2):\n\tassert a1 <= a2\n\tassert b1 <= b2\n\tassert isinstance(a1, int) and isinstance(a2, int) and isinstance(b1, int) and isinstance(b2, int)\n\t\n\t# if a interval is completely to the left of the b interval\n\tif a2 < b1:\n\t\treturn False\n\t# if a interval is completely to the right of the b interval\n\telif a1 > b2:\n\t\treturn False\n\telse:\n\t\treturn True", "def check_validity_interval(f, a, b, *args):\n return np.sign(f(a, *args)) != np.sign(f(b, *args))", "def range_test(val, lower_limit, upper_limit):\n flag = (val > lower_limit) & (val < upper_limit)\n return (flag)", "def epsilon_lte(a, b):\n float_epsilon = numpy.finfo(numpy.float32).eps\n return float_epsilon > a - b", "def is_between(element, h1, h2):\n\n g = time.gmtime(float(element['ts']))\n hour = g.tm_hour\n return hour >= h1 and hour < h2", "def inr(r,s,t):\n return (r < t) and (r >= s)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prioritizing structural variants in a VCF file annotated with SnpEff.
def main(input_file, output_file=None, genome=None):
    vcf = cyvcf2.VCF(input_file or '-')
    add_cyvcf2_hdr(vcf, 'SIMPLE_ANN', '.', 'String',
                   "Simplified structural variant annotation: 'SVTYPE | EFFECT | GENE(s) | TRANSCRIPT | PRIORITY (1-4)'")
    add_cyvcf2_hdr(vcf, 'SV_TOP_TIER', '1', 'Integer',
                   "Highest priority tier for the effects of a variant entry")
    w = cyvcf2.Writer(output_file or '-', vcf)
    w.write_header()
    # TODO: ? Rerun SnpEFF as well to target canonical transcripts, so we don't miss
    # intergenic variants touching non-canonical transripts?
    princ_tr_by_gid = canon_transcript_per_gene(genome, use_gene_id=True, only_principal=True)
    all_trs_by_gid = canon_transcript_per_gene(genome, use_gene_id=True, only_principal=False)
    princ_trs = set(princ_tr_by_gid.values())
    all_trs = set(flatten(all_trs_by_gid.values()))
    # Read in gene lists
    for rec in vcf:
        rec = process_record(rec, princ_trs, all_trs)
        w.write_record(rec)
[ "def filter_somatic_score(\n input_vcf: str,\n output_vcf: str,\n *,\n tumor_sample_name: str = \"TUMOR\",\n drop_somatic_score: int = 25,\n min_somatic_score: int = 40,\n) -> None:\n logger = Logger.get_logger(\"filter_somatic_score\")\n logger.info(\"Filters SomaticSniper VCF files based on Somatic Score.\")\n\n # setup\n total = 0\n removed = 0\n tagged = 0\n written = 0\n\n reader = pysam.VariantFile(input_vcf)\n filter_tag = \"ssc{0}\".format(min_somatic_score)\n logger.info(\"Filter tag: {}\".format(filter_tag))\n reader.header.filters.add(\n filter_tag, None, None, \"Somatic Score < {0}\".format(min_somatic_score)\n )\n mode = get_pysam_outmode(output_vcf)\n writer = pysam.VariantFile(output_vcf, mode=mode, header=reader.header)\n\n # Process\n try:\n for record in reader.fetch():\n total += 1\n ssc = record.samples[tumor_sample_name][\"SSC\"]\n\n if ssc < drop_somatic_score:\n removed += 1\n continue\n elif ssc < min_somatic_score:\n tagged += 1\n record.filter.add(filter_tag)\n\n written += 1\n writer.write(record)\n\n finally:\n reader.close()\n writer.close()\n\n if mode == \"wz\":\n logger.info(\"Creating tabix index...\")\n tbx = pysam.tabix_index(output_vcf, preset=\"vcf\", force=True)\n\n logger.info(\n \"Processed {} records - Removed {}; Tagged {}; Wrote {} \".format(\n total, removed, tagged, written\n )\n )", "def order_snps(self):\n logging.info('Counting prevalence of SNPs')\n species_group_snp_num_dict = \\\n TreeMethods.determine_snp_number(group_strain_snp_sequence=self.group_strain_snp_sequence,\n species_group_best_ref=self.species_group_best_ref)\n if self.debug:\n logging.info('SNP prevalence')\n for ref_chrom, pos_dict in species_group_snp_num_dict['species']['group'].items():\n if pos_dict:\n print(ref_chrom, pos_dict)\n logging.info('Determining amino acid sequence at SNP locations')\n self.translated_snp_residue_dict, self.ref_translated_snp_residue_dict = \\\n TreeMethods.determine_aa_sequence(\n group_strain_snp_sequence=self.group_strain_snp_sequence,\n species_group_best_ref=self.species_group_best_ref,\n strain_parsed_vcf_dict=self.strain_parsed_vcf_dict,\n species_group_annotated_snps_dict=self.species_group_annotated_snps_dict,\n reference_strain_dict=self.reference_strain_dict,\n species_group_snp_num_dict=species_group_snp_num_dict,\n iupac=self.iupac)\n logging.info('Creating SNP matrix')\n TreeMethods.create_snp_matrix(species_group_best_ref=self.species_group_best_ref,\n group_strain_snp_sequence=self.group_strain_snp_sequence,\n matrix_path=self.matrix_path)\n logging.info('Ranking SNPs based on prevalence')\n species_group_snp_rank, self.species_group_num_snps = \\\n TreeMethods.rank_snps(species_group_snp_num_dict=species_group_snp_num_dict)\n if self.debug:\n logging.info('Ranked SNPs')\n for num_snps, ref_dict in sorted(species_group_snp_rank['species']['group'].items(), reverse=True):\n for ref_chrom, pos_dict in ref_dict.items():\n print(num_snps, ref_chrom, pos_dict)\n logging.info('Sorting SNPs based on order of strains in phylogenetic trees')\n self.species_group_sorted_snps = \\\n TreeMethods.sort_snps(species_group_order_dict=self.species_group_order_dict,\n species_group_snp_rank=species_group_snp_rank,\n species_group_best_ref=self.species_group_best_ref,\n group_strain_snp_sequence=self.group_strain_snp_sequence)\n if self.debug:\n logging.info('Sorted SNPs')\n for num_snps, ref_dict in self.species_group_sorted_snps['species']['group'].items():\n for ref_chrom, pos_dict in ref_dict.items():\n print(num_snps, ref_chrom, 
pos_dict)", "def parse_vcfs(args, db):\n for sid in db[\"samples\"]:\n for mode in [\"SNV\", \"INDEL\"]:\n parse_vcf(args, db, sid, mode)", "def snp_calling(self):\n logging.info('Preparing files for SNP calling with deepvariant make_examples')\n strain_examples_dict, strain_variant_path_dict, strain_gvcf_tfrecords_dict = \\\n VCFMethods.deepvariant_make_examples(strain_sorted_bam_dict=self.strain_sorted_bam_dict,\n strain_name_dict=self.strain_name_dict,\n strain_reference_abs_path_dict=self.strain_reference_abs_path_dict,\n vcf_path=os.path.join(self.seq_path, 'vcf_files'),\n home=self.home,\n logfile=self.logfile,\n threads=self.threads,\n working_path=self.working_path,\n deepvariant_version=self.deepvariant_version)\n logging.info('Calling variants with deepvariant call_variants')\n strain_call_variants_dict = \\\n VCFMethods.deepvariant_call_variants(strain_variant_path_dict=strain_variant_path_dict,\n strain_name_dict=self.strain_name_dict,\n vcf_path=os.path.join(self.seq_path, 'vcf_files'),\n home=self.home,\n threads=self.threads,\n logfile=self.logfile,\n working_path=self.working_path,\n deepvariant_version=self.deepvariant_version,\n variant_caller='deepvariant')\n logging.info('Creating VCF files with deepvariant postprocess_variants')\n self.strain_vcf_dict = \\\n VCFMethods.deepvariant_postprocess_variants_multiprocessing(\n strain_call_variants_dict=strain_call_variants_dict,\n strain_variant_path_dict=strain_variant_path_dict,\n strain_name_dict=self.strain_name_dict,\n strain_reference_abs_path_dict=self.strain_reference_abs_path_dict,\n strain_gvcf_tfrecords_dict=strain_gvcf_tfrecords_dict,\n vcf_path=os.path.join(self.seq_path, 'vcf_files'),\n home=self.home,\n logfile=self.logfile,\n deepvariant_version=self.deepvariant_version,\n threads=self.threads)\n logging.info('Copying gVCF files to common folder')\n VCFMethods.copy_vcf_files(strain_vcf_dict=self.strain_vcf_dict,\n vcf_path=os.path.join(self.seq_path, 'vcf_files'))", "def get_svs_features(\n tagger_vars: dict,\n preselected_events: NanoEventsArray,\n fj_idx_lep,\n fatjet_label: str = \"FatJet\",\n svs_label: str = \"FatJetSVs\",\n normalize: bool = True,\n) -> Dict[str, np.ndarray]:\n\n feature_dict = {}\n\n jet = ak.firsts(preselected_events[fatjet_label][fj_idx_lep])\n msk = preselected_events[svs_label].jetIdx == ak.firsts(fj_idx_lep)\n\n jet_svs = preselected_events.SV[preselected_events[svs_label].sVIdx[(preselected_events[svs_label].sVIdx != -1) * (msk)]]\n\n # sort by dxy significance\n jet_svs = jet_svs[ak.argsort(jet_svs.dxySig, ascending=False)]\n\n # negative eta jets have -1 sign, positive eta jets have +1\n eta_sign = ak.values_astype(jet_svs.eta > 0, int) * 2 - 1\n feature_dict[\"sv_etarel\"] = eta_sign * (jet_svs.eta - jet.eta)\n feature_dict[\"sv_phirel\"] = jet_svs.delta_phi(jet)\n feature_dict[\"sv_abseta\"] = np.abs(jet_svs.eta)\n feature_dict[\"sv_mass\"] = jet_svs.mass\n feature_dict[\"sv_pt_log\"] = np.log(jet_svs.pt)\n\n feature_dict[\"sv_ntracks\"] = jet_svs.ntracks\n feature_dict[\"sv_normchi2\"] = jet_svs.chi2\n feature_dict[\"sv_dxy\"] = jet_svs.dxy\n feature_dict[\"sv_dxysig\"] = jet_svs.dxySig\n feature_dict[\"sv_d3d\"] = jet_svs.dlen\n feature_dict[\"sv_d3dsig\"] = jet_svs.dlenSig\n svpAngle = jet_svs.pAngle\n feature_dict[\"sv_costhetasvpv\"] = -np.cos(svpAngle)\n\n feature_dict[\"sv_px\"] = jet_svs.px\n feature_dict[\"sv_py\"] = jet_svs.py\n feature_dict[\"sv_pz\"] = jet_svs.pz\n # feature_dict[\"sv_energy\"] = jet_svs.E\n feature_dict[\"sv_energy\"] = jet_svs.energy\n\n 
feature_dict[\"sv_mask\"] = (\n ~(\n ma.masked_invalid(\n ak.pad_none(\n feature_dict[\"sv_etarel\"],\n tagger_vars[\"sv_features\"][\"var_length\"],\n axis=1,\n clip=True,\n ).to_numpy()\n ).mask\n )\n ).astype(np.float32)\n if isinstance(feature_dict[\"sv_mask\"], np.float32):\n feature_dict[\"sv_mask\"] = np.ones((len(feature_dict[\"sv_abseta\"]), tagger_vars[\"sv_features\"][\"var_length\"])).astype(\n np.float32\n )\n\n # convert to numpy arrays and normalize features\n if \"sv_vectors\" in tagger_vars.keys():\n variables = set(tagger_vars[\"sv_features\"][\"var_names\"] + tagger_vars[\"sv_vectors\"][\"var_names\"])\n else:\n variables = tagger_vars[\"sv_features\"][\"var_names\"]\n\n for var in variables:\n a = (\n ak.pad_none(\n feature_dict[var],\n tagger_vars[\"sv_features\"][\"var_length\"],\n axis=1,\n clip=True,\n )\n .to_numpy()\n .filled(fill_value=0)\n ).astype(np.float32)\n a = np.nan_to_num(a)\n\n # print(var)\n # print(a[11])\n\n if normalize:\n if var in tagger_vars[\"sv_features\"][\"var_names\"]:\n info = tagger_vars[\"sv_features\"][\"var_infos\"][var]\n else:\n info = tagger_vars[\"sv_vectors\"][\"var_infos\"][var]\n\n # print(info)\n # print(\"\\n\")\n\n a = (a - info[\"median\"]) * info[\"norm_factor\"]\n a = np.clip(a, info.get(\"lower_bound\", -5), info.get(\"upper_bound\", 5))\n\n feature_dict[var] = a\n\n return feature_dict", "def test_partial_sv_sensitivities(self):\n # Make sure vecs are initialized to zero\n self.zero_tacs_vecs()\n\n # Initial solve\n func_vals = self.run_solve()\n\n # Compute the partial derivative w.r.t. state variables\n self.assembler.addSVSens(\n self.func_list, self.dfdu_list, self.alpha, self.beta, self.gamma\n )\n\n # Compute the total derivative w.r.t. material design variables using fd/cs\n self.perturb_tacs_vec(self.ans1, self.ans0, self.ans_pert)\n # Set the perturbed state variables\n self.assembler.setVariables(self.ans1)\n # Compute functions w/o resolving problem\n func_vals_pert = self.assembler.evalFunctions(self.func_list)\n # Compute approximate sens\n f_u_sens_approx = self.compute_fdcs_approx(func_vals_pert, func_vals)\n\n # Tests cs/fd against sensitivity from partial\n for i in range(len(self.func_list)):\n with self.subTest(function=self.func_list[i]):\n dfdu_proj_i = self.dfdu_list[i].dot(self.ans_pert)\n np.testing.assert_allclose(\n dfdu_proj_i, f_u_sens_approx[i], rtol=self.rtol, atol=self.atol\n )", "def vasp_ncl_files(single_defect_dict, input_dir=None, incar_settings=None):\n NELECT = single_defect_dict['NELECT']\n # Variable parameters first\n vaspnclincardict = {'# May need to change NELECT, NCORE, KPAR, AEXX, ENCUT, NUPDOWN': 'variable parameters',\n 'NELECT': NELECT,\n 'NUPDOWN': f\"{NELECT % 2:.0f} # But could be {NELECT % 2 + 2:.0f} if ya think we got a bit of crazy ferromagnetic shit going down\",\n 'NCORE': 12, 'KPAR': 2, 'AEXX': 0.25, 'ENCUT': 450,\n 'ICORELEVEL': '0 # Get core potentials in OUTCAR for Kumagai corrections',\n 'NSW': 0, 'LSORBIT': True, 'EDIFF': 1e-06, 'EDIFFG': -0.01, 'ALGO': 'All', 'ADDGRID': True,\n 'HFSCREEN': 0.2, 'IBRION': -1, 'ICHARG': 1, 'ISIF': 2, 'ISYM': 0, 'ISMEAR': 0, 'LASPH': True,\n 'LHFCALC': True, 'LORBIT': 11, 'LREAL': False, 'LVHAR': True, 'LWAVE': True, 'NEDOS': 2000,\n 'NELM': 100, 'PREC': 'Accurate', 'PRECFOCK': 'Fast', 'SIGMA': 0.05}\n if incar_settings:\n vaspnclincardict.update(incar_settings)\n\n # Directory\n vaspnclinputdir = input_dir + \"/vasp_ncl/\" if input_dir else 'VASP_Files/vasp_ncl/'\n if not os.path.exists(vaspnclinputdir):\n 
os.makedirs(vaspnclinputdir)\n\n # defect_supercell = supercell ## Add structure if re-generating POSCAR/POTCAR or some shit\n\n vaspnclincar = Incar.from_dict(vaspnclincardict)\n # vaspnclinput = DictSet(defect_supercell, config_dict=vasppotcardict)\n # vaspnclinput.potcar.write_file(vaspnclinputdir+'POTCAR') ## No POTCAR, use POTCAR from vasp_std run\n # vaspnclinput.poscar.write_file(vaspnclinputdir+'POSCAR') ## No POSCAR, use CONTCAR from vasp_std run\n\n with zopen(vaspnclinputdir + 'INCAR', \"wt\") as f:\n f.write(vaspnclincar.get_string())", "def sc_label_fusion(self, selected_index):\n sc_slices = np.asarray([(dic_slice.im_M > 200).astype(int) for dic_slice in self.model.dictionary.slices])\n\n for i, selected_ind_by_slice in enumerate(selected_index): # selected_slices:\n slice_sc_seg = compute_majority_vote_mean_seg(sc_slices[selected_ind_by_slice])\n self.target[i].set(sc_seg=slice_sc_seg)", "def filter_trio_vcf(trio_vcf, workdir, sample_name):\n trio_vcf_basename = os.path.basename(trio_vcf)\n if trio_vcf_basename.endswith('.vcf'):\n offset = -4\n elif trio_vcf_basename.endswith('.vcf.gz'):\n offset = -7\n else:\n return\n tmp_header = workdir + '/tmp_header.vcf'\n tmp_variants = workdir + '/tmp_variants.vcf'\n tmp_reheadered = workdir + '/tmp_reheadered.vcf'\n trio_filtered_het_phased_vcf = workdir + '/' + trio_vcf_basename[:offset] + '.filtered.het.phased.pstag.vcf'\n trio_filtered_het_phased_zipped_vcf = trio_filtered_het_phased_vcf + '.gz'\n \n command_get_header = ['bcftools', 'view', '-h', trio_vcf, '>', tmp_header]\n command_modify_header = 'sed -i \\'5i##FORMAT=<ID=PS,Number=1,Type=Integer,Description=\\\"ID of Phase Set for Variant\\\">\\' ' + str(tmp_header)\n command_get_variants = ['bcftools', 'view', '-H', trio_vcf, '>', tmp_variants]\n command_reheader = ['cat', tmp_header, tmp_variants, '>', tmp_reheadered]\n command_zip = ['bgzip', trio_filtered_het_phased_vcf]\n command_index = ['tabix', trio_filtered_het_phased_zipped_vcf]\n command_clean = ['rm', workdir + '/tmp*']\n \n logging.info(' -> Adding PS FORMAT to header')\n run(' '.join(command_get_header), shell=True, check=True, executable='/bin/bash')\n run(command_modify_header, shell=True, check=True, executable='/bin/bash')\n run(' '.join(command_get_variants), shell=True, check=True, executable='/bin/bash')\n run(' '.join(command_reheader), shell=True, check=True, executable='/bin/bash')\n \n logging.info(' -> Write filtered, phased and heterozygous variants to {0}'.format(trio_filtered_het_phased_vcf))\n get_filtered_phased_het_trio_variants(tmp_reheadered, trio_filtered_het_phased_vcf, sample_name)\n \n logging.info(' -> Compress VCF file')\n run(' '.join(command_zip), shell=True, check=True, executable='/bin/bash')\n \n logging.info(' -> Index VCF file')\n run(' '.join(command_index), shell=True, check=True, executable='/bin/bash')\n \n logging.info(' -> Clean temporary files')\n run(' '.join(command_clean), shell=True, check=True, executable='/bin/bash')\n \n return trio_filtered_het_phased_zipped_vcf", "def run_sa(model_file, starting_error, ignition_conditions, psr_conditions, flame_conditions,\n error_limit, species_safe, phase_name='', algorithm_type='greedy', species_limbo=[],\n num_threads=1, path=''\n ):\n current_model = ReducedModel(\n model=ct.Solution(model_file, phase_name), error=starting_error, filename=model_file\n )\n \n logging.info(f'Beginning sensitivity analysis stage, using {algorithm_type} approach.')\n\n # The metrics for the starting model need to be determined or read\n 
initial_metrics = sample_metrics(\n model_file, ignition_conditions, reuse_saved=True, phase_name=phase_name,\n num_threads=num_threads, path=path\n )\n\n if not species_limbo:\n species_limbo = [\n sp for sp in current_model.model.species_names if sp not in species_safe\n ]\n\n logging.info(53 * '-')\n logging.info('Number of species | Species removed | Max error (%)')\n\n # Need to first evaluate all induced errors of species; for the ``initial`` method,\n # this will be the only evaluation.\n species_errors = evaluate_species_errors(\n current_model, ignition_conditions, initial_metrics, species_limbo, \n phase_name=phase_name, num_threads=num_threads\n )\n\n # Use a temporary directory to avoid cluttering the working directory with\n # all the temporary model files\n with TemporaryDirectory() as temp_dir:\n while species_limbo:\n # use difference between error and current error to find species to remove\n idx = np.argmin(np.abs(species_errors - current_model.error))\n species_errors = np.delete(species_errors, idx)\n species_remove = species_limbo.pop(idx)\n\n test_model = trim(\n current_model.filename, [species_remove], f'reduced_model_{species_remove}.cti', \n phase_name=phase_name\n )\n test_model_file = soln2cti.write(\n test_model, output_filename=f'reduced_model_{species_remove}.cti', path=temp_dir\n )\n\n reduced_model_metrics = sample_metrics(\n test_model_file, ignition_conditions, phase_name=phase_name, \n num_threads=num_threads, path=path\n )\n error = calculate_error(initial_metrics, reduced_model_metrics)\n\n logging.info(f'{test_model.n_species:^17} | {species_remove:^17} | {error:^.2f}')\n\n # Ensure new error isn't too high\n if error > error_limit:\n break\n else:\n current_model = ReducedModel(model=test_model, filename=test_model_file, error=error)\n\n # If using the greedy algorithm, now need to reevaluate all species errors\n if algorithm_type == 'greedy':\n species_errors = evaluate_species_errors(\n current_model, ignition_conditions, initial_metrics, species_limbo, \n phase_name=phase_name, num_threads=num_threads\n )\n if min(species_errors) > error_limit:\n break\n \n # Final model; may need to rewrite\n reduced_model = ReducedModel(\n model=current_model.model, filename=f'reduced_{current_model.model.n_species}.cti', \n error=current_model.error\n )\n soln2cti.write(reduced_model.model, reduced_model.filename, path=path)\n\n logging.info(53 * '-')\n logging.info('Sensitivity analysis stage complete.')\n logging.info(f'Skeletal model: {reduced_model.model.n_species} species and '\n f'{reduced_model.model.n_reactions} reactions.'\n )\n logging.info(f'Maximum error: {reduced_model.error:.2f}%')\n return reduced_model", "def preproc_job(subj):\n\n subj_data_dir = join(data_dir, subj)\n\n # path to feat template\n template_path = join(feat_template_dir, 'preprocessing.fsf')\n\n # input vars\n outputDir = join(subj_data_dir, 'preprocessed')\n inputFile = join(subj_data_dir, (subj + '_TDSL2.nii.gz'))\n anatFile = join(subj_data_dir, (subj + '_MPRAGE_brain.nii.gz'))\n\n # open the template file in 'read' mode, extract all text\n with open(template_path, 'r') as template_file:\n text = template_file.read()\n\n # make substitutions\n text = text.replace('SUB_OUTPUTDIR_SUB', outputDir)\n text = text.replace('SUB_INPUTFILE_SUB', inputFile)\n text = text.replace('SUB_ANAT_SUB', anatFile)\n\n # write the temporary template file\n subj_preproc_fsf = join(feat_template_dir, 'tmp_preprocessing.fsf')\n with open(subj_preproc_fsf, 'w') as preproc_design:\n 
preproc_design.write(text)\n\n # submit the command\n cmd_str = 'feat ' + subj_preproc_fsf\n os.system(cmd_str)", "def determine_Fv_coefficient(site_class, S1):\r\n if site_class == 'A':\r\n Fv = 0.8\r\n elif site_class == 'B':\r\n Fv = 1.0\r\n elif site_class == 'C':\r\n if S1 <= 0.1:\r\n Fv = 1.7\r\n elif S1 <= 0.5:\r\n Fv = 1.7 - 1.0*(S1 - 0.1)\r\n else:\r\n Fv = 1.3\r\n elif site_class == 'D':\r\n if S1 <= 0.1:\r\n Fv = 2.4\r\n elif S1 <= 0.2:\r\n Fv = 2.4 - 4*(S1 - 0.1)\r\n elif S1 <= 0.4:\r\n Fv = 2.0 - 2*(S1 - 0.2)\r\n elif S1 <= 0.5:\r\n Fv = 1.6 - 1*(S1 - 0.4)\r\n else:\r\n Fv = 1.5\r\n elif site_class == 'E':\r\n if S1 <= 0.1:\r\n Fv = 3.5\r\n elif S1 <= 0.2:\r\n Fv = 3.5 - 3*(S1 - 0.1)\r\n elif S1 <= 0.4:\r\n Fv = 3.2 - 4*(S1 - 0.2)\r\n else:\r\n Fv = 2.4\r\n else:\r\n Fv = None\r\n print(\"Site class is entered with an invalid value\")\r\n\r\n return Fv", "def infer_snp_genotype_from_tumour(snp_genotype_filename, seqdata_filenames, chromosome, config):\n\n sequencing_base_call_error = remixt.config.get_param(config, 'sequencing_base_call_error')\n homozygous_p_value_threshold = remixt.config.get_param(config, 'homozygous_p_value_threshold')\n \n # Calculate total reference alternate read counts in all tumours\n snp_counts_df = pd.DataFrame(columns=['position', 'ref_count', 'alt_count']).astype(int)\n for tumour_id, seqdata_filename in seqdata_filenames.items():\n snp_counts_df = pd.concat([snp_counts_df, read_snp_counts(seqdata_filename, chromosome)], ignore_index=True)\n snp_counts_df = snp_counts_df.groupby('position').sum().reset_index()\n\n snp_counts_df['total_count'] = snp_counts_df['alt_count'] + snp_counts_df['ref_count']\n\n snp_counts_df = snp_counts_df[snp_counts_df['total_count'] > 50]\n \n binom_test_ref = lambda row: scipy.stats.binom_test(\n row['ref_count'], row['total_count'],\n p=sequencing_base_call_error, alternative='greater')\n\n snp_counts_df['prob_no_A'] = snp_counts_df.apply(binom_test_ref, axis=1)\n \n binom_test_alt = lambda row: scipy.stats.binom_test(\n row['alt_count'], row['total_count'],\n p=sequencing_base_call_error, alternative='greater')\n \n snp_counts_df['prob_no_B'] = snp_counts_df.apply(binom_test_alt, axis=1)\n\n snp_counts_df['has_A'] = snp_counts_df['prob_no_A'] < homozygous_p_value_threshold\n snp_counts_df['has_B'] = snp_counts_df['prob_no_B'] < homozygous_p_value_threshold\n\n snp_counts_df['AA'] = (snp_counts_df['has_A'] & ~snp_counts_df['has_B']) * 1\n snp_counts_df['BB'] = (snp_counts_df['has_B'] & ~snp_counts_df['has_A']) * 1\n snp_counts_df['AB'] = (snp_counts_df['has_A'] & snp_counts_df['has_B']) * 1\n \n snp_counts_df.to_csv(snp_genotype_filename, sep='\\t', columns=['position', 'AA', 'AB', 'BB'], index=False)", "def load_vcf(self,vcf_file):\n\n # setup the dictionaries of expected SNPs for each lineage\n self._reset_lineage_snps()\n\n # open the VCF file for reading\n vcf_reader = vcf.Reader(open(vcf_file, 'r'))\n\n # read the VCF file line-by-line\n for record in vcf_reader:\n\n # consider each lineage in turn\n for lineage_name in self.lineages:\n\n # only proceed if the genome position occurs in the list of identifiable positions\n if record.POS in self.reference_snps[lineage_name].keys():\n\n # parse the record\n for sample in record.samples:\n geno = sample['GT'][0]\n\n # if there is a null call, record a hyphen which won't match, regardless of the reference\n if geno == '.':\n\n self.sample_snps[lineage_name][int(record.POS)]=\"-\"\n\n # otherwise replace the H37Rv base with the actual base from the VCF file\n elif 
geno != 0:\n self.sample_snps[lineage_name][int(record.POS)]=record.ALT[int(geno)-1]", "def determine_species_based_on_sa(self) -> List[int]:\n species_keys, pdep_rxns_to_explore = list(), list()\n if self.sa_dict is None:\n self.logger.error(f\"T3's sa_dict was None. Please check that the input file contains a proper 'sensitivity' \"\n f\"block and/or that SA was run successfully.\\n\"\n f\"Not performing refinement based on sensitivity analysis!\")\n return species_keys\n\n sa_dict_max = {'kinetics': dict(), 'thermo': dict()}\n for key in ['kinetics', 'thermo']:\n for observable_label in self.sa_dict[key].keys():\n if observable_label not in sa_dict_max[key]:\n sa_dict_max[key][observable_label] = list()\n for parameter in self.sa_dict[key][observable_label].keys():\n entry = dict()\n entry['parameter'] = parameter # rxn number (int) or spc label (str)\n entry['max_sa'] = max(self.sa_dict[key][observable_label][parameter].max(),\n abs(self.sa_dict[key][observable_label][parameter].min()))\n sa_dict_max[key][observable_label].append(entry)\n\n for observable_label, sa_list in sa_dict_max['kinetics'].items():\n sa_list_sorted = sorted(sa_list, key=lambda item: item['max_sa'], reverse=True)\n for i in range(min(self.t3['sensitivity']['top_SA_reactions'], len(sa_list_sorted))):\n reaction = get_reaction_by_index(sa_list_sorted[i]['parameter'] - 1, self.rmg_reactions)\n if reaction is None:\n continue\n for species in reaction.reactants + reaction.products:\n if self.species_requires_refinement(species=species):\n num = f'{i+1}{get_ordinal_indicator(i+1)} ' if i else ''\n reason = f'(i {self.iteration}) participates in the {num}most sensitive reaction ' \\\n f'for {observable_label}: {reaction}'\n key = self.add_species(species=species, reasons=reason)\n if key is not None:\n species_keys.append(key)\n if reaction.kinetics.is_pressure_dependent() \\\n and reaction not in [rxn_tup[0] for rxn_tup in pdep_rxns_to_explore] \\\n and self.t3['sensitivity']['pdep_SA_threshold'] is not None:\n pdep_rxns_to_explore.append((reaction, i, observable_label))\n for observable_label, sa_list in sa_dict_max['thermo'].items():\n sa_list_sorted = sorted(sa_list, key=lambda item: item['max_sa'], reverse=True)\n for i in range(min(self.t3['sensitivity']['top_SA_species'], len(sa_list_sorted))):\n species = get_species_by_label(sa_list_sorted[i]['parameter'], self.rmg_species)\n if species is None:\n self.logger.error(f\"Could not identify species {sa_list_sorted[i]['parameter']}!\")\n if self.species_requires_refinement(species=species):\n num = f'{i+1}{get_ordinal_indicator(i+1)} ' if i else ''\n reason = f'(i {self.iteration}) the {num}most sensitive species thermo for {observable_label}'\n key = self.add_species(species=species, reasons=reason)\n if key is not None:\n species_keys.append(key)\n\n species_keys.extend(self.determine_species_from_pdep_network(pdep_rxns_to_explore=pdep_rxns_to_explore))\n\n return species_keys", "def _parse(self, vcffile):\n # read and parse the vcf file\n self.baseproperties={}\n with gzip.open(vcffile, \"r\") as f:\n \n # iterate over the vcf file\n returnedLines=0\n for line in f:\n if line[0] == \"#\":\n continue # it is a comment; go to next line;\n if \"INDEL\" in line:\n continue #this is not needed because ours don't contain INDELs anymore; go to next line;\n \n # parse the line.\n chrom, pos, varID, ref, alts, score, filterx, infos, fields, sampleInfo = line.strip().split()\n pos = int(pos)\n alts = alts.split(\",\")\n infos = dict(item.split(\"=\") for item in 
infos.split(\";\"))\n baseCounts4=map(int, infos['BaseCounts4'].split(\",\")) #get frequencies of high quality bases\n baseFreqs=map(int, infos['BaseCounts4'].split(\",\")) #get frequencies of high quality bases\n baseFreqs.sort(reverse=True) #get frequencies of high quality bases, sorted\n depth = sum(baseCounts4)\n \n # compute probability from exact binomial test\n if (baseFreqs[0]<depth and depth>0): # the majority base is not the only base AND depth is more than 0;\n pvalue=stats.binom_test(x=baseFreqs[1],n=depth,p=self.expectedErrorRate) # do the test if any variation\n elif baseFreqs[0]==depth:\n pvalue=1 # there is only one base\n elif depth==0:\n pvalue=None # can't tell, no data\n else:\n raise Error(\"Logical error: should never reach this point {0} {1}\".format(baseFreqs[0], depth))\n \n if pvalue==0:\n mlp= 250 # code minus log p as 250\n elif pvalue is not None:\n mlp= -math.log(pvalue,10)\n elif pvalue is None:\n mlp=None\n \n # store output in a dictionary \n if depth>0:\n maf=float(baseFreqs[1])/float(depth)\n else:\n maf=None\n self.baseproperties[pos]={'pos':pos, 'ref':ref, 'depth':depth,\\\n 'base_a':baseCounts4[0], 'base_c':baseCounts4[1], 'base_g':baseCounts4[2], 'base_t':baseCounts4[3], \\\n 'maf':maf,'pvalue':pvalue, 'mlp':mlp}\n \n returnedLines=returnedLines+1\n if (returnedLines>=self.maxLines):\n break # debug setting; we have returned however many lines we need to do our testing;\n if returnedLines % 100000 ==0:\n print(returnedLines)\n \n ## apply fdr \n positions=self.baseproperties.keys() # which positions we are analysing\n pvalues=[] # extract the p values into a vector\n for position in positions: # for all the positions analysed\n pvalue=self.baseproperties[position]['pvalue']\n if not pvalue is None:\n pvalues.append(self.baseproperties[position]['pvalue']) # add the unadjusted value to a list\n \n adjustedpvalues=self.adjustedpValues(pvalues) # and apply fdr\n \n # write back qvalues into dictionary\n n=-1\n for position in positions: # for all the positions analysed\n n+=1\n if not self.baseproperties[position]['pvalue'] is None:\n qvalue=adjustedpvalues[n]\n self.baseproperties[position]['qvalue']=qvalue\n\n if qvalue==0:\n mlq= 250 # code minus log p as 250\n elif qvalue is not None:\n mlq= -math.log(qvalue,10)\n elif qvalue is None:\n mlq=None\n self.baseproperties[position]['mlq']=mlq", "def _simulate_vasprun(self, wf):\n test_dir = os.path.abspath(os.path.join(ref_dir, \"neb_wf\"))\n neb_ref_dirs = {\"parent\": os.path.join(test_dir, \"1\"),\n \"ep0\": os.path.join(test_dir, \"2\"),\n \"ep1\": os.path.join(test_dir, \"3\"),\n \"neb1\": os.path.join(test_dir, \"4\"),\n \"neb2\": os.path.join(test_dir, \"5\")}\n return use_fake_vasp(wf, neb_ref_dirs, params_to_check=[\"ENCUT\"])", "def main(args):\n # get causal snps\n causals = pd.read_table(args.causalfn, delim_whitespace=True, header=None, \n names=['SNP', 'A1', 'True_beta']) \n # Get GWAS and compute the covariance\n #gwas1 = GWASnCOV(args.freq, args.gwasfn1)\n gwas2 = GWASnCOV(args.freq1file, args.gwasfn2)\n cotags = pd.read_table(args.cotagfn, sep='\\t')\n #cotags = cotagDF.nlargest(top, 'Cotagging')\n snpPRSes = pd.concat(Parallel(n_jobs=args.cpus)(delayed(snpPRS)(\n args.plinkexe, args.bedfile1, (i.SNP, i.A1, str(i.BETA)), args.pheno1fn,\n args.freq1file, args.labels) for i in gwas2.itertuples())) \n snpPRSes.to_csv('%s_SNPwiseregr.tsv' % args.prefix, sep='\\t', index=False)", "def select_qual_from_stdin(args):\n var_pct_full = args.var_pct_full\n qual_fn = args.qual_fn if args.qual_fn is 
not None else \"qual\"\n vcf_fn = file_path_from(args.vcf_fn)\n ref_pct_full = args.ref_pct_full if args.ref_pct_full else var_pct_full\n # for efficiency, we use a maximum 30% reference candidates proportion for full-alignment calling, which is almost cover all false negative candidates\n # for ont platform, we set a default 10% reference candidates proportion for full-alignment calling unless a known vcf file is provided (genotyping mode)\n # directly set default value in run_clair3.sh from v0.1-r5\n # ref_pct_full = 0.1 if args.platform == 'ont' else ref_pct_full\n # ref_pct_full = min(ref_pct_full, 0.3)\n\n variant_qual_list = []\n ref_qual_list = []\n for row in stdin:\n if row[0] == '#':\n continue\n row = row.rstrip().split()\n\n qual, gt_info = row[5], row[9]\n genotype = gt_info.split(':')[0]\n if genotype == '0/0':\n ref_qual_list.append(float(qual))\n else:\n variant_qual_list.append(float(qual))\n\n ref_qual_list = sorted(ref_qual_list)\n variant_qual_list = sorted(variant_qual_list)\n low_variant_qual_list = variant_qual_list[:int(var_pct_full * len(variant_qual_list))]\n if len(low_variant_qual_list) == 0:\n print(log_warning(\n \"[WARNING] Cannot find any low-quality 0/1 or 1/1 variant in pileup output using variant quality cut-off proportion: {}, total variants: {}\".format(\n var_pct_full, len(variant_qual_list))))\n print(log_warning(\"[WARNING] Set low variant quality score cut-off to 0.0\"))\n var_qual_cut_off = 0.0\n else:\n var_qual_cut_off = low_variant_qual_list[-1]\n\n # If a known vcf file is provided, use user-defined proportion\n low_ref_qual_list = ref_qual_list[:int(ref_pct_full * len(ref_qual_list))] if vcf_fn is None else ref_qual_list[:int(args.ref_pct_full * len(ref_qual_list))]\n if len(low_ref_qual_list) == 0:\n print(log_warning(\n \"[WARNING] Cannot find any low-quality 0/0 reference calls in pileup output using reference quality cut-off proportion: {}, total reference calls: {}\".format(\n ref_pct_full, len(ref_qual_list))))\n print(log_warning(\"[WARNING] Set low reference quality score cut-off to 0.0\"))\n ref_qual_cut_off = 0.0\n else:\n ref_qual_cut_off = low_ref_qual_list[-1]\n print ('[INFO] Set variants quality cutoff {}'.format(round(var_qual_cut_off, 0)))\n print ('[INFO] Set reference calls quality cutoff {}'.format(round(ref_qual_cut_off, 0)))\n\n if args.output_fn:\n with open(os.path.join(args.output_fn, qual_fn), 'w') as output:\n output.write(str(var_qual_cut_off) + ' ' + str(ref_qual_cut_off))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take the annotations for a particular transcript and parse them to find the numbers of the exons that have been deleted
def find_deleted_exons(annotations):
    exons = []
    gene = ''
    for anno_fields in annotations:
        _, _, _, g, _, _, _, _, rank, _, _ = anno_fields[:11]
        gene = gene or g
        try:
            exons.append(int(rank.split('/')[0]))
        except ValueError:
            pass
    return exons, gene
[ "def annotate_du_file(xmlfile,results):\n outfile = xmlfile.replace(\".xml\", \"_visasAnnot_temp.xml\")\n tree = ET.parse(IN + xmlfile)\n root = tree.getroot()\n sentID = 0\n for par in root.iter(\"utterance\"):\n for sent in par:\n sentID += 1\n remove_results = []\n for result in results:\n if sentID is result[0]:\n wordID = 0\n for lexeme in sent.iter('word'):\n wordID += 1\n if result[1] <= wordID <= result[2]:\n lexeme.set('ling_feature',result[3])\n remove_results.append(result)\n for r in remove_results:\n if r in results:\n results.remove(r)\n\n tree.write(IN + \"annotated/\" + outfile)", "def annotate_exon_loss(exon_loss_anno_by_tid, prioritised_genes):\n\n annos = set()\n for transcript, annotations in exon_loss_anno_by_tid.items():\n exons, gene = find_deleted_exons(annotations)\n exons = list(set(exons))\n if len(exons) == 0:\n return None\n if max(exons) - min(exons) + 1 == len(exons):\n if len(exons) == 1:\n deleted_exons = f'exon{exons[0]}del'\n else:\n deleted_exons = f'exon{min(exons)}-{max(exons)}del'\n else:\n deleted_exons = f'exon{min(exons)}-{max(exons)}del'\n var_priority = 2 if gene in prioritised_genes else 3\n annos.add((gene, transcript, deleted_exons, var_priority))\n return annos", "def getTxtRelPositions(transcript_id,txt_annot,readStrand,readPosition):\n #figure out sense/antisense. Easy\n txtStrand=txt_annot['strand']\n if txtStrand==readStrand:\n SorAS='S'\n else:\n SorAS='AS'\n \n #Great, now figure out position relative to CDSStart\n #To do this, I will calculate the distance from the txtStart to readPosition, and txtStart to CDSStart. I'll subtract the two.\n cdsStart=min([entry[0] for entry in txt_annot['CDS']])\n exons=txt_annot['exon']\n exonStarts=[exon[0] for exon in exons]\n exonEnds=[exon[1] for exon in exons]\n exonStarts.sort(),exonEnds.sort()\n exons=list(zip(exonStarts,exonEnds))\n #Edit: Apparently the exons are not necessarily orderd in the gtf file.\n \n txtStart_cdsStart_dist=getDist(exons,cdsStart)\n txtStart_readPosition_dist=getDist(exons,readPosition)\n readPosition_rel_to_CDSstart=txtStart_readPosition_dist-txtStart_cdsStart_dist\n \n #now do the same thing with the cdsEnd\n cdsEnd=max([entry[1] for entry in txt_annot['CDS']])\n txtStart_cdsEnd_dist=getDist(exons,cdsEnd)\n #already determined txtStart_readPosition_dist\n readPosition_rel_to_CDSend=txtStart_readPosition_dist-txtStart_cdsEnd_dist\n \n #stranded issues. 
Although ensembl defines start_codon and stop_codon as those exact locations on the - and + strand, here I find the start/stop by taking min/max of CDS exon boundaries, so I need to flip it\n if txtStrand=='+':\n return ':'.join([transcript_id,str(readPosition_rel_to_CDSstart),str(readPosition_rel_to_CDSend),SorAS])\n else:\n return ':'.join([transcript_id,str(-readPosition_rel_to_CDSend),str(-readPosition_rel_to_CDSstart),SorAS])", "def delete_annot_nids(ibs, aid_list):\n # FIXME: This should be implicit by setting the anotation name to the\n # unknown name\n ibs.delete_annot_relations_oftype(aid_list, constants.INDIVIDUAL_KEY)", "def test_annotate_edit_does_not_delete(self):\n self.t(\"add tw-20\")\n\n self.t(\"1 annotate 1st annotation\")\n self.t(\"1 annotate 2nd annotation\")\n\n code, _timestamp1a, err = self.t(\"_get 1.annotations.1.entry\")\n code, _timestamp2a, err = self.t(\"_get 1.annotations.2.entry\")\n\n self.t(\"1 edit\")\n\n code, _timestamp1b, err = self.t(\"_get 1.annotations.1.entry\")\n code, _timestamp2b, err = self.t(\"_get 1.annotations.2.entry\")\n\n self.assertEqual( _timestamp1a, _timestamp1b )\n self.assertEqual( _timestamp2a, _timestamp2b )\n\n code, out, err = self.t(\"info\")\n\n self.assertNotIn(\"Annotation '1st annotation' deleted.\", out)\n self.assertNotIn(\"Annotation '2nd annotation' deleted.\", out)", "def test_multiple_deletions():\n vep_headers = [\"Allele\", \"Feature_type\", \"SYMBOL\"]\n annotation = [\n \"ACTT|Transcript|NOC2L\", \n \"ACT|Transcript|NOC2L\"\n ]\n \n vep_dict = build_vep_annotation(\n csq_info=annotation, \n reference='TACTTT', \n alternatives=['TACT', 'TACTT'], \n vep_columns=vep_headers\n )\n \n assert vep_dict['TACT'] == [\n {'Allele': 'ACT',\n 'Feature_type': 'Transcript',\n 'SYMBOL': 'NOC2L'\n }]\n \n assert vep_dict['TACTT'] == [\n {'Allele': 'ACTT',\n 'Feature_type': 'Transcript',\n 'SYMBOL': 'NOC2L'\n }\n ]", "def get_deletions(self, aln, extension):\n return self.__get_insdel(aln, _modeller.mod_alignment_next_delete,\n extension)", "def annotation_tuples_from_file(self, document):\n annotations = []\n f = file(self.mpqa_root + document, 'r')\n tmp = f.read()\n f.close()\n for tuple in self.getmpqatuples(document, 'annotations'):\n annotations.append(tuple)\n #print annotations\n annotations.sort(key=lambda x: (x[1].start))\n #print annotations\n return annotations", "def get_transcript_ids(root):\n\n\ttranscripts = []\n\tfor transcript_type in root.iter('annotation_set'):\n\t\tsource = transcript_type.attrib[\"type\"]\n\t\tif source == \"ncbi\" or source == \"ensembl\":\n\t\t\t# Loop through transcript tags to collect the different \n\t\t\t# transcripts available\n\t\t\tfor transcript in transcript_type:\n\t\t\t\tif transcript.tag == \"mapping\":\n\t\t\t\t\ttranscript_id = transcript.attrib[\"coord_system\"]\n\t\t\t\t\ttranscripts.append(transcript_id)\n\treturn transcripts", "def test_correct_annotations(self):\n for doc in self.prj.documents:\n if doc.id == 26608:\n assert len(doc.annotations(self.prj.get_label_by_id(579))) == 1", "def DeleteAnnotation(con, cur, annotationid, userid=0, commit=True):\n debug(1, 'DeleteAnnotation for annotationid %d userid %d' % (annotationid, userid))\n err, origuser = GetAnnotationUser(con, cur, annotationid)\n if err:\n return err\n if origuser != 0:\n if userid == 0:\n debug(6, 'cannot delete non-anonymous annotation (userid=%d) with default userid=0' % origuser)\n return('Cannot delete non-anonymous annotation with default user. 
Please log in first')\n if origuser != userid:\n debug(6, 'cannot delete. annotation %d was created by user %d but delete request was from user %d' % (annotationid, origuser, userid))\n return 'Cannot delete. Annotation was created by a different user'\n\n # find how many sequences are in the annotations\n cur.execute('SELECT seqCount FROM AnnotationsTable WHERE id=%s', [annotationid])\n res = cur.fetchone()\n num_seqs = res[0]\n\n # update the ontology term sequence counts\n err, parents = GetAnnotationParents(con, cur, annotationid)\n if err:\n msg = 'Could not find ontology parents. Delete aborted'\n debug(3, msg)\n return msg\n for cterm in parents:\n cur.execute('UPDATE OntologyTable SET seqCount = seqCount-%s, annotationCount=annotationCount-1 WHERE description = %s', [num_seqs, cterm])\n debug(3, 'fixed ontologytable counts')\n\n cur.execute('DELETE FROM AnnotationsTable WHERE id=%s', [annotationid])\n debug(1, 'deleted from annotationstable')\n cur.execute('DELETE FROM AnnotationListTable WHERE idannotation=%s', [annotationid])\n debug(1, 'deleted from annotationliststable')\n cur.execute('DELETE FROM SequencesAnnotationTable WHERE annotationid=%s', [annotationid])\n debug(1, 'deleted from sequencesannotationtable')\n # delete the annotation parents entries\n cur.execute('DELETE FROM AnnotationParentsTable WHERE idAnnotation=%s', [annotationid])\n debug(1, 'deleted from annotationParentsTable')\n\n if commit:\n con.commit()\n return('')", "def get_expert_annoation_stats(annotations):\n if annotations:\n coherence = []\n consistency = []\n fluency = []\n relevance = []\n for annotate in annotations:\n coherence.append(annotate['coherence'])\n consistency.append(annotate['consistency'])\n fluency.append(annotate['fluency'])\n relevance.append(annotate['relevance'])\n if coherence and consistency and fluency and relevance:\n return [sum(coherence) / len(coherence), sum(consistency) / len(consistency), sum(fluency) / len(\n fluency), sum(relevance) / len(relevance)]\n else:\n return -1", "def get_filtered_annotations(assoc_file, accepted_evcodes=None,\n remove_leading_gene_id=None,\n use_symbol=None, tax_id=None):\n\n if assoc_file.endswith('.gz'):\n assoc_fh = gzip.open(assoc_file, 'r')\n else:\n assoc_fh = open(assoc_file, 'r')\n\n annotations = []\n\n for line in assoc_fh:\n if line.startswith('!'):\n continue\n\n toks = line.strip().split('\\t')\n\n (xrdb, xrid, details, goid, refstring, ev_code, taxon, date) = (\n toks[0], toks[1], toks[3], toks[4], toks[5], toks[6],\n toks[12].split(':')[1], toks[13])\n\n if tax_id and (tax_id != taxon):\n continue\n\n if remove_leading_gene_id:\n xrid = xrid.split(':')[1]\n\n if xrdb in DB_REMAP:\n xrdb = DB_REMAP[xrdb]\n\n if use_symbol:\n xrdb = 'Symbol'\n if toks[0] == 'UniProtKB':\n xrid = toks[2]\n\n # These next few lines are needed for processing\n # Arabidopsis annotations\n if xrdb == 'TAIR':\n tair_regex = re.compile('AT[0-9MC]G[0-9][0-9][0-9][0-9][0-9]')\n first_alias = toks[10].split('|')[0]\n if tair_regex.match(toks[2]):\n xrid = toks[2]\n elif tair_regex.match(toks[9]):\n xrid = toks[9]\n elif tair_regex.match(first_alias):\n xrid = first_alias\n\n if details == 'NOT':\n continue\n\n if accepted_evcodes is not None and (ev_code not in accepted_evcodes):\n continue\n\n annotation = (xrdb, xrid, goid, refstring, date)\n\n annotations.append(annotation)\n\n return annotations", "def collect_parse(annotations, calculated):\n\n\tdifferences = calculated[\"differences\"]\n\tlabel_name_pairs_x = 
calculated[\"label_name_pairs_x\"]\n\tlabel_value_pairs_y = calculated[\"label_value_pairs_y\"]\n\tlabel_count = calculated[\"misc\"]\n\ty_axis_unit_name = calculated[\"y_axis_unit_name\"]\n\tx_axis_label_name = calculated[\"x_axis_label_name\"]\n\n\n\tprevious_line_empty = True\n\t#TODO: check encoding\n\t# with open(version_dir + annotations, \"r\", encoding=\"ISO-8859-1\") as f:\n\twith open(version_dir + annotations, \"r\", encoding=\"ISO-8859-1\") as f:\n\t\tend_desc = False\n\t\tdesc_tokens = [] # list of strings\n\t\tdesc_labels = [] # list of tuples \n\t\ti_token = 0 # track of tokens within one description\n\t\tfor_xml = {}\n\t\ti_desc = 1\n\t\tfor line in f:\n\t\t\tline2 = line.split()\n\t\t\tline3 = \" \".join(line2)\n\n\t\t\tif line2 == []: # an empty line between descriptions\n\t\t\t\tend_desc = True\n\t\t\t\tprevious_line_empty = True\n\n\t\t\tif len(line2) == 1:\n\t\t\t\tif line2[-1][0] not in {'<', '\"'}:\n\t\t\t\t\t#print(\"no label\", line2)\n\t\t\t\t\ti_token += 1\n\t\t\t\t\tdesc_tokens.append(line2[0])\n\n\t\t\t\t# if line2[-1][0] == \"<\":\n\t\t\t\t# \tprint(line2[-1], line2[0], \"\\n\\n\")\n\t\t\t\n\n\t\t\tif len(line2) > 1 and line2[-1].startswith(\"<\"):\n\t\t\t\tprevious_line_empty = False\n\t\t\t\tlabel = line2[-1]\n\n\t\t\t\tif len(line2[:-1]) == 1: # one token, one label\n\t\t\t\t\tdesc_tokens.append(line2[0])\n\t\t\t\t\ti_token += 1\n\t\t\t\t\tdesc_labels.append((label, i_token, i_token, line2[0]))\n\t\t\t\t\t\n\t\t\t\telse: # several tokens, one label\n\t\t\t\t\ti_token += 1\n\n\t\t\t\t\ttemp = \" \".join(line2[:-1])\n\t\t\t\t\t#temp_doc = nlp_web(temp) # old?\n\t\t\t\t\ttemp_doc = tokenizer(temp)\n\t\t\t\t\ttemp_tokens = [t.text for t in temp_doc]\n\t\t\t\t\tif temp_tokens == [\",\",\"000\"]: # TOKENIZER EXCEPTION\n\n\t\t\t\t\t\ttemp_tokens = [\",000\"]\n\t\t\t\t\ttemp_join = \" \".join(temp_tokens)\n\t\t\t\t\ti_end = i_token + len(temp_tokens) - 1\n\t\t\t\t\tdesc_tokens += temp_tokens\n\t\t\t\t\tdesc_labels.append((label, i_token, i_end, temp_join))\n\t\t\t\t\ti_token = i_end\n\n\n\n\n\t\t\tif len(line2) == 1 and line2[0] == \"<end_of_description>\" and previous_line_empty == False:\n\t\t\t\tif desc_tokens:\n\n\t\t\t\t\tdesc = \" \".join(desc_tokens)\n\n\t\t\t\t\t#doc = nlp(desc) # TODO this is where ,000 is split into , and 000 old?\n\t\t\t\t\tdoc = tokenizer(desc)\n\n\t\t\t\t\tdoc_tokens = [t.text for t in doc]\n\n\t\t\t\t\t#print(desc_tokens,\"\\n\" , \"\\n\", doc_tokens, \"\\n \\n\", desc_labels, \"\\n\",desc, \"\\n\", len(desc_tokens), len(doc))\n\n\t\t\t\t\tfor_xml[i_desc] = {\"desc_tokens\":desc_tokens,\"doc_tokens\":doc_tokens,\"desc_labels\":desc_labels}\n\t\t\t\t\ti_desc += 1\n\t\t\t\t\t\n\t\t\t\tdesc_tokens, desc_labels, i_token = [], [], 0\n\t\t\t\t\n\t\t\t\t\n\treturn for_xml", "def pull_anno(self, index):\n img_id = self.ids[index]\n # anno = ET.parse(self._annopath % img_id).getroot()\n # gt = self.target_transform(anno, 1, 1)\n # return img_id[1], gt", "def all_annotations(num, test) -> None:\n return None", "def compareAnnotations2(old, new, output, args={}):\n result = {}\n global no_change, UTR_added, yardSale, exonChange, modelChangeNotProt, dropped, added, total_transcripts, total_genes\n (\n no_change,\n UTR_added,\n yardSale,\n exonChange,\n modelChangeNotProt,\n dropped,\n added,\n total_transcripts,\n total_genes,\n ) = (0,) * 9\n lib.log.info(\n \"Comparing original annotation to updated\\n original: {}\\n updated: {}\".format(\n old, new\n )\n )\n if args.gff and args.fasta:\n oldInter, oldGenes = gff2interlap(old, args.fasta)\n 
else:\n oldInter, oldGenes = gbk2interlap(old)\n newInter, newGenes = gff2interlap(new, args.fasta)\n # do the simple stuff first, find models that were deleted\n for contig in oldInter:\n for gene in oldInter[contig]:\n if not gene in newInter[contig]: # these models are removed\n dropped += 1\n if not gene[2] in oldGenes:\n continue\n # populate output dictionary with results\n if not gene[2] in result:\n # dropped model has AED of 1.000\n cdsAED = \"1.000\"\n exonAED = \"1.000\"\n result[gene[2]] = {\n \"contig\": oldGenes[gene[2]][\"contig\"],\n \"old_num_transcripts\": len(oldGenes[gene[2]][\"ids\"]),\n \"old_location\": oldGenes[gene[2]][\"location\"],\n \"num_transcripts\": len(oldGenes[gene[2]][\"ids\"]),\n \"strand\": oldGenes[gene[2]][\"strand\"],\n \"mRNA\": oldGenes[gene[2]][\"mRNA\"],\n \"location\": oldGenes[gene[2]][\"location\"],\n \"CDS\": oldGenes[gene[2]][\"CDS\"],\n \"message\": \"gene model removed\",\n \"cdsAED\": cdsAED,\n \"exonAED\": exonAED,\n \"transcript_id\": oldGenes[gene[2]][\"ids\"],\n \"pident\": [],\n \"protein_id\": oldGenes[gene[2]][\"ids\"],\n \"seq\": oldGenes[gene[2]][\"protein\"],\n }\n\n # now go through the updated annotation, comparing to old annot\n for contig in newInter:\n for gene in newInter[contig]:\n # means this is a new model, so add it\n if not gene in oldInter[contig]:\n added += 1\n total_genes += 1\n if not gene[2] in newGenes:\n continue\n total_transcripts += len(newGenes[gene[2]][\"ids\"])\n if not gene[2] in result:\n result[gene[2]] = {\n \"contig\": newGenes[gene[2]][\"contig\"],\n \"old_num_transcripts\": 0,\n \"old_location\": newGenes[gene[2]][\"location\"],\n \"num_transcripts\": len(newGenes[gene[2]][\"ids\"]),\n \"strand\": newGenes[gene[2]][\"strand\"],\n \"mRNA\": newGenes[gene[2]][\"mRNA\"],\n \"location\": newGenes[gene[2]][\"location\"],\n \"CDS\": newGenes[gene[2]][\"CDS\"],\n \"message\": \"new gene model\",\n \"cdsAED\": \"0.000\",\n \"exonAED\": \"0.000\",\n \"transcript_id\": newGenes[gene[2]][\"ids\"],\n \"protein_id\": newGenes[gene[2]][\"ids\"],\n \"seq\": newGenes[gene[2]][\"protein\"],\n \"pident\": [],\n }\n else: # means this is existing model, and need to do some comparisons\n hitList = list(oldInter[contig].find(gene))\n # there might be some overlapping transcripts, so enforce locus name\n hit = None\n for z in hitList:\n if gene[2] == z[2]:\n hit = z\n if not hit:\n # there is no real hit, so this a new gene\n total_transcripts += len(newGenes[gene[2]][\"ids\"])\n added += 1\n total_genes += 1\n if not gene[2] in result:\n result[gene[2]] = {\n \"contig\": newGenes[gene[2]][\"contig\"],\n \"old_num_transcripts\": 0,\n \"old_location\": newGenes[gene[2]][\"location\"],\n \"num_transcripts\": len(newGenes[gene[2]][\"ids\"]),\n \"strand\": newGenes[gene[2]][\"strand\"],\n \"mRNA\": newGenes[gene[2]][\"mRNA\"],\n \"location\": newGenes[gene[2]][\"location\"],\n \"CDS\": newGenes[gene[2]][\"CDS\"],\n \"message\": \"new gene model\",\n \"cdsAED\": \"0.000\",\n \"exonAED\": \"0.000\",\n \"transcript_id\": newGenes[gene[2]][\"ids\"],\n \"protein_id\": newGenes[gene[2]][\"ids\"],\n \"seq\": newGenes[gene[2]][\"protein\"],\n \"pident\": [],\n }\n else:\n # since we may have multiple transcripts from hit as well as new annotation we need to be aware of that\n # also, tRNA annotations do not exist in Proteins dictionary, so process them differently\n # get the reference hits, pull out CDS and mRNA for pairwiseAED calculation\n total_genes += 1\n total_transcripts += len(newGenes[gene[2]][\"ids\"])\n\n # get the 
old annotation\n hitInfo = oldGenes.get(gene[2])\n\n # calculate AED\n exonAED = pairwiseAED(newGenes[gene[2]][\"mRNA\"], hitInfo[\"mRNA\"])\n if (\n newGenes[gene[2]][\"type\"] == \"mRNA\"\n and hitInfo[\"type\"] == \"mRNA\"\n ):\n cdsAED = pairwiseAED(newGenes[gene[2]][\"CDS\"], hitInfo[\"CDS\"])\n else:\n cdsAED = \"0.000\"\n\n # check translation, to deal with multiple transcripts, lets loop through new\n protMatches = []\n if (\n newGenes[gene[2]][\"type\"] == \"mRNA\"\n and hitInfo[\"type\"] == \"mRNA\"\n ):\n for i in range(0, len(newGenes[gene[2]][\"ids\"])):\n protMatch = None\n for y in range(0, len(oldGenes[gene[2]][\"ids\"])):\n pident = pairwiseAlign(\n newGenes[gene[2]][\"protein\"][i],\n oldGenes[gene[2]][\"protein\"][y],\n )\n if not protMatch:\n protMatch = pident\n else:\n if pident > protMatch:\n protMatch = pident\n protMatches.append(protMatch)\n # summarize UTRs for mRNA features\n if newGenes[gene[2]][\"type\"] == \"mRNA\":\n try:\n UTRs = findUTRs(\n newGenes[gene[2]][\"CDS\"],\n newGenes[gene[2]][\"mRNA\"],\n newGenes[gene[2]][\"strand\"],\n )\n except:\n UTRs = []\n lib.log.debug(\n \"UTR detection failed for {}: CDS={} mRNA={} strand={}\".format(\n newGenes[gene[2]][\"ids\"],\n newGenes[gene[2]][\"CDS\"],\n newGenes[gene[2]][\"mRNA\"],\n newGenes[gene[2]][\"strand\"],\n )\n )\n else:\n UTRs = []\n\n # structured comments/counts for gene models\n msg, no_change, UTR_added, yardSale, exonChange = message(\n newGenes[gene[2]][\"location\"],\n oldGenes[gene[2]][\"location\"],\n cdsAED,\n exonAED,\n protMatches,\n UTRs,\n no_change,\n UTR_added,\n yardSale,\n exonChange,\n )\n\n if not gene[2] in result:\n result[gene[2]] = {\n \"contig\": newGenes[gene[2]][\"contig\"],\n \"old_num_transcripts\": len(oldGenes[gene[2]][\"ids\"]),\n \"old_location\": oldGenes[gene[2]][\"location\"],\n \"num_transcripts\": len(newGenes[gene[2]][\"ids\"]),\n \"strand\": newGenes[gene[2]][\"strand\"],\n \"mRNA\": newGenes[gene[2]][\"mRNA\"],\n \"location\": newGenes[gene[2]][\"location\"],\n \"CDS\": newGenes[gene[2]][\"CDS\"],\n \"message\": msg,\n \"cdsAED\": cdsAED,\n \"exonAED\": exonAED,\n \"transcript_id\": newGenes[gene[2]][\"ids\"],\n \"protein_id\": newGenes[gene[2]][\"ids\"],\n \"seq\": newGenes[gene[2]][\"protein\"],\n \"pident\": protMatches,\n }\n\n total_cdsAED = []\n total_exonAED = []\n with open(output, \"w\") as out:\n out.write(\n \"Locus_tag\\tOrig_Location\\tOrig_Num_Transcripts\\tContig:start-end\\tStrand\\tGene_Length\\tNum_Transcripts\\tmRNA_AED\\tCDS_AED\\tDescription\\n\"\n )\n for k, v in natsorted(list(result.items())):\n start = str(v[\"location\"][0])\n end = str(v[\"location\"][1])\n GeneLength = int(end) - int(start)\n total_cdsAED.append(float(v[\"cdsAED\"]))\n total_exonAED.append(float(v[\"exonAED\"]))\n out.write(\n \"{:}\\t{:}:{:}-{:}\\t{:}\\t{:}:{:}-{:}\\t{:}\\t{:}\\t{:}\\t{:}\\t{:}\\t{:}\\n\".format(\n k,\n v[\"contig\"],\n v[\"old_location\"][0],\n v[\"old_location\"][1],\n v[\"old_num_transcripts\"],\n v[\"contig\"],\n start,\n end,\n v[\"strand\"],\n GeneLength,\n v[\"num_transcripts\"],\n v[\"exonAED\"],\n v[\"cdsAED\"],\n v[\"message\"],\n )\n )\n Avg_cdsAED = sum(total_cdsAED) / float(len(total_cdsAED))\n Avg_exonAED = sum(total_exonAED) / float(len(total_exonAED))\n # output some simple stats to cmd line\n lib.log.info(\n \"Updated annotation complete:\\n\\\n-------------------------------------------------------\\n\\\nTotal Gene Models:\\t{:,}\\n\\\nTotal transcripts:\\t{:,}\\n\\\nNew Gene Models:\\t{:,}\\n\\\nNo Change:\\t\\t{:,}\\n\\\nUpdate 
UTRs:\\t\\t{:,}\\n\\\nExons Changed:\\t\\t{:,}\\n\\\nExons/CDS Changed:\\t{:,}\\n\\\nDropped Models:\\t\\t{:,}\\n\\\nCDS AED:\\t\\t{:.3f}\\n\\\nmRNA AED:\\t\\t{:.3f}\\n\\\n-------------------------------------------------------\".format(\n total_genes,\n total_transcripts,\n added,\n no_change,\n UTR_added,\n exonChange,\n yardSale,\n dropped,\n Avg_cdsAED,\n Avg_exonAED,\n )\n )", "def _clear_annotations(self):\n for i in range(len(self._text_annotations)):\n self._text_annotations[i].remove()\n self._text_annotations = []", "def pull_anno(self, index):\n img_id = self.ids[index]\n anno = ET.parse(self._annopath % img_id).getroot() #Element\n gt = self.target_transform(anno, 1, 1)\n return img_id[1], gt" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the exon loss simple annotation from the exon dict created in simplify_ann. For each transcript with exon losses, find the numbers for each exon and create the annotation.
def annotate_exon_loss(exon_loss_anno_by_tid, prioritised_genes):
    annos = set()
    for transcript, annotations in exon_loss_anno_by_tid.items():
        exons, gene = find_deleted_exons(annotations)
        exons = list(set(exons))
        if len(exons) == 0:
            return None
        if max(exons) - min(exons) + 1 == len(exons):
            if len(exons) == 1:
                deleted_exons = f'exon{exons[0]}del'
            else:
                deleted_exons = f'exon{min(exons)}-{max(exons)}del'
        else:
            deleted_exons = f'exon{min(exons)}-{max(exons)}del'
        var_priority = 2 if gene in prioritised_genes else 3
        annos.add((gene, transcript, deleted_exons, var_priority))
    return annos
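A minimal usage sketch for annotate_exon_loss follows. The record shape, the gene and transcript names, and the find_deleted_exons stub are assumptions made only so the example runs on its own; the real helper comes from the same module as simplify_ann.

# Hypothetical stand-in for find_deleted_exons, assumed to return the exon
# numbers hit by the deletion plus the gene symbol.
def find_deleted_exons(annotations):
    exons = [a['exon_number'] for a in annotations]
    gene = annotations[0]['gene']
    return exons, gene

# Assumed record shape: one transcript with exons 3-5 deleted in BRCA2.
exon_loss_anno_by_tid = {
    'NM_000059.3': [
        {'gene': 'BRCA2', 'exon_number': 3},
        {'gene': 'BRCA2', 'exon_number': 4},
        {'gene': 'BRCA2', 'exon_number': 5},
    ]
}
print(annotate_exon_loss(exon_loss_anno_by_tid, prioritised_genes={'BRCA2'}))
# -> {('BRCA2', 'NM_000059.3', 'exon3-5del', 2)}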
[ "def write_novel_exon_statistics(self, annotation):\n\n # loop over the novel exon\n for novel_exon in self.confident_novel_exons:\n\n # split the exon\n sp = novel_exon.split(\":\")\n\n # determine splice in reads (all and\n # the ones that fall exactly in the SS)\n splice_in_all = 0\n splice_in_borders = 0\n for splice_in in list(set(self.splice_in)):\n\n for event in splice_in.split_event_list:\n\n if event.strand == '+':\n\n # the 3'SS lies within the exon borders\n # and the 5'SS is somewhere upstream\n if (\n int(event.three_prime_ss) >= int(sp[1]) and\n int(event.three_prime_ss) <= int(sp[2]) and\n int(event.five_prime_ss) < int(sp[1]) and\n int(event.five_prime_ss) < int(sp[2])\n ):\n\n splice_in_all += 1\n\n # the 3'SS lies exactly on the exon borders\n # and the 5'SS is somewhere upstream\n if int(event.three_prime_ss) == int(sp[1]):\n\n splice_in_borders += 1\n\n if event.strand == '-':\n\n # same as above but for the minus strand\n\n if (\n int(event.three_prime_ss) >= int(sp[1]) and\n int(event.three_prime_ss) <= int(sp[2]) and\n int(event.five_prime_ss) > int(sp[1]) and\n int(event.five_prime_ss) > int(sp[2])\n ):\n\n # this is a splice in reads\n splice_in_all += 1\n\n if int(event.three_prime_ss) == int(sp[2]):\n\n splice_in_borders += 1\n\n # determine splice out reads\n splice_out_all = 0\n splice_out_borders = 0\n for splice_out in list(set(self.splice_out)):\n\n for event in splice_out.split_event_list:\n\n # the five_prime_ss lies within the exon borders\n # and the three_prime_ss is somewhere downstream\n\n if event.strand == '+':\n\n if (\n int(event.five_prime_ss) >= int(sp[1]) and\n int(event.five_prime_ss) <= int(sp[2]) and\n int(event.three_prime_ss) > int(sp[2]) and\n int(event.three_prime_ss) > int(sp[1])\n ):\n\n splice_out_all += 1\n\n if int(event.five_prime_ss) == int(sp[2]):\n\n splice_out_borders += 1\n\n if event.strand == '-':\n\n if (\n int(event.five_prime_ss) >= int(sp[1]) and\n int(event.five_prime_ss) <= int(sp[2]) and\n int(event.three_prime_ss) < int(sp[1]) and\n int(event.three_prime_ss) < int(sp[2])\n ):\n\n # This is a splice out read\n splice_out_all += 1\n\n if int(event.five_prime_ss) == int(sp[1]):\n\n splice_out_borders += 1\n\n # count unspliced reads that either fall entirely in the\n # exon borders or they cross the 5' or 3' border\n\n reads_entirely_fall_in_exon = 0\n reads_fall_in_5p_border = 0\n reads_fall_in_3p_border = 0\n\n for det in self.unspliced_reads:\n\n if det.aln.iv.strand == '+':\n\n # Find unspliced reads that entirely\n # fall within the exon coordinates\n if (\n int(det.aln.iv.start) >= int(sp[1]) and\n int(det.aln.iv.start) <= int(sp[2]) and\n int(det.aln.iv.end) >= int(sp[1]) and\n int(det.aln.iv.end) <= int(sp[2])\n ):\n\n reads_entirely_fall_in_exon += 1\n\n # Find unspliced reads that crosss the 5' border\n if (\n int(det.aln.iv.start) <= int(sp[1]) and\n int(det.aln.iv.end) >= int(sp[1])\n ):\n\n reads_fall_in_5p_border += 1\n\n # Find unspliced reads that crosss the 3' border\n if (\n int(det.aln.iv.start) <= int(sp[2]) and\n int(det.aln.iv.end) >= int(sp[2])\n ):\n\n reads_fall_in_3p_border += 1\n\n elif det.aln.iv.strand == '-':\n\n # Find unspliced reads that entirely fall\n # within the exon coordinates\n if (\n int(det.aln.iv.start) >= int(sp[1]) and\n int(det.aln.iv.start) <= int(sp[2]) and\n int(det.aln.iv.end) >= int(sp[1]) and\n int(det.aln.iv.end) <= int(sp[2])\n ):\n\n reads_entirely_fall_in_exon += 1\n\n # Find unspliced reads that crosss the 5' border\n if (\n int(det.aln.iv.start) <= int(sp[2]) 
and\n int(det.aln.iv.end) >= int(sp[2])\n ):\n\n reads_fall_in_5p_border += 1\n\n # Find unspliced reads that crosss the 3' border\n if (\n int(det.aln.iv.start) <= int(sp[1]) and\n int(det.aln.iv.end) >= int(sp[1])\n ):\n\n reads_fall_in_3p_border += 1\n\n # Generate the profile for the novel exon\n exon_profile = list(str(x) for x in list(\n self.global_profile[\n HTSeq.GenomicInterval(sp[0],\n int(sp[1]),\n int(sp[2]),\n sp[3])]))\n\n # reverse profile in case of minus strand\n if sp[3] == \"-\":\n exon_profile = exon_profile[::-1]\n\n region = \":\".join([sp[0],\n str(int(sp[1]) + 1),\n sp[2],\n sp[3]])\n profile = str(\",\".join(str(x) for x in exon_profile))\n\n region_feature = FeatureCounts(\n region=region,\n annotation=str(\"novel_splicing\"),\n gene_id=str(self.gene_id),\n splice_in_all=int(splice_in_all),\n splice_in_borders=int(splice_in_borders),\n splice_out_all=int(splice_out_all),\n splice_out_borders=int(splice_out_borders),\n unspliced_feature=int(reads_entirely_fall_in_exon),\n unspliced_5pSS=int(reads_fall_in_5p_border),\n unspliced_3pSS=int(reads_fall_in_3p_border),\n profile=profile\n )\n\n annotation.genes[\n self.gene_id].potential_novel_exons.append(region_feature)", "def annotate_pattern(pos_dict,ex_dict,tag):\n for dufile in os.listdir(IN):\n if dufile.endswith(\"_parsed.xml\"):\n # call function to creat a POS file (into directory ./output/POS by default)\n convert(IN + dufile)\n posfile = dufile.replace(\"_parsed.xml\", \"_pos.txt\")\n # Do extraction and annotation step for each feature\n results = get_annotation(posfile, pos_dict, ex_dict, tag)\n print(\"results for file \" +dufile + \" :\" + str(results))\n\n # Annotate results to du_file\n annotate_du_file(dufile,results)", "def formats_mapping_evidence(ont_dict: dict, source_dict: Dict, result: Tuple, clin_data: Dict) -> Tuple:\n\n dbx_evid, lab_evid, syn_evid, sim_evid = ([] for _ in range(4)) # type: ignore\n ont_label, ont_syns, ont_syntp = ont_dict['label'], ont_dict['synonym'], ont_dict['synonym_type']\n dbxref_type = normalizes_clinical_source_codes(ont_dict['dbxref_type'], source_dict)\n\n # sort clinical data\n if None not in result[0]:\n for x in result[0][2].split(' | '):\n lvl = x.split('_')[0]\n clin = {k: v for k, v in clin_data.items() if lvl in k}\n\n if 'dbxref' in x.lower():\n if x.split('_')[-1] in dbxref_type.keys():\n prefix = dbxref_type[x.split('_')[-1]]\n else:\n prefix = 'DbXref*' + x.split('_')[-1].split(':')[0]\n updated_prefix = 'OBO_' + prefix.split('*')[0] + '-OMOP_' + lvl + '_CODE'\n dbx_evid.append(updated_prefix + ':' + prefix.split('*')[-1] + '_' + x.split(':')[-1].replace(':', '_'))\n if 'label' in x.lower():\n lab_evid, clin_lab = [], ' | '.join([clin[x] for x in clin.keys() if 'label' in x.lower()])\n for lab in set(clin_lab.split(' | ')):\n if lab.lower() in ont_label.keys() and ont_label[lab.lower()].split('/')[-1] in result[0][0]:\n lab_evid.append('OBO_LABEL-OMOP_' + x.split('_')[0] + '_LABEL:' + x.split(':')[-1])\n if lab.lower() in ont_syns.keys() and ont_syns[lab.lower()].split('/')[-1] in result[0][0]:\n lab_evid.append('OBO_' + ont_syntp[lab.lower()] + '-OMOP_' + lvl + '_LABEL:' + x.split(':')[-1])\n if 'synonym' in x.lower():\n syn_evid, clin_syn = [], ' | '.join([clin[x] for x in clin.keys() if 'synonym' in x.lower()])\n for syn in set(clin_syn.split(' | ')):\n if syn.lower() in ont_label.keys() and ont_label[syn.lower()].split('/')[-1] in result[0][0]:\n syn_evid.append('OBO_LABEL-OMOP_' + x.split('_')[0] + '_SYNONYM:' + x.split(':')[-1])\n if 
clin_syn.lower() in ont_syns.keys() and ont_syns[syn.lower()].split('/')[-1] in result[0][0]:\n syn_lab = '-OMOP_' + lvl + '_SYNONYM:'\n syn_evid.append('OBO_' + ont_syntp[syn.lower()] + syn_lab + x.split(':')[-1])\n if None not in result[1]:\n sim_evid = ['CONCEPT_SIMILARITY:' + x for x in result[1][-1].split(' | ')]\n\n # compile evidence\n compiled_exact = ' | '.join(list(filter(None, list(unique_everseen(dbx_evid + lab_evid + syn_evid)))))\n compiled_sim = ' | '.join(list(filter(None, list(unique_everseen(sim_evid)))))\n\n return compiled_exact, compiled_sim", "def compute_transcript_level_statistics(exons):\n exons_stats = [x[constants.STATISTICS] for x in exons]\n total_bases = int(np.sum([x[constants.BASES] for x in exons_stats])) if exons_stats else 0\n bases_lt_15x = int(np.sum([x[constants.BASES_LT15X] for x in exons_stats])) if exons_stats else 0\n bases_gte_15x = int(np.sum([x[constants.BASES_GTE15X] for x in exons_stats])) if exons_stats else 0\n bases_gte_30x = int(np.sum([x[constants.BASES_GTE30X] for x in exons_stats])) if exons_stats else 0\n bases_gte_50x = int(np.sum([x[constants.BASES_GTE50X] for x in exons_stats])) if exons_stats else 0\n stats = {\n constants.BASES: total_bases,\n constants.AVERAGE: round(float(np.mean([x[constants.AVERAGE] for x in exons_stats])), 3)\n if exons_stats else 0.0,\n constants.MEDIAN: round(float(np.sum(\n [x[constants.MEDIAN] * x[constants.BASES] for x in exons_stats])) / total_bases, 3) if exons_stats else float(0.0),\n constants.PERCENTILE25: round(float(np.sum(\n [x[constants.PERCENTILE25] * x[constants.BASES] for x in exons_stats])) / total_bases, 3)\n if exons_stats else 0.0,\n constants.PERCENTILE75: round(float(np.sum(\n [x[constants.PERCENTILE75] * x[constants.BASES] for x in exons_stats])) / total_bases, 3)\n if exons_stats else 0.0,\n constants.SD: round(float(np.sum(\n [x[constants.SD] * x[constants.BASES] for x in exons_stats])) / total_bases, 3) if exons_stats else 0.0,\n constants.LT15X: round(float(bases_lt_15x) / total_bases, 5) if total_bases > 0 else 0.0,\n constants.GTE15X: round(float(bases_gte_15x) / total_bases, 5) if total_bases > 0 else 0.0,\n constants.GTE30X: round(float(bases_gte_30x) / total_bases, 5) if total_bases > 0 else 0.0,\n constants.GTE50X: round(float(bases_gte_50x) / total_bases, 5) if total_bases > 0 else 0.0,\n constants.BASES_LT15X: bases_lt_15x,\n constants.BASES_GTE15X: bases_gte_15x,\n constants.BASES_GTE30X: bases_gte_30x,\n constants.BASES_GTE50X: bases_gte_50x\n }\n try:\n stats[constants.GC_CONTENT] = round(float(np.sum(\n [x[constants.GC_CONTENT] * x[constants.BASES] for x in exons_stats]) / total_bases), 5) \\\n if exons_stats and total_bases > 0 else 0.0\n except KeyError:\n # There is no GC content data to show (e.g.: the union transcript)\n pass\n return stats", "def ins_to_ann(interest_rate):\n\n return np.expm1(interest_rate)", "def get_gentrex(novel_ext, model):\n\n transcripts = dict()\n\n t1 = time.time()\n # this doesn't seem to cover all transcripts\n # (also it doesn't take up a lot of memory or time)\n for line_nr, line in enumerate(open(novel_ext, 'rb')):\n ts_id, novex = line.split()\n transcripts[ts_id] = [novex, 0, 0, 0]\n\n print 'Made transcript dict: {0} seconds'.format(time.time()-t1)\n print 'Nr of transcripts: {0}'.format(len(transcripts))\n # gene_id -> transcritpt ID\n genes = dict()\n exons = dict()\n\n t2 = time.time()\n # store the exons in an exon dict\n for line_nr, line in enumerate(open(model, 'rb')):\n (chrm, d,d, beg, end, d, strand, d,d,d,d, 
transcript_id, d, exon_nr)\\\n = line.split()[:14]\n transcript_id = transcript_id.rstrip(';').strip('\"')\n gene_id = '.'.join(transcript_id.split('.')[:-1])\n exon_nr = exon_nr.rstrip(';').strip('\"')\n\n # beg_transcript_ID_exonNr\n Utail_ID = '_'.join([beg, transcript_id, exon_nr])\n #exons[Utail_ID] = Cufflink_exon(chrm, beg, end, strand, transcript_id,\n #gene_id, exon_nr, Utail_ID)\n exons[Utail_ID] = [chrm, beg, end, exon_nr, transcript_id, gene_id, []]\n\n if gene_id in genes:\n if not transcript_id in genes[gene_id]:\n genes[gene_id].add(transcript_id)\n else:\n genes[gene_id] = set([transcript_id])\n\n if transcript_id in transcripts:\n transcripts[transcript_id][1] +=1 # nr of exons for this transcript\n transcripts[transcript_id][2] = strand\n transcripts[transcript_id][3] = gene_id\n else:\n transcripts[transcript_id] = [0, 1, strand, gene_id] # 0 for not in the txt\n pass\n\n print 'Made exon and gene dict: {0} seconds'.format(time.time()-t2)\n print 'Nr of exons: {0}'.format(len(exons))\n print 'Nr of genes: {0}'.format(len(genes))\n\n return genes, transcripts, exons", "def get_geomfromann(vdict, version):\n \n if version == 1:\n # for version 1 ann, may need to fill the data manually\n #Image Corner 1 Latitude -> upper left\n #Image Corner 1 Longitude \n #Image Corner 2 Latitude -> upper right\n #Image Corner 2 Longitude\n #Image Corner 3 Latitude -> lower left\n #Image Corner 3 Longitude\n #Image Corner 4 Latitude -> lower right\n #Image Corner 4 Longitude\n lat0 = float(vdict['Image Corner 1 Latitude'])\n lon0 = float(vdict['Image Corner 1 Longitude'])\n lat1 = float(vdict['Image Corner 2 Latitude'])\n lon1 = float(vdict['Image Corner 2 Longitude'])\n lat2 = float(vdict['Image Corner 4 Latitude'])\n lon2 = float(vdict['Image Corner 4 Longitude']) \n lat3 = float(vdict['Image Corner 3 Latitude'])\n lon3 = float(vdict['Image Corner 3 Longitude'])\n\n if version >= 2:\n # suppose it is version 2\n #Approximate Upper Left Latitude\n #Approximate Upper Left Longitude\n #Approximate Upper Right Latitude\n #Approximate Upper Right Longitude\n #Approximate Lower Left Latitude\n #Approximate Lower Left Longitude\n #Approximate Lower Right Latitude\n #Approximate Lower Right Longitude\n lat0 = float(vdict['Approximate Upper Left Latitude'])\n lon0 = float(vdict['Approximate Upper Left Longitude'])\n lat1 = float(vdict['Approximate Upper Right Latitude'])\n lon1 = float(vdict['Approximate Upper Right Longitude'])\n lat2 = float(vdict['Approximate Lower Right Latitude'])\n lon2 = float(vdict['Approximate Lower Right Longitude']) \n lat3 = float(vdict['Approximate Lower Left Latitude'])\n lon3 = float(vdict['Approximate Lower Left Longitude'])\n \n coords = [[lon0,lat0],[lon1,lat1],[lon2,lat2],[lon3,lat3],[lon0,lat0]]\n #geom = \"GeomFromText('POLYGON((\"\n geom = \"POLYGON((\"\n for i in range(len(coords)):\n geom += str(coords[i][0]) + \" \" + str(coords[i][1])\n geom += \",\"\n geom = geom[:-1]\n geom += \"))\"\n #geom += \"))', 4326)\"\n \n return geom", "def predict(note):\n\n # Patterns for information extraction\n p = re.compile(r\"edss\", re.IGNORECASE)\n p_score = re.compile(r\"\\d\\.\\d\")\n p_num = re.compile(r\"zero|one|two|three|four|five|six|seven|eight|nine\", re.IGNORECASE)\n num_dict = {\n \"zero\":0,\n \"one\":1,\n \"two\":2,\n \"three\":3,\n \"four\":4,\n \"five\":5,\n \"six\":6,\n \"seven\":7,\n \"eight\":8,\n \"nine\":9\n }\n score = -1\n sentences = sent_tokenize(note)\n for sent in sentences:\n # Find sentence with \"EDSS\"\n if len(re.findall(p, sent)) > 0:\n 
# Find score with format \"x.x\"\n if len(re.findall(p_score, sent)) > 0:\n score = float(re.findall(p_score, sent)[0])\n break\n # Find score with format \"EDSS is x\"\n elif len(re.findall(r\"\\s+(?:0|1|2|3|4|5|6|7|8|9)(?:\\.|\\,|\\s+|\\))\", sent)) > 0:\n number = re.findall(r\"\\s+(?:0|1|2|3|4|5|6|7|8|9)(?:\\.|\\,|\\s+|\\))\", sent)[0]\n score = float(re.sub(r\"\\s|\\.|\\,|\\)\", r\"\", number))\n break\n # Find score writtent in \"zero/one ...\"\n elif len(re.findall(p_num, sent)) > 0:\n score = float(num_dict[re.findall(p_num, sent)[0].lower()])\n break\n \n if score not in [0.0, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5]:\n score = -1\n \n \n label_dict = {0.0:0,\n 1.0:1,\n 1.5:2,\n 2.0:3,\n 2.5:4,\n 3.0:5,\n 3.5:6,\n 4.0:7,\n 4.5:8,\n 5.0:9,\n 5.5:10,\n 6.0:11,\n 6.5:12,\n 7.0:13,\n 7.5:14,\n 8.0:15,\n 8.5:16,\n 9.0:17,\n 9.5:18,\n -1:-1}\n \n return label_dict[score]", "def pull_anno(self, index):\n img_id = self.ids[index]\n # anno = ET.parse(self._annopath % img_id).getroot()\n # gt = self.target_transform(anno, 1, 1)\n # return img_id[1], gt", "def assist2aspect ( assist ):\n ## columns in assist that are in aspect\n assist_aspect_cols = [ \n 'Date', 'LAT', 'LON', \n 'TC','PPC','PT','PZ','PF','PTop','PSY','PSH','PA','PMPC',\n 'PMPD','SPC','ST','SZ','SF','STop','SSY','SSH','SA',\n 'SMPC','SMPD','TPC','TT','TZ','TF','TTop','TSY','TSH',\n 'TA','TMPC','TMPD','OW','WT','AT','WS','WD','TCC','V',\n 'WX','PO', 'AO','Comments'\n ]\n ## columns used in constructing the topography feild in assist\n topo_cols = [\n 'PTop','PTopC','PRH','POld','PCs','PSC',\n 'STop','STopC','SRH','SOld','SCs','SSC',\n 'TTop','TTopC','TRH','TOld','TCs','TSC',\n ]\n topo_data = assist[ topo_cols ]\n topo_data = format_topo_data( topo_data )\n \n # brown ice columns\n bi_data = assist[['PA', 'PAL', 'SA', 'SAL', 'TA', 'TAL']]\n \n # get aspect columns from assist and rename to aspect names\n aspect = assist[ assist_aspect_cols ]\n #~ aspect = aspect.astype(str)\n aspect.columns = [\n 'Date', 'Latitude', 'Longitude', \n 'T/Conc', 'c1', 'ty1', 'z1', 'f1', 'to1', 'Sty1', 'Sz1', 'BI1', 'MPc1',\n 'MPd1', 'c2', 'ty2', 'z2', 's2', 'to2', 'Sty2', 'Sz2', 'BI2',\n 'MPc2', 'MPd2', 'c3', 'ty3', 'z3', 'f3', 'to3', 'Sty3', 'Sz3',\n 'BI3', 'MPc3', 'MPd3', 'O/W', 'Water Temp', 'Air Temp', \n 'Wind speed [m/s]','Wind Dir. 
[deg]','Cloud [_/8]','Vis',\n 'WW', 'PO', 'AO', 'Comments'\n ]\n # split out time and date\n aspect['Time'] = aspect['Date'].map(lambda d: str(d).strip().split(' ')[1])\n aspect['Time'] = aspect['Time'].map(\n lambda d: str(\n int(d.split(':')[0]) + (1 if int(d.split(':')[1]) >= 30 else 0)\n )\n )\n aspect['Date'] = aspect['Date'].map(lambda d: str(d).strip().split(' ')[0])\n \n # add columns not in assist\n not_in_assist = ['MPl11', 'MPl21', 'MPl12', 'MPl22', 'MPl13', 'MPl23']\n for key in not_in_assist:\n aspect [key] = None\n \n # Ice Types\n for key in ['ty1', 'ty2', 'ty3']:\n aspect [aspect[key] == '75'][key] = '85'\n \n \n #~ # Floe sizes: this is a direct conversion \n #~ convert = ['f1', 'f2', 'f3']\n \n # Topography \n for col in [('P','to1'), ('S','to2'), ('T','to3')]:\n temp = topo_data[[c for c in topo_data.columns if c[0] == col[0]]]\n temp.columns = ['Top','TopC','RH','Old','Cs','SC',]\n aspect[col[1]] = create_aspect_topo_code(temp)\n \n \n # Algea -> brown ice\n ## use concentration and location to crate single aspect code\n for col in [('P','BI1'),('S','BI2'),('T','BI3')]:\n temp = bi_data[[c for c in bi_data.columns if c[0] == col[0]]]\n temp.columns = ['A', 'AL']\n aspect[col[1]] = ''\n aspect[col[1]][temp['A'] == '0'] = \"'0'\"\n \n for loc in [('10',\"'d00'\"), ('20',\"'0d0'\"), ('30',\"'00d'\")]:\n index = np.logical_and(\n np.logical_or(temp['A'] != '', temp['A'] != '0'),\n temp['AL'] == loc[0]\n )\n aspect[col[1]][index] = \\\n temp['A'][index].map(lambda d: loc[1].replace('d',d))\n \n \n \n \n \n \n # Wind speed convert to m/s\n aspect['Wind speed [m/s]'] = \\\n aspect['Wind speed [m/s]'].astype(float) * .514 # knots * ([m/s]/knots) \n \n # Format weather codes\n #~ aspect['WW'] = aspect['WW'].map(lambda x: str(x).replace('nan',''))\n aspect['WW'] = aspect['WW'].map(lambda x: '{:02d}'.format(int(x)) if str(x) != 'nan' else '')\n \n aspect['AO'][ aspect['AO'].map(lambda x: str(x).lower()) == 'nan'] = ''\n aspect['Observer'] = aspect['PO'] + \\\n aspect['AO'].map(lambda o: ':'+ str(o) if len(str(o)) > 0 else str(o))\n \n \n aspect['Flag1'] = ''\n aspect['Flag2'] = ''\n aspect['Flag3'] = ''\n # aspect columns in corret order\n sorted_aspect_cols = [ \n 'Date', 'Time', 'Latitude', 'Longitude', \n 'T/Conc','c1','ty1','z1','f1','to1','Sty1','Sz1','BI1','MPc1','MPd1',\n 'MPl11','MPl21','c2','ty2','z2','s2','to2','Sty2','Sz2','BI2','MPc2',\n 'MPd2','MPl12','MPl22','c3','ty3','z3','f3','to3','Sty3','Sz3','BI3',\n 'MPc3','MPd3','MPl13','MPl23','O/W','Water Temp','Air Temp',\n 'Wind speed [m/s]','Wind Dir. 
[deg]','Cloud [_/8]','Vis',\n 'WW', 'Flag1', 'Flag2', 'Flag3','Observer','Comments'\n ]\n\n ## TEMP\n return aspect[sorted_aspect_cols]", "def pull_anno(self, index):\n img_id = self.ids[index]\n anno = ET.parse(self._annopath % img_id).getroot() #Element\n gt = self.target_transform(anno, 1, 1)\n return img_id[1], gt", "def _parse_ann_info(self, img_info, ann_info):\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n gt_masks_ann = []\n\n for i, ann in enumerate(ann_info):\n if ann.get('ignore', False):\n continue\n x1, y1, w, h = ann['bbox']\n if ann['area'] <= 0 or w < 1 or h < 1:\n continue\n bbox = [x1, y1, x1 + w - 1, y1 + h - 1]\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(bbox)\n else:\n gt_bboxes.append(bbox)\n\n gt_masks_ann.append(ann['segmentation'])\n\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n seg_map = img_info['filename'].replace('jpg', 'png')\n\n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks=gt_masks_ann,\n seg_map=seg_map)\n\n return ann", "def enrich_annotation_novel_splicing(self, annotation, verbose):\n\n # Loop over the confident novel exons\n for novel_exon in self.confident_novel_exons:\n\n # Each novel exon might have more than one upstream\n # exons that can splice to.\n # For this reason for each novel exon we loop as\n # many times as the upstream exons that splices to.\n for up in self.confident_novel_exons[novel_exon]:\n\n # For the novel exon and the specific upstream splice site\n # we identify all the possible upstream exons.\n upstream_exons = []\n\n for current_feature_region in annotation.feature_regions_upstream_coordinates:\n\n # Loop over the possible exons\n for upstream_exon in annotation.feature_regions_upstream_coordinates[current_feature_region]:\n\n if upstream_exon.split(\":\")[3] is \"+\":\n\n # Check if the 5'SS is the same\n if upstream_exon.split(\":\")[2] == up[0].split(\":\")[2]:\n\n if upstream_exon not in upstream_exons:\n\n upstream_exons.append(upstream_exon)\n\n elif upstream_exon.split(\":\")[3] is \"-\":\n\n # Check if the 5'SS is the same\n if upstream_exon.split(\":\")[1] == up[0].split(\":\")[2]:\n\n if upstream_exon not in upstream_exons:\n\n upstream_exons.append(upstream_exon)\n\n # Now that we have the upstream exons, we find annotated\n # transcripts that contain these exons\n\n # loop over the upstream exons\n for exon in upstream_exons:\n\n if exon in annotation.genes[self.gene_id\n ].exon_coordinates_dict:\n\n # find exon ids for the exon coordinates\n for exon_id in annotation.genes[self.gene_id].exon_coordinates_dict[exon]:\n\n # Get transcripts of the gene\n for transcript in annotation.genes[self.gene_id].get_known_transctipt_ids():\n\n # Split exon\n novel_exon_sp = novel_exon.strip().split(\":\")\n\n # make sure that the novel exon is not\n # found after the transcript end\n if (\n (\n (annotation.transcripts[\n transcript].strand == '+') and\n (int(novel_exon_sp[1]) <\n int(annotation.transcripts[\n transcript].end)) and\n (int(novel_exon_sp[2]) <\n int(annotation.transcripts[\n transcript].end))\n ) or\n (\n (annotation.transcripts[\n transcript].strand == '-') and\n (int(novel_exon_sp[1]) >\n 
int(annotation.transcripts[\n transcript].start)) and\n (int(novel_exon_sp[2]) >\n int(annotation.transcripts[\n transcript].start))\n )\n ):\n\n # Loop over the exons of each transcript\n for transcript_exon in annotation.transcripts[transcript].exon_list_sorted_by_end_coord:\n\n # If exon exists in this transcript\n if exon_id == transcript_exon.exon_id:\n\n if annotation.transcripts[\n transcript].strand == \"+\":\n\n # Create novel transcript annotation\n exons_before_novel_one = annotation.transcripts[\n transcript].get_existing_and_upstream_exons(\n exon.split(\":\")[1],\n exon.split(\":\")[2],\n exon.split(\":\")[3])\n\n # In case the list is not empty\n if len(exons_before_novel_one) > 0:\n\n exons_before_novel_one_name = []\n for x in exons_before_novel_one:\n exons_before_novel_one_name.append(\":\".join([str(x.start), str(x.end)]))\n\n # create unique transcript id based on all the exons found before the novel one\n upstream_coords = hashlib.md5(('_'.join(exons_before_novel_one_name).encode('utf-8'))).hexdigest()\n novel_transcript_id = \"\"\n novel_transcript_id += \"novel_\"+annotation.transcripts[transcript].gene_id\n novel_transcript_id += \"|UE_\"+upstream_coords\n novel_transcript_id += \"|5pSS_\"+str(str(novel_exon.split(\":\")[1]))\n novel_transcript_id += \"|PAS_\"+str(str(novel_exon.split(\":\")[2]))\n\n # create transcript object\n novelTranscript = Transcript(\n chromosome=annotation.transcripts[transcript].chromosome,\n source=\"TECtool_annotated\",\n feature=\"transcript\",\n start=str(annotation.transcripts[transcript].start),\n end=novel_exon.split(\":\")[2],\n score=annotation.transcripts[transcript].score,\n strand=\"+\",\n frame=\".\",\n gene_id=annotation.transcripts[transcript].gene_id,\n transcript_id=novel_transcript_id,\n gene_name=annotation.transcripts[transcript].gene_name,\n gene_biotype=annotation.transcripts[transcript].gene_biotype,\n transcript_name=novel_transcript_id,\n transcript_biotype=\"novel_splicing\"\n )\n\n # Add the transcript in the list of novel transcripts\n annotation.genes[self.gene_id].novel_transcripts.append(novelTranscript)\n\n # Store to dictionary key :\n # novel transcript id\n # value: [transcipt id 1,\n # transcipt id 2] the\n # potential transcripts\n # that a novel transcript\n # can originate from\n annotation.genes[self.gene_id].mother_transcripts_of_novel_transcripts.setdefault(novel_transcript_id, []).append(transcript)\n\n # Create novel exon\n # annotation\n exon_count = 1\n for gtf_exon in annotation.transcripts[transcript].exon_list_sorted_by_end_coord:\n\n if gtf_exon.end < int(novel_exon.split(\":\")[1]):\n\n novelExon = Exon(\n chromosome = gtf_exon.chromosome,\n source=\"TECtool_annotated\",\n feature=\"exon\",\n start=gtf_exon.start,\n end=gtf_exon.end,\n score=\".\",\n strand=\"+\",\n frame=gtf_exon.frame,\n gene_id=self.gene_id,\n transcript_id=novel_transcript_id,\n exon_number=str(exon_count),\n gene_name=annotation.transcripts[transcript].gene_name,\n gene_biotype=annotation.transcripts[transcript].gene_biotype,\n transcript_name=novel_transcript_id,\n transcript_biotype='novel_splicing',\n exon_id=\"novel_\"+exon_id+\"_\"+novel_transcript_id\n )\n\n novelExon.CDS = gtf_exon.CDS\n novelExon.start_codon = gtf_exon.start_codon\n novelExon.stop_codon = gtf_exon.stop_codon\n\n novelTranscript.novel_exons.append(novelExon)\n\n exon_count += 1\n\n novelExon = Exon(\n chromosome = novel_exon.split(\":\")[0],\n source=\"TECtool_annotated\",\n feature=\"exon\",\n start=str(novel_exon.split(\":\")[1]),\n 
end=str(novel_exon.split(\":\")[2]),\n score=\".\",\n strand=\"+\",\n frame=None,\n gene_id=self.gene_id,\n transcript_id=novel_transcript_id,\n exon_number=str(exon_count),\n gene_name=annotation.transcripts[transcript].gene_name,\n gene_biotype=annotation.transcripts[transcript].gene_biotype,\n transcript_name=novel_transcript_id,\n transcript_biotype='novel_splicing', # novel_splicing_exon_last\n exon_id=\"novel_terminal_exon_\"+novel_transcript_id\n )\n\n novelExon.CDS = None\n novelExon.start_codon = None\n novelExon.stop_codon = None\n\n novelTranscript.novel_exons.append(novelExon)\n\n elif annotation.transcripts[transcript].strand == \"-\":\n\n exons_before_novel_one = annotation.transcripts[transcript].get_existing_and_upstream_exons(exon.split(\":\")[1], exon.split(\":\")[2], exon.split(\":\")[3])\n\n # In case the list is not empty\n if len(exons_before_novel_one) > 0:\n\n exons_before_novel_one_name = []\n for x in exons_before_novel_one:\n exons_before_novel_one_name.append(\":\".join([str(x.start), str(x.end)]))\n\n # create unique transcript id based on all the exons found before the novel one\n upstream_coords = hashlib.md5(('_'.join(exons_before_novel_one_name).encode('utf-8'))).hexdigest()\n novel_transcript_id = \"\"\n novel_transcript_id += \"novel_\"+annotation.transcripts[transcript].gene_id\n novel_transcript_id += \"|UE_\"+upstream_coords\n novel_transcript_id += \"|5pSS_\"+str(str(novel_exon.split(\":\")[2]))\n novel_transcript_id += \"|PAS_\"+str(str(novel_exon.split(\":\")[1]))\n\n novelTranscript = Transcript(\n chromosome=annotation.transcripts[transcript].chromosome,\n source=\"TECtool_annotated\",\n feature=\"transcript\",\n start=str(novel_exon.split(\":\")[1]),\n end=str(annotation.transcripts[transcript].end),\n score=annotation.transcripts[transcript].score,\n strand=\"-\",\n frame=\".\",\n gene_id=annotation.transcripts[transcript].gene_id,\n transcript_id=novel_transcript_id,\n gene_name=annotation.transcripts[transcript].gene_name,\n gene_biotype=annotation.transcripts[transcript].gene_biotype,\n transcript_name=novel_transcript_id,\n transcript_biotype=\"novel_splicing\"\n )\n\n # Add the transcript in the list of novel transcripts\n annotation.genes[self.gene_id].novel_transcripts.append(novelTranscript)\n\n # Store to dictionary key : novel transcript id value: [transcipt id 1, transcipt id 2] \n # the potential transcripts that a novel transcript can originate from \n annotation.genes[self.gene_id].mother_transcripts_of_novel_transcripts.setdefault(novel_transcript_id, []).append(transcript)\n\n # Create novel exon annotation\n exon_count = 1\n # for gtf_exon in annotation.transcripts[transcript].exon_list_sorted_by_end_coord[::-1]:\n for gtf_exon in exons_before_novel_one:\n\n if(gtf_exon.start >= int(novel_exon.split(\":\")[2])): # make sure that this check is enough ... 
Well IT IS NOT....\n\n novelExon = Exon(\n chromosome = novel_exon.split(\":\")[0],\n source=\"TECtool_annotated\",\n feature=\"exon\",\n start=gtf_exon.start,\n end=gtf_exon.end,\n score=\".\",\n strand=\"-\",\n frame=gtf_exon.frame,\n gene_id=self.gene_id,\n transcript_id=novel_transcript_id,\n exon_number=str(exon_count),\n gene_name=annotation.transcripts[transcript].gene_name,\n gene_biotype=annotation.transcripts[transcript].gene_biotype,\n transcript_name=novel_transcript_id,\n transcript_biotype='novel_splicing',\n exon_id=\"novel_\"+exon_id+\"_\"+novel_transcript_id\n )\n\n\n novelExon.CDS = gtf_exon.CDS\n novelExon.start_codon = gtf_exon.start_codon\n novelExon.stop_codon = gtf_exon.stop_codon\n\n novelTranscript.novel_exons.append(novelExon)\n\n exon_count += 1\n\n novelExon = Exon(\n chromosome = novel_exon.split(\":\")[0],\n source=\"TECtool_annotated\",\n feature=\"exon\",\n start=str(novel_exon.split(\":\")[1]),\n end=str(novel_exon.split(\":\")[2]),\n score=\".\",\n strand=\"-\",\n frame=None,\n gene_id=self.gene_id,\n transcript_id=novel_transcript_id,\n exon_number=str(exon_count),\n gene_name=annotation.transcripts[transcript].gene_name,\n gene_biotype=annotation.transcripts[transcript].gene_biotype,\n transcript_name=novel_transcript_id,\n transcript_biotype='novel_splicing', # novel_splicing_exon_last\n exon_id=\"novel_terminal_exon_\"+novel_transcript_id\n )\n\n novelExon.CDS = None\n novelExon.start_codon = None\n novelExon.stop_codon = None\n\n novelTranscript.novel_exons.append(novelExon)\n\n else:\n stderr.write(\"[ERROR] Problem with strand info.\")\n sys.exit(-1)", "def camelyon16xml2json(inxml, level):\n root = ET.parse(inxml).getroot()\n annotations_tumor = \\\n root.findall('./Annotations/Annotation[@PartOfGroup=\"Tumor\"]')\n annotations_0 = \\\n root.findall('./Annotations/Annotation[@PartOfGroup=\"_0\"]')\n annotations_1 = \\\n root.findall('./Annotations/Annotation[@PartOfGroup=\"_1\"]')\n annotations_2 = \\\n root.findall('./Annotations/Annotation[@PartOfGroup=\"_2\"]')\n annotations_positive = \\\n annotations_tumor + annotations_0 + annotations_1\n annotations_negative = annotations_2\n\n json_dict = {}\n json_dict['positive'] = []\n json_dict['negative'] = []\n\n for annotation in annotations_positive:\n \n X = list(map(lambda x: float(x.get('X')),\n annotation.findall('./Coordinates/Coordinate')))\n# print(X)\n X = [i/pow(2,level) for i in X]\n Y = list(map(lambda x: float(x.get('Y')),\n annotation.findall('./Coordinates/Coordinate')))\n Y = [i/pow(2,level) for i in Y]\n vertices = np.round([X, Y]).astype(int).transpose().tolist()\n name = annotation.attrib['Name']\n json_dict['positive'].append({'name': name, 'vertices': vertices})\n\n for annotation in annotations_negative:\n X = list(map(lambda x: float(x.get('X')),\n annotation.findall('./Coordinates/Coordinate')))\n X = [i/pow(2,level) for i in X]\n Y = list(map(lambda x: float(x.get('Y')),\n annotation.findall('./Coordinates/Coordinate')))\n Y = [i/pow(2,level) for i in Y]\n vertices = np.round([X, Y]).astype(int).transpose().tolist()\n name = annotation.attrib['Name']\n json_dict['negative'].append({'name': name, 'vertices': vertices})\n\n return json_dict", "def dotheglm(sensitivities,\n eventdir,\n annot_dir):\n # normalize the sensitivities\n from sklearn.preprocessing import normalize\n import copy\n #default for normalization is the L2 norm\n sensitivities_to_normalize = copy.deepcopy(sensitivities)\n for i in range(len(sensitivities)):\n sensitivities_to_normalize[i].samples = 
normalize(sensitivities_to_normalize[i].samples, axis = 1)\n\n sensitivities_stacked = mv.vstack(sensitivities_to_normalize)\n if bilateral:\n sensitivities_stacked.sa['bilat_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.targets)\n mean_sens = mv.mean_group_sample(['bilat_ROIs_str'])(sensitivities_stacked)\n else:\n sensitivities_stacked.sa['all_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.targets)\n mean_sens = mv.mean_group_sample(['all_ROIs_str'])(sensitivities_stacked)\n mean_sens_transposed = mean_sens.get_mapped(mv.TransposeMapper())\n\n # get a list of the event files with occurances of faces\n event_files = sorted(glob(eventdir + '/*'))\n assert len(event_files) == 8\n # get additional events from the location annotation\n location_annotation = pd.read_csv(annot_dir, sep='\\t')\n\n # get all settings with more than one occurrence\n setting = [set for set in location_annotation.setting.unique()\n if (location_annotation.setting[location_annotation.setting == set].value_counts()[0] > 1)]\n\n # get onsets and durations\n onset = []\n duration = []\n condition = []\n for set in setting:\n for i in range(location_annotation.setting[location_annotation['setting'] == set].value_counts()[0]):\n onset.append(location_annotation[location_annotation['setting'] == set]['onset'].values[i])\n duration.append(location_annotation[location_annotation['setting'] == set]['duration'].values[i])\n condition.append([set] * (i + 1))\n # flatten conditions\n condition = [y for x in condition for y in x]\n assert len(condition) == len(onset) == len(duration)\n\n # concatenate the strings\n condition_str = [set.replace(' ', '_') for set in condition]\n condition_str = ['location_' + set for set in condition_str]\n\n # put it in a dataframe\n locations = pd.DataFrame({\n 'onset': onset,\n 'duration': duration,\n 'condition': condition_str\n })\n\n # sort according to onsets to be paranoid\n locations_sorted = locations.sort_values(by='onset')\n\n # this is a dataframe encoding flow of time\n time_forward = pd.DataFrame([{\n 'condition': 'time+',\n 'onset': location_annotation['onset'][i],\n 'duration': 1.0}\n for i in range(len(location_annotation) - 1)\n if location_annotation['flow_of_time'][i] in ['+', '++']])\n\n time_back = pd.DataFrame([{\n 'condition': 'time-',\n 'onset': location_annotation['onset'][i],\n 'duration': 1.0} for i in range(len(location_annotation) - 1)\n if location_annotation['flow_of_time'][i] in ['-', '--']])\n\n # sort according to onsets to be paranoid\n time_forward_sorted = time_forward.sort_values(by='onset')\n time_back_sorted = time_back.sort_values(by='onset')\n\n scene_change = pd.DataFrame([{\n 'condition': 'scene-change',\n 'onset': location_annotation['onset'][i],\n 'duration': 1.0}\n for i in range(len(location_annotation) - 1)])\n\n scene_change_sorted = scene_change.sort_values(by='onset')\n\n # this is a dataframe encoding exterior\n exterior = pd.DataFrame([{\n 'condition': 'exterior',\n 'onset': location_annotation['onset'][i],\n 'duration': location_annotation['duration'][i]}\n for i in range(len(location_annotation) - 1)\n if (location_annotation['int_or_ext'][i] == 'ext')])\n\n # sort according to onsets to be paranoid\n exterior_sorted = exterior.sort_values(by='onset')\n\n # this is a dataframe encoding nighttime\n night = pd.DataFrame([{'condition': 'night',\n 'onset': location_annotation['onset'][i],\n 'duration': location_annotation['duration'][i]}\n for i in range(len(location_annotation) - 1)\n if 
(location_annotation['time_of_day'][i] == 'night')])\n\n # sort according to onsets to be paranoid\n night_sorted = night.sort_values(by='onset')\n\n assert np.all(locations_sorted.onset[1:].values >= locations_sorted.onset[:-1].values)\n assert np.all(time_back_sorted.onset[1:].values >= time_back_sorted.onset[:-1].values)\n assert np.all(time_forward_sorted.onset[1:].values >= time_forward_sorted.onset[:-1].values)\n assert np.all(exterior_sorted.onset[1:].values >= exterior_sorted.onset[:-1].values)\n assert np.all(night_sorted.onset[1:].values >= night_sorted.onset[:-1].values)\n assert np.all(scene_change_sorted.onset[1:].values >= scene_change_sorted.onset[:-1].values)\n\n # check whether chunks are increasing as well as sanity check\n chunks = mean_sens_transposed.sa.chunks\n assert np.all(chunks[1:] >= chunks[:-1])\n\n # TR was not preserved/carried through in .a\n # so we will guestimate it based on the values of time_coords\n tc = mean_sens_transposed.sa.time_coords\n TRdirty = sorted(np.unique(tc[1:] - tc[:-1]))[-1]\n assert np.abs(np.round(TRdirty, decimals=2) - TRdirty) < 0.0001\n\n # make time coordinates real seconds\n mean_sens_transposed.sa.time_coords = np.arange(len(mean_sens_transposed)) * TRdirty\n\n # get runs, and runlengths in seconds\n runs = sorted(mean_sens_transposed.UC)\n assert runs == range(len(runs))\n runlengths = [np.max(tc[mean_sens_transposed.sa.chunks == run]) + TRdirty\n for run in runs]\n runonsets = [sum(runlengths[:run]) for run in runs]\n assert len(runs) == 8\n\n # initialize the list of dicts that gets later passed to the glm\n events_dicts = []\n # This is relevant to later stack all dataframes together\n # and paranoidly make sure that they have the same columns\n cols = ['onset', 'duration', 'condition']\n\n for run in runs:\n # get face data\n eventfile = sorted(event_files)[run]\n events = pd.read_csv(eventfile, sep='\\t')\n\n for index, row in events.iterrows():\n\n # disregard no faces, put everything else into event structure\n if row['condition'] != 'no_face':\n dic = {\n 'onset': row['onset'] + runonsets[run],\n 'duration': row['duration'],\n 'condition': row['condition']\n }\n events_dicts.append(dic)\n\n # concatenate all event dataframes\n run_reg = pd.DataFrame([{\n 'onset': runonsets[i],\n 'duration': abs(runonsets[i] - runonsets[i + 1]),\n 'condition': 'run-' + str(i + 1)}\n for i in range(7)])\n\n # get all of these wonderful dataframes into a list and squish them\n dfs = [locations_sorted[cols], scene_change_sorted[cols],\n time_back_sorted[cols], time_forward_sorted,\n exterior_sorted[cols], night_sorted[cols], run_reg[cols]]\n allevents = pd.concat(dfs)\n\n # save all non-face related events in an event file, just for the sake of it\n allevents.to_csv(results_dir + '/' + 'non_face_regs.tsv', sep='\\t', index=False)\n\n # append non-faceevents to event structure for glm\n for index, row in allevents.iterrows():\n dic = {\n 'onset': row['onset'],\n 'duration': row['duration'],\n 'condition': row['condition']\n }\n events_dicts.append(dic)\n\n # save this event dicts structure as a tsv file\n import csv\n with open(results_dir + '/' + 'full_event_file.tsv', 'w') as tsvfile:\n fieldnames = ['onset', 'duration', 'condition']\n writer = csv.DictWriter(tsvfile, fieldnames=fieldnames, delimiter='\\t')\n writer.writeheader()\n writer.writerows(events_dicts)\n # save this event file also as json file... 
can there ever be enough different files...\n import json\n with open(results_dir + '/' + 'allevents.json', 'w') as f:\n json.dump(events_dicts, f)\n\n # do the glm - we've earned it\n hrf_estimates = mv.fit_event_hrf_model(mean_sens_transposed,\n events_dicts,\n time_attr='time_coords',\n condition_attr='condition',\n design_kwargs=dict(drift_model='blank'),\n glmfit_kwargs=dict(model='ols'),\n return_model=True)\n\n mv.h5save(results_dir + '/' + 'sens_glm_avmovie_results.hdf5', hrf_estimates)\n print('calculated the, saving results.')\n\n return hrf_estimates", "def build_exon(exon_info, build=\"37\"):\n try:\n ensembl_exon_id = exon_info[\"ens_exon_id\"]\n except KeyError:\n raise KeyError(\"Exons has to have a ensembl_exon_id\")\n\n try:\n chrom = str(exon_info[\"chrom\"])\n except KeyError:\n raise KeyError(\"Exons has to have a chromosome\")\n\n try:\n start = int(exon_info[\"start\"])\n except KeyError:\n raise KeyError(\"Exon has to have a start\")\n except TypeError:\n raise TypeError(\"Exon start has to be integer\")\n\n try:\n end = int(exon_info[\"end\"])\n except KeyError:\n raise KeyError(\"Exon has to have a end\")\n except TypeError:\n raise TypeError(\"Exon end has to be integer\")\n\n try:\n rank = int(exon_info[\"rank\"])\n except KeyError:\n raise KeyError(\"Exon has to have a rank\")\n except TypeError:\n raise TypeError(\"Exon rank has to be integer\")\n\n try:\n strand = int(exon_info[\"strand\"])\n except KeyError:\n raise KeyError(\"Exon has to have a strand\")\n except TypeError:\n raise TypeError(\"Exon strand has to be integer\")\n\n try:\n exon_id = exon_info[\"exon_id\"]\n except KeyError:\n raise KeyError(\"Exons has to have a id\")\n\n try:\n transcript = exon_info[\"transcript\"]\n except KeyError:\n raise KeyError(\"Exons has to have a transcript\")\n\n try:\n hgnc_id = int(exon_info[\"hgnc_id\"])\n except KeyError:\n raise KeyError(\"Exons has to have a hgnc_id\")\n except TypeError:\n raise TypeError(\"hgnc_id has to be integer\")\n\n exon_obj = Exon(\n exon_id=exon_id,\n chrom=chrom,\n start=start,\n end=end,\n strand=strand,\n rank=rank,\n transcript=transcript,\n hgnc_id=hgnc_id,\n build=build,\n )\n\n return exon_obj", "def convert_to_annolid_format(frame_number,\n masks,\n frame=None,\n model=None,\n min_mask_area=float('-inf'),\n max_mask_area=float('inf'),\n existing_masks=None\n ):\n pred_rows = []\n for mask in masks:\n mask_area = mask.get(\"area\", 0)\n if min_mask_area <= mask_area <= max_mask_area:\n x1 = mask.get(\"bbox\")[0]\n y1 = mask.get(\"bbox\")[1]\n x2 = mask.get(\"bbox\")[0] + mask.get(\"bbox\")[2]\n y2 = mask.get(\"bbox\")[1] + mask.get(\"bbox\")[3]\n score = mask.get(\"predicted_iou\", '')\n segmentation = mask.get(\"segmentation\", '')\n mask_features = get_mask_features(frame, segmentation, model)\n mask_id = generate_mask_id(mask_features, existing_masks)\n instance_name = mask.get(\"instance_name\", f'instance_{mask_id}')\n segmentation = mask_util.encode(segmentation)\n tracking_id = mask.get(\"tracking_id\", \"\")\n\n pred_rows.append({\n \"frame_number\": frame_number,\n \"x1\": x1,\n \"y1\": y1,\n \"x2\": x2,\n \"y2\": y2,\n \"instance_name\": instance_name,\n \"class_score\": score,\n \"segmentation\": segmentation,\n \"tracking_id\": tracking_id\n })\n\n return pred_rows", "def read_intron_pos3(file_intron_exon):\n \n all_intron_poss = []\n mRNA2exons = collections.defaultdict(list) \n for l in open(file_intron_exon):\n #AT1G01010.1_exon1\n #AT1G01010.1_intron1\n d = l.rstrip(\"\\n\").split(\"\\t\")\n exon_or_intron_name 
= d[3]\n chr_name = d[0]\n start, end = int(d[1])+1, int(d[2])\n strand = d[5]\n mRNA, exon_or_intron_id = exon_or_intron_name.split(\"_\")\n if exon_or_intron_id.startswith(\"i\"):\n all_intron_poss.append((exon_or_intron_name, mRNA, start, end))\n else:\n mRNA2exons[mRNA].append((chr_name, start, end, strand))\n mRNA_pos = {}\n for mRNA, mRNA_exons in mRNA2exons.items():\n total_exon_num = len(mRNA_exons)\n chr_name = mRNA_exons[0][0]\n strand = mRNA_exons[0][3]\n mRNA_start = min([i[1] for i in mRNA_exons])\n mRNA_end = max([i[2] for i in mRNA_exons])\n mRNA_pos[mRNA] = [chr_name, mRNA_start, mRNA_end, strand]\n intron_rel_poss = collections.defaultdict(list)\n for intron_id, mRNA, intron_start, intron_end in all_intron_poss:\n chr_name, mRNA_start, mRNA_end, strand = mRNA_pos[mRNA]\n if strand == \"+\":\n pos3 = int(intron_end) - mRNA_end\n else:\n pos3 = mRNA_start - int(intron_start)\n intron_rel_poss[mRNA].append([pos3, intron_id])\n for mRNA, d in intron_rel_poss.items():\n d.sort(key=lambda x: x[0])\n return intron_rel_poss", "def withinExons(self, type, exStart, exEnd, detailed=0):\n\n genes = {}\n exons = {}\n\n (exChromosome, exStartCoord, exStrand) = exStart.split(':')\n exEndCoord = exEnd.split(':')[1]\n\n # exStart, exEnd are biological (not start<end), but we need them start<end --> swap them if exStrand=='-'\n if exStrand == '-':\n exStartCoord, exEndCoord = exEndCoord, exStartCoord\n \n self.__inform(\"withinExons: analyzing %s, %s, %s, %s\\n\" % (exChromosome, exStrand, exStartCoord, exEndCoord),7)\n \n if self.exonInfo.has_key(type) and \\\n self.exonInfo[type].has_key(exChromosome) and \\\n self.exonInfo[type][exChromosome].has_key(exStrand):\n annStarts = self.exonInfo[type][exChromosome][exStrand].keys()\n for annStartCoord in annStarts:\n annEnds = self.exonInfo[type][exChromosome][exStrand][annStartCoord].keys()\n for annEndCoord in annEnds:\n self.__inform(\"withinExons: chr and strand found. now checking annotations\",7)\n if self.__within(exStartCoord, exEndCoord, annStartCoord, annEndCoord):\n if genes.has_key(self.exonInfo[type][exChromosome][exStrand][annStartCoord][annEndCoord]):\n genes[self.exonInfo[type][exChromosome][exStrand][annStartCoord][annEndCoord][0]] += 1\n else:\n genes[self.exonInfo[type][exChromosome][exStrand][annStartCoord][annEndCoord][0]] = 1\n if detailed == 0:\n exons[self.exonInfo[type][exChromosome][exStrand]\\\n [annStartCoord][annEndCoord][1]] = self.exonInfo[type][exChromosome][exStrand][annStartCoord][annEndCoord][0]\n elif detailed == 1:\n exons[self.exonInfo[type][exChromosome][exStrand]\\\n [annStartCoord][annEndCoord][1]] = (self.exonInfo[type][exChromosome][exStrand][annStartCoord][annEndCoord][0],annStartCoord,annEndCoord)\n return genes.keys(), exons" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the current application id.
def getApplicationId():
    return app_identity.get_application_id()
[ "def app_id(self):\n return self._app_id or self._modules['default'].data['application']", "def id(cls) -> Global:\n return Global.current_application_id()", "def get_app_id():\n return yaml.safe_load(open(APP_DIR + '/app.yaml'))['application']", "def applicationId():\n try:\n import cspark\n except ImportError as e:\n sys.path.append( os.path.dirname( __file__ ))\n import cspark\n\n if not cspark.spark_running():\n return f\"NoSpark-or-local{os.getpid()}\"\n\n try:\n return applicationIdFromEnvironment()\n except KeyError:\n pass\n\n # Perhaps we are running on the head-end. If so, run a Spark job that finds it.\n try:\n from pyspark import SparkConf, SparkContext\n sc = SparkContext.getOrCreate()\n if \"local\" in sc.getConf().get(\"spark.master\"):\n return f\"local{os.getpid()}\"\n # Note: make sure that the following map does not require access to any existing module.\n appid = sc.parallelize([1]).map(lambda x: \"_\".join(['application'] + os.environ['CONTAINER_ID'].split(\"_\")[1:3])).collect()\n return appid[0]\n except ImportError:\n pass\n\n # Application ID cannot be determined.\n return f\"unknown{os.getpid()}\"", "def app_uuid(self):\n return self.__app_uuid", "def logic_app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"logic_app_id\")", "def app_bundle_id(self) -> str:\n return pulumi.get(self, \"app_bundle_id\")", "def app_d_id(self) -> str:\n return self._app_d_id", "def get_application_id(self, application_name):\n\n response = self.api.get_projects()\n APIHelper().check_for_response_errors(response)\n\n for application in response.data['data']:\n if application['name'] == application_name:\n return application['id']\n return None", "def get_app_id(\n functions_client: functions.FunctionsManagementClient,\n app_name: str, compartment_id: str) -> str:\n result = pagination.list_call_get_all_results(\n functions_client.list_applications,\n compartment_id\n )\n for app in result.data:\n if app_name == app.display_name:\n return app.id\n\n raise Exception(\"app not found\")", "def get_id(self):\n return self.data[self.system_idx][\"id\"]", "def onboarded_app_pkg_id(self) -> str:\n return self._onboarded_app_pkg_id", "def getAppVersion():\n return os.environ.get('CURRENT_VERSION_ID')", "def current_app(self):\n return self.app", "def androidId(self):\n if self._androidId:\n return self._androidId\n else:\n self._androidId = self.checkin()\n self.config[\"GPlay\"][\"AndroidId\"] = self._androidId\n self.config.write()\n return self._androidId", "def get_app_name(self):\n return self._APP_NAME", "def id(self) -> int:\n return self._context.id", "def appname(self, appId):\n return self.get_apps()[appId]['appName']", "def teams_app_id(self):\n return self.properties.get('teamsAppId', None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the application's email address.
def getApplicationEmail(name):
    app_id = getApplicationId()
    assert app_id
    return "%s@%s.appspotmail.com" % (name, app_id)
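As a quick illustration of the address format, the sender name is simply joined with the application id under the appspotmail.com domain; the id below is a hypothetical placeholder, since on App Engine it comes from getApplicationId().

app_id = 'example-app'  # hypothetical; the real value is the deployed application id
name = 'support'        # hypothetical sender name
print("%s@%s.appspotmail.com" % (name, app_id))
# -> support@example-app.appspotmail.com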
[ "def email_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"email_address\")", "def get_email(self):\r\n return self.email", "def email(self):\n return '{}.{}@email.com'.format(self.first, self.last)", "def provider_email(self) -> str:\n return pulumi.get(self, \"provider_email\")", "def get_email():\n from six.moves.configparser import NoOptionError\n\n email = ''\n\n gitConfig = get_gitConfig()\n if gitConfig:\n try:\n email = gitConfig.get('user', 'email')\n except (KeyError, NoOptionError):\n pass\n\n # if not successful, we improvise\n if not email:\n import socket\n user = get_userName()\n host = socket.gethostname()\n email = \"{user}@{host}\".format(user=user, host=host)\n\n return email", "def master_account_email(self) -> str:\n return pulumi.get(self, \"master_account_email\")", "def mail_address(self):\n return self.project_name + self.base_mail_address", "def default_email_address():\n import socket\n import getpass\n hostname = socket.gethostname()\n username = getpass.getuser()\n return '%s@%s'%(username, hostname)", "def thread_email(self):\n return self._thread_email", "def client_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_email\")", "def get_default_email_address(self):\n return self.teams[\"Default\"].email", "def sender_email(self) -> str:\n return str(self._email.SenderEmailAddress)", "def email_receiver(self) -> str:\n return self._email_receiver", "def location_email_address(self):\n if \"locationEmailAddress\" in self._prop_dict:\n return self._prop_dict[\"locationEmailAddress\"]\n else:\n return None", "def email_sender(self) -> str:\n return self._email_sender", "def notification_email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"notification_email\")", "def _get_user_email():\n try:\n user = oauth.get_current_user([\n \"https://www.googleapis.com/auth/userinfo.email\"])\n except (oauth.OAuthRequestError, oauth.OAuthServiceFailureError):\n user = None\n return user.email() if user else None", "def notification_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"notification_email\")", "def get_application_default_account():\n return get_token_info(generate_default_session())['email']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the application's noreply email address.
def getApplicationNoReplyEmail():
    return getApplicationEmail('no-reply')
[ "def getApplicationEmail(name):\n app_id = getApplicationId()\n assert app_id\n\n return \"%s@%s.appspotmail.com\" % (name, app_id)", "def email_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"email_address\")", "def email_reply_to_address(self) -> ConfigNodePropertyString:\n return self._email_reply_to_address", "def provider_email(self) -> str:\n return pulumi.get(self, \"provider_email\")", "def default_email_address():\n import socket\n import getpass\n hostname = socket.gethostname()\n username = getpass.getuser()\n return '%s@%s'%(username, hostname)", "def mail_address(self):\n return self.project_name + self.base_mail_address", "def client_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_email\")", "def GetCanonicalEmail(owner):\n # TODO: Remove this or update to use GAE app's default domain before\n # public release.\n if '@' not in owner:\n owner = '{user}@{domain}'.format(user=owner, domain=DEFAULT_DOMAIN)\n\n return owner", "def sender_email(self) -> str:\n return str(self._email.SenderEmailAddress)", "def get_email(self):\r\n return self.email", "def email_receiver(self) -> str:\n return self._email_receiver", "def notification_email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"notification_email\")", "def notification_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"notification_email\")", "def email(self):\n return '{}.{}@email.com'.format(self.first, self.last)", "def get_default_email_address(self):\n return self.teams[\"Default\"].email", "def master_account_email(self) -> str:\n return pulumi.get(self, \"master_account_email\")", "def email_sender(self) -> str:\n return self._email_sender", "def get_email():\n from six.moves.configparser import NoOptionError\n\n email = ''\n\n gitConfig = get_gitConfig()\n if gitConfig:\n try:\n email = gitConfig.get('user', 'email')\n except (KeyError, NoOptionError):\n pass\n\n # if not successful, we improvise\n if not email:\n import socket\n user = get_userName()\n host = socket.gethostname()\n email = \"{user}@{host}\".format(user=user, host=host)\n\n return email", "def thread_email(self):\n return self._thread_email" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the Google App Engine "version" of the running instance.
def getAppVersion():
    return os.environ.get('CURRENT_VERSION_ID')
[ "def version(self):\n\t\treturn self.query('SELECT VERSION()',1)[0]", "def get_version() -> str:\n return VERSION", "def version(self):\n\n return self.__entity[\"version\"]", "def do_version():\n v = ApiPool.ping.model.Version(\n name=ApiPool().current_server_name,\n version=ApiPool().current_server_api.get_version(),\n container=get_container_version(),\n )\n log.info(\"/version: \" + pprint.pformat(v))\n return v", "def version(self):\n return self._version or self.env.version # pylint: disable=E1101", "def version(self):\n output = gdb.execute('show version', to_string=True)\n try:\n version = output.split('\\n')[0]\n except:\n version = None\n return version", "def api_version(self):\n return self.apiinfo.version()", "def version(self):\n return self.metadata.version", "def get_version():\n from pkg_resources import get_distribution\n return get_distribution('funkload').version", "def engine_version(self) -> typing.Optional[str]:\n return self._values.get('engine_version')", "def api_version(self) -> int:\n from babase._meta import CURRENT_API_VERSION\n\n return CURRENT_API_VERSION", "def version():\n click.echo(pkg_resources.get_distribution(\"cloudshell-autodiscovery\").version)", "def show_version():\n response = \"App Version: \" + str(version)\n return response", "def version(self):\n try:\n print((\"Reading version from\", self.version_file))\n with open(self.version_file) as fh:\n version = fh.read().strip()\n except FileNotFoundError:\n self.log.debug(f\"No version file found at {self.version_file}\")\n return \"0.0.0\"\n return version", "def server_version(self):\n ret = getattr(self, \"_SERVER_VERSION\", \"\")\n return ret", "def version(self):\n return get_product_version(self)", "def version(self):\n return self.get(\"active\", \"\", \"rev\")", "def version(self):\n return self._root.get(\"platformBuildVersionName\", \"\")", "def get_kubernetes_version(self) -> str:\n return self._get_kubernetes_version()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the Melange part of the GAE version.
def getMelangeVersion():
    return getAppVersion().split('.', 1)[0]
[ "def full_version(self):\n return '%d.%d%s @ %d' % (self._version_major, self._version_minor,\n self._beta, self._id)", "def get_version() -> str:\n return VERSION", "def getAtmVersion(self):\n _getAtmVersion_result = _str_dc(self._swigobj.getAtmVersion())\n return _getAtmVersion_result", "def version(self):\n\t\treturn self.query('SELECT VERSION()',1)[0]", "def get_required_webots_version():\n return 'R2021a'", "def getAndVersion():\n return getEups().findSetupVersion(\"astrometry_net_data\")[0]", "def version(self):\n return [self.get_control('Version'), '']", "def get_version(self) -> GoProResp:", "def latest_version(self) -> str | None:\n return self._addon_data[ATTR_VERSION_LATEST]", "def ds9Version():\n try:\n v = xpa.get(None, getXpaAccessPoint(), \"about\", \"\").strip()\n return v.splitlines()[1].split()[1]\n except Exception, e:\n print >> sys.stderr, \"Error reading version: %s (%s)\" % (v, e)\n return \"0.0.0\"", "def version(self):\r\n self._is_agol = self._portal.is_arcgisonline\r\n self._product_version = [int(i) for i in self._portal.get_version().split('.')]\r\n return self._product_version", "def get_required_webots_version_short():\n return make_short_version(get_required_webots_version())", "def getAppVersion():\n return os.environ.get('CURRENT_VERSION_ID')", "def getBurpVersion(self):\n # type: () -> str", "def getHopperMinorVersion():\n return HopperLowLevel.getMinorVersion()", "def _get_version_string() -> str:\n return \" GDM Version: {}. Registered extension packages: {}.\".format(\n _version.version, extensions.get_registered_package_info())", "def get_version(v=version.__version__):\n ver = v.split(\" \")\n vtag = ver[1]\n vnum = [int(i) for i in ver[0].split('.')]\n return(vnum, vtag)", "def api_version(self):\n return self.apiinfo.version()", "def get_version_str():\n return pkg_resources.get_distribution(\"lando_messaging\").version" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the Melange application is running in "debug mode". "Debug mode" is currently enabled if running locally or if the current Melange version is 'devvin'.
def isDebug():
    return isLocal() or getMelangeVersion() == 'devvin'
[ "def debuggable(self):\n return self._root.find(\"application\").get(\n \"debuggable\", \"false\") == \"true\"", "def is_debug(self): # type: () -> bool\n return self._verbosity == DEBUG", "def debug_enable(self) -> bool:\n return pulumi.get(self, \"debug_enable\")", "def debug_build(self) -> bool:\n assert isinstance(self._env['debug_build'], bool)\n return self._env['debug_build']", "def get_debugging():\n global DEBUGGING\n return DEBUGGING == True", "def _debug_logging_enabled():\n return os.getenv(\"DEBUG_LOGGING_ENABLED\", \"false\").lower() == \"true\"", "def is_debugging_enabled(self, section: str) -> bool:\n return self.get_boolean_or_default(section, 'debug', False)", "def get_debug_mode(self):\n return self._debug_mode", "def debug(self):\n debug = 'debug' in self.httprequest.args\n if debug and self.httprequest.args.get('debug') == 'assets':\n debug = 'assets'\n\n # check if request from rpc in debug mode\n if not debug:\n debug = self.httprequest.environ.get('HTTP_X_DEBUG_MODE')\n\n if not debug and self.httprequest.referrer:\n debug = 'debug' in urls.url_parse(self.httprequest.referrer).decode_query()\n return debug", "def getDebugging() -> bool:\n return Deferred.debug", "def is_development():\n name = os.environ.get('SERVER_NAME', '')\n return (\n os.environ.get('SERVER_SOFTWARE', '').startswith('Development')\n or name.startswith('dev-')\n or name.startswith('test')\n or name.startswith('master')\n )", "def is_debugging(self):\n\n if(self.debug_style == \"NA_PRINT_DONT_PARSE\"):\n return 1\n\n else :\n return 0", "def is_dev():\n return os.environ.get('SERVER_SOFTWARE', '').startswith('Development/')", "def _is_debug(self, ext):\n if hasattr(dj_settings, 'STATICLINK_DEBUG'):\n return dj_settings.STATICLINK_DEBUG.get(ext, dj_settings.DEBUG)\n return False", "def pxe_debug_enabled(self):\n ret = self._get_attr(\"PXEDebugEnabled\")\n return ret", "def is_available(self) -> bool:\n\n return bool(self.debugger_address)", "def is_dev():\n\treturn os.environ['SERVER_SOFTWARE'].startswith('Dev')", "def is_dev_env() -> bool:\n if os.getenv(\"APP_ENV\") == \"dev\":\n return True\n return False", "def is_local_dev_server():\n return os.environ.get('SERVER_SOFTWARE', '').startswith('Development')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Imports CSharpExtension into global context.
def add_csharpml_extension():
    AddReference("CSharPyMLExtension")
[ "def when_extension_has_loaded(context):\n uitests.vscode.extension.activate_python_extension(context)", "def given_extension_has_loaded(context):\n uitests.vscode.extension.activate_python_extension(context)", "def test_import_extension(test_module):\n import_path = 'fake_extension:FakeExtension'\n extension = sphinx._import_extension(import_path)\n assert issubclass(extension, Extension)", "def setup_extension_modules(self):\n\n try:\n numpy_include = numpy.get_include()\n\n except AttributeError:\n numpy_include = numpy.get_numpy_include()\n\n # Add the NumPy include directory to the include directories\n # list for each type of compiler\n for cc in self.include_directories.keys():\n self.include_directories[cc].append(numpy_include)\n\n # The main openmoc extension (defaults are gcc and single precision)\n self.extensions.append(\n Extension(name = '_openrk',\n sources = copy.deepcopy(self.sources[self.cc]),\n library_dirs = self.library_directories[self.cc],\n libraries = self.shared_libraries[self.cc],\n extra_link_args = self.linker_flags[self.cc],\n include_dirs = self.include_directories[self.cc],\n swig_opts = self.swig_flags + ['-D' + self.cc.upper()]))", "def _setup_import_hook(cls, extensions: List[Extension]):\n if len(extensions) == 0:\n return\n\n existing = [h for h in sys.meta_path if isinstance(h, _ImportLoadExtInterceptor)]\n if len(existing) > 0:\n hook = existing[0]\n hook.module_to_extension.update({req: e for e in extensions for req in e.reqs})\n else:\n hook = _ImportLoadExtInterceptor(\n module_to_extension={req: e for e in extensions for req in e.reqs}\n )\n sys.meta_path.insert(0, hook)", "def _importCode(code, name, add_to_sys_modules=0):\n import imp\n module = imp.new_module(name)\n \n if add_to_sys_modules:\n sys.modules[name] = module\n \n exec code in module.__dict__\n return module", "def configure_extension(name: str, path: str):\n if isinstance(path, str):\n path = [path]\n return Extension(\n name,\n path,\n language='c++',\n include_dirs=INCLUDE_DIRS,\n libraries=LIBS,\n library_dirs=LIB_DIRS,\n extra_compile_args=COMPILE_ARGS,\n )", "async def load(self, ctx, ext):\n ext_folder = \"extensions\"\n ext_dir = os.path.join(os.path.dirname(__file__), \"..\", ext_folder)\n ext_files = [name for _, name, _ in pkgutil.iter_modules([ext_dir])]\n if ext not in ext_files:\n await ctx.error(f\"{ext} extension not found.\")\n return\n\n ext_name = f\"firetail.extensions.{ext}\"\n was_loaded = ext_name in ctx.bot.extensions\n\n try:\n if was_loaded:\n ctx.bot.reload_extension(ext_name)\n await ctx.success(f'{ext} extension reloaded.')\n else:\n ctx.bot.load_extension(ext_name)\n await ctx.success(f'{ext} extension loaded.')\n except commands.ExtensionFailed as e:\n original_traceback = \"\\n\".join(traceback.format_tb(e.original.__traceback__))\n await ctx.codeblock(original_traceback, title=f\"Exception on loading {ext}\")", "def _patch_import_cli():\n try:\n return importlib.import_module(\"music_bot.cli\")\n except ModuleNotFoundError:\n pass\n\n from pathlib import Path\n sys.path.append(str(Path(__file__).parent.parent))\n\n return importlib.import_module(\"music_bot.cli\")", "def init(context, thing=\"(default)\", **rest):\n if rest:\n raise ExtensionError(\"exthello only supports the 'thing' parameter\")\n\n # We store the configuration globally in this module, which works well\n # enough for most use-cases.\n global the_thing\n the_thing = thing", "def cytonize_extensions():\n extensions = []\n for name, path in CYTHON_SCRITPS:\n 
extensions.append(configure_extension(name, path))\n return cythonize(extensions, show_all_warnings=True)", "def test_module(modules_tmpdir, test_app):\n fake_extension = modules_tmpdir.join('fake_extension.py')\n fake_extension.write('\\n'.join((\n 'from henson import Extension',\n 'class FakeExtension(Extension):',\n ' def register_cli(self): pass',\n )))", "def load_extension(self, name):\n fullname = 'extensions.%s' % name\n try:\n if HAS_IMPORTLIB:\n mod = importlib.import_module('.' + fullname, package=__package__)\n else:\n mod = __import__(fullname, globals(), locals(), [''], 1)\n except Exception as err:\n import traceback\n traceback.print_exc()\n mod = None\n return mod", "def test_module_add():\n os.chdir(test_solution_dir)\n cli = __import__(\"iotedgedev.cli\", fromlist=['main'])\n runner = CliRunner()\n\n add_module_and_verify(cli.main, runner, \"csharp\")\n # add_module_and_verify(cli.main, runner, \"nodejs\")\n add_module_and_verify(cli.main, runner, \"python\")\n add_module_and_verify(cli.main, runner, \"csharpfunction\")", "def init():\n if _importer not in sys.meta_path:\n sys.meta_path.insert(0, _importer)", "def load_helper(cg, name, from_globals=False):\n if from_globals:\n cg.load_global(COMPILER_HELPERS)\n else:\n cg.load_fast(C_HELPERS)\n cg.load_const(name)\n cg.binary_subscr()", "def test_load_ext_from_entrypoint():\n nlp = spacy.load(\"en_core_web_sm\")\n nlp.add_pipe(\"conll_formatter\", last=True)", "def add_core(self):\n # Add externs and closure dependencies.\n source_base = shakaBuildHelpers.get_source_base()\n match = re.compile(r'.*\\.js$')\n self.include |= set(\n shakaBuildHelpers.get_all_files(\n os.path.join(source_base, 'externs'), match) +\n shakaBuildHelpers.get_all_files(\n os.path.join(source_base, 'third_party', 'closure'), match))\n\n # Check that there are no files in 'core' that are removed\n core_build = Build()\n core_build.parse_build(['+@core'], os.getcwd())\n core_files = core_build.include\n if self.exclude & core_files:\n logging.error('Cannot exclude files from core')\n self.include |= core_files", "def RegisterFileExtensions(defPyIcon, defPycIcon, runCommand):\n # Register the file extensions.\n pythonFileId = RegistryIDPyFile\n win32api.RegSetValue(\n win32con.HKEY_CLASSES_ROOT, \".py\", win32con.REG_SZ, pythonFileId\n )\n win32api.RegSetValue(\n win32con.HKEY_CLASSES_ROOT, pythonFileId, win32con.REG_SZ, \"Python File\"\n )\n win32api.RegSetValue(\n win32con.HKEY_CLASSES_ROOT,\n \"%s\\\\CLSID\" % pythonFileId,\n win32con.REG_SZ,\n CLSIDPyFile,\n )\n win32api.RegSetValue(\n win32con.HKEY_CLASSES_ROOT,\n \"%s\\\\DefaultIcon\" % pythonFileId,\n win32con.REG_SZ,\n defPyIcon,\n )\n base = \"%s\\\\Shell\" % RegistryIDPyFile\n win32api.RegSetValue(\n win32con.HKEY_CLASSES_ROOT, base + \"\\\\Open\", win32con.REG_SZ, \"Run\"\n )\n win32api.RegSetValue(\n win32con.HKEY_CLASSES_ROOT,\n base + \"\\\\Open\\\\Command\",\n win32con.REG_SZ,\n runCommand,\n )\n\n # Register the .PYC.\n pythonFileId = RegistryIDPycFile\n win32api.RegSetValue(\n win32con.HKEY_CLASSES_ROOT, \".pyc\", win32con.REG_SZ, pythonFileId\n )\n win32api.RegSetValue(\n win32con.HKEY_CLASSES_ROOT,\n pythonFileId,\n win32con.REG_SZ,\n \"Compiled Python File\",\n )\n win32api.RegSetValue(\n win32con.HKEY_CLASSES_ROOT,\n \"%s\\\\DefaultIcon\" % pythonFileId,\n win32con.REG_SZ,\n defPycIcon,\n )\n base = \"%s\\\\Shell\" % pythonFileId\n win32api.RegSetValue(\n win32con.HKEY_CLASSES_ROOT, base + \"\\\\Open\", win32con.REG_SZ, \"Run\"\n )\n win32api.RegSetValue(\n 
win32con.HKEY_CLASSES_ROOT,\n base + \"\\\\Open\\\\Command\",\n win32con.REG_SZ,\n runCommand,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A test to ensure changing the message format is prohibitively annoying
def test_verify_message_format(self):
    def message_assert(message):
        fields = [('publisher_id', 'publisher_id'),
                  ('event_type', 'event_type'),
                  ('priority', 'WARN'),
                  ('payload', dict(a=3))]
        for k, v in fields:
            self.assertEqual(message[k], v)
        self.assertTrue(len(message['message_id']) > 0)
        self.assertTrue(len(message['timestamp']) > 0)

    self.stubs.Set(nova.notifier.no_op_notifier, 'notify', message_assert)
    notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3))
[ "def test_reformat_email_4(self):\n email = 'test@examplecom'\n self.assertEqual(self.cmd.reformat_email(email), 'Not available')", "def test_reformat_email_1(self):\n email = ''\n self.assertEqual(self.cmd.reformat_email(email), 'Not available')", "def test_reformat_email_2(self):\n email = 'test@example.com'\n self.assertEqual(self.cmd.reformat_email(email), 'test@example.com')", "def test_format_validations(self):\r\n # test posting a person with a badly formatted email field\r\n person = dict(name='Jeffrey', email='bogus!!!email', age=1)\r\n response = self.app.post('/api/test', data=dumps(person))\r\n assert response.status_code == 400\r\n data = loads(response.data)\r\n assert 'validation_errors' in data\r\n errors = data['validation_errors']\r\n assert 'email' in errors\r\n assert 'email address' in errors['email'].lower()\r\n\r\n # posting a new person with valid email format should be fine\r\n person = dict(name='John', email='foo@example.com', age=1)\r\n response = self.app.post('/api/test', data=dumps(person))\r\n assert response.status_code == 201\r\n personid = loads(response.data)['id']\r\n\r\n # test patching a person to with badly formatted data\r\n person = dict(name='Jeffrey', email='bogus!!!email', age=24)\r\n response = self.app.patch('/api/test/' + str(personid),\r\n data=dumps(person))\r\n assert 'validation_errors' in data\r\n errors = data['validation_errors']\r\n assert 'email' in errors\r\n assert 'email address' in errors['email'].lower()\r\n\r\n # patching a person with correctly formatted fields should be fine\r\n person = dict(email='foo@example.com')\r\n response = self.app.patch('/api/test/' + str(personid),\r\n data=dumps(person))\r\n data = loads(response.data)\r\n if 'validation_errors' in data and \\\r\n 'email' in data['validation_errors']:\r\n assert 'email address' not in errors['email'].lower()", "def test_encoding_unknown_performative():\n msg = HttpMessage(\n performative=HttpMessage.Performative.REQUEST,\n method=\"some_method\",\n url=\"url\",\n version=\"some_version\",\n headers=\"some_headers\",\n body=b\"some_body\",\n )\n\n with pytest.raises(ValueError, match=\"Performative not valid:\"):\n with mock.patch.object(HttpMessage.Performative, \"__eq__\", return_value=False):\n HttpMessage.serializer.encode(msg)", "def test_msg_from_string(self):\n self.json_message[\"msg_to\"] = [constants.NON_SPECIFIC_INTERNAL_USER]\n self.json_message[\"msg_from\"] = \"01b51fcc-ed43-4cdb-ad1c-450f9986859b\"\n with self.app.app_context():\n g.user = User(self.json_message[\"msg_from\"], \"respondent\")\n try:\n MessageSchema().load(self.json_message)\n except ValidationError:\n self.fail(\"Schema should've been correct and not thrown an error\")", "def test_string_format():\n tree = parse(dedent(\"\"\"\\\n import logging\n\n logging.info(\"Hello {}\".format(\"World!\"))\n \"\"\"))\n visitor = LoggingVisitor()\n visitor.visit(tree)\n\n assert_that(visitor.violations, has_length(1))\n assert_that(visitor.violations[0][1], is_(equal_to(STRING_FORMAT_VIOLATION)))", "def test_valid_message():\n id_ = '12345'\n\n msg = Message({'@type': TEST_TYPE, '@id': id_})\n assert msg.type == TEST_TYPE\n assert msg.id == id_\n assert msg.doc_uri == 'test_type/'\n assert msg.protocol == 'protocol'\n assert msg.version == '1.0'\n assert msg.normalized_version == '1.0.0'\n assert msg.name == 'test'\n assert msg.version_info == Semver(1, 0, 0)", "def test_module_formatting(self):\n input_text = \"output text\"\n input_data = {\"data\": \"to show\"}\n output_text = 
formatting_module.output_format(input_text, input_data)\n self.failUnlessEqual(output_text['message'], input_text)\n self.failUnlessEqual(output_text['data'], \"to show\")", "def testFormat(self):\n seq_set = self.session.create_viral_seq_set()\n\n self.util.stringTypeTest(self, seq_set, \"format\")\n\n self.util.stringPropertyTest(self, seq_set, \"format\")", "def test_decoding_unknown_performative():\n msg = HttpMessage(\n performative=HttpMessage.Performative.REQUEST,\n method=\"some_method\",\n url=\"url\",\n version=\"some_version\",\n headers=\"some_headers\",\n body=b\"some_body\",\n )\n\n encoded_msg = HttpMessage.serializer.encode(msg)\n with pytest.raises(ValueError, match=\"Performative not valid:\"):\n with mock.patch.object(HttpMessage.Performative, \"__eq__\", return_value=False):\n HttpMessage.serializer.decode(encoded_msg)", "def testFormat(self):\n meta = self.session.create_metabolome()\n\n self.util.stringTypeTest(self, meta, \"format\")\n\n self.util.stringPropertyTest(self, meta, \"format\")", "def test_bad_message_no_type():\n with pytest.raises(InvalidMessage):\n Message({'test': 'test'})", "def test_excepts_if_empty_input(self):\n\t\tself.assertRaises(ValueError, self.string_manipulation.format)", "def test_format_typeerror(self):\n self.assertRaises(TypeError, when.format, 'a', '%a')", "def test_eformat(self):\n self.assertIs(self.exceptionForCode(EFORMAT), DNSFormatError)", "def test_bad_message_type(type_str):\n with pytest.raises(InvalidType):\n Message({'@type': type_str})", "def test_msg_to_string(self):\n self.json_message[\"msg_to\"] = [\"01b51fcc-ed43-4cdb-ad1c-450f9986859b\"]\n with self.app.app_context():\n g.user = User(self.json_message[\"msg_from\"], \"respondent\")\n try:\n MessageSchema().load(self.json_message)\n except ValidationError:\n self.fail(\"Schema should've been correct and not thrown an error\")", "def test_bad_message_no_type():\n with pytest.raises(ValueError):\n Message.parse_obj({\"test\": \"test\"})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines whether point correspondences are in front of both images
def _in_front_of_both_cameras(first_points, second_points, rot, trans):
    rot_inv = rot
    for first, second in zip(first_points, second_points):
        first_z = np.dot(rot[0, :] - second[0] * rot[2, :], trans) / np.dot(
            rot[0, :] - second[0] * rot[2, :], second)
        first_3d_point = np.array(
            [first[0] * first_z, second[0] * first_z, first_z])
        second_3d_point = np.dot(rot.T, first_3d_point) - np.dot(rot.T, trans)
        if first_3d_point[2] < 0 or second_3d_point[2] < 0:
            return False
    return True
[ "def is_neighboured (point_1, point_2, size_of_system):\r\n p1 = np.array([point_1%size_of_system, point_1//size_of_system])\r\n p2 = np.array([point_2%size_of_system, point_2//size_of_system])\r\n diff = abs(p1 - p2)\r\n if (diff[0] + diff[1]) == 1:\r\n return True\r\n return False", "def equals(first_img, second_img): \n\n if first_img is None or second_img is None: \n return False \n diff = ImageChops.difference(first_img, second_img)\n if diff.getbbox() != None: \n return False\n else: \n diff = None\n return True", "def isPointOnSurface(*args, **kwargs):\n \n pass", "def showMatches(img1, img2, kp1xy, kp2xy):\n fig = plt.figure()\n s1 = img1.shape\n s2 = img2.shape\n print s1\n print s2\n step = 0\n plt.imshow(img1, extent=(0, s1[1], 0, s1[0]), origin='lower')\n bx = (s1[1]+step)#*0.7\n by = (s1[0]+step)#*0.7\n plt.imshow(img2, extent=(bx, bx+s2[1], by, by+s2[0]), origin='lower')\n a1 = kp1xy\n a2 = kp2xy.copy()\n a2[:,0] += by\n a2[:,1] += bx\n lines = [[p1[::-1], p2[::-1]] for p1, p2 in zip(a1, a2)]\n cmap = cm.prism\n cmap = cm.spring\n cs = cmap(n.linspace(0, 1, len(lines)))\n lc = mc.LineCollection(lines, linewidths=1, colors=cs)\n ax = fig.gca()\n ax.add_collection(lc)\n ax.set_xlim([0, s1[1]+step+s2[1]])\n ax.set_ylim([0, s1[0]+step+s2[0]])", "def points_intersect(points1, points2):", "def volume_does_overlap(self, other:\"StitchSrcVolume\") -> bool:\n return self.x0_global < other.x1_global and \\\n self.x1_global > other.x0_global and \\\n self.y0_global < other.y1_global and \\\n self.y1_global > other.y0_global and \\\n self.z0_global < other.z1_global and \\\n self.z1_global > other.z0_global", "def isToTheLeft(ref1, ref2, target) -> bool:\n\tcVec3D = rightHandRuleCrossProduct(ref1, ref2, target)\n\t# NOTE: In a standard coordinate system, negative indicates being to the right (right-hand rule: https://en.wikipedia.org/wiki/Right-hand_rule)\n\t# BUTT in our system (the TkInter Canvas), origin is the top left of the screen and y increases the lower a point is\n\treturn cVec3D.z() < 0", "def overlap_images(gtimage, predimage):\n\n\n gtimage=(numpy.array(gtimage)>127)*1\n predimage=(numpy.array(predimage)>127)*1\n\n intersec = numpy.bitwise_and(gtimage, predimage)\n intersec_val = float(numpy.sum(intersec))\n\n union = numpy.bitwise_or(gtimage, predimage)\n\n union_val = float(numpy.sum(union))\n\n if union_val == 0:\n return 0\n else:\n if float(intersec_val / union_val)>0.5:\n return 1\n else:\n return 0", "def intersects(self, other):\n if self.px + self.px_width < other.x:\n return False\n if other.x + other.width < self.px:\n return False\n if self.py + self.px_height < other.y:\n return False\n if other.y + other.height < self.py:\n return False\n return True", "def masking_pts(pcl):\n return pcl[:, 2] > 0", "def _is_left_neighbor(ax1, ax2):\n return _has_neighbor_distance(ax1, ax2, col_offset=1)", "def __is_at(self, figure):\n try:\n figure.transform_to(self.ground.frame)\n if self.ground.repr == \"cartesian\":\n return figure.x == self.ground.x and figure.y == self.ground.y and figure.z == self.ground.z\n return figure.lon == self.ground.lon and figure.lat == self.ground.lat\n except AttributeError:\n raise LocationError(\"The operation 'is at' needs a figure and a ground with coordinates\")", "def in_vision_range(self, other):\n if(self.withindist(other, self.sight_dist() + other.light_distance())):\n return True\n else:\n return False", "def is_intersecting(self, other):\n for i in range(2):\n if self.get_center_distance(other)[i] > self.attr_from_center[i] + 
other.attr_from_center[i]:\n return False\n elif self.get_center_distance(other)[i] <= other.attr_from_center[i]:\n return True", "def canSee(self, other):\n dx = self.x - other.x\n dy = self.y - other.y\n return abs(self.getFacing(dx, dy)) < pi_1_2", "def dans_img(X,Y, x, y):\r\n return 0<=x<X and 0<=y<Y", "def consistent_with(self, other):\n for wcs1, wcs2 in zip(self.wcs, other.wcs):\n try:\n ra, dec = at.get_center_of_projection(wcs1)\n except TypeError: # if this returns None\n return False\n x, y = wcs1.invert(ra, dec)\n x2, y2 = wcs2.invert(ra, dec)\n dx = other.xoffset - self.xoffset\n dy = other.yoffset - self.yoffset\n distsq = dx * dx + dy * dy\n if distsq > 100 and (x-x2)**2 + (y-y2)**2 < 0.25 * distsq:\n return False\n return True", "def bboxes_intersect(points1, points2, dim=1):\n min1 = points1.min(dim)[0]\n max1 = points1.max(dim)[0]\n min2 = points2.min(dim)[0]\n max2 = points2.max(dim)[0]\n center1 = (min1 + max1)/2\n center2 = (min2 + max2)/2\n size1 = max1 - min1\n size2 = max2 - min2\n return ((center1 - center2).abs() * 2 <= size1 + size2).all(-1)", "def intersects_edge(self, pt1, pt2):\n costheta = np.dot(pt1, pt2)\n usq = (1-costheta)/(1+costheta) # u**2; using trig identity for tan(theta/2)\n gamma1 = np.dot(self._v, pt1)\n gamma2 = np.dot(self._v, pt2)\n b = gamma1*(usq-1.0) + gamma2*(usq+1)\n a = -usq*(gamma1+self._d)\n c = gamma1 - self._d\n\n det = b*b - 4*a*c\n if det < 0.0:\n return False\n\n sqrt_det = np.sqrt(det)\n pos = (-b + sqrt_det)/(2.0*a)\n\n if pos >= 0.0 and pos <= 1.0:\n return True\n\n neg = (-b - sqrt_det)/(2.0*a)\n if neg >= 0.0 and neg <= 1.0:\n return True\n\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Triangulation via LinearLS method
def _linear_ls_triangulation(u1, P1, u2, P2):
    # build A matrix for homogeneous equation system Ax=0
    # assume X = (x,y,z,1) for Linear-LS method
    # which turns it into AX=B system, where A is 4x3, X is 3x1 & B is 4x1
    A = np.array([
        u1[0] * P1[2, 0] - P1[0, 0], u1[0] * P1[2, 1] - P1[0, 1],
        u1[0] * P1[2, 2] - P1[0, 2], u1[1] * P1[2, 0] - P1[1, 0],
        u1[1] * P1[2, 1] - P1[1, 1], u1[1] * P1[2, 2] - P1[1, 2],
        u2[0] * P2[2, 0] - P2[0, 0], u2[0] * P2[2, 1] - P2[0, 1],
        u2[0] * P2[2, 2] - P2[0, 2], u2[1] * P2[2, 0] - P2[1, 0],
        u2[1] * P2[2, 1] - P2[1, 1], u2[1] * P2[2, 2] - P2[1, 2]
    ]).reshape(4, 3)
    B = np.array([
        -(u1[0] * P1[2, 3] - P1[0, 3]),
        -(u1[1] * P1[2, 3] - P1[1, 3]),
        -(u2[0] * P2[2, 3] - P2[0, 3]),
        -(u2[1] * P2[2, 3] - P2[1, 3])
    ]).reshape(4, 1)
    ret, X = cv2.solve(A, B, flags=cv2.DECOMP_SVD)
    return X.reshape(1, 3)
[ "def triangulate(self):\n # pre-condition: we should have at least 3 points\n i=0\n lista_t=[]\n assert len(self.points) > 2\n #print self.points[1].x\n gen=group3(len(self.points))\n # print range(math.factorial(len(self.points))/(math.factorial(3)*math.factorial(len(self.points)-3)))\n #print math.factorial(3)\n #print math.factorial(len(self.points)-3)\n for ite in range(math.factorial(len(self.points))/(math.factorial(3)*math.factorial(len(self.points)-3))):\n pos1,pos2,pos3=next(gen)\n #temp=[(self.points[pos1],(self.points[pos2].x,self.points[pos2].y),(self.points[pos3].x,self.points[pos3].y)]\n t=Triangle(self.points[pos1],self.points[pos2],self.points[pos3])\n if not self.are_collinear(self.points[pos1],self.points[pos2],self.points[pos3]):\n #print 'hello'\n # print self.points[pos1],self.points[pos2],self.points[pos3]\n # print self.points[1]\n # print t,pos1,pos2,pos3\n if self.is_delaunay(t):\n #print t\n self.triangles.append(t)\n #print lista_t \n \n # Your implementation here", "def triangulate(start, end, hx, hy):\n\t# if (end[1] - start[1]) % hy != 0 and (end[0] - start[0]) % hx:\n\t# \tprint(\"Error, can't create mesh\")\n\tarea = (hx*hy)/2.0\n\tdiff_y = end[1] - start[1]\n\tdiff_x = end[0] - start[0]\n\tL_init = [start,[start[0]+hx,start[1]],[start[0],start[1]+hy]]\n\tU_init = [[start[0]+hx,start[1]],[start[0],start[1]+hy],[start[0]+hx,start[1]+hy]]\n\ttriangles = []\n\tmesh = [[start[0]+(x*hx), start[1]+(y*hy)] for x in xrange(int(diff_x / hx)+1) for y in xrange(int(diff_y / hy)+1)]\n\tboundary = []\n\tinternal = [] \n\tfor coord in mesh: \n\t\tif coord[0] == start[0] or coord[0] == end[0]:\n\t\t\tboundary.append(coord)\n\t\telif coord[1] == start[1] or coord[1] == end[1]:\n\t\t\tboundary.append(coord)\n\t\telse:\n\t\t\tinternal.append(coord)\n\n\tinternal_dict = {str(x):[] for x in xrange(len(internal))}\n\t# adj_triangles = {}\n\tfor x in xrange(int(diff_x / hx)):\n\t\tinc_x = x*hx\n\t\tfor y in xrange(int(diff_y / hy)):\n\t\t\tinc_y = y*hy\n\t\t\tL_next_y = [[coord[0]+inc_x,coord[1]+inc_y] for coord in L_init]\n\t\t\tU_next_y = [[coord[0]+inc_x,coord[1]+inc_y] for coord in U_init]\n\t\t\tfor coord_L, coord_U in zip(L_next_y,U_next_y):\n\t\t\t\t# if coord_L in internal:\n\t\t\t\ttry:\n\t\t\t\t\tindex = internal.index(coord_L)\n\t\t\t\t\tinternal_dict[str(index)].append(L_next_y)\n\t\t\t\t\t# elif coord_U in internal:\t\t\t\t\t\t\n\t\t\t\t\tindex = internal.index(coord_U)\n\t\t\t\t\tinternal_dict[str(index)].append(U_next_y)\n\t\t\t\texcept ValueError as e:\n\t\t\t\t\tcontinue\n\t\t\t\t\t# print(e)\n\t\t\ttriangles.extend((L_next_y, U_next_y))\n\n\treturn triangles, mesh, boundary, internal, area, internal_dict#adj_triangles ", "def triangulation(e, i):\n vertices = []\n holes = []\n segments = []\n index_point = 0\n\n #-- Slope computation points\n a = [[], [], []]\n b = [[], [], []]\n for ip in range(len(e)-1):\n vertices.append(e[ip])\n if a == [[], [], []] and index_point == 0:\n a = [e[ip][0], e[ip][1], e[ip][2]]\n if index_point > 0 and (e[ip] != e[ip-1]):\n if b == [[], [], []]:\n b = [e[ip][0], e[ip][1], e[ip][2]]\n if ip == len(e) - 2:\n segments.append([index_point, 0])\n else:\n segments.append([index_point, index_point+1])\n index_point += 1\n for hole in i:\n first_point_in_hole = index_point\n for p in range(len(hole)-1):\n if p == len(hole)-2:\n segments.append([index_point, first_point_in_hole])\n else:\n segments.append([index_point, index_point+1])\n index_point += 1\n vertices.append(hole[p])\n holes.append(centroid(hole[:-1]))\n\n #-- Project to 
2D since the triangulation cannot be done in 3D with the library that is used\n npolypoints = len(vertices)\n nholes = len(holes)\n #-- Check if the polygon is vertical, i.e. a projection cannot be made.\n #-- First copy the list so the originals are not modified\n temppolypoints = copy.deepcopy(vertices)\n newpolypoints = copy.deepcopy(vertices)\n tempholes = copy.deepcopy(holes)\n newholes = copy.deepcopy(holes)\n #-- Compute the normal of the polygon for detecting vertical polygons and\n #-- for the correct orientation of the new triangulated faces\n #-- If the polygon is vertical\n normal = unit_normal(temppolypoints[0], temppolypoints[1], temppolypoints[2])\n if math.fabs(normal[2]) < 10e-6:\n vertical = True\n else:\n vertical = False\n #-- We want to project the vertical polygon to the XZ plane\n #-- If a polygon is parallel with the YZ plane that will not be possible\n YZ = True\n for i in range(1, npolypoints):\n if temppolypoints[i][0] != temppolypoints[0][0]:\n YZ = False\n continue\n #-- Project the plane in the special case\n if YZ:\n for i in range(0, npolypoints):\n newpolypoints[i][0] = temppolypoints[i][1]\n newpolypoints[i][1] = temppolypoints[i][2]\n for i in range(0, nholes):\n newholes[i][0] = tempholes[i][1]\n newholes[i][1] = tempholes[i][2]\n #-- Project the plane\n elif vertical:\n for i in range(0, npolypoints):\n newpolypoints[i][1] = temppolypoints[i][2]\n for i in range(0, nholes):\n newholes[i][1] = tempholes[i][2]\n else:\n pass #-- No changes here\n\n #-- Drop the last point (identical to first)\n for p in newpolypoints:\n p.pop(-1)\n\n #-- If there are no holes\n if len(newholes) == 0:\n newholes = None\n else:\n for h in newholes:\n h.pop(-1)\n\n #-- Plane information (assumes planarity)\n a = e[0]\n b = e[1]\n c = e[2]\n #-- Construct the plane\n pl = plane(a, b, c)\n \n #-- Prepare the polygon to be triangulated\n poly = {'vertices' : np.array(newpolypoints), 'segments' : np.array(segments), 'holes' : np.array(newholes)}\n #-- Triangulate\n t = triangle.triangulate(poly, \"pQjz\")\n #-- Get the triangles and their vertices\n tris = t['triangles']\n vert = t['vertices'].tolist()\n #-- Store the vertices of each triangle in a list\n tri_points = []\n for tri in tris:\n tri_points_tmp = []\n for v in tri.tolist():\n vert_adj = [[], [], []]\n if YZ:\n vert_adj[0] = temppolypoints[0][0]\n vert_adj[1] = vert[v][0]\n vert_adj[2] = vert[v][1]\n elif vertical:\n vert_adj[0] = vert[v][0] \n vert_adj[2] = vert[v][1]\n vert_adj[1] = get_y(pl, vert_adj[0], vert_adj[2])\n else:\n vert_adj[0] = vert[v][0]\n vert_adj[1] = vert[v][1]\n vert_adj[2] = get_height(pl, vert_adj[0], vert_adj[1])\n tri_points_tmp.append(vert_adj)\n tri_normal = unit_normal(tri_points_tmp[0], tri_points_tmp[1], tri_points_tmp[2])\n if compare_normals(normal, tri_normal):\n tri_points.append(tri_points_tmp)\n else:\n tri_points_tmp = reverse_vertices(tri_points_tmp)\n tri_points.append(tri_points_tmp)\n return tri_points", "def delaunayTriangulation(dataSet):\n edgeDict = triangleSplittingDict(dataSet)\n edges = edgeDict.keys()\n flag = True\n while(flag):\n flag = False\n removableEdges = []\n innerFlag = True\n i = 0\n while innerFlag:\n e = edges[i]\n if i == len(edges) - 1:\n innerFlag = False\n i = i + 1\n if len(edgeDict[e]) == 2 and e[0] != e[1]:#Hacky Fix, need to debug initialTriangulate\n pts = getPts(e, edgeDict[e][0],edgeDict[e][1])\n if isConvexQuad(pts):\n oldTri1 = edgeDict[e][0]\n oldTri2 = edgeDict[e][1]\n if not isLegal(e, oldTri1, oldTri2):\n innerFlag = False\n flag = True\n 
newTris = [(pts[0], pts[1], pts[3]), (pts[1], pts[2], pts[3])]\n newE = (pts[1], pts[3])\n edgeDict[newE] = newTris\n del edgeDict[e]\n edges = edgeDict.keys()\n edgeDict = updateDict(edgeDict, oldTri1, oldTri2)\n i = 0\n\n return edgeDict.keys()", "def boundary_triangulation(outer_boundary, inner_boundaries, polyline_features = [], point_features = [], src = 'numpy_rpc', cls=None):\n\n\tif cls is None:\n\t\tcls = Mesh\n\n\t# generate planar Delaunay triangulation\n\tvertices = [pt for boundary in [outer_boundary] + inner_boundaries + polyline_features for pt in boundary] + point_features\n\tdelaunay_mesh = delaunay(vertices, src = src, cls = cls)\n\t\n\t# delete false faces with aligned vertices\n\tfor fkey in list(delaunay_mesh.faces()):\n\t\ta, b, c = [delaunay_mesh.vertex_coordinates(vkey) for vkey in delaunay_mesh.face_vertices(fkey)]\n\t\tab = subtract_vectors(b, a)\n\t\tac = subtract_vectors(c, a)\n\t\tif length_vector(cross_vectors(ab, ac)) == 0:\n\t\t\tdelaunay_mesh.delete_face(fkey)\n\n\t# delete faces outisde the borders\n\tfor fkey in list(delaunay_mesh.faces()):\n\t\tcentre = trimesh_face_circle(delaunay_mesh, fkey)[0]\n\t\tif not is_point_in_polygon_xy(centre, outer_boundary) or any([is_point_in_polygon_xy(centre, inner_boundary) for inner_boundary in inner_boundaries]):\n\t\t\tdelaunay_mesh.delete_face(fkey)\n\n\t# topological cut along the feature polylines through unwelding\n\tvertex_map = {geometric_key(delaunay_mesh.vertex_coordinates(vkey)): vkey for vkey in delaunay_mesh.vertices()}\n\tedges = [edge for polyline in polyline_features for edge in pairwise([vertex_map[geometric_key(point)] for point in polyline])]\n\tmesh_unweld_edges(delaunay_mesh, edges)\n\n\treturn delaunay_mesh", "def delaunay_to_tri(d):\n return tri.Triangulation(x=d.x,y=d.y,triangles=d.triangle_nodes)", "def assemble(self, tri):\n\n self.triangulation = tri\n n = len(tri.points)\n self.sourceVec = numpy.zeros( (n,), numpy.float64 )\n\n for i, iabc in tri.triangles.items():\n\n ia, ib, ic = iabc\n pa, pb, pc = tri.points[ia], tri.points[ib], tri.points[ic]\n\n # centroid \n pMid = (pa + pb + pc)/3.0\n fxx = fyy = self.fFunc(pMid)\n\n ga = self.gFunc(pa)\n gb = self.gFunc(pb)\n gc = self.gFunc(pc)\n\n sa = self.sFunc(pa)\n sb = self.sFunc(pb)\n sc = self.sFunc(pc)\n\n xcb = pc[0] - pb[0]\n ycb = pc[1] - pb[1]\n xac = pa[0] - pc[0]\n yac = pa[1] - pc[1]\n xba = pb[0] - pa[0]\n yba = pb[1] - pa[1]\n\n area = -xba*yac + yba*xac\n if area < 0:\n print '*** area = ', area, ' for ia, ib, ic = ', ia, ib, ic\n\n fOverA = 0.25*(fxx + fyy)/area\n\n faa = fOverA * (ycb*ycb + xcb*xcb) \\\n + (ga/ 20. + gb/ 60. + gc/ 60.)*area\n\n fab = fOverA * (ycb*yac + xcb*xac) \\\n + (ga/ 60. + gb/ 60. + gc/120.)*area\n\n fac = fOverA * (ycb*yba + xcb*xba) \\\n + (ga/ 60. + gb/120. + gc/ 60.)*area\n\n fbb = fOverA * (yac*yac + xac*xac) \\\n + (ga/ 60. + gb/ 20. + gc/ 60.)*area\n\n fbc = fOverA * (yac*yba + xac*xba) \\\n + (ga/120. + gb/ 60. + gc/ 60.)*area\n\n fcc = fOverA * (yba*yba + xba*xba) \\\n + (ga/ 60. + gb/ 60. 
+ gc/ 20.)*area\n\n self.mat[ia, ia] = self.mat.get((ia, ia), 0.0) + faa\n self.mat[ia, ib] = self.mat.get((ia, ib), 0.0) + fab\n self.mat[ia, ic] = self.mat.get((ia, ic), 0.0) + fac\n self.mat[ib, ib] = self.mat.get((ib, ib), 0.0) + fbb\n self.mat[ib, ic] = self.mat.get((ib, ic), 0.0) + fbc\n self.mat[ic, ic] = self.mat.get((ic, ic), 0.0) + fcc\n\n # make sure matrix is Hermitian\n self.mat[ib, ia] = self.mat[ia, ib]\n self.mat[ic, ia] = self.mat[ia, ic]\n self.mat[ic, ib] = self.mat[ib, ic]\n\n self.sourceVec[ia] += area*(sa/12.0 + sb/24.0 + sc/24.0)\n self.sourceVec[ib] += area*(sa/24.0 + sb/12.0 + sc/24.0)\n self.sourceVec[ic] += area*(sa/24.0 + sb/24.0 + sc/12.0)", "def triangulate(self, leaflet=\"upper\"):\n # Run the triangulation on each frame's projection.\n self.delaunay_tri[leaflet] = [Delaunay(x) for x in self.projection[leaflet]]", "def delaunay(points: Iterable[Point]) -> Triangulation:\n return Triangulation.from_points(points)", "def _triangulation_(self):\n from sage.homology.simplicial_complex import Simplex\n if self.dimension() < 0: # the empty cube\n return [Simplex(())] # the empty simplex\n v = tuple([max(j) for j in self.tuple()])\n if self.dimension() == 0: # just v\n return [Simplex((v,))]\n simplices = []\n for i in range(self.dimension()):\n for S in self.face(i, upper=False)._triangulation_():\n simplices.append(S.join(Simplex((v,)), rename_vertices=False))\n return simplices", "def tri(self):\n if self._tri is not None:\n return self._tri\n\n try:\n self._tri = Triangulation(self.points)\n self.update_losses(set(), self._tri.simplices)\n return self._tri\n except ValueError:\n # A ValueError is raised if we do not have enough points or\n # the provided points are coplanar, so we need more points to\n # create a valid triangulation\n return None", "def dummy_delaunay():\n dummy_x = np.array([0.0,0.0,1.0])\n dummy_y = np.array([0.0,1.0,1.0])\n d = delaunay.Triangulation(dummy_x,dummy_y)\n return d", "def connectAlg(self):\n # delete old no. of triangles for every point\n self.valences.clear()\n # fill vector of no. of triangles for every point with 0\n for i in range(0, len(self.pts)):\n self.valences.append(0)\n # for every Triangle in tris find the 3 neighbor Triangles\n for t in range(0, len(self.tris)):\n for tn in range(0, len(self.tris)):\n # For first, second and third point of triangle test if it occurs in the other triangle\n test1 = (self.tris[t].iv[0]==self.tris[tn].iv[0] or self.tris[t].iv[0]==self.tris[tn].iv[1] or self.tris[t].iv[0]==self.tris[tn].iv[2])\n test2 = (self.tris[t].iv[1]==self.tris[tn].iv[0] or self.tris[t].iv[1]==self.tris[tn].iv[1] or self.tris[t].iv[1]==self.tris[tn].iv[2])\n test3 = (self.tris[t].iv[2]==self.tris[tn].iv[0] or self.tris[t].iv[2]==self.tris[tn].iv[1] or self.tris[t].iv[2]==self.tris[tn].iv[2])\n # find t0!=t containing b and c (CG21_1 page 19)\n if(test2 and test3 and not test1):\n self.tris[t].it[0]=tn; \n # find t1!=t containing c and a (CG21_1 page 19)\n if(test3 and test1 and not test2):\n self.tris[t].it[1]=tn;\n # find t2!=t containing a and b (CG21_1 page 19)\n if(test1 and test2 and not test3):\n self.tris[t].it[2]=tn;\n # increase no. 
of triangles for the point of the triangle\n self.valences[self.tris[t].iv[0]] += 1;\n self.valences[self.tris[t].iv[1]] += 1;\n self.valences[self.tris[t].iv[2]] += 1;\n # output results\n print(\"Valence list (First vertex has index 0):\")\n for i in range(0, len(self.pts)):\n print(\" vertex \", i, \": \", self.valences[i])\n print(\"Neighbor triangles of triangles (First triangle has index 0): \")\n for i in range(0, len(self.tris)):\n print(\" Triangle \", i, \": \", end=\" \")\n self.tris[i].printIT()", "def tri_to_cgal(mt):\n Npnt = len(mt.x)\n \n # store vertex handles for future use\n vh = np.zeros( Npnt ,'object')\n DT = CGAL.Constrained_Delaunay_triangulation_2()\n\n for n in range(Npnt):\n pnt = CGAL.Point_2( mt.x[n], mt.y[n] )\n vh[n] = DT.insert( pnt )\n vh[n].set_info(n)\n DT.vh = vh\n return DT", "def cgal_to_tri(cg,use_info=False):\n Npnt = cg.number_of_vertices()\n\n lookup = {} # (x,y) => index into points\n x = np.zeros( Npnt, np.float64)\n y = np.zeros( Npnt, np.float64)\n triangle_nodes = np.zeros( (cg.number_of_faces(),3), np.int32)\n \n for n,v in enumerate(cg.vertices):\n if use_info:\n n = v.info()\n if n is None:\n raise Exception,\"Can't use info() - some vertices are missing data\"\n x[n] = v.point().x()\n y[n] = v.point().y()\n if not use_info:\n lookup[ (x[n],y[n]) ] = n\n\n for i,f in enumerate(cg.faces):\n for k in [0,1,2]:\n pnt = f.vertex(k).point()\n xy = (pnt.x(),pnt.y())\n if use_info:\n n = f.vertex(k).info()\n else:\n n = lookup[xy]\n triangle_nodes[i,k] = n\n return tri.Triangulation(x=x,y=y,triangles=triangle_nodes)", "def triangulate(poly):\n poly = closePoly(poly)\n return list(triList(poly,poly2tri(lowerPoly(poly))))", "def getTriangles(*args, **kwargs):\n \n pass", "def trianglulateMesh(mesh: bpy.types.Mesh):\n\n\t# if we use custom normals, we gotta correct them\n\t# manually, since blenders triangulate is shit\n\tif mesh.use_auto_smooth:\n\t\t# calculate em, so that we can collect the correct normals\n\t\tmesh.calc_normals_split()\n\n\t\t# and now store them, together with the vertex indices,\n\t\t# since those will be the only identical data after triangulating\n\t\tnormalData = list()\n\t\tfor p in mesh.polygons:\n\t\t\tindices = list()\n\t\t\tnormals = list()\n\n\t\t\tfor l in p.loop_indices:\n\t\t\t\tloop = mesh.loops[l]\n\t\t\t\tnrm = loop.normal\n\t\t\t\tnormals.append((nrm.x, nrm.y, nrm.z))\n\t\t\t\tindices.append(loop.vertex_index)\n\n\t\t\tnormalData.append((indices, normals))\n\n\t\t# free the split data\n\t\t# mesh.free_normals_split()\n\n\timport bmesh\n\tbm = bmesh.new()\n\tbm.from_mesh(mesh)\n\tbmesh.ops.triangulate(bm,\n\t\t\t\t\t\t faces=bm.faces,\n\t\t\t\t\t\t quad_method='FIXED',\n\t\t\t\t\t\t ngon_method='EAR_CLIP')\n\tbm.to_mesh(mesh)\n\tbm.free()\n\n\tif mesh.use_auto_smooth:\n\t\tpolygons = list()\n\t\tfor p in mesh.polygons:\n\t\t\tpolygons.append(p)\n\n\t\tsplitNormals = [None] * len(mesh.loops)\n\n\t\tfor nd in normalData:\n\t\t\tfoundTris = 0\n\t\t\ttoFind = len(nd[0])-2\n\n\t\t\tout = False\n\t\t\ttoRemove = list()\n\n\t\t\tfor p in polygons:\n\t\t\t\tfound = 0\n\t\t\t\tfor l in p.loop_indices:\n\t\t\t\t\tif mesh.loops[l].vertex_index in nd[0]:\n\t\t\t\t\t\tfound += 1\n\n\t\t\t\tif found == 3:\n\t\t\t\t\tfoundTris += 1\n\n\t\t\t\t\tfor l in p.loop_indices:\n\t\t\t\t\t\tsplitNormals[l] \\\n\t\t\t\t\t\t\t= nd[1][nd[0].index(mesh.loops[l].vertex_index)]\n\n\t\t\t\t\ttoRemove.append(p)\n\t\t\t\t\tif foundTris == toFind:\n\t\t\t\t\t\tbreak\n\n\t\t\tfor p in toRemove:\n\t\t\t\tpolygons.remove(p)\n\n\t\tif len(polygons) > 
0:\n\t\t\tprint(\"\\ntriangulating went wrong?\", len(polygons))\n\t\telse:\n\t\t\tmesh.normals_split_custom_set(splitNormals)", "def dt_allocate(self): \n self.DT = Constrained_Delaunay_triangulation_2()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clean the airports dataset, keeping only US airports and discarding records whose type is not an airport ("large_airport", "medium_airport", "small_airport"). Extract ISO regions and cast fields as required.
def get_airports_cleansed(self,dfairport):
    dfairport.createOrReplaceTempView("Airport")
    cl_dfairport=self.spark.sql("""
        select a.*,substring(iso_region,-2)state_code from Airport a
        where iso_country='US'
        and type in ("large_airport", "medium_airport", "small_airport")
        """)
    return cl_dfairport
[ "def clean_airports(airports_spark, spark_session):\n spark = spark_session\n airports_spark.createOrReplaceTempView('airports')\n \n airports_spark_cleaned = spark.sql(\"\"\"\n SELECT ident, name, municipality as City, SUBSTRING(iso_region, 4, 5) as State, iata_code\n FROM airports\n WHERE iata_code IS NOT NULL\n \"\"\")\n\n airports_spark_cleaned = airports_spark_cleaned.dropDuplicates()\n airports_spark_cleaned = airports_spark_cleaned.na.drop()\n \n return airports_spark_cleaned", "def filter_by_airport_names(self, airports, letters):\r\n airport_list = [i for i in self.data[airports]]\r\n delete = {i for i in self.data[airports] if not i.startswith(tuple(letters))}\r\n not_relevant_index = []\r\n for index, value in enumerate(airport_list):\r\n if value in delete:\r\n not_relevant_index.append(index)\r\n for i in reversed(range(len(airport_list))):\r\n if i in not_relevant_index:\r\n for keys in self.data.keys():\r\n del self.data[keys][i]", "def clean_data(data):\n # Filter out food trucks missing lat/long data\n trucks_without_lat_long = data[(data[\"Latitude\"] == 0) | (data[\"Longitude\"] == 0)].index\n data.drop(trucks_without_lat_long, inplace = True)\n\n # Filter out food trucks with pending/expired/suspended permits\n trucks_without_valid_permit = data[data[\"Status\"] != \"APPROVED\"].index\n data.drop(trucks_without_valid_permit, inplace = True)", "def filter_flights(flts):\n global spec_airl, spec_dest, spec_wkdy\n if spec_airl:\n flts = flts.loc[flts.apply(lambda flts: flts[\"AIRLINE_CODE\"] in spec_airl, axis=1)]\n if spec_dest:\n flts = flts.loc[flts.apply(lambda flts: flts[\"DESTINATION_IATA\"] in spec_dest, axis=1)]\n if spec_wkdy:\n flts = flts.loc[flts.apply(lambda flts: flts[\"WEEKDAY\"] in spec_wkdy, axis=1)]\n # update spec_airl and remove items, which are not in flights after filtering\n al_code_filtered = flts[\"AIRLINE_CODE\"].to_list() # list of all AIRLINE_CODEs in flights\n spec_airl = [a for a in spec_airl if a in al_code_filtered]\n # update spec_dest and remove items, which are not in flights after filtering\n de_code_filtered = flights[\"DESTINATION_IATA\"].to_list() # list of all AIRLINE_CODEs in flights\n spec_dest = [d for d in spec_dest if d in de_code_filtered]\n # update spec_wkdy and remove items, which are not in flights after filtering\n wkdy_filtered = flts[\"WEEKDAY\"].to_list()\n spec_wkdy = [wd for wd in spec_wkdy if wd in wkdy_filtered]\n return flts", "def iatas_without_country():\n codes_w_country = []\n for v in IATAS_BY_COUNTRIES.values():\n codes_w_country += v\n\n if not len(codes_w_country) == len(set(codes_w_country)):\n print(f\"Total codes ({len(codes_w_country)}) - codes with a country ({len(set(codes_w_country))}) = \"\n f\"{len(codes_w_country) - len(set(codes_w_country))}, please check for double assignment: \", end=\"\")\n print([x for x in codes_w_country if codes_w_country.count(x) > 1])\n\n with open(\"./data/flight_data.csv\", 'r') as file: # open as simple text file\n lines = file.read().splitlines()\n all_codes_in_flts = list()\n for line in lines:\n if line.split(\",\")[7] not in all_codes_in_flts: # iata codes is in 8th position of every line\n all_codes_in_flts.append(line.split(\",\")[7])\n del (all_codes_in_flts[0]) # delete header entry of 8th position\n assigned = [c for c in all_codes_in_flts if c in codes_w_country] # iatas with country\n not_assigned = [c for c in all_codes_in_flts if c not in codes_w_country] # iatas without country\n\n if len(all_codes_in_flts) - len(assigned) == 0:\n return None\n else:\n 
return not_assigned", "def cleaning(df):\n df['Weather'] = df['Weather'].str.replace('Moderate ', '')\n df['Weather'] = df['Weather'].str.replace(' Showers', '')\n df['Weather'] = df['Weather'].str.replace('Mainly ', '')\n df['Weather'] = df['Weather'].str.replace('Mostly ', '')\n df = df.groupby('Weather').filter(lambda x: len(x) >= 10)\n df['Weather'] = df['Weather'].str.replace('Drizzle', 'Rain')\n df = df[df['Weather'] != 'Fog']\n df = df[df['Weather'] != 'Rain,Fog']\n return df", "def airport_data(airport, arrival_flag):\n current_date = datetime.now().strftime('%Y-%m-%d')\n airport_iata = \"DUB/\" if airport == \"Dublin\" else \"SNN/\" if airport == \"Shannon\" else \"ORK/\"\n dep_or_arr = \"departures/\" if arrival_flag == \"false\" else \"arrivals/\"\n airport_url = \"http://ec2-52-19-19-167.eu-west-1.compute.amazonaws.com/\" + \\\n dep_or_arr + \"airportdate/\" + airport_iata + current_date\n pop_scheduled = \"arrivalScheduled\" if arrival_flag == \"false\" else \"departureScheduled\"\n target_scheduled = \"departureScheduled\" if arrival_flag == \"false\" else \"arrivalScheduled\"\n target_airport = \"arrivalAirport\" if arrival_flag == \"false\" else \"departureAirport\"\n response = requests.get(airport_url)\n data = response.text\n parsed = json.loads(data)\n for json_object in parsed:\n json_object.pop(pop_scheduled)\n json_object.pop('airport')\n json_object.pop('flightDate')\n json_object['idA'] = json_object.pop(target_scheduled)\n json_object['idB'] = json_object.pop('flightIata')\n json_object['targetA'] = json_object.pop(target_airport)\n json_object['targetB'] = json_object.pop('airlineName')\n json_object['targetC'] = json_object.pop('airportIata')\n return {'results': parsed}", "def additionalCleanup(df):\n\n df = removeCancellations(df)\n df = removeTurnbacks(df)\n df = removeDiversions(df)\n df = filterFlights(df)\n return df", "def _validate_iata_airport(self, code):\n for ap in Query.AIRPORT_LIST:\n if ap[\"iata_code\"] == code:\n return ap\n return None", "def read_and_process_airport_data(spark, filename, df_dimension_state_table):\n logging.info(\"Reading airport data\")\n # load the airport codes so we can map them to states\n airport_schema = R([\n Fld(\"ident\", Str()),\n Fld(\"type\", Str()),\n Fld(\"name\", Str()),\n Fld(\"elevation_ft\", Int()),\n Fld(\"continent\", Str()),\n Fld(\"iso_country\", Str()),\n Fld(\"iso_region\", Str()),\n Fld(\"municipality\", Str()),\n Fld(\"gps_code\", Str()),\n Fld(\"iata_code\", Str()),\n Fld(\"local_code\", Str()),\n Fld(\"coordinates\", Str())\n ])\n\n df_airport = spark.read.options(Header=True, Delimter=\",\").csv(\n filename, airport_schema)\n\n # cleanse: we only want the airports in the US which map to the states that we have in the states table\n\n df_airport = df_airport.filter(df_airport.iso_country == \"US\") \\\n .join(df_dimension_state_table, F.substring(df_airport.iso_region, 4, 2) == df_dimension_state_table.state_key,\n \"inner\") \\\n .select(df_airport.ident, df_airport.local_code, df_dimension_state_table.state_key)\n\n return df_airport", "def clean(self):\n self.delete_invalid_geometries(\n query_small_area=lambda feat: \"_part\" not in feat[\"localId\"]\n )\n self.topology()\n self.merge_building_parts()\n self.simplify()\n self.delete_small_geometries()", "def condense_data(in_file, out_file, city):\n \n with open(out_file, 'w') as f_out, open(in_file, 'r') as f_in:\n # set up csv DictWriter object - writer requires column names for the\n # first row as the \"fieldnames\" argument\n out_colnames 
= ['duration', 'month', 'hour', 'day_of_week', 'user_type'] \n trip_writer = csv.DictWriter(f_out, fieldnames = out_colnames)\n trip_writer.writeheader()\n \n ## TODO: set up csv DictReader object ##\n trip_reader = csv.DictReader(f_in)\n\n # collect data from and process each row\n for row in trip_reader:\n # set up a dictionary to hold the values for the cleaned and trimmed\n # data point\n new_point = {}\n \n ## TODO: use the helper functions to get the cleaned data from ##\n ## the original data dictionaries. ##\n ## Note that the keys for the new_point dictionary should match ##\n ## the column names set in the DictWriter object above. ##\n \n duration= duration_in_mins(row, city)\n month, hour, day_of_week= time_of_trip(row, city)\n user_type= type_of_user(row, city)\n new_point.update({'duration':duration, 'month':month, 'hour':hour, \n 'day_of_week':day_of_week, 'user_type':user_type})\n \n ## TODO: write the processed information to the output file. ##\n ## see https://docs.python.org/3/library/csv.html#writer-objects ##\n trip_writer.writerow(new_point)", "def import_airport_data(iata, write=1):\n\n with open(\"all_airports.csv\", \"r\", newline=\"\", encoding='utf-8') as file:\n contents = csv.DictReader(file)\n apd = None\n\n for row in contents:\n if row[\"iata_code\"] == iata.upper():\n apd = Airport(row[\"iata_code\"],\n row[\"gps_code\"],\n row[\"municipality\"],\n (row[\"latitude_deg\"], row[\"longitude_deg\"]))\n break\n if not apd:\n raise AirportNotKnown([iata])\n\n if write == 1:\n with open(\"airports.csv\", \"a\", newline=\"\") as file2:\n file_writer = csv.writer(file2, delimiter=\"|\")\n file_writer.writerow([apd.iata,\n apd.icao,\n ascii(apd.name).replace(\"'\", \"\"),\n apd.coord[0], apd.coord[1]])\n return apd", "def filter_data(self):\n self.remove_rows(self.earnings_yield, 0)\n self.remove_rows(self.ret_on_capital, 0)\n self.remove_rows(\"Liq.2meses\", 0)", "def _clean_data(data, icd9_descript_dict, no_onset_age=True):\n x_raw, y_raw = [], []\n\n for idx, line in enumerate(data):\n line = line.split()\n\n try:\n features = []\n features.append('age_' + line[RAW_AGE_COL])\n features.append('gender_' + line[RAW_GENDER_COL])\n\n icd9s = [i.split(':') for i in line[RAW_FIRST_ICD9_COL:]]\n # filter invalid icd9s and sort by onset age in place\n icd9s = [i for i in icd9s if i[0] in icd9_descript_dict]\n icd9s.sort(key=lambda i: int(i[1]))\n\n if no_onset_age:\n icd9s = [i[0] for i in icd9s] # remove onset age\n else:\n icd9s = [':'.join(i) for i in icd9s]\n features.extend(icd9s)\n\n x_raw.append(features)\n y_raw.append(line[RAW_CLASS_COL]) # extract class\n except:\n print('WARNING: error on line #{} with case:'.format(idx))\n print(' '.join(line))\n raise\n\n assert len(x_raw) == len(y_raw)\n\n return x_raw, y_raw", "def get_ports_cleansed(self,dfports):\n dfports.createOrReplaceTempView(\"Port\")\n cl_dfport=self.spark.sql(\"\"\"\n select code as port_code,airport_name,substring_index(airport_name, ',', -1) port_state_code from Port p\n \"\"\")\n return cl_dfport", "def cleaning(dataset):\n dataset = dataset.sort_values('pick')\n # setting the dates\n def convert_dt(epoch):\n if pd.isna(epoch):\n return 'na'\n else:\n temp = time.strftime('%Y-%m-%d', time.localtime(epoch))\n return temp\n dataset['pick'] = dataset['pick'].apply(convert_dt)\n dataset['1st_deliver_attempt'] = dataset['1st_deliver_attempt'].apply(convert_dt)\n dataset['2nd_deliver_attempt'] = dataset['2nd_deliver_attempt'].apply(convert_dt)\n \n # settling the locations\n def 
extract_region(address):\n result = []\n address = address.lower()\n for key in sla_mat:\n if key in address:\n result.append(key)\n if len(result) == 1:\n return result[0]\n else:\n # some entries have more than one region in the address. for this case we took the end\n result.sort(key = lambda x: address.find(x), reverse = True)\n return result[0] \n dataset['buyeraddress'] = dataset['buyeraddress'].apply(extract_region)\n dataset['selleraddress'] = dataset['selleraddress'].apply(extract_region)\n dataset.columns = ['orderid', 'start', '1st', '2nd', 'buyeraddress', 'selleraddress']\n dataset.reset_index(drop = True, inplace = True)\n return dataset", "def airports():\n\n queryType = \"SQL++ query - scoped to inventory: \"\n partialAirportName = request.args['search']\n\n queryPrep = \"SELECT airportname FROM `travel-sample`.inventory.airport WHERE \"\n sameCase = partialAirportName == partialAirportName.lower() or partialAirportName == partialAirportName.upper() #bool\n\n # The code does some guesswork to determine what the user is typing in.\n # This is based on string length and capitalization. If it believes the\n # string is an FAA or ICAO code, it queries for a match in the 'faa' or\n # 'icao' field. Otherwise, the code assumes a partial airport name, and\n # queries for a substring match at the start of the 'airportname' field\n\n if sameCase and len(partialAirportName) == 3:\n queryPrep += \"faa=$1\"\n queryArgs = [partialAirportName.upper()]\n elif sameCase and len(partialAirportName) == 4:\n queryPrep += \"icao=$1\"\n queryArgs = [partialAirportName.upper()]\n else:\n queryPrep += \"POSITION(LOWER(airportname), $1) = 0\"\n queryArgs = [partialAirportName.lower()]\n\n results = cluster.query(queryPrep, *queryArgs)\n airports = [x for x in results]\n\n # 'context' is returned to the frontend to be shown in the Query Log\n\n context = [queryType + queryPrep]\n\n response = make_response(jsonify({\"data\": airports, \"context\": context}))\n return response", "def clean(data):\n data = data.drop_duplicates()\n # PREPROCESSING\n\n data['CustomerID'] = data['CustomerID'].astype('str')\n data = data[data.CustomerID != \"nan\"]\n \n data['StockCode'] = data['StockCode'].astype('str')\n data['StockCode'] = data['StockCode'].fillna('')\n ## Spaces and special characters are not considered alphabetical\n data['StockCode'] = data['StockCode'].str.replace(' ', '')\n \n ## Delete rows containing StockCode with only alphabetical letters\n data = data[~data.StockCode.str.isalpha()]\n \n data = data[data.StockCode != \"23843\"]\n \n data = data[data.UnitPrice > 0]\n\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clean the ports dataset to get the port code and state code, for USA ports only.
def get_ports_cleansed(self, dfports):
    dfports.createOrReplaceTempView("Port")
    cl_dfport = self.spark.sql("""
        select code as port_code, airport_name,
               substring_index(airport_name, ',', -1) port_state_code
        from Port p
    """)
    return cl_dfport
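For comparison, a minimal sketch of the same projection written with the DataFrame API instead of a temp view; it assumes only that `dfports` has the `code` and `airport_name` columns the SQL above relies on.

from pyspark.sql import functions as F

cl_dfport = dfports.select(
    F.col("code").alias("port_code"),
    "airport_name",
    # take the text after the last comma as the state code
    F.substring_index("airport_name", ",", -1).alias("port_state_code"),
)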
[ "def get_airports_cleansed(self,dfairport):\n dfairport.createOrReplaceTempView(\"Airport\")\n cl_dfairport=self.spark.sql(\"\"\"\n select a.*,substring(iso_region,-2)state_code from Airport a\n where iso_country='US'\n and type in (\"large_airport\", \"medium_airport\", \"small_airport\")\n \"\"\")\n return cl_dfairport", "def filter_ports(\n desired_ports: Iterable[int], bad_ports: Optional[Iterable[int]] = None\n) -> Set[int]:\n return set(desired_ports) - set(bad_ports or used_ports())", "def _filter_by_country(self):\n df = self.fdf[self.fdf['Protocol ID'].str.startswith(self.country) == True].copy()\n\n return df", "def get_countries_cleansed(self,dfcountries):\n dfcountries.createOrReplaceTempView(\"Countries\")\n cl_dfcountries=self.spark.sql(\"\"\"\n select code as country_code,country_name,case when country_name like '%INVALID%' or country_name like '%Collapsed%' or country_name like '%No Country Code%' then 'INVALID'\n else 'VALID' end country_status from Countries c \n \"\"\")\n \n return cl_dfcountries", "def clear_of_port_mapping(self, db_filter=None):\n return self.db.delete_row(FROM='wim_port_mappings', WHERE=db_filter)", "def clean_airports(airports_spark, spark_session):\n spark = spark_session\n airports_spark.createOrReplaceTempView('airports')\n \n airports_spark_cleaned = spark.sql(\"\"\"\n SELECT ident, name, municipality as City, SUBSTRING(iso_region, 4, 5) as State, iata_code\n FROM airports\n WHERE iata_code IS NOT NULL\n \"\"\")\n\n airports_spark_cleaned = airports_spark_cleaned.dropDuplicates()\n airports_spark_cleaned = airports_spark_cleaned.na.drop()\n \n return airports_spark_cleaned", "def iatas_without_country():\n codes_w_country = []\n for v in IATAS_BY_COUNTRIES.values():\n codes_w_country += v\n\n if not len(codes_w_country) == len(set(codes_w_country)):\n print(f\"Total codes ({len(codes_w_country)}) - codes with a country ({len(set(codes_w_country))}) = \"\n f\"{len(codes_w_country) - len(set(codes_w_country))}, please check for double assignment: \", end=\"\")\n print([x for x in codes_w_country if codes_w_country.count(x) > 1])\n\n with open(\"./data/flight_data.csv\", 'r') as file: # open as simple text file\n lines = file.read().splitlines()\n all_codes_in_flts = list()\n for line in lines:\n if line.split(\",\")[7] not in all_codes_in_flts: # iata codes is in 8th position of every line\n all_codes_in_flts.append(line.split(\",\")[7])\n del (all_codes_in_flts[0]) # delete header entry of 8th position\n assigned = [c for c in all_codes_in_flts if c in codes_w_country] # iatas with country\n not_assigned = [c for c in all_codes_in_flts if c not in codes_w_country] # iatas without country\n\n if len(all_codes_in_flts) - len(assigned) == 0:\n return None\n else:\n return not_assigned", "def _filter_by_country(self):\n df = self.fdf[self.fdf['Protocol ID'].str.startswith(self.country) == True]\n\n return df", "def normalize_input_output_ports(loop_node: Node):\n Loop.remove_unused_ops_from_port_map(loop_node, loop_node.input_port_map, 'internal_layer_id', 'in')\n Loop.remove_unused_ops_from_port_map(loop_node, loop_node.output_port_map, 'internal_layer_id', 'out')\n Loop.remove_unused_ops_from_port_map(loop_node, loop_node.back_edges, 'to_layer')\n\n # remove not connected input/output ports\n Loop.re_numerate_input_ports(loop_node)\n Loop.re_numerate_output_ports(loop_node)", "def read_dicts_from_labels(citi_code, port, state):\n # get all regex patterns\n pattern, port_pattern, state_pattern = regex_patterns()\n \n with open(i94_label_path, 'r') 
as fp:\n for i, line in enumerate(fp):\n if i > 8 and i < 245:\n match = re.search(pattern, line)\n citi_code['id'].append(match.group(1))\n citi_code['country'].append(match.group(2))\n if i > 301 and i < 893:\n match = re.search(port_pattern, line)\n try:\n port['code'].append(match.group(1))\n port['city'].append(match.group(2))\n port['state_code'].append(match.group(3))\n except:\n port['code'].append(None)\n port['city'].append(None)\n port['state_code'].append(None)\n if i > 980 and i < 1036:\n match = re.search(state_pattern, line)\n state['code'].append(match.group(1))\n state['name'].append(match.group(2))", "def create_staging_tables_from_labels():\n citi_code = {'id':[],\n 'country':[]}\n\n port = {\n 'code': [],\n 'city': [],\n 'state_code': []\n }\n\n state = {\n 'code': [],\n 'name': []\n }\n \n # fill dicts\n read_dicts_from_labels(citi_code, port, state)\n \n # citi code dataframe\n citi_code_df = pd.DataFrame(citi_code)\n citi_code_df = citi_code_df.set_index('id')\n citi_code_df.country = citi_code_df.country.str.capitalize()\n\n # airport dataframe\n port_df = pd.DataFrame(port)\n port_df = port_df.set_index('code')\n port_df['state_code'] = port_df.state_code.str.strip()\n port_df = port_df.dropna(how='all')\n \n values = ['AR (BPS)', 'CA (BPS)', 'CO #ARPT', 'FL #ARPT', 'LA (BPS)',\n 'ME (BPS)', 'MT (BPS)', 'NM (BPS)', 'SC #ARPT', 'TX (BPS)',\n 'VA #ARPT', 'VT (I-91)', 'VT (RT. 5)', 'VT (BP - SECTOR HQ)',\n 'WASHINGTON #INTL', 'WA (BPS)']\n \n # clean state_code\n temp = np.where(port_df.state_code.isin(values), \\\n port_df.state_code.str[:2],\\\n np.where(port_df.state_code.str.len()==2, \\\n port_df.state_code, np.nan))\n\n us_state_codes = np.where(temp=='MX', np.nan, temp)\n port_df['state_code'] = us_state_codes\n port_df = port_df.dropna(how='any')\n \n # states dataframe\n states = pd.DataFrame(state)\n states = states.set_index('code')\n \n # output paths\n citi_code_path = os.path.join(output_dir,'country_code.csv')\n port_path = os.path.join(output_dir,'port_immigration.csv')\n states_path = os.path.join(output_dir,'state_code.csv')\n \n # save the dataframes\n if save_on_s3:\n save_df_on_s3(citi_code_df, citi_code_path)\n save_df_on_s3(port_df,port_path)\n save_df_on_s3(states,states_path)\n else:\n citi_code_df.to_csv(citi_code_path)\n port_df.to_csv(port_path)\n states.to_csv(states_path)", "def normalize_city_codes_dataframe(dataframe):\n\n def parse_city_county_state(area):\n \"\"\" Return a pandas series with the city, county and state, this is used by apply.\n eg, area could be: 'Amesbury, Essex, MA' \"\"\"\n area = area.strip().lower()\n area_list = area.split(', ')\n assert len(area_list) < 4\n city = area_list[0]\n county = ''\n # if there are more than 2 values, the 2nd value is the county.\n if len(area_list) > 2:\n county = area_list[1]\n state = US_STATES_DICT[area_list[-1]]\n return pandas.Series([city, county, state])\n\n new_cols = ['city', 'county', 'state']\n dataframe[new_cols] = dataframe['AREA'].apply(parse_city_county_state)\n # unfortunately, there are multiple codes for the same city\n # and some of them don't work.\n dataframe['CODE'] = dataframe['CODE'].apply(str)\n dataframe['CODE'] = dataframe['CODE'].str.strip()\n dataframe = dataframe.groupby(['city', 'state'])['CODE'].apply('|'.join)\n dataframe = dataframe.reset_index()\n dataframe = normalize_headers_in_dataframe('zillow_city_codes', dataframe)\n return dataframe", "def clean_csdata(self) -> None:", "def prepare_dataset_cities():\n\n df = 
pd.read_csv(\"worldcitiespop.csv\")\n df = df.drop_duplicates()\n df = df.drop_duplicates(['Country','City','AccentCity','Region'])\n print(\"saving dataset cities\")\n df.to_csv(\"../prepared_datasets/cities.csv\")", "def __getPort(self, row):\n if True:\n \"\"\"Keep address, no ports\"\"\"\n port = row[3].split(':')\n row[3] = port[0]\n port = row[5].split(':')\n row[5] = port[0]\n elif False:\n \"\"\"Keep ports only\"\"\"\n port = row[3].split(':')\n row[3] = port[1]\n port = row[5].split(':')\n row[5] = port[1]\n del (row[-4:])\n del (row[-4])\n del (row[-5])\n return row", "def preprocess(city=''):\n df = read_raw_data(city)\n\n # Drop night time hours\n df = df[~df.Hour.isin(config.dropped_hours)].reset_index(drop=True)\n\n # Convert hour, day, month and city into one-hot vectors\n onehot = ['Hour', 'Day', 'Month']\n if not city:\n onehot.append('City')\n\n dummified = pd.get_dummies(df, columns=onehot, prefix=onehot, prefix_sep='')\n column_names = dummified.columns\n\n # Normalize continuous features\n continuous_cols = dummified.iloc[:, :10]\n discrete_cols = dummified.iloc[:, 10:]\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaler = scaler.fit(continuous_cols)\n save_scaler_extrema(scaler.data_min_, scaler.data_max_, column_names[:10])\n\n # Concatenate normalized continous features and discrete features\n norm_continuous_cols = pd.DataFrame(scaler.transform(continuous_cols))\n normalized = pd.concat([norm_continuous_cols, discrete_cols], axis=1,\n ignore_index=True)\n normalized.columns = column_names\n\n save_path = 'data/preprocessed_' + city + '.pkl' if city else 'data/preprocessed.pkl'\n normalized.to_pickle(save_path)\n print('Saved preprocessed data to: ' + save_path)", "def get_pincodes(state, city):\n pincode_data = STATEDATA[state][city]\n return pincode_data", "def get_data():\n points = get_alameda_county_points()\n return filter_ranson_criteria(clean_data(get_weather_data(points)))", "def state_fips(state):\n if state == \"Texas\":\n return '48'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clean the countries dataset to add a column flagging each country as valid or invalid.
def get_countries_cleansed(self, dfcountries):
    dfcountries.createOrReplaceTempView("Countries")
    cl_dfcountries = self.spark.sql("""
        select code as country_code, country_name,
               case when country_name like '%INVALID%'
                      or country_name like '%Collapsed%'
                      or country_name like '%No Country Code%' then 'INVALID'
                    else 'VALID'
               end country_status
        from Countries c
    """)
    return cl_dfcountries
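For reference, a hedged sketch of the same valid/invalid flag expressed with the DataFrame API rather than SQL; it assumes only the `code` and `country_name` columns used above.

from pyspark.sql import functions as F

cl_dfcountries = dfcountries.select(
    F.col("code").alias("country_code"),
    "country_name",
    # mark known placeholder names as INVALID, everything else as VALID
    F.when(
        F.col("country_name").like("%INVALID%")
        | F.col("country_name").like("%Collapsed%")
        | F.col("country_name").like("%No Country Code%"),
        "INVALID",
    ).otherwise("VALID").alias("country_status"),
)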
[ "def preprocess_with_interpolation(training_set):\n \n X = training_set.copy()\n \n X['continent'] = ''\n \n missing = []\n \n for index, row in X.iterrows(): \n \n country = pycountry.countries.get(name = row['Country Name'])\n \n try:\n alpha_2 = country.alpha_2\n continent = pycountry_convert.country_alpha2_to_continent_code(alpha_2)\n except(AttributeError, KeyError):\n missing.append(row['Country Name'])\n \n X.at[index, 'continent'] = continent\n\n missing_series = pd.Series(missing)\n missing_unique = missing_series.unique()\n \n \n for i, row in X[(X['continent'] == '')].iterrows():\n for name in missing_unique:\n \n if(row['Country Name'] == name):\n \n if(name == missing_unique[0]):\n row['continent'] = 'NA'\n \n if(name == missing_unique[1]):\n row['continent'] = 'SA'\n \n if(name == missing_unique[2]):\n row['continent'] = 'EU'\n \n if(name == missing_unique[3]):\n row['continent'] = 'AF'\n \n if(name == missing_unique[4]):\n row['continent'] = 'AF'\n \n if(name == missing_unique[5]):\n row['continent'] = 'AF'\n \n if(name == missing_unique[6]):\n row['continent'] = 'SA'\n \n if(name == missing_unique[7]):\n row['continent'] = 'EU'\n \n if(name == missing_unique[8]):\n row['continent'] = 'AF' \n \n if(name == missing_unique[9]):\n row['continent'] = 'EU' \n \n if(name == missing_unique[10]):\n row['continent'] = 'AF' \n \n if(name == missing_unique[11]):\n row['continent'] = 'AS'\n \n if(name == missing_unique[12]):\n row['continent'] = 'AS'\n \n if(name == missing_unique[13]):\n row['continent'] = 'AS'\n \n if(name == missing_unique[14]):\n row['continent'] = 'AS'\n \n if(name == missing_unique[15]):\n row['continent'] = 'EU'\n \n if(name == missing_unique[16]):\n row['continent'] = 'AS'\n \n if(name == missing_unique[17]):\n row['continent'] = 'AS'\n \n if(name == missing_unique[18]):\n row['continent'] = 'AS'\n \n if(name == missing_unique[19]):\n row['continent'] = 'EU'\n \n if(name == missing_unique[20]):\n row['continent'] = 'OC'\n \n if(name == missing_unique[21]):\n row['continent'] = 'EU'\n \n if(name == missing_unique[22]):\n row['continent'] = 'NA'\n \n if(name == missing_unique[23]):\n row['continent'] = 'EU'\n \n if(name == missing_unique[24]):\n row['continent'] = 'NA'\n \n if(name == missing_unique[25]):\n row['continent'] = 'NA'\n \n if(name == missing_unique[26]):\n row['continent'] = 'NA'\n \n if(name == missing_unique[27]):\n row['continent'] = 'NA'\n \n if(name == missing_unique[28]):\n row['continent'] = 'AF'\n \n if(name == missing_unique[29]):\n row['continent'] = 'AS'\n \n if(name == missing_unique[30]):\n row['continent'] = 'SA'\n \n if(name == missing_unique[31]):\n row['continent'] = 'AS'\n \n if(name == missing_unique[32]):\n row['continent'] = 'NA'\n \n if(name == missing_unique[33]):\n row['continent'] = 'AS'\n \n if(name == missing_unique[34]):\n row['continent'] = 'AS'\n \n \n \n return X", "def clean_data(self, data: pd.DataFrame) -> pd.DataFrame:", "def perform_data_clean(df):\n # Perform data cleanup\n for col, value in NAN_VALUE_MAP.items():\n df[col] = df[col].fillna(value)\n\n for col in BOOLEAN_VALUE_COLUMNS:\n df[col] = df[col].astype(\"bool\")", "def _feature_country_process(self):\n if 'Country' not in self.df_invoice.columns:\n return\n\n list_countries_keep = ['United Kingdom']\n rows_before = self.df_invoice.shape[0]\n \n df_invoice_new = pd.DataFrame()\n for country in list_countries_keep : \n df_invoice_new = df_invoice_new.append(\\\n self._df_invoice[self.df_invoice['Country']==country]\\\n , ignore_index=True)\n\n 
self.df_invoice = df_invoice_new\n del(df_invoice_new)\n \n rows_after = self._df_invoice.shape[0] \n P5_SegmentClassifier.print_stat_rows(\"Countries filtering : \"\\\n , rows_before, rows_after)\n\n \n #-------------------------------------------------------------------------\n # Due to the fact only one country is used, then this feature is dropped\n #-------------------------------------------------------------------------\n list_col_to_keep = [col for col in self._df_invoice.columns \\\n if col not in 'Country']\n \n self._df_invoice = self._df_invoice[list_col_to_keep] \n\n return", "def iatas_without_country():\n codes_w_country = []\n for v in IATAS_BY_COUNTRIES.values():\n codes_w_country += v\n\n if not len(codes_w_country) == len(set(codes_w_country)):\n print(f\"Total codes ({len(codes_w_country)}) - codes with a country ({len(set(codes_w_country))}) = \"\n f\"{len(codes_w_country) - len(set(codes_w_country))}, please check for double assignment: \", end=\"\")\n print([x for x in codes_w_country if codes_w_country.count(x) > 1])\n\n with open(\"./data/flight_data.csv\", 'r') as file: # open as simple text file\n lines = file.read().splitlines()\n all_codes_in_flts = list()\n for line in lines:\n if line.split(\",\")[7] not in all_codes_in_flts: # iata codes is in 8th position of every line\n all_codes_in_flts.append(line.split(\",\")[7])\n del (all_codes_in_flts[0]) # delete header entry of 8th position\n assigned = [c for c in all_codes_in_flts if c in codes_w_country] # iatas with country\n not_assigned = [c for c in all_codes_in_flts if c not in codes_w_country] # iatas without country\n\n if len(all_codes_in_flts) - len(assigned) == 0:\n return None\n else:\n return not_assigned", "def main_sanitize_data(self):\n # Sanitize column names\n self.data.columns = self.data.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')\n\n # Mandatory Sanitization\n self.data = self.data.apply(self.mandatory_sanitization)\n\n # Specific Column Sanitization\n self.data['business'] = self.data['business'].loc[self.data['business'].notnull()].apply(self.sanitize_business_name)\n self.data['title'] = self.data['title'].str.capitalize().str.replace(\".\", \"\")\n self.data['first_name'] = self.data['first_name'].str.capitalize()\n self.data['last_name'] = self.data['last_name'].str.capitalize()\n self.data['date_of_birth'] = self.data['date_of_birth'].loc[self.data['date_of_birth'].notnull()].apply(self.sanitize_date)\n self.data['home_number'] = self.data['home_number'].loc[self.data['home_number'].notnull()].apply(self.sanitize_landline_numbers)\n self.data['fax_number'] = self.data['fax_number'].loc[self.data['fax_number'].notnull()].apply(self.sanitize_landline_numbers)\n self.data['mobile_number'] = self.data['mobile_number'].loc[self.data['mobile_number'].notnull()].apply(self.sanitize_mobile_numbers)\n self.data['notes'] = self.data['notes'].loc[self.data['notes'].notnull()].apply(self.sanitize_notes)\n\n # Convert nan to None\n self.data = self.data.where(pd.notnull(self.data), None)\n \n print(\"Data Sanitization Successful\")\n return True", "def clean_csdata(self) -> None:", "def _filter_countries(data, map=False):\n #headers = data[0].keys()\n headers = ['name', 'region', 'adminregion', 'iso2Code', 'capitalCity', u'longitude',\n 'latitude', 'incomeLevel', 'id', 'lendingType']\n results = [headers]\n for row in data:\n if not _is_country(row['iso2Code']):\n #TODO: remove them from the cache\n continue\n if map:\n 
results.append([_country_conversion_map(i, row[i]) for i in headers])\n else:\n results.append([row[i] for i in headers])\n return results", "def verify_country(rec, orig):\n pass", "def clean_data(df):\n # Missing observation types for some obstructions; add it in if an\n # obstruction_type was filled in\n df.loc[(df.observ_type.isnull()) & df.obstruction_type.notnull(),\n 'observ_type'] = 'OBSTRUCTION'\n # \"Failing_shim\" only filled in if \"N\" -- fill in the NaNs\n df.loc[df.failing_shim.isnull(), 'failing_shim'] = \"N\"\n # Missing observation types for some heigh differences; add it in if\n # a height difference was filled in\n df.loc[(df.observ_type.isnull()) & (df.level_difference_type.notnull()),\n 'observ_type'] = 'HEIGHTDIFF'\n # Missing observation types for some surface conditions; add it in if a\n # surface condition was filled in\n df.loc[(df.observ_type.isnull()) & (df.surface_condition.notnull()),\n 'observ_type'] = 'SURFCOND'\n # Missing observation types for some other features; add them in if\n # an other feature was filled in\n df.loc[(df.observ_type.isnull()) & (df.other_feature.notnull()),\n 'observ_type'] = 'OTHER'\n # Missing observations for some cross slopes; add them in if the\n # cross slope is not null and no other values are present for other\n # surface conditions\n df.loc[(df.observ_type.isnull()) & (df.isolated_cross_slope.notnull()) &\n (df.surface_condition.isnull()) & (df.height_difference.isnull()),\n 'observ_type'] = 'XSLOPE'\n\n return df", "def clean_data(self):\r\n self.all_data.drop(len(self.all_data) - 1, inplace = True)", "def remove_not_right_country(self, df):\n # Use the package pycountry to get the language from the country code\n if len(self.country) == 2:\n if self.country == 'uk':\n country = pycountry.countries.get(alpha_2='GB'.upper())\n else:\n country = pycountry.countries.get(alpha_2=self.country.upper())\n elif len(self.country) == 3:\n country = pycountry.countries.get(alpha_3=self.country.upper())\n elif len(self.country) == 4:\n country = pycountry.countries.get(alpha_4=self.country.upper())\n else:\n raise\n return df[df['socio1. 
In which country do you work?'] == country.name]", "def cleandata(df):\r\n df = clean_column_names(df)\r\n print(\"Columns headers cleaned\")\r\n df_dup = drop_duplicate(df, keep='first')\r\n print(\"Dropped duplicate rows\")\r\n df = remove_outlier_IQR(df_dup)\r\n print(\"Outliers removed\")\r\n df = impute_missing_value(df)\r\n print(\"Missing Values imputed\")\r\n return df", "def add_clean_cats(df):\n df['cats'] = df['categories'].apply(clean_cats)", "def prepare_dataset_cities():\n\n df = pd.read_csv(\"worldcitiespop.csv\")\n df = df.drop_duplicates()\n df = df.drop_duplicates(['Country','City','AccentCity','Region'])\n print(\"saving dataset cities\")\n df.to_csv(\"../prepared_datasets/cities.csv\")", "def clean_data(path):\n\n\tfeat_names = [\n\t\t\t\t\"age\",\n\t\t\t\t\"bp\",\n\t\t\t\t\"sg\",\n\t\t\t\t\"al\",\n\t\t\t\t\"su\", # normal->1, abnormal->0\n\t\t\t\t\"rbc\", # normal->1, abnormal->0\n\t\t\t\t\"pc\", # present->1, notpresent->0\n\t\t\t\t\"pcc\", # present->1, notpresent->0\n\t\t\t\t\"ba\",\n\t\t\t\t\"bgr\",\n\t\t\t\t\"bu\",\n\t\t\t\t\"sc\",\n\t\t\t\t\"sod\",\n\t\t\t\t\"pot\",\n\t\t\t\t\"hemo\",\n\t\t\t\t\"pcv\",\n\t\t\t\t\"wbcc\",\n\t\t\t\t\"rbcc\",\n\t\t\t\t\"htn\", #yes->1, no->0\n\t\t\t\t\"dm\", #yes->, no->0\n\t\t\t\t\"cad\", # yes->1, no->0\n\t\t\t\t\"appet\", # good->1, poor->0\n\t\t\t\t\"pe\", # yes->1, no->0\n\t\t\t\t\"ane\", # yes->1, no->0\n\t\t\t\t\"class\", # cdk->1, notcdk->0\t\t\n\t]\n\t\n\tfile_data = pd.read_csv(\n\t\t\t\t\tpath, \n\t\t\t\t\tskiprows = range(0, 29), \n\t\t\t\t\tsep = ',',\n\t\t\t\t\theader = None,\n\t\t\t\t\tna_values=['?','\\t?', '\\t'],\n\t\t\t\t\tnames=feat_names#,\n\t)\n\tpd.set_option('display.max_rows', None)\n\n\tfile_data.replace({\n\t\t\t\t\t\t\"rbc\":{\n\t\t\t\t\t\t\t\"normal\":1,\n\t\t\t\t\t\t\t\"abnormal\": 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"pc\":{\n\t\t\t\t\t\t\t\"normal\":1,\n\t\t\t\t\t\t\t\"abnormal\": 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"pcc\":{\n\t\t\t\t\t\t\t\"present\":1,\n\t\t\t\t\t\t\t\"notpresent\": 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"ba\":{\n\t\t\t\t\t\t\t\"present\":1,\n\t\t\t\t\t\t\t\"notpresent\": 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"htn\":{\n\t\t\t\t\t\t\t\"yes\":1,\n\t\t\t\t\t\t\t\"no\": 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"dm\":{\n\t\t\t\t\t\t\t\"yes\":1,\n\t\t\t\t\t\t\t\" yes\":1,\n\t\t\t\t\t\t\t\"no\": 0,\n\t\t\t\t\t\t\t\"\\tyes\":1,\n\t\t\t\t\t\t\t\"\\tno\":0,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"cad\":{\n\t\t\t\t\t\t\t\"yes\":1,\n\t\t\t\t\t\t\t\"no\": 0,\n\t\t\t\t\t\t\t\"\\tno\":0\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"appet\":{\n\t\t\t\t\t\t\t\"good\":1,\n\t\t\t\t\t\t\t\"poor\": 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"pe\":{\n\t\t\t\t\t\t\t\"yes\":1,\n\t\t\t\t\t\t\t\"no\": 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"ane\":{\n\t\t\t\t\t\t\t\"yes\":1,\n\t\t\t\t\t\t\t\"no\": 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"class\":{\n\t\t\t\t\t\t\t\"ckd\\t\":1,\n\t\t\t\t\t\t\t\"ckd\":1,\n\t\t\t\t\t\t\t\"notckd\":0,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"sg\":{\n\t\t\t\t\t\t\t1005:1.005,\n\t\t\t\t\t\t\t1015:1.015,\n\t\t\t\t\t\t\t1025:1.025,\n\t\t\t\t\t\t}\n\t},\n\tinplace = True)\n\tdata = file_data.values\n\n\treturn data", "def _clean_data(data, icd9_descript_dict, no_onset_age=True):\n x_raw, y_raw = [], []\n\n for idx, line in enumerate(data):\n line = line.split()\n\n try:\n features = []\n features.append('age_' + line[RAW_AGE_COL])\n features.append('gender_' + line[RAW_GENDER_COL])\n\n icd9s = [i.split(':') for i in line[RAW_FIRST_ICD9_COL:]]\n # filter invalid icd9s and sort by onset age in place\n icd9s = [i for i in icd9s if i[0] in icd9_descript_dict]\n 
icd9s.sort(key=lambda i: int(i[1]))\n\n if no_onset_age:\n icd9s = [i[0] for i in icd9s] # remove onset age\n else:\n icd9s = [':'.join(i) for i in icd9s]\n features.extend(icd9s)\n\n x_raw.append(features)\n y_raw.append(line[RAW_CLASS_COL]) # extract class\n except:\n print('WARNING: error on line #{} with case:'.format(idx))\n print(' '.join(line))\n raise\n\n assert len(x_raw) == len(y_raw)\n\n return x_raw, y_raw", "def clean(data):\n data = data.drop_duplicates()\n # PREPROCESSING\n\n data['CustomerID'] = data['CustomerID'].astype('str')\n data = data[data.CustomerID != \"nan\"]\n \n data['StockCode'] = data['StockCode'].astype('str')\n data['StockCode'] = data['StockCode'].fillna('')\n ## Spaces and special characters are not considered alphabetical\n data['StockCode'] = data['StockCode'].str.replace(' ', '')\n \n ## Delete rows containing StockCode with only alphabetical letters\n data = data[~data.StockCode.str.isalpha()]\n \n data = data[data.StockCode != \"23843\"]\n \n data = data[data.UnitPrice > 0]\n\n return data", "def clean_data(df):\n # Resolve categories and expand them to actual columns.\n categories_df = _resolve_categories(df['categories'])\n df = df.drop(columns=['categories'])\n df = pd.concat([df, categories_df], axis=1)\n\n # drop duplicates\n df = _drop_duplicates(df)\n return df" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Places a token on the board at the given coordinates; (0, 0) is the top-left. `player` is either 'X' or 'O'.
def place_token(self, x, y, player):
    self.rows[y][x] = player
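A minimal, self-contained sketch of the kind of board this one-liner assumes; the 3x3 size and the ' ' empty marker are assumptions for illustration.

class Board:
    def __init__(self, size=3):
        # rows[y][x] holds 'X', 'O', or ' ' for an empty square
        self.rows = [[' '] * size for _ in range(size)]

    def place_token(self, x, y, player):
        self.rows[y][x] = player


board = Board()
board.place_token(0, 0, 'X')   # top-left corner
board.place_token(1, 1, 'O')   # centre square
print(board.rows)              # [['X', ' ', ' '], [' ', 'O', ' '], [' ', ' ', ' ']]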
[ "def place_token(self, x, y, token):\n if x == 0:\n x_letter = 'a'\n elif x == 1:\n x_letter = 'b'\n else:\n x_letter = 'c'\n y_letter = str(y + 1)\n key = x_letter + y_letter\n self.pos_to_token[key] = token", "def place_token(self, x, y, token):\n (self._rows[y])[x] = token", "def placeSymbol(self,posY, posX, player):\n \"\"\"posX and posY are referenced to the board as carthesian values\"\"\"\n print((posY,posX))\n if posY > 2 or posX > 2 or posX < 0 or posY < 0:\n raise Exception.TTTException(4)\n if type(posX) is not int or type(posY) is not int:\n raise Exception.TTTException(5)\n if self.board[posY][posX] is ' ' and self.play == 1 and self.stack.canInsert(player) is True:\n self.stack.pop()\n li = list(self.board[posY])\n li[posX] = player\n self.board[posY] = li\n self.stack.push(player)\n self.win(posY, posX, player)\n self.printBoard()\n print(self.stack.getStack())\n elif self.play == 0:\n raise Exception.TTTException(2)\n elif self.board[posY][posX] is not ' ':\n raise Exception.TTTException(1)\n else:\n raise Exception.TTTException(3)", "def get_player_move(board, player_token):\n\n # Make use of the raw_input to ask the user a question. Make sure only\n # valid inputs work (use is_space_free function). The question should be\n # asked until the player gives a correct place for their token (a while\n # loop can help do that).", "def spawn(self, tile_y, tile_x):\n self.tile_y = tile_y #y position on grid\n self.tile_x = tile_x #x position on grid\n self.level.frame[self.tile_y][self.tile_x] == 'O' #token for presence of spawned sprite\n self.y = self.tile_y * TILESIZE #y position on screen\n self.x = self.tile_x * TILESIZE #x position on screen", "def mark(board, player, row, col):\n \n board[row][col] = player\n grid_rowA = board[0]\n grid_rowB = board[1]\n grid_rowC = board[2]\n print_board(grid_rowA, grid_rowB, grid_rowC)\n\n return board, grid_rowA, grid_rowB, grid_rowC", "def display_token(game):\n display = game.get_board().get_display()\n for player in game.get_players():\n token = player.getTokenName()\n position = player.getPosition()\n draw_token(display, token, position)", "def makeMove(self, pos, player):\n self.board[pos] = player", "def play_move(token, current_board):\n row_index = current_board.retrieve_next_empty_row_index(token.column)\n current_board.place_token(row_index, token.column, token.color)\n print('\\npost move in play move', current_board)\n return current_board", "def place_piece(board, x, y, player):\n can_place = isfree(board, x, y)\n if can_place:\n board[(x,y)] = player\n return can_place", "def _insert(self, player, row, column):\n self._rows[row][column] = player", "def place_player(self, gridpos=(0,0)):\n x,y = gridpos\n if x < 0 or x > self.gridsize-1 or y < 0 or y > self.gridsize-1:\n # Restrict movement to within the grid\n return\n tile = self.grid[x][y]\n if tile:\n if type(tile) == Wall:\n # Don't move if the square is a wall\n return\n elif type(tile) == Teleporter:\n State.teleport = tile.destination\n return\n elif type(tile) == Key and tile.visible:\n tile.pick_up()\n elif type(tile) == Door and tile.locked:\n # Door is locked, don't move\n return\n old_x,old_y = State.player.gridpos\n State.player.gridpos = gridpos\n State.player.pos = self.calc_pos(gridpos)\n self.grid[old_x][old_y] = None\n self.grid[x][y] = State.player", "def move(self, row: int, col: int, player: int) -> int:\n self.board[row][col] = player\n return player if self.is_win(player) else 0", "def mark_square(self, column, row, player):\n\n 
player.make_move(self.board, row, column)", "def set_player_position(self, position):", "def place_player(self, player):\n # -- get previous position of the item\n pos = np.argwhere(self.matrix == player.item_value)\n if len(pos) != 0:\n # -- delete the item\n pos = pos[0]\n self.matrix[pos[0], pos[1]] = MapItemList.EMPTY\n\n # -- place the item\n self.place(player.box_x, player.box_y, player.item_value)", "def play_game(self, player):\n pos = input(f\"Player {player}, please enter your desired position: \").strip()\n while not self.is_valid_input(pos):\n pos = input(\"Please enter a valid position: \").strip()\n self.board[int(pos)] = player\n self.print_board()", "def _mark_board_for_user(self, user_command):\n # Make sure the user has entered valid coordinates for her mark\n # and if so, mark the board for the user\n user_command_parts = user_command.split(',')\n if len(user_command_parts) == 2:\n row = int(user_command_parts[0].strip()) - 1\n col = int(user_command_parts[1].strip()) - 1\n valid_row_range = xrange(self.board.row_count)\n valid_col_range = xrange(self.board.column_count)\n if row in valid_row_range and col in valid_col_range:\n # Make sure a mark does not already exist at the coordinates \n if self.board.matrix[row][col] == self.board.CELL_NO_PLAYER:\n # Mark the board at the coordinate for the player\n self.board.matrix[row][col] = self.user_player_id\n # End turn and allow the computer player to take a turn\n self.current_player_id = self.computer_player_id", "def place_token(self, token, verbose=False):\n chosen_place_name = self.mind.choose_place_name_to_put_token()\n for place_card in self.game.board:\n if place_card.name == chosen_place_name:\n token.place = place_card\n if verbose:\n logger.info('{} puts the {} token on {}'.format(self.name,\n token.name,\n place_card.name))\n break" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Places a token on the board at the given coordinates; (0, 0) is the top-left. `player` is either 'X' or 'O'.
def place_token(self, x, y, token):
    if x == 0:
        x_letter = 'a'
    elif x == 1:
        x_letter = 'b'
    else:
        x_letter = 'c'
    y_letter = str(y + 1)
    key = x_letter + y_letter
    self.pos_to_token[key] = token
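A short usage sketch: column 0/1/2 maps to 'a'/'b'/'c' and row y maps to str(y + 1), so the top-left square lands under the key 'a1'. The tiny class below stands in for whatever object owns `pos_to_token` and is only an assumption for illustration.

class Board:
    def __init__(self):
        self.pos_to_token = {}

    def place_token(self, x, y, token):
        x_letter = 'a' if x == 0 else ('b' if x == 1 else 'c')
        self.pos_to_token[x_letter + str(y + 1)] = token


board = Board()
board.place_token(0, 0, 'X')
board.place_token(2, 1, 'O')
print(board.pos_to_token)      # {'a1': 'X', 'c2': 'O'}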
[ "def place_token(self, x, y, player):\n self.rows[y][x] = player", "def place_token(self, x, y, token):\n (self._rows[y])[x] = token", "def placeSymbol(self,posY, posX, player):\n \"\"\"posX and posY are referenced to the board as carthesian values\"\"\"\n print((posY,posX))\n if posY > 2 or posX > 2 or posX < 0 or posY < 0:\n raise Exception.TTTException(4)\n if type(posX) is not int or type(posY) is not int:\n raise Exception.TTTException(5)\n if self.board[posY][posX] is ' ' and self.play == 1 and self.stack.canInsert(player) is True:\n self.stack.pop()\n li = list(self.board[posY])\n li[posX] = player\n self.board[posY] = li\n self.stack.push(player)\n self.win(posY, posX, player)\n self.printBoard()\n print(self.stack.getStack())\n elif self.play == 0:\n raise Exception.TTTException(2)\n elif self.board[posY][posX] is not ' ':\n raise Exception.TTTException(1)\n else:\n raise Exception.TTTException(3)", "def get_player_move(board, player_token):\n\n # Make use of the raw_input to ask the user a question. Make sure only\n # valid inputs work (use is_space_free function). The question should be\n # asked until the player gives a correct place for their token (a while\n # loop can help do that).", "def spawn(self, tile_y, tile_x):\n self.tile_y = tile_y #y position on grid\n self.tile_x = tile_x #x position on grid\n self.level.frame[self.tile_y][self.tile_x] == 'O' #token for presence of spawned sprite\n self.y = self.tile_y * TILESIZE #y position on screen\n self.x = self.tile_x * TILESIZE #x position on screen", "def mark(board, player, row, col):\n \n board[row][col] = player\n grid_rowA = board[0]\n grid_rowB = board[1]\n grid_rowC = board[2]\n print_board(grid_rowA, grid_rowB, grid_rowC)\n\n return board, grid_rowA, grid_rowB, grid_rowC", "def display_token(game):\n display = game.get_board().get_display()\n for player in game.get_players():\n token = player.getTokenName()\n position = player.getPosition()\n draw_token(display, token, position)", "def makeMove(self, pos, player):\n self.board[pos] = player", "def play_move(token, current_board):\n row_index = current_board.retrieve_next_empty_row_index(token.column)\n current_board.place_token(row_index, token.column, token.color)\n print('\\npost move in play move', current_board)\n return current_board", "def place_piece(board, x, y, player):\n can_place = isfree(board, x, y)\n if can_place:\n board[(x,y)] = player\n return can_place", "def _insert(self, player, row, column):\n self._rows[row][column] = player", "def place_player(self, gridpos=(0,0)):\n x,y = gridpos\n if x < 0 or x > self.gridsize-1 or y < 0 or y > self.gridsize-1:\n # Restrict movement to within the grid\n return\n tile = self.grid[x][y]\n if tile:\n if type(tile) == Wall:\n # Don't move if the square is a wall\n return\n elif type(tile) == Teleporter:\n State.teleport = tile.destination\n return\n elif type(tile) == Key and tile.visible:\n tile.pick_up()\n elif type(tile) == Door and tile.locked:\n # Door is locked, don't move\n return\n old_x,old_y = State.player.gridpos\n State.player.gridpos = gridpos\n State.player.pos = self.calc_pos(gridpos)\n self.grid[old_x][old_y] = None\n self.grid[x][y] = State.player", "def move(self, row: int, col: int, player: int) -> int:\n self.board[row][col] = player\n return player if self.is_win(player) else 0", "def mark_square(self, column, row, player):\n\n player.make_move(self.board, row, column)", "def set_player_position(self, position):", "def place_player(self, player):\n # -- get previous position of the item\n 
pos = np.argwhere(self.matrix == player.item_value)\n if len(pos) != 0:\n # -- delete the item\n pos = pos[0]\n self.matrix[pos[0], pos[1]] = MapItemList.EMPTY\n\n # -- place the item\n self.place(player.box_x, player.box_y, player.item_value)", "def play_game(self, player):\n pos = input(f\"Player {player}, please enter your desired position: \").strip()\n while not self.is_valid_input(pos):\n pos = input(\"Please enter a valid position: \").strip()\n self.board[int(pos)] = player\n self.print_board()", "def _mark_board_for_user(self, user_command):\n # Make sure the user has entered valid coordinates for her mark\n # and if so, mark the board for the user\n user_command_parts = user_command.split(',')\n if len(user_command_parts) == 2:\n row = int(user_command_parts[0].strip()) - 1\n col = int(user_command_parts[1].strip()) - 1\n valid_row_range = xrange(self.board.row_count)\n valid_col_range = xrange(self.board.column_count)\n if row in valid_row_range and col in valid_col_range:\n # Make sure a mark does not already exist at the coordinates \n if self.board.matrix[row][col] == self.board.CELL_NO_PLAYER:\n # Mark the board at the coordinate for the player\n self.board.matrix[row][col] = self.user_player_id\n # End turn and allow the computer player to take a turn\n self.current_player_id = self.computer_player_id", "def place_token(self, token, verbose=False):\n chosen_place_name = self.mind.choose_place_name_to_put_token()\n for place_card in self.game.board:\n if place_card.name == chosen_place_name:\n token.place = place_card\n if verbose:\n logger.info('{} puts the {} token on {}'.format(self.name,\n token.name,\n place_card.name))\n break" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Request access to the public server, returning the port and IP address.
def _server():
    url = 'https://104.131.128.139/tcp'
    headers = {'X-Auth-Key': 'abc', 'X-Auth-Secret': 'abc'}
    try:
        return requests.get(url, headers=headers, verify=False).json()
    except requests.exceptions.ConnectionError:
        logging.error('server is unreachable')
        sys.exit(1)
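A hedged usage sketch; the exact keys in the returned JSON are an assumption based only on the description above ("port and IP address"), not on any documented response format.

info = _server()
# e.g. {'ip': '104.131.128.139', 'port': 62231} -- illustrative values only
print('connect to {0}:{1}'.format(info.get('ip'), info.get('port')))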
[ "def page1(self):\n result = request101.GET('/whatIsMyIPAddress')\n return result", "def page66(self):\n self.token__ = \\\n timestr()\n result = request6601.GET('/whatIsMyIPAddress' +\n '?_=' +\n self.token__)\n return result", "def check_access(self, uid, host, port):\n pass", "def port(self):\n\n return self.server_address[1]", "def mondrian_server_internal_url():\n return 'http://127.0.0.1:8080'", "def get_ip_publica(self):\n \n self.ip_origen = urllib.request.urlopen('http://ip.42.pl/raw').read().decode('utf-8')", "def connect_public():\n return connect(\"public\")", "def get_server_access(self, server_name):\n server = self.servers.get(server_name)\n ui.debug(\"access for\", server_name, \":\", server)\n if not server:\n return None\n return server.access", "def get_addr(self):\n return self._ip + ':' + str(self._port)", "def show_ip(): #TODO\n pass", "def server_host(self):\n return self.server['host']", "def unit_public_ip(self):\n return self.environment.unit_get('public-address')", "def getPublicIp():\n try:\n data = str(urlopen('http://checkip.dyndns.com/').read())\n #data = '<html><head><title>Current IP Check</title></head><body>Current IP Address: 65.96.168.198</body></html>\\r\\n'\n externalip = re.compile(r'Address: (\\d+\\.\\d+\\.\\d+\\.\\d+)').search(data).group(1)\n\n except:\n externalip = None\n raise\n finally:\n return externalip", "def get_server_addr(self):\n raise NotImplementedError", "def get_ip():\n try:\n r = requests.get('https://api.ipify.org').text\n return r\n except ConnectionError:\n return 'No Connection'", "def __init__(self, ip, port):\n self.ip = ip\n self.port = port\n \n # TCP/IP connection\n self.connect()", "def get_server_url(): # type: () -> str\n\n server = '127.0.0.1'\n if 'DOCKER_HOST' in os.environ:\n server = urlparse.urlparse(os.environ['DOCKER_HOST']).hostname\n return 'http://{}:{}/'.format(server, PORT)", "def get_public_ip(self):\n try:\n self.meta_data = IcsMeta.get_meta_data()\n return self.meta_data[\"public-ipv4\"]\n except KeyError:\n return None", "def viewip(self) :\n\t\ttry :\n\t\t\treturn self._viewip\n\t\texcept Exception as e:\n\t\t\traise e" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Example for fetching each profile that is linked to a Bungie.net account, then finding the data that you last played on each profile.
async def main(): myAccount = await client.get_user("MembershipID") # 4611686018464828555 for example for profile in myAccount.destinyMemberships: print("{0}: {1}: {2}".format(profile.displayName, conversionDict[profile.membershipType], profile.membershipId)) myProfile = await client.get_profile(profile.membershipId, profile.membershipType, components=["Profiles"]) print(myProfile.profile.data.dateLastPlayed) client.close()
[ "def get_profiles(remote):\n\n profiles = remote.find_profile({\"name\": \"edu-*-frontend\"})\n my_profiles = []\n \n for profile in profiles:\n my_profiles.append(profile[:(len(profile)-9)])\n\n return my_profiles", "def fetch_qarnot_profiles():\n global script_dialog\n\n # disable submit button\n script_dialog.SetEnabled(\"SubmitButton\", False)\n # display loading message\n script_dialog.SetItems(\"QarnotProfileCombo\", [\"Loading profiles...\"])\n profile_list = script_dialog.findChild(\n ThinkboxUI.Controls.Scripting.ComboControl.ComboControl,\n \"QarnotProfileCombo\",\n )\n # set italic font\n profile_list_font = profile_list.font()\n profile_list_font.setItalic(True)\n profile_list.setFont(profile_list_font)\n\n # fetch profiles\n q_render_deadline.refresh_connection()\n qarnot_profiles = q_render_deadline.get_available_profiles()\n\n return qarnot_profiles", "def list_profiles(profilejson):\n listofprofiles = ''\n for nextprofile in profilejson['items']:\n if listofprofiles == '' :\n listofprofiles = nextprofile['name']\n else:\n listofprofiles = listofprofiles + \"; \" + nextprofile['name']\n\n return(listofprofiles)", "def fetch_all_user_profiles():\n users_queryset = User.objects.all()\n profiles_queryset = []\n for user in users_queryset:\n profiles_queryset.append({\n 'first_name': user.first_name,\n 'last_name': user.last_name,\n 'last_login': user.last_login,\n 'date_joined': user.date_joined\n })\n return profiles_queryset", "def search_profile(cls, param):\n\n for profile in cls.profile_list:\n while (profile.profile_name == param) or (profile.profile_username == param) or (profile.profile_email == param):\n return profile", "def listProfiles(args=None):\n com = comar.Link() #communicating with comar deamon\n com.localize() #set language for translated replies\n links = queryLinks(com)\n profiles = queryProfiles(com)\n \n profiles.sort(key=lambda x: x.devname + x.name) #profiles are sorted by device_name + name\n \n name_title = \"\" # _(\"Profile\")\n state_title = \"\" # _(\"Status\")\n addr_title = \"\" # _(\"Address\")\n \n #name_size and state_size are set to the maximum length of name/state of profiles\n # -for ljust operations in output format-\n name_size = max(max(map(lambda x: len(x.name), profiles)), len(name_title))\n state_size = max(max(map(lambda x: len(x.get_state()), profiles)), len(state_title))\n \n cstart = \"\"\n cend = \"\"\n link_list = links.items()\n link_list.sort(key=lambda x: x[1].name)\n profile_names_list=[]\n for script, link in link_list:\n link_profiles = filter(lambda x: x.script == script, profiles)\n if len(link_profiles) > 0:\n print \"%s:\" % link.name\n for profile in link_profiles:\n line = \" %s%s%s | %s%s%s | %s%s%s\" % (\n cstart,\n profile.name.ljust(name_size),\n cend, cstart,\n profile.get_state().center(state_size),\n cend, cstart,\n profile.get_address(),\n cend\n )\n print line\n profile_names_list.append(profile.name) \n return profile_names_list # returns all profile_names defined on comp.", "def get_profiles(self, company, title, num_profiles):\n\n # Checks DB first and then scrapes from linkedin\n logger.info(\"Looking for profiles that's worked at {} as {}\".format(company, title))\n profiles = list(set(Profile.objects.filter(experience__company__icontains=company, \n experience__title__icontains=title)))\n if not profiles:\n exclude_usernames = [p.username for p in profiles]\n logger.info(\"Found {} existing profiles in database, looking for {} more.\"\n .format(len(profiles), max(0, num_profiles - 
len(profiles))))\n\n total_num_profiles_collected = len(profiles)\n self.scraper.get_profiles(company, title, \n (num_profiles - total_num_profiles_collected), exclude_usernames)\n\n profiles = list(set(Profile.objects.filter(experience__company__icontains=company, \n experience__title__icontains=title)))\n logger.info(\"Found a total of {} profiles\".format(len(profiles)))\n return profiles", "def get_profiles(profileurl):\n return (get_f5json(localhost_to_hostname(profileurl)))", "def profiles():\n profs = UserProfile.query.order_by(UserProfile.lastname).all()\n return render_template('profiles.html', users=profs)", "def cli(ctx, **kwds):\n profile_names = profiles.list_profiles(ctx, **kwds)\n print(profile_names)", "def profile_list():\n conf = api.Config()\n\n for profile in conf.profile_sections():\n data = conf._profile_general(profile)\n\n try:\n _print_profile(profile, data)\n except KeyError:\n print(\n log.format(\n f\"Invalid or incomplete profile '{profile}'\",\n color=\"red\",\n bold=False,\n )\n )", "async def com_fetch_unclaimed_profiles(ctx):\n\n await ctx.channel.trigger_typing()\n\n ph.fetch_unclaimed_profiles()\n\n if len(ph.unclaimed_profiles) == 0:\n await ctx.send(\n \"There are no unclaimed profiles :critfail:\"\n )\n\n else:\n await ctx.send(\n \"The current unclaimed profiles are:\\n\" + \"```\" +\n '\\n'.join(ph.unclaimed_profiles_list) + \"```\"\n )", "def get_friends_profiles(self):\n friends = Friend.objects.friends(self)\n profiles = [get_object_or_404(UserProfile, username=friend.username) for friend in\n friends]\n return profiles", "def getProfile(self, uid):\n\n url = base_url + uid + \"?fields=id, name, gender, relationship_status, \\\npicture.height(200).width(200), link, statuses.limit(1).fields(message,updated_time) \\\n&access_token=\" + self.accessToken\n try:\n data = urllib.urlopen(url).read()\n except URLError:\n try:\n data = urllib.urlopen(url).read()\n except URLError:\n return None \n return data", "def get_profile_name_image_and_bio(self):\n print() \n print('===== fetching your data =====') \n \n bot = self.bot\n bot.get(f\"https://twitter.com/{self.username}/photo\") \n time.sleep(2)\n \n # fetching profile image\n img = bot.find_element_by_xpath('//*[@id=\"layers\"]/div[2]/div/div/div/div/div/div[2]/div[2]/div[1]/div/div/div/div/div/img')\n src = img.get_attribute('src')\n # download the image\n urllib.request.urlretrieve(src, \"user_photo.png\")\n img_hash = str(imagehash.average_hash(Image.open('user_photo.png')))\n \n bot.get(f\"https://twitter.com/{self.username}\") \n \n time.sleep(2)\n # fetching profile name\n profile_name = bot.find_element_by_xpath(\"/html/body/div/div/div/div[2]/main/div/div/div/div[1]/div/div[2]/div/div/div[1]/div/div[2]/div/div/div[1]\") \n print(f\"your profile name: {profile_name.text}\")\n \n # fetching profile bio\n profile_bio = bot.find_element_by_xpath(\"/html/body/div/div/div/div[2]/main/div/div/div/div[1]/div/div[2]/div/div/div[1]/div/div[3]/div/div\")\n # couldn't remove instances where \\n (newline) exists\n # profile_bio = profile_bio.text.replace(r'\\n', '')\n profile_bio = profile_bio.text\n print(f\"your profile bio: {profile_bio}\")\n \n # saving to db for future references\n me = Me(username=self.username, bio=profile_bio, image_hash=img_hash,\n profile_name=profile_name.text)\n session.add(me)\n session.commit()\n \n print()\n print('fetched your profile data and saved into db')\n time.sleep(2)", "def iterate_profiles(browser, low, high):\n logging.info(\"iterating profiles from %d to %d...\", 
low, high)\n for i in range(low, high):\n #widgets = [progressbar.Percentage(), progressbar.Bar()]\n #bar = progressbar.ProgressBar(widgets=widgets, min_value=low, max_value=high).start()\n scheme = \"https:\"\n path = \"//mytrinnet.trincoll.edu/s/1490/index-3Col.aspx\"\n query = \"?sid=1490&gid=1&pgid=275&cid=735&mid=\" + str(i)\n fragment = \"#/PersonalProfile\"\n url = scheme + path + query + fragment\n browser.visit(url)\n time.sleep(0.8)\n if not is_valid_person(high - low, parse_HTML_to_person(i,\n browser.html)):\n logging.critical(\"breaking at %d\", i)\n return i + 1\n log_data()\n return high", "def get_next_contacts(self, current):\n \n offset = 0\n new_contacts = 0\n current_contacts = []\n num_contacts_in_last_query = Crawler.CONTACTS_PER_PROFILE\n while num_contacts_in_last_query == Crawler.CONTACTS_PER_PROFILE:\n # HTTP request and update cookies for next calls\n print \"\\tGetting contacts list: %s\" % Crawler.PROFILE_CONTACTS.format(id=current[\"id\"], per_profile=Crawler.CONTACTS_PER_PROFILE, offset=offset)\n contact_contacts_info = requests.get(Crawler.PROFILE_CONTACTS.format(id=current[\"id\"], per_profile=Crawler.CONTACTS_PER_PROFILE, offset=offset), cookies=self.cookies)\n self.cookies = contact_contacts_info.cookies\n\n # Update offset\n offset += Crawler.CONTACTS_PER_PROFILE\n \n print \"\\tParsing data\"\n json_content = json.loads(contact_contacts_info.text.replace(\"\\\\\\\"\", \"\")) # Quick trick to avoid problems with &quot;\n try:\n possible_new_contacts = json_content[\"content\"][\"connections\"][\"connections\"]\n except KeyError, e:\n print \"\\tERROR > JSON file: no such content.connections.connections\"\n #print \"\\tERROR > %s\" % contact_contacts_info.text.encode('utf-8')\n break\n except ValueError, e:\n print \"\\tERROR > JSON file: no such content.connections.connections\"\n #print \"\\tERROR > %s\" % contact_contacts_info.text.encode('utf-8')\n break\n \n num_contacts_in_last_query = len(possible_new_contacts)\n\n for sub_contact in possible_new_contacts:\n # Get data from relevant fields\n # On failure: continue to next contact\n try:\n headline = unicode(sub_contact[\"headline\"]) # JSON can output: integers, None, strings, doubles..\n memberID = int(sub_contact[\"memberID\"])\n distance = int(sub_contact[\"distance\"])\n full_name = unicode(sub_contact[\"fmt__full_name\"])\n except KeyError, e:\n print \"\\tERROR > JSON file: contact details - %s\" % e\n #print \"\\tERROR > %s\" % sub_contact.encode('utf-8')\n continue\n except ValueError, e:\n print \"\\tERROR > JSON file: contact details - %s\" % e\n #print \"\\tERROR > %s\" % sub_contact.encode('utf-8')\n continue\n except TypeError, e:\n print \"\\tERROR > JSON file: contact details - %s\" % e\n #print \"\\tERROR > %s\" % sub_contact.encode('utf-8')\n continue\n\n # Try to add the contact to the list to be tested\n if self.add_to_be_tested({\"id\": memberID, \"details\": \"%s [%s][distance=%d]\" % (full_name, headline.lower(), distance), \"fullname\": full_name, \"headline\": headline, \"depth\": current[\"depth\"] +1}):\n new_contacts += 1\n return new_contacts", "def extract_profile(self):\n\n log.info(\"Extract profile from account page\")\n\n if not hasattr(\"self\", \"soup_profile\"):\n self.get_account_soup()\n soup = self.soup_profile\n\n self.account[\"profile\"][\"first_name\"] = self.from_soup_get_profile_first_name(soup)\n self.account[\"profile\"][\"last_name\"] = self.from_soup_get_profile_last_name(soup)\n self.account[\"profile\"][\"user_name\"] = 
self.from_soup_get_profile_user_name(soup)\n self.account[\"profile\"][\"date_of_birth\"] = self.from_soup_get_profile_date_of_birth(soup)\n self.account[\"profile\"][\"gender\"] = self.from_soup_get_profile_gender(soup)\n self.account[\"profile\"][\"phone\"] = self.from_soup_get_profile_phone_number(soup)\n self.account[\"profile\"][\"email\"] = self.from_soup_get_profile_email(soup)\n self.account[\"profile\"][\"member_since\"] = self.from_soup_get_profile_member_since(soup)\n self.account[\"profile\"][\"bike_angel_since\"] = self.from_soup_get_profile_bike_angel_since(soup)\n\n self.account[\"trips\"][\"lifetime\"] = self.from_soup_get_lifetime_stats(soup)\n\n self.account[\"my_statistics\"][\"number_of_trips\"] = self.from_soup_get_lifetime_stats_number_of_trips(soup)\n self.account[\"my_statistics\"][\"total_usage_time\"] = self.from_soup_get_lifetime_stats_total_usage_time(soup)\n self.account[\"my_statistics\"][\"distance_traveled\"] = self.from_soup_get_lifetime_stats_distance_traveled(soup)\n self.account[\"my_statistics\"][\"gas_saved\"] = self.from_soup_get_lifetime_stats_gas_saved(soup)\n self.account[\"my_statistics\"][\"co2_reduced\"] = self.from_soup_get_lifetime_stats_co2_reduced(soup)\n\n self.account[\"last_trip\"][\"date\"] = self.from_soup_get_last_trip_dates(soup)\n self.account[\"last_trip\"][\"station\"] = self.from_soup_get_last_trip_stations(soup)\n self.account[\"last_trip\"][\"trip_time\"] = self.from_soup_get_last_trip_time(soup)\n\n self.account[\"bike_key\"][\"number\"] = self.from_soup_get_bike_key_number(soup)\n self.account[\"bike_key\"][\"status\"] = self.from_soup_get_bike_key_status(soup)\n\n self.account[\"membership_status\"][\"current\"][\"type\"] = self.from_soup_get_membership_current_type(soup)\n self.account[\"membership_status\"][\"current\"][\"status\"] = self.from_soup_get_membership_current_status(soup)\n self.account[\"membership_status\"][\"current\"][\"expiration\"] = self.from_soup_get_membership_current_expiration(\n soup\n )\n\n self.account[\"membership_status\"][\"next\"][\"type\"] = self.from_soup_get_membership_next_type(soup)\n self.account[\"membership_status\"][\"next\"][\"status\"] = self.from_soup_get_membership_next_status(soup)\n self.account[\"membership_status\"][\"next\"][\"start\"] = self.from_soup_get_membership_next_start(soup)\n self.account[\"membership_status\"][\"next\"][\"expiration\"] = self.from_soup_get_membership_next_expiration(soup)\n\n self.account[\"billing_summary\"][\"next_billing_date\"] = self.from_soup_get_billing_summary_next_billing_date(\n soup\n )\n self.account[\"billing_summary\"][\"current_balance\"] = self.from_soup_get_billing_summary_current_balance(soup)\n\n self.account[\"billing_information\"][\"postal_code\"] = self.from_soup_get_billing_info_postal_code(soup)\n\n if self.ba:\n # these should work because try/except but we'll be safe\n log.info(\"Extracting bikeangels from profile\")\n self.account[\"my_statistics\"][\"bike_angels_current\"] = self.from_soup_get_ba_points_current(soup)\n self.account[\"my_statistics\"][\"bike_angels_annual\"] = self.from_soup_get_ba_points_annual(soup)\n self.account[\"my_statistics\"][\"bike_angels_lifetime\"] = self.from_soup_get_ba_points_lifetime(soup)\n\n self.account[\"last_trip\"][\"bike_angels_points\"] = self.from_soup_get_last_trip_bike_angels_points(soup)\n\n log.debug(self.account)\n return self.account", "def get_profiles(config):\n\n basepath = os.path.dirname(__file__)\n profiles_path = 
os.path.join(basepath,config['input_path'],'scrape_profiles.csv')\n log_output = os.path.join(basepath,config['log_path'],'outstanding_profiles.csv')\n output_dir = os.path.join(basepath,config['output_path'],'profiles')\n profiles_df = pd.read_csv(profiles_path)\n freq = 200\n max_rows = profiles_df.shape[0] + 1\n\n for start_row in range(0, max_rows, freq):\n end_row = min(start_row + freq, max_rows)\n\n output_path = output_dir + '/profiles_{time}_{s_row}_{e_row}.csv'\\\n .format(s_row=start_row, e_row=end_row, time=datetime.now().strftime(\"%H%M%S\"))\n\n cmd = 'scrapy runspider ' + basepath + '/spiders/amazon_profiles.py -o {output_path} '\\\n '-a config=\"{profiles_path},{log_file},{s_row},{e_row},main\"' \\\n .format(output_path=output_path, profiles_path=profiles_path, log_file=log_output, s_row=start_row, e_row=end_row, time=datetime.now().strftime(\"%H%M%S\"))\n call(cmd, shell=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Predict the target value for `example`. Consider each possible target value and choose the most likely one, looking at each attribute independently.
def predict(self, example):
    possible_values = self.dataset.values[self.dataset.target]

    def class_probability(targetval):
        # removed ',1' from the arglist as product takes a list of vals to mult
        return product([self.P(targetval, a, example[a]) for a in self.dataset.inputs])

    return argmax(possible_values, class_probability)
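A hedged sketch of the two helpers the snippet leans on; the exact definitions are assumptions consistent only with how they are called above (product multiplies a list of numbers, argmax picks the element with the highest key value).

import functools
import operator

def product(numbers):
    # multiply a list of numbers together
    return functools.reduce(operator.mul, numbers, 1)

def argmax(seq, key):
    # element of seq that maximises key(element)
    return max(seq, key=key)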
[ "def predict(self, test_example):\r\n\r\n probs = self.features[0].get_probs(test_example[0])\r\n for i, feature in enumerate(test_example):\r\n probs *= self.features[i].get_probs(feature)\r\n total_examples = sum(self.total)\r\n probs *= self.total\r\n return CLASS_LABELS[np.argmax(probs)]", "def predict(model, X_test):", "def predict(self):\n\t\treturn self.y_pred", "def predict(self, X):\n return predicted_value", "def predict(self, src) -> Tuple[label, confidence]:\n ...", "def predict(self, test_data, predict_proba = False, pred_class_and_proba = False):\n pass", "def test_predict():\n recommender = SLIM(alpha=0.1, l1_ratio=1e-3, seed=0)\n utils.test_binary_recommend_ml100k(recommender, 0.1)", "def predict(features, weights):\n\n _, activations = forwardPropagation(features, weights)\n return np.argmax(activations[-1],axis=0)", "def predict_rent():\n\n test_X, test_Y, model = train_model()\n predicted_values = model.predict(predicted_values)\n return test_X.as_matrix(), test_Y.as_matrix(), predicted_values", "def choose_attribute(self, attrs, examples):\n return argmax_random_tie(attrs, key=lambda a: self.information_gain(a, examples))", "def predictInstance(self, fv):\n score = self.bias + dot(fv, self.lw) \n #score = random.random() - 0.5\n if score > 0:\n label = 1\n else:\n label = -1\n return (label, score)", "def predict(self, state, head):\n return self.models[head].predict(state)", "def predict(self, x):\n res = 0\n for arbre in self.arbres:\n res += arbre.predict(x)\n if res >= 0:\n return 1\n return -1", "def naive_bayes_predict(data, model):\n # TODO: INSERT YOUR CODE HERE FOR USING THE LEARNED NAIVE BAYES PARAMETERS\n # TO CLASSIFY THE DATA\n\n d, n = data.shape #features = d / examples = n ; #row/#col\n num_classes = model['p(y)'].size\n ddata = data\n idata = 1 - ddata #inverted probability\n fmodel = 1 - model['p(x|y)'] #inverted cond probability\n\n predicted_data = np.zeros((num_classes,n))\n\n for i in range(num_classes):\n \n prior = model['p(y)'][i]\n cond = model['p(x|y)'][i, :] #length d vector\n \n fcond = fmodel[i, :] #false conditional prob\n \n # Error bound checking for np.log for a zero argument to avoid the Runtime Warning#######\n logPrior = 0\n if (prior <= 0):\n logPrior = -999999999\n else:\n logPrior = np.log(prior)\n\n logCond = np.zeros(len(cond))\n flogCond = np.zeros(len(fcond))\n for j in range(cond.shape[0]):\n if cond[j] <= 0:\n logCond[j] = -999999999\n else:\n logCond[j] = np.log(cond[j])\n flogCond[j] = np.log(fcond[j])\n #########################################################################################\n \n # model class by feat and data is feat by examples\n # dot product cancels out feat so you are given class by example\n\n #i by exmaple aka 1 by n\n sum_scores = logCond.dot(ddata) + flogCond.dot(idata) + logPrior \n\n predicted_data[i, :] = sum_scores\n\n max_scores = predicted_data.argmax(0) #max within each col, where each col is an example; # 1 x n\n return max_scores", "def predict_probability(data, weights):\n pred = None\n ## YOUR CODE HERE\n ## END YOUR CODE\n return pred[...,None]", "def predict(self, observation, action):\n return self.sess.run([self.obs_output, self.reward_output], \n feed_dict = {self.obs_input: observation, \n self.act_input: action})", "def predict(self, X):\n\n # List with size X.shape[0] and each value is a dict too,\n # Ex: [{0:0.2, 1:0.7}, {1:0.3, 2:0.5}]\n list_label_instance = []\n\n # For each classifier in self.models, predict the labels for X\n for model in self.models:\n clf = 
model.clf\n pred = clf.predict(X)\n weight = model.weight\n for i, label in enumerate(pred.tolist()):\n if i == len(list_label_instance): # maintain the dictionary\n list_label_instance.append({label: weight})\n else:\n try:\n list_label_instance[i][label] += weight\n except:\n list_label_instance[i][label] = weight\n\n predict_weighted_voting = []\n for dic in list_label_instance:\n max_value = max(dic.items(), key=operator.itemgetter(1))[0] # return the key of max value in a dict\n predict_weighted_voting.append(max_value)\n\n return predict_weighted_voting", "def predict(self):\n raise NotImplementedError(\"Child class must implement this method\")", "def predict(self,unlabeled):\r\n y_pred = unlabeled['label']\r\n if(self.main_transformer!=None):\r\n X,y = self.main_transformer.transform(unlabeled)\r\n y_pred = self.model_main.predict(X)\r\n pred_probs = self.model_main.predict_proba(X)\r\n for i,probs in enumerate(pred_probs):\r\n if(max(probs)<self.alpha):\r\n y_pred[i] = 'Unsorted'\r\n unsorted = unlabeled.loc[y_pred == 'Unsorted']\r\n if(self.small_transformer!=None and len(unsorted)!=0):\r\n X,y = self.small_transformer.transform(unsorted)\r\n y = self.model_small.predict(X)\r\n pred_probs = self.model_small.predict_proba(X)\r\n for i,probs in enumerate(pred_probs):\r\n if(max(probs)<self.beta):\r\n y[i] = 'Unsorted'\r\n y_pred[y_pred=='Unsorted'] = y\r\n return y_pred", "def predict(y, x, m, options=\"\"):\n\n def info(s):\n print(s)\n\n if scipy and isinstance(x, np.ndarray):\n x = np.ascontiguousarray(x) # enforce row-major\n elif sparse and isinstance(x, sparse.spmatrix):\n x = x.tocsr()\n elif not isinstance(x, (list, tuple)):\n raise TypeError(\"type of x: {0} is not supported!\".format(type(x)))\n\n if (not isinstance(y, (list, tuple))) and (not (scipy and isinstance(y, np.ndarray))):\n raise TypeError(\"type of y: {0} is not supported!\".format(type(y)))\n\n predict_probability = 0\n argv = options.split()\n i = 0\n while i < len(argv):\n if argv[i] == '-b':\n i += 1\n predict_probability = int(argv[i])\n elif argv[i] == '-q':\n info = print_null\n else:\n raise ValueError(\"Wrong options\")\n i+=1\n\n solver_type = m.param.solver_type\n nr_class = m.get_nr_class()\n nr_feature = m.get_nr_feature()\n is_prob_model = m.is_probability_model()\n bias = m.bias\n if bias >= 0:\n biasterm = feature_node(nr_feature+1, bias)\n else:\n biasterm = feature_node(-1, bias)\n pred_labels = []\n pred_values = []\n\n if scipy and isinstance(x, sparse.spmatrix):\n nr_instance = x.shape[0]\n else:\n nr_instance = len(x)\n\n if predict_probability:\n if not is_prob_model:\n raise TypeError('probability output is only supported for logistic regression')\n prob_estimates = (c_double * nr_class)()\n for i in range(nr_instance):\n if scipy and isinstance(x, sparse.spmatrix):\n indslice = slice(x.indptr[i], x.indptr[i+1])\n xi, idx = gen_feature_nodearray((x.indices[indslice], x.data[indslice]), feature_max=nr_feature)\n else:\n xi, idx = gen_feature_nodearray(x[i], feature_max=nr_feature)\n xi[-2] = biasterm\n label = liblinear.predict_probability(m, xi, prob_estimates)\n values = prob_estimates[:nr_class]\n pred_labels += [label]\n pred_values += [values]\n else:\n if nr_class <= 2:\n nr_classifier = 1\n else:\n nr_classifier = nr_class\n dec_values = (c_double * nr_classifier)()\n for i in range(nr_instance):\n if scipy and isinstance(x, sparse.spmatrix):\n indslice = slice(x.indptr[i], x.indptr[i+1])\n xi, idx = gen_feature_nodearray((x.indices[indslice], x.data[indslice]), 
feature_max=nr_feature)\n else:\n xi, idx = gen_feature_nodearray(x[i], feature_max=nr_feature)\n xi[-2] = biasterm\n label = liblinear.predict_values(m, xi, dec_values)\n values = dec_values[:nr_classifier]\n pred_labels += [label]\n pred_values += [values]\n\n if len(y) == 0:\n y = [0] * nr_instance\n ACC, MSE, SCC = evaluations(y, pred_labels)\n\n if m.is_regression_model():\n info(\"Mean squared error = %g (regression)\" % MSE)\n info(\"Squared correlation coefficient = %g (regression)\" % SCC)\n else:\n info(\"Accuracy = %g%% (%d/%d) (classification)\" % (ACC, int(round(nr_instance*ACC/100)), nr_instance))\n\n return pred_labels, (ACC, MSE, SCC), pred_values" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> _int('5') 5 >>> _int('Abacate') nan
def _int(v): try: return int(v) except Exception: return float("nan")
[ "def _to_int(string):\n if string == \"\":\n return 0\n return int(string)", "def conv_int(i):\n return int(i) if i.isdigit() else i", "def int_conv(string):\n try:\n intstring=int(string)\n except:\n intstring=999\n return intstring", "def _to_int( self, str ):\n tmp = 1\n try:\n tmp = int( str)\n except ValueError:\n pass\n\n return tmp", "def int_literals_to_int(t):\n try:\n if str(t) == str(int(t)):\n return int(t) # Example case: t == \"42\"\n else:\n return t # Example case: t == \"00001234\"\n except ValueError as e:\n return t # Example case: t == \"some text\" or t == \"000012ab\"", "def _tryint(v):\r\n try:\r\n return int(v)\r\n except:\r\n return 0", "def str2num(s) :\n try: return int(s)\n except exceptions.ValueError:\n try: return float(s)\n except exceptions.ValueError: return( s )", "def int_or_zero(s):\n return 0 if not s else int(s)", "def parseInt(text):\n return int(text or 0)", "def number(string):\n try:\n return int(string)\n except (ValueError, OverflowError):\n # Unclear on why sometimes it's overflow vs value error, but this should work.\n return long(string)", "def num(s):\n\ttry:\n\t\treturn int(s)\n\texcept:\n\t\treturn float(s)", "def _number(s):\n try:\n n = int(s)\n return n\n except ValueError:\n pass\n try:\n n = float(s)\n return n\n except ValueError:\n raise GlifLibError(\"Could not convert %s to an int or float.\" % s)", "def env_str_to_int(varname, val):\n try:\n return int(val)\n except Exception:\n raise ValueError(\"Invalid value for \" + repr(varname) +\n \" should have a decimal integer value but is \" + repr(str(val)))", "def number_type(text):\n return int(text)", "def test_string_to_integer(self):\n \n # Test something which is meant to throw an error\n result = app.string_to_integer(\"5.0\")\n assert result == \"Error.\"\n \n # Test something which is meant to work successfully\n result = app.string_to_integer(\"900\")\n assert result == 900", "def parse_int(s):\n try:\n return int(s)\n except ValueError:\n return None", "def asNumeral(value):", "def str_to_int(s: str) -> int:\n def len_of_int(n: int) -> int:\n \"\"\"This function counts the number of digits in an integer.\n\n :param n: integer.\n :type n: int.\n :returns int.\n \"\"\"\n n_copy = n\n result = 0\n while n_copy > 0:\n result += 1\n n_copy = n_copy // 10\n return result\n if len(s) == 1:\n return ord(s[0])\n elif len(s) == 0:\n return 0\n else:\n add = ord(s[len(s) - 1])\n int_s = str_to_int(s[:len(s) - 1]) * (10 ** len_of_int(add)) + add\n return int_s", "def integerize(self, v):\n return v if type(v) == int else int(v)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> _float('5') 5.0 >>> _float('Abacate') nan
def _float(v): try: return float(v) except Exception: return float("nan")
[ "def FloatConv(string):\n try:\n v = float(string)\n except ValueError:\n v = numpy.NaN\n return v", "def nan(self):\r\n\t\treturn float(\"nan\")", "def _plain_float(value: str) -> float:\n if _plain_float_re.fullmatch(value) is None:\n raise ValueError\n\n return float(value)", "def parse_float(word):\n try:\n return float(word)\n except ValueError:\n return 0.0", "def parse_float(s):\n try:\n f = float(s)\n except ValueError:\n return None\n return None if math.isnan(f) else f", "def Working_with_Infinity_and_NaNs():\n a = float(\"inf\")\n print(a) # inf\n b = float(\"-inf\")\n print(b) # -inf\n c = float(\"nan\")\n print(c) # nan", "def _parse_float(state: str) -> float:\n fstate = float(state)\n if not math.isfinite(fstate):\n raise ValueError\n return fstate", "def posfloat(string):\n try:\n value = float(string)\n except ValueError as err:\n raise argparse.ArgumentTypeError(err)\n if value < 0.0:\n raise argparse.ArgumentTypeError(f\"invalid value {string}: value must \"\n \"be greater than or equal to 0.0\")\n return value", "def read_float(value):\n\tif not isinstance(value, str):\n\t\treturn value\n\tif not value:\n\t\treturn 0.0\n\tstripped = _strip_number(value)\n\treturn float(stripped)", "def get_sane_double(s):\n try:\n float(s)\n return float(s)\n except ValueError:\n return 0.0", "def is_float(s: str) -> bool:\n\n try:\n out = float(s)\n except:\n return False\n return True", "def _get_sanitized_float(self, input_float):\n if input_float == float('inf'):\n return _INF_FLOAT_VALUE\n elif input_float == float('-inf'):\n return -_INF_FLOAT_VALUE\n elif math.isnan(input_float):\n return None\n else:\n return input_float", "def get_float(string):\n if not string:\n return float(0)\n try:\n return float(string)\n except:\n try:\n cleaned = clean_number(string)\n floated = re.findall(r'[\\d.]+', cleaned)[0]\n return float(floated)\n except:\n pass\n try:\n string = string[:clean_number(string).find('.')]\n return float(filter(type(string).isdigit, string))\n except:\n pass\n try:\n return float(filter(type(string).isdigit, string))\n except:\n return float(0)", "def asFloatOrNone(val):\n\n # check for NaN first in case ieee floating point is in use\n # (in which case float(val) would return something instead of failing)\n\n if hasattr(val, \"lower\") and val.lower() in (\"nan\", \"?\"):\n return None\n else:\n return float(val)", "def is_float(string):\n try:\n return True if float(string) != 0 else False\n except ValueError:\n return False", "def parse_float(string):\r\n try:\r\n #Attempt parsing string to float\r\n return float(string)\r\n except ValueError:\r\n return None", "def parseDouble(text):\n return float(text or 0)", "def float_f(f):\n return '{:.0f}'.format(f)", "def toFloat(x, notFloat=np.nan):\n try:\n float(x)\n return float(x)\n except ValueError:\n return notFloat" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> _selected("peixe abacate", "peixe") True
def _selected(x, y): return y in x
[ "def _has_selection(obj: object) -> bool:\n if obj is None or not isinstance(obj, str):\n return False\n return obj.lower() not in [\"\", \"no selection\"]", "def is_selected(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.selected\", \r\n self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, int)", "def test_selected_values(self):\r\n self.assertTrue(re.search(SELECTED_OPTION_PATTERN % 'PK-IS',\r\n str(self.form['state'])))\r\n self.assertTrue(re.search(SELECTED_OPTION_PATTERN % 'PK-PB',\r\n str(self.form['state_required'])))\r\n self.assertTrue(re.search(INPUT_VALUE_PATTERN % '44000',\r\n str(self.form['postcode'])))\r\n self.assertTrue(re.search(INPUT_VALUE_PATTERN % '46000',\r\n str(self.form['postcode_required'])))", "def showSelectionInTitle(string):\n pass", "def is_selected(self,index):\n return self._animalSelect[index]", "def _select(self, selector):\n if isinstance(selector, str):\n selector = {\"name\": selector}\n elif isinstance(selector, int):\n selector = {\"number\": selector}\n log.debug(\"Selecting: {0}\".format(selector))\n assert isinstance(selector, dict)\n return [item for item in self\n if all(getattr(item, key) == selector[key] for key in selector)]", "def test_tv_item_select(self):\n # Find by a path with indexes\n itm = self.ctrl.get_item((0, 2, 3))\n self.assertEqual(itm.is_selected(), False)\n\n # Select\n itm.select()\n self.assertEqual(itm.is_selected(), True)\n\n # A second call to Select doesn't remove selection\n itm.select()\n self.assertEqual(itm.is_selected(), True)\n\n itm = self.ctrl.get_item((0, 3, 2))\n itm.ensure_visible()\n self.assertEqual(itm.is_selected(), False)\n coords = itm.children(control_type='Text')[0].rectangle().mid_point()\n itm.click_input(coords=coords, absolute=True)\n self.assertEqual(itm.is_selected(), True)", "def isSelected(self, *args) -> \"SbBool\":\n return _coin.SoSelection_isSelected(self, *args)", "def test_select(self):\n self.edit.set_edit_text(\"Some text\")\n\n self.edit.select(0, 0)\n self.assertEqual((0, 0), self.edit.selection_indices())\n\n self.edit.select()\n self.assertEqual((0, 9), self.edit.selection_indices())\n\n self.edit.select(1, 7)\n self.assertEqual((1, 7), self.edit.selection_indices())\n\n self.edit.select(5, 2)\n self.assertEqual((2, 5), self.edit.selection_indices())\n\n self.edit.select(\"me t\")\n self.assertEqual((2, 6), self.edit.selection_indices())\n\n self.assertRaises(RuntimeError, self.edit.select, \"123\")", "def has_selection(self, name=\"default\"):\n\t\treturn self.get_selection(name) != None", "def test_quick_selection(self):\r\n\r\n data = [8, 7, 9, 0, 1, 3, 6, 5, 2, 4]\r\n k = 2\r\n\r\n _, result = sa.quick_selection(data, k)\r\n\r\n self.assertEqual(result, 1)", "def three_arg_sel(arg1: str, arg2: str, test: str) -> str:\n return arg2 if arg1 == test else arg1", "def is_entry_selected_by_key(entry, k, v):\n\n if k == 'year':\n return int(entry[k]) in v\n elif k in 'author':\n author_names = entry[k].split(', ')\n for name in v:\n if name in author_names:\n return True\n return False\n elif k == 'author_first':\n return is_author_selected(entry, v, 'first')\n elif k == 'author_corresponding':\n return is_author_selected(entry, v, 'corresponding')\n else:\n raise ValueError('Wrong selection keys!')", "def _checkSelection(self, selection, current=None, selected=()):\n self.assertIs(selection.getCurrentItem(), current)\n self.assertEqual(selection.getSelectedItems(), selected)", "def 
test_select(self):\n self.dlg.set_focus()\n self.combo_editable.select(u'Letters')\n self.assertEqual(self.combo_editable.selected_text(), u'Letters')\n self.assertEqual(self.combo_editable.selected_index(), 1)\n self.combo_editable.select(2)\n self.assertEqual(self.combo_editable.selected_text(), u'Special symbols')\n self.assertEqual(self.combo_editable.selected_index(), 2)\n\n self.combo_fixed.select(u'Last Item')\n self.assertEqual(self.combo_fixed.selected_text(), u'Last Item')\n self.assertEqual(self.combo_fixed.selected_index(), 2)\n self.combo_fixed.select(1)\n self.assertEqual(self.combo_fixed.selected_text(), u'Item 2')\n self.assertEqual(self.combo_fixed.selected_index(), 1)\n\n self.combo_simple.select(u'The Simplest')\n self.assertEqual(self.combo_simple.selected_text(), u'The Simplest')\n self.assertEqual(self.combo_simple.selected_index(), 2)\n self.combo_simple.select(0)\n self.assertEqual(self.combo_simple.selected_text(), u'Simple 1')\n self.assertEqual(self.combo_simple.selected_index(), 0)", "def verify_selected(self, timeout=TIMEOUT):\n\t\treturn self.extend().selected().verify(timeout)", "def get_choice(menu):\n while True:\n c = raw_input(\"? \")\n for i in menu:\n if str(i[0]) == c:\n return i\n print(\"unknown: \" + c)", "def test_select_entry_interactive(self):\n with TemporaryDirectory() as directory:\n touch(os.path.join(directory, \"foo.gpg\"))\n touch(os.path.join(directory, \"bar.gpg\"))\n touch(os.path.join(directory, \"baz.gpg\"))\n # Select entries using the command line filter 'a' and then use\n # interactive selection to narrow the choice down to 'baz' by\n # specifying the unique substring 'z'.\n program = PasswordStore(directory=directory)\n with CaptureOutput(input=\"z\"):\n entry = program.select_entry(\"a\")\n assert entry.name == \"baz\"", "def is_author_selected(entry, names, select_field=''):\n\n author_names = entry['author'].split(', ')\n k = 'author_' + select_field\n if select_field == 'first':\n if author_names[0] in names:\n return True\n elif k in entry:\n authorFirst_names = entry[k].split(', ')\n for name in authorFirst_names:\n if name in names:\n return True\n return False\n elif select_field == 'corresponding':\n if not k in entry:\n return False\n else:\n authorCorr_names = entry[k].split(', ')\n for name in authorCorr_names:\n if name in names:\n return True\n return False\n elif select_field == '':\n return is_author_selected(entry, names, 'first') or is_author_selected(entry, names, 'corresponding')\n else:\n raise ValueError(\"Wrong select_field ('first', 'corresponding', or '')!\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a skeleton stuffed animal Halloween themed item.
def create(self, has_glow="", **kwargs): return Skeleton(has_glow, **kwargs)
[ "def generateItem(self, platform):\n blockItem = Block(IMG_PATH_BLOCK, platform.x+platform.w/2-15, platform.y-50, (30, 30))\n healthItem = Health(platform.x+platform.w/2-15, platform.y-50, (30, 30))\n coinItem = Coin(platform.x+platform.w/2-15, platform.y-50, (30, 30))\n self.otherMovableObjects.append(choice([healthItem, coinItem, blockItem]))", "def company_skeleton():\n\n E, P, B, D, F = EPBDF()\n entities = ['Paul', 'Roger', 'Quinn', 'Sally', 'Thomas',\n 'Case', 'Adapter', 'Laptop', 'Tablet', 'Smartphone',\n 'Accessories', 'Devices']\n\n entity_types = {'Paul': E, 'Roger': E, 'Quinn': E, 'Sally': E, 'Thomas': E,\n 'Case': P, 'Adapter': P, 'Laptop': P, 'Tablet': P, 'Smartphone': P,\n 'Accessories': B, 'Devices': B}\n skeleton = RelationalSkeleton(company_schema(), True)\n p, r, q, s, t, c, a, l, ta, sm, ac, d = ents = tuple([SkItem(e, entity_types[e]) for e in entities])\n skeleton.add_entities(*ents)\n for emp, prods in ((p, {c}), (q, {c, a, l}), (s, {l, ta}), (t, {sm, ta}), (r, {l})):\n for prod in prods:\n skeleton.add_relationship(SkItem(emp.name + '-' + prod.name, D), {emp, prod})\n for biz, prods in ((ac, {c, a}), (d, {l, ta, sm})):\n for prod in prods:\n skeleton.add_relationship(SkItem(biz.name + '-' + prod.name, F), {biz, prod})\n\n return skeleton", "def create_skeleton_image(img_read, filename, folder):\n img_skeletonized = skeletonize(img_read)\n\n imsave(folder + filename + \"_skeleton\" + '.png', img_as_uint(img_skeletonized))\n\n return img_skeletonized", "def bite():\n winsound.PlaySound('bite2.wav', winsound.SND_ASYNC)\n win.tracer(0)\n food.goto(random.randint(-260, 260), random.randint(-260, 260))\n new_tail = turtle.Turtle()\n new_tail.color(SNAKE_COLOR)\n new_tail.shape('circle')\n new_tail.speed(0)\n new_tail.pu()\n if segments:\n segments[-1].shape('square')\n new_tail.goto(segments[-1].xcor(), segments[-1].ycor())\n else:\n new_tail.goto(head.xcor(), head.ycor())\n segments.append(new_tail)\n win.tracer(1)", "def get_skeleton():\n return copy.deepcopy(SKELETON)", "def skeleton_image(folder, image_file, threshold=50, area_thresh=50, figsize=(10, 10), show=False):\n # Median filtered image.\n fname = '{}/{}'.format(folder, image_file)\n image0 = sio.imread(fname)\n image0 = np.ceil(255* (image0[:, :, 1] / image0[:, :, 1].max())).astype(int)\n image0 = skimage.filters.median(image0)\n filt = 'filt_{}.png'.format(image_file.split('.')[0])\n sio.imsave(folder+'/'+filt, image0)\n\n #threshold the image\n binary0 = binary_image(folder, filt, threshold=threshold, close=True, show=False)\n clean = 'clean_{}'.format(filt)\n\n #label image\n short_image, props = label_image(folder, clean, area_thresh=area_thresh, show=False)\n short = 'short_{}'.format(clean)\n short_image = short_image > 1\n # Skeletonize\n skeleton0 = skeletonize(short_image)\n\n branch_data = csr.summarise(skeleton0)\n branch_data_short = branch_data\n\n #Remove small branches\n mglia = branch_data['skeleton-id'].max()\n nbranches = []\n\n ncount = 0\n for i in range(1, mglia+1):\n bcount = branch_data[branch_data['skeleton-id']==i]['skeleton-id'].count()\n if bcount > 0:\n ids = branch_data.index[branch_data['skeleton-id']==i].tolist()\n nbranches.append(bcount)\n for j in range(0, len(ids)):\n branch_data_short.drop([ids[j]])\n\n ncount = ncount + 1\n if show:\n fig, ax = plt.subplots(figsize=(10, 10))\n draw.overlay_euclidean_skeleton_2d(image0, branch_data_short,\n skeleton_color_source='branch-type', axes=ax)\n plt.savefig('{}/skel_{}'.format(folder, short))\n\n return skeleton0, 
branch_data_short, nbranches, short_image, props", "def _create_skeleton(self, width, height, bb_thickness=0.05):\n base_link = urdf.Link('base_link',\n urdf.Inertial(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Mass(value=0),\n urdf.Inertia(ixx=0.001, ixy=0, ixz=0, iyy=0.001, iyz=0, izz=0.001)\n ),\n urdf.Collision(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Geometry(\n urdf.Box(size=(width, 0.3, 0.1))\n )\n ),\n urdf.Visual(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Geometry(\n urdf.Box(size=(width, 0.3, 0.1))\n ),\n urdf.Material('brown',\n urdf.Color(rgba=(0.82, 0.71, 0.55, 1.0))\n )\n ))\n\n back_link = urdf.Link('back_link',\n urdf.Inertial(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Mass(value=0.5),\n urdf.Inertia(ixx=0.001, ixy=0, ixz=0, iyy=0.001, iyz=0, izz=0.001)\n ),\n urdf.Collision(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Geometry(\n urdf.Box(size=(width, bb_thickness, height))\n )\n ),\n urdf.Visual(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Geometry(\n urdf.Box(size=(width, bb_thickness, height))\n ),\n urdf.Material('brown',\n urdf.Color(rgba=(0.82, 0.71, 0.55, 1.0))\n )\n ))\n\n fixed_joint = urdf.Joint('fixed_backboard',\n urdf.Parent('base_link'),\n urdf.Child('back_link'),\n urdf.Origin(xyz=(0, 0, height/2.0 + 0.05), rpy=(0, 0, 0)),\n type='fixed')\n\n self._links.append(base_link)\n self._links.append(back_link)\n self._joints.append(fixed_joint)", "def draw_skeleton(self):\n raise NotImplementedError", "def _create_entities(self):\n self.wall = []\n self.draw_wall()\n self.snake = Snake(((screen_width//2),(screen_height//2)))\n self.stones = Stone()\n self.apples = Apple()\n self.stones.create(self) # Create one stone\n self.apples.create(self) # Create one apple", "def create_bone( self, node, scene ):\r\n\r\n\t\t#node_is_bone = get_node_properties( node, property_name = 'p_bone_name' )\r\n\t\tnode_is_bone = True\r\n\t\tif node_is_bone:\r\n\t\t\tbone = Node_Bone( node, scene )\r\n\t\t\tself.bones.append( bone )", "def make_skeleton(\n class_name: str,\n nodes: List[Node],\n subs: Optional[List[SubAnnotation]] = None,\n slot_names: Optional[List[str]] = None,\n) -> Annotation:\n return Annotation(\n AnnotationClass(class_name, \"skeleton\"), {\"nodes\": nodes}, subs or [], slot_names=slot_names or []\n )", "def __create_item(self) -> str:\n flattened_outcomes = Quick_Python.flatten(self.outcomes)\n outcome = random.sample(flattened_outcomes, 1)[0]\n if isinstance(outcome, dict):\n outcome, amount = list(outcome.items())[0]\n print(f\"Crafting {outcome}: Replacing amount {self.amount} with {amount}\")\n self.amount = amount\n return \" \".join(item for item in [self.prefix, outcome] if item)", "def check_skeleton(self):\n\n assert_equal(simplicial_mesh(array([[0]]),array([[0]])).skeleton(0), \\\n set([simplex([0])])) \n assert_equal(simplicial_mesh(array([[0],[1]]),array([[0,1]])).skeleton(0), \\\n set([simplex([0]),simplex([1])])) \n assert_equal(simplicial_mesh(array([[0],[1]]),array([[0,1]])).skeleton(1), \\\n set([simplex([0,1])]))\n assert_equal(simplicial_mesh(array([[0],[1],[2]]),array([[0,1],[1,2]])).skeleton(0),\\\n set([simplex([0]),simplex([1]),simplex([2])]))\n assert_equal(simplicial_mesh(array([[0],[1],[2]]),array([[0,1],[1,2]])).skeleton(1),\\\n set([simplex([0,1]),simplex([1,2])]))\n assert_equal(simplicial_mesh(array([[0,0],[1,0],[0,1]]),array([[0,1,2]])).skeleton(1),\\\n set([simplex([0,1]),simplex([1,2]),simplex([2,0])]))", "def generate_hairstyle(hairstyle_attributes, is_male):\n\n hair_type 
= {\"Bald\", \"Straight_Hair\", \"Wavy_Hair\", \"Receding_Hairline\"}\n\n # To create grammatically correct order of description\n arranged_attributes = []\n colours = list(set(hairstyle_attributes) - hair_type)\n if len(colours) > 1:\n # Combines two colours into one attribute\n colour = \"\"\n for i, _colour in enumerate(colours):\n if i == 0:\n _colour = _colour.lower().split(\"_\")[0] + \"ish\"\n _colour = _colour.lower().split(\"_\")[0]\n colour += _colour + \" \"\n arranged_attributes.append(\n colour.strip()\n ) # Strip to remove trailing whitespace\n elif len(colours) == 1:\n colour = colours[0].lower().split(\"_\")[0]\n arranged_attributes.append(colour)\n style = set(hairstyle_attributes) & {\"Straight_Hair\", \"Wavy_Hair\"}\n arranged_attributes.extend(list(style))\n bald_rec = set(hairstyle_attributes) & {\"Receding_Hairline\", \"Bald\"}\n arranged_attributes.extend(list(bald_rec))\n\n if len(arranged_attributes) == 1:\n attribute = arranged_attributes[0].lower().split(\"_\")[0]\n if attribute == \"bald\":\n return \"He is bald.\" if is_male else \"She is bald.\"\n if random.random() <= 0.5:\n sentence = \"His\" if is_male else \"Her\"\n return sentence + \" hair is \" + attribute + \".\"\n else:\n sentence = \"He\" if is_male else \"She\"\n return sentence + \" has \" + attribute + \" hair.\"\n\n # Adding variation in sentence structure\n if random.random() <= 0.5:\n sentence = \"His\" if is_male else \"Her\"\n sentence += \" hair is\"\n for i, attribute in enumerate(arranged_attributes):\n attribute = attribute.lower().split(\"_\")[0]\n if len(arranged_attributes) - 1 == i:\n sentence = sentence[:-1]\n if attribute == \"bald\":\n attribute = \"he\" if is_male else \"she\"\n attribute += (\n \" is \" + random.choice([\"going\", \"partially\"]) + \" bald\"\n )\n return sentence + \" and \" + attribute + \".\"\n return sentence + \" and \" + attribute + \".\"\n sentence += \" \" + attribute + \",\"\n else:\n sentence = \"He\" if is_male else \"She\"\n sentence += \" has\"\n for i, attribute in enumerate(arranged_attributes):\n attribute = attribute.lower().split(\"_\")[0]\n if len(arranged_attributes) - 1 == i:\n sentence = sentence[:-1]\n if attribute == \"bald\":\n sentence += \" hair\"\n attribute = \"he\" if is_male else \"she\"\n attribute += (\n \" is \" + random.choice([\"going\", \"partially\"]) + \" bald\"\n )\n return sentence + \" and \" + attribute + \".\"\n return sentence + \" and \" + attribute + \" hair.\"\n sentence += \" \" + attribute + \",\"", "def make_head_model(anat_img, out_dir):\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n if not os.path.exists(os.path.join(out_dir,'outer_skin_surface')):\n\n cmd ='cd {out_dir}; mri_watershed -surf surf {anat_img} brain.mgz'.format(out_dir=out_dir, anat_img=anat_img)\n os.system(cmd)\n\n for f in ['lh.surf_brain_surface','lh.surf_inner_skull_surface','lh.surf_outer_skin_surface','lh.surf_outer_skull_surface']:\n cmd = 'mv {out_dir}/{f} {out_dir}/{f2}'.format(f=f,f2=f.split('lh.surf_')[1],out_dir=out_dir)\n os.system(cmd)\n else:\n print('head model exists!')", "def export_bones(self, arm, parent_block):\n # the armature was already exported as a NiNode\n # now we must export the armature's bones\n assert( arm.type == 'ARMATURE' )\n\n # find the root bones\n # dictionary of bones (name -> bone)\n bones = dict(list(arm.data.bones.items()))\n root_bones = []\n for root_bone in list(bones.values()):\n while root_bone.parent in list(bones.values()):\n root_bone = root_bone.parent\n if 
root_bones.count(root_bone) == 0:\n root_bones.append(root_bone)\n\n if (arm.getAction()):\n bones_ipo = arm.getAction().getAllChannelIpos() # dictionary of Bone Ipos (name -> ipo)\n else:\n bones_ipo = {} # no ipos\n\n bones_node = {} # maps bone names to NiNode blocks\n\n # here all the bones are added\n # first create all bones with their keyframes\n # and then fix the links in a second run\n\n # ok, let's create the bone NiNode blocks\n for bone in list(bones.values()):\n # create a new block for this bone\n node = self.create_ninode(bone)\n # doing bone map now makes linkage very easy in second run\n bones_node[bone.name] = node\n\n # add the node and the keyframe for this bone\n node.name = self.get_full_name(bone.name).encode()\n if self.properties.game in ('OBLIVION', 'FALLOUT_3'):\n # default for Oblivion bones\n # note: bodies have 0x000E, clothing has 0x000F\n node.flags = 0x000E\n elif self.properties.game in ('CIVILIZATION_IV', 'EMPIRE_EARTH_II'):\n if bone.children:\n # default for Civ IV/EE II bones with children\n node.flags = 0x0006\n else:\n # default for Civ IV/EE II final bones\n node.flags = 0x0016\n elif self.properties.game in ('DIVINITY_2',):\n if bone.children:\n # default for Div 2 bones with children\n node.flags = 0x0186\n elif bone.name.lower()[-9:] == 'footsteps':\n node.flags = 0x0116\n else:\n # default for Div 2 final bones\n node.flags = 0x0196\n else:\n node.flags = 0x0002 # default for Morrowind bones\n self.export_matrix(bone, 'localspace', node) # rest pose\n\n # bone rotations are stored in the IPO relative to the rest position\n # so we must take the rest position into account\n # (need original one, without extra transforms, so extra = False)\n bonerestmat = self.get_bone_rest_matrix(bone, 'BONESPACE',\n extra = False)\n try:\n bonexmat_inv = mathutils.Matrix(\n self.get_bone_extra_matrix_inv(bone.name))\n except KeyError:\n bonexmat_inv = mathutils.Matrix()\n bonexmat_inv.identity()\n if bone.name in bones_ipo:\n self.export_keyframes(\n bones_ipo[bone.name], 'localspace', node,\n bind_mat = bonerestmat, extra_mat_inv = bonexmat_inv)\n\n # does bone have priority value in NULL constraint?\n for constr in arm.getPose().bones[bone.name].constraints:\n # yes! 
store it for reference when creating the kf file\n if constr.name[:9].lower() == \"priority:\":\n self.bone_priorities[\n self.get_bone_name_for_nif(bone.name)\n ] = int(constr.name[9:])\n\n # now fix the linkage between the blocks\n for bone in list(bones.values()):\n # link the bone's children to the bone\n if bone.children:\n self.debug(\"Linking children of bone %s\" % bone.name)\n for child in bone.children:\n # bone.children returns also grandchildren etc.\n # we only want immediate children, so do a parent check\n if child.parent.name == bone.name:\n bones_node[bone.name].add_child(bones_node[child.name])\n # if it is a root bone, link it to the armature\n if not bone.parent:\n parent_block.add_child(bones_node[bone.name])", "def add_shank():\n items['shank'] = {'description': 'A really sharp stick',\n 'health': '-5'\n }", "def head_random():\n eyesfunc = part_eyes_basic\n mouthfunc = part_mouth_braces\n x = random.randint(1,3)\n if x == 1:\n mouthfunc = part_mouth_mustache\n elif x == 2:\n eyesfunc = part_eyes_winking\n else:\n mouthfunc = part_mouth_surprised\n eyesfunc = part_eyes_spunkyglasses\n \n head_with_two(eyesfunc, mouthfunc)", "def generate_item(self):\n\n loop = 1\n while loop:\n row = random.randint(0, 14)\n column = random.randint(0, 14)\n if self.level.maze[row][column] == \"0\":\n self.pos_y = row\n self.pos_x = column\n self.y_pix = row * sprite_size\n self.x_pix = column * sprite_size\n loop = 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a ThingSpeakWriter object for manual verification
def write_test(): writer = ThingSpeakWriter(c.L2_M_5A2_WRITE_KEY) test_date = datetime.now().strftime("%Y-%m-%d") test_time = datetime.now().strftime("%H:%M:%S") fields = {c.TEST_FIELD: "{} {}".format(test_date, test_time)} logging.info('Writing {} {} to {}'.format(test_date, test_time, c.TEST_FIELD)) writer.write(fields) read_url = c.READ_URL.format( CHANNEL_FEED=c.L2_M_5A2_FEED, READ_KEY=c.L2_M_5A2_READ_KEY) logging.info('Check results here: {}'.format(read_url))
[ "def makeTxt():\n print('start')\n model = KeyedVectors.load_word2vec_format('\\\\\\\\smbhome.uscs.susx.ac.uk\\\\ls612\\\\Documents\\\\Dissertation\\\\LSTM-PICO-Detection-master\\\\other_spyder\\\\Extended embeddings\\\\2019-07-19-09-34-51-bigrams_FINAL.bin', binary=True)#, limit = 20 for tests\n model.save_word2vec_format('\\\\\\\\smbhome.uscs.susx.ac.uk\\\\ls612\\\\Documents\\\\Dissertation\\\\Data\\\\extended.txt', binary=False)\n print('done creating text files')", "def create_docx():\n text = generate_speech()\n document = Document()\n file_name = input(\"Enter a name for your file with speech: \")\n\n try:\n speech_heading = input(\"Create a heading for your speech: \")\n document.add_heading(f\"{speech_heading}\", 0)\n document.add_paragraph(text).alignment = WD_ALIGN_PARAGRAPH.JUSTIFY\n document.save(f\"{file_name}.docx\")\n except:\n print(\"\\nFailed to create a file.\")\n print(f\"Check that there is no file {file_name}.docx opened in the same\\\n directory...\\n\")\n exit()\n else:\n input(f\"Done! Find the speech you have generated in file {file_name}.docx \")", "def createWav(songInfo):\n songName = songInfo[0]\n songNotes = songInfo[1]\n ps.make_wav(songNotes, fn = songName, silent = True)", "def make_file_writer(cls, fname, translatedict={}):\n filehandle = open(fname, 'w')\n outputhandler = XMLWriter(filehandle)\n return cls(outputhandler, None, translatedict)", "def _write_wakeword_to_disk(self, audio, metadata):\n filename = join(self.saved_wake_words_dir,\n '_'.join(str(metadata[k]) for k in sorted(metadata)) +\n '.wav')\n with open(filename, 'wb') as f:\n f.write(audio.get_wav_data())", "def make_wav(text, speed=1.0, emotion='normal', output_file='__temp.wav', output_dir=os.getcwd(), voice_name='mei',\n openjtalk_binpath='/usr/bin',\n openjtalk_dicpath='/var/lib/mecab/dic/open-jtalk/naist-jdic',\n openjtalk_voicepath='/usr/share/hts-voice/{voice_name}/{voice_name}_{emotion}.htsvoice'):\n open_jtalk = [openjtalk_binpath + '/open_jtalk']\n mech = ['-x', openjtalk_dicpath]\n htsvoice = ['-m', openjtalk_voicepath.format(voice_name=voice_name, emotion=emotion)]\n speed = ['-r', str(speed)]\n outwav = ['-ow', os.path.join(output_dir, output_file)]\n cmd = open_jtalk + mech + htsvoice + speed + outwav\n c = subprocess.Popen(cmd,stdin=subprocess.PIPE)\n c.stdin.write(text.encode('utf-8'))\n c.stdin.close()\n c.wait()\n return os.path.join(output_dir, output_file)", "def bethefirsttowrite(self):\n qtype = self.questtype[6]\n\n \"\"\"if random.randrange(100) < 25:\n DONT = \"not \"\n else:\"\"\"\n _not_ = \"\"\n\n answer = random.choice(list(self.data[qtype]))\n question = (\n f\"/me ▬▬▬G▬O▬T▬T▬A▬▬G▬O▬▬F▬A▬S▬T▬▬▬▬ PogChamp QUICK! PogChamp \"\n f\"Be the first to {_not_}write {answer} ! ▬▬▬G▬O▬T▬T▬A▬▬G▬O▬▬F▬A▬S▬T▬▬▬▬\"\n )\n\n return {\"bethefirsttowrite\": {\"question\": question, \"answer\": answer}}", "def makebot(input=\"string\", verbose=bool, nooverwrite=bool, checkdepends=bool, checkres=int, output=\"string\"):\n pass", "def create(botname, consumer_key, consumer_secret, access_token, access_token_secret):\n\n print \"\"\"*********************\n* Welcome to PyBot! 
*\n*********************\n\nThis script will help you set things up.\n\n\"\"\"\n if consumer_key is None or consumer_secret is None:\n # Case 1: Nothing is provided.\n consumer_key, consumer_secret = _consumer_tokens()\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n access_token, access_token_secret = _access_tokens(auth)\n elif access_token is None or access_token_secret is None:\n # Case 2: Consumer* is provided.\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n access_token, access_token_secret = _access_tokens(auth)\n\n # Create the directory, configuration file, and skeleton bot script.\"\"\"\n botdir = \"%s/%s\" % (os.path.abspath(\".\"), botname)\n _write_bot(botname, botdir, consumer_key, consumer_secret, access_token, access_token_secret)\n\n print \"\"\"Your bot \\\"%s\\\" is ready to rock! Start it up with the following command:\n\n python pybot.py start -n %s\"\"\" % (botname, botname)", "def create_wato_file(device_type):\n with open(os.path.join(CMK_CONF_PATH, DEVICE_TYPES[device_type]['tags'][0], '.wato'), 'w') as file:\n file.write(\"{'lock': 'Folders managed by automated script.', 'attributes': {'tag_device_type': '%s'}, 'num_hosts': %s, 'title': u'%s'}\" % (DEVICE_TYPES[device_type]['tags'][0], len(format_device_list(query_sql(device_type))), device_type))", "def make_iostring_writer(cls, translatedict={}):\n strbuf = cStringIO.StringIO()\n outputhandler = XMLWriter(strbuf)\n return cls(outputhandler, None, translatedict), strbuf", "def _write_and_speak(self, speak):\n # A helper function for the running thread\n def thread_write_and_speak():\n # Only update the time in full letters when necessary\n if self._write_time or self._speak_time:\n self._do_write_time()\n\n # And if requested, say it aloud\n if self._speak_time and speak:\n self._do_speak_time()\n\n # Now detach a thread to do the big job\n thread = threading.Thread(target=thread_write_and_speak)\n thread.start()", "def test_speech(self):\n\n output = open('example/output.txt','w',encoding='utf-8')\n # load sample data\n with open('example/speech.txt','r',encoding='utf-8') as input:\n for speech in input:\n speech = speech.strip('\\n')\n result,path = self.rule_match(speech)\n self.write_output(speech, result, path, output)", "def test_create_writable_protocol():\n f = _WritableFile()\n WeldxFile(f, tree=dict(test=\"yes\"), mode=\"rw\")\n new_file = TestWeldXFile.make_copy(f.to_wrap)\n assert WeldxFile(new_file)[\"test\"] == \"yes\"", "def write(text):", "def test_build(self):\n message = 'testing pyspamsteg'\n passphrase = 'pyspamsteg'\n \n ct = pyspamsteg.create( message, passphrase )\n pt = pyspamsteg.read( ct, passphrase )\n\n self.assertEquals( pt, message )", "def writeHubFile(hubTxtPath, hubName):\n with open(hubTxtPath, 'w') as hubFile:\n hubFile.write('''\nhub {hubName}\nshortLabel {hubName}\nlongLabel {hubName}\ngenomesFile genomes.txt\nemail NoEmail\n'''.format(hubName=hubName))", "def make_story(settings):\n print('Starting conversationalist. 
Getting tweets...')\n adapter = settings.get('adapter')\n api = settings['api']\n timeline_json_output_file = settings['timeline_out']\n timeframe_hours = int(settings.get('timeframe', 24))\n title = settings.get('title', 'Story')\n twitter_username = settings['username']\n write = settings['write']\n timeline = Timeline(api, twitter_username, (timeframe_hours * -1))\n print(\"...saving Timeline as JSON file...\")\n timeline.to_json(timeline_json_output_file)\n conversation = Conversation(title=title, adapter=adapter)\n conversation.load(timeline_json_output_file)\n print(\"...writing story file...\")\n page_location = write(conversation, settings['story_out'])\n print('...conversationalist done.')\n return page_location", "def w(t, obj):\n return Action(WRITE, t, obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return Proxmox VE API client.
async def get_client(conf): from proxmoxer import ProxmoxAPI host = conf[CONF_HOST] port = conf[CONF_PORT] user = conf[CONF_USERNAME] realm = conf[CONF_REALM] password = conf[CONF_PASSWORD] verify_ssl = conf[CONF_VERIFY_SSL] proxmox = ProxmoxAPI( host, user=user + '@' + realm, password=password, port=port, verify_ssl=verify_ssl) return proxmox
[ "def get_nova_client(session):\n from novaclient import client\n return client.Client('2', session=session)", "def zeep_client(self) -> ZeepClient:\n if self._zeep_client is None:\n self._zeep_client = Oauth2Request.get_soap_client()\n return self._zeep_client", "def get_client(api_version=constants.DEFAULT_IRONIC_API_VERSION):\n if CONF.ironic.auth_strategy == 'noauth':\n # To support standalone ironic without keystone\n args = {'token': 'noauth',\n 'endpoint': CONF.ironic.endpoint}\n else:\n # To support keystone authentication\n global KEYSTONE_SESSION\n if not KEYSTONE_SESSION:\n KEYSTONE_SESSION = create_keystone_session(\n constants.IRONIC_GROUP_NAME)\n args = {'session': KEYSTONE_SESSION,\n 'region_name': CONF.ironic.os_region}\n\n args['os_ironic_api_version'] = api_version\n args['max_retries'] = CONF.ironic.max_retries\n args['retry_interval'] = CONF.ironic.retry_interval\n\n # initialize an IronicClient instance\n return client.Client(1, **args)", "def exante_api_demo_client():\n\n logger.info(f\"[+] Clinet ID from .test.env: {CLIENT_ID}\")\n\n api_client = ExanteAPI(\n client_id=CLIENT_ID,\n application_id=APPLICATION_ID,\n shared_key=SHARED_KEY,\n api_scopes=[\n Scope.Symbols,\n Scope.Change,\n Scope.Crossrates,\n Scope.Feed,\n Scope.Accounts,\n Scope.AccountsSummary,\n Scope.Orders,\n ],\n environment=Environment.Demo,\n )\n\n return api_client", "def get_sidecar_client(self):\n return client.Client(\n username = self.username,\n password = self.password,\n user_id = self.user_id,\n auth_url = self.auth_url,\n auth_token = self.auth_token,\n tenant_id = self.tenant_id,\n tenant_name = self.tenant_name,\n project_name = self.project_name,\n project_id = self.project_id,\n region_name = self.region_name,\n insecure = self.insecure,\n timeout = self.timeout,\n user_domain_id = self.user_domain_id,\n user_domain_name = self.user_domain_name,\n project_domain_id = self.project_domain_id,\n project_domain_name = self.project_domain_name\n )", "def make_client(instance):\n volume_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS\n )\n\n LOG.debug('instantiating volume client')\n client = volume_client(\n username=instance._username,\n api_key=instance._password,\n project_id=instance._tenant_name,\n auth_url=instance._auth_url,\n )\n\n return client", "def getClient():\n\n contextID = MaxPlus.Core.EvalMAXScript(\"fabric.ContextId\").Get()\n if contextID == '':\n client = FabricEngine.Core.createClient()\n contextID = client.getContextID()\n MaxPlus.Core.EvalMAXScript(\"fabric.ContextId = \\\"\" + str(contextID) + \"\\\"\")\n\n options = {\n 'contextID': contextID,\n 'reportCallback': fabricCallback,\n 'guarded': True\n }\n\n client = FabricEngine.Core.createClient(options)\n\n return client", "def _client(self) -> hvac.Client:\n if \"session\" not in self.kwargs:\n # If no session object provide one with retry as per hvac documentation:\n # https://hvac.readthedocs.io/en/stable/advanced_usage.html#retrying-failed-requests\n adapter = HTTPAdapter(\n max_retries=Retry(\n total=3,\n backoff_factor=0.1,\n status_forcelist=[412, 500, 502, 503],\n raise_on_status=False,\n )\n )\n session = Session()\n session.mount(\"http://\", adapter)\n session.mount(\"https://\", adapter)\n self.kwargs[\"session\"] = session\n\n _client = hvac.Client(url=self.url, **self.kwargs)\n if self.auth_type == \"approle\":\n self._auth_approle(_client)\n elif self.auth_type == \"aws_iam\":\n self._auth_aws_iam(_client)\n elif self.auth_type == \"azure\":\n 
self._auth_azure(_client)\n elif self.auth_type == \"gcp\":\n self._auth_gcp(_client)\n elif self.auth_type == \"github\":\n self._auth_github(_client)\n elif self.auth_type == \"kubernetes\":\n self._auth_kubernetes(_client)\n elif self.auth_type == \"ldap\":\n self._auth_ldap(_client)\n elif self.auth_type == \"radius\":\n self._auth_radius(_client)\n elif self.auth_type == \"token\":\n self._set_token(_client)\n elif self.auth_type == \"userpass\":\n self._auth_userpass(_client)\n else:\n raise VaultError(f\"Authentication type '{self.auth_type}' not supported\")\n\n if _client.is_authenticated():\n return _client\n else:\n raise VaultError(\"Vault Authentication Error!\")", "def client(self) -> hvac.v1.Client:\n if self._client is None:\n raise RuntimeError('No client; must authenticate')\n return self._client", "def _get_client(self):\n options = {\n 'webdav_hostname': 'https://'+self.stg_auth.get_credential('hostname'),\n 'webdav_login': self.stg_auth.get_credential('login'),\n 'webdav_password': self.stg_auth.get_credential('password')\n }\n return Client(options=options)", "def remote_client():\n from .utils import client_factory\n\n return client_factory()(raise_server_exceptions=False)", "def get_client(self) -> DandiAPIClient:\n return DandiAPIClient(self.api_url)", "def connect_with_vault(vu, vt):\n try:\n client = hvac.Client(url=vu, token=vt)\n return client\n except KeyError as e:\n print(\"Error Connecting with Vault. Type Error:\", e)", "def _init_client():\n return _Client(_ARM_WS_URL)", "def make_client(instance):\n\n # Defer client imports until we actually need them\n from cinderclient import extension\n from cinderclient.v1.contrib import list_extensions\n from cinderclient.v1 import volume_snapshots\n from cinderclient.v1 import volumes\n\n # Monkey patch for v1 cinderclient\n volumes.Volume.NAME_ATTR = 'display_name'\n volume_snapshots.Snapshot.NAME_ATTR = 'display_name'\n\n volume_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS\n )\n LOG.debug('Instantiating volume client: %s', volume_client)\n\n # Set client http_log_debug to True if verbosity level is high enough\n http_log_debug = utils.get_effective_log_level() <= logging.DEBUG\n\n extensions = [extension.Extension('list_extensions', list_extensions)]\n\n # Remember interface only if it is set\n kwargs = utils.build_kwargs_dict('endpoint_type', instance._interface)\n\n client = volume_client(\n session=instance.session,\n extensions=extensions,\n http_log_debug=http_log_debug,\n region_name=instance._region_name,\n **kwargs\n )\n\n return client", "def get_monasca_client():\n\n monasca_endpoint = get_monasca_endpoint()\n # Monasca client v1.7.1 used in pike is old, so get its client via\n # old-fashioned way (credentials)\n # the pike version also cannot reliably discover its own endpoint,\n # so it is specified here\n mon_client = Mon_client(\n api_version=\"2_0\",\n endpoint=monasca_endpoint,\n auth_url=CONF.keystone_authtoken.auth_url,\n username=CONF.keystone_authtoken.username,\n password=CONF.keystone_authtoken.password,\n project_name=CONF.keystone_authtoken.project_name,\n project_domain_name=CONF.keystone_authtoken.project_domain_name,\n user_domain_name=CONF.keystone_authtoken.user_domain_name,\n insecure=CONF.keystone_authtoken.insecure\n )\n\n return mon_client", "def client(self):\n\n access_token = self.merchant.access_token \\\n if self.merchant is not None else None\n\n return self.client_from_settings(\n access_token=access_token\n )", "def 
exante_api_demo_client_no_scopes():\n\n api_client = ExanteAPI(\n client_id=CLIENT_ID,\n application_id=APPLICATION_ID,\n shared_key=SHARED_KEY,\n api_scopes=[],\n environment=Environment.Demo,\n )\n\n return api_client", "def get_client():\n api_scopes = ['https://www.googleapis.com/auth/cloud-platform']\n api_version = 'v1'\n discovery_api = 'https://cloudiot.googleapis.com/$discovery/rest'\n service_name = 'cloudiotcore'\n\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json)\n scoped_credentials = credentials.with_scopes(api_scopes)\n\n discovery_url = '{}?version={}'.format(\n discovery_api, api_version)\n\n return discovery.build(\n service_name,\n api_version,\n discoveryServiceUrl=discovery_url,\n credentials=scoped_credentials)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }