| query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict) |
|---|---|---|---|
Returns a dictionary mapping models to lists of reports. Only reports that the user is allowed to access are returned.
|
def get_reports_by_model(user):
    reports_by_model = {}
    for report in _registry.values():
        if report.check_permission(user):
            reports_for_model = reports_by_model.setdefault(report.model, [])
            reports_for_model.append(report)
    return reports_by_model
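A minimal usage sketch for the document above, assuming a hypothetical module-level `_registry` and a report class exposing `model` and `check_permission` (neither is defined in the snippet itself):

# Hypothetical registry entry; the names below are illustrative, not from the source.
class SalesReport:
    model = "sales"

    @staticmethod
    def check_permission(user):
        # Grant access based on a flag carried by the (hypothetical) user object.
        return getattr(user, "can_view_sales", False)

_registry = {"sales-report": SalesReport}

# With get_reports_by_model(user) from above in scope, a permitted user yields
# {"sales": [SalesReport]}, while a user failing every check yields {}.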
|
[
"def GetAnalysisReports(self):\n return self._store.GetAnalysisReports()",
"def resourceReports(self):\n self.resourceReportList = {}\n for k, v in self.activeResources.iteritems():\n self.resourceReportList[k] = v.report()\n for k, v in self.inactiveResources.iteritems():\n self.resourceReportList[k] = v.report()",
"def reports(self):\n return self.decoder.reports.getall()",
"def reports(self):\n try:\n for post in self.subreddit.mod.reports(limit=None):\n for mod_report in post.mod_reports:\n yield (str(mod_report[0]), mod_report[1], post)\n except prawcore.PrawcoreException as exception:\n logging.error(\"Error fetching reports: %s\", exception)",
"def models_related_to_report(data):\n if data['report'] == 'animation' or data['report'] == 'optimizerAnimation':\n return []\n res = ['simulationGrid']\n res.append(_non_opt_fields_to_array(data.models.beam))\n for container in ('conductors', 'conductorTypes'):\n for m in data.models[container]:\n res.append(_non_opt_fields_to_array(m))\n if data['report'] != 'fieldComparisonReport':\n res.append(template_common.report_fields(data, data['report'], _REPORT_STYLE_FIELDS))\n return res",
"def _get_reports():\n if not CONFIG:\n raise ConfigError(\"Configuration is not passed\")\n\n try:\n return CONFIG[\"reports\"]\n except KeyError:\n raise ConfigError(\"Reports configurations are missing from config\")",
"def authorized_for_reports(self):\n if self.userobject is None:\n return False\n return self.userobject.may_run_reports or self.userobject.superuser",
"def student_reports(self):\n return self.studentreport_set.all().prefetch_related(\n \"project\",\n \"modifier\",\n \"project__project_owner\"\n ).order_by(\n \"project__position\",\n \"project__project_owner__last_name\",\n \"-project__year\",\n \"-project__number\")",
"def report(self):\n used_mans = [dom for dom, man in self.managements.items() if man.area > 0]\n report = [self.managements[str(dom)] for dom in used_mans]\n report.sort(key=lambda x: x.pct_coverage, reverse=True)\n return [man.as_dict() for man in report]",
"def custom_report_list(request):\n org_id = int(request.GET.get('select_org', '0'))\n ctx = base_context()\n ctx.update({\n 'org_id': org_id,\n 'customReports': view_util.custom_reports(org_id)\n })\n\n return render_to_response('bmsapp/customReports.html', ctx)",
"def category_reports_for_user(context, category):\n user = context[\"user\"]\n return category.reports.for_user(user)",
"def show_work_records(user, is_me):\n records = WorkRecord.objects.filter(user=user)\n return {\"user\": user, \"records\": records, \"is_me\": is_me}",
"def test_list(self):\n report = NGReportFactory.create()\n response = self.get(reverse('list_ng_reports'))\n self.assertTemplateUsed(response, 'list_ng_reports.html')\n eq_(response.context['pageheader'], 'Activities for Reps')\n eq_(response.status_code, 200)\n eq_(set(response.context['reports'].object_list),\n set([report]))",
"def export_data(\n cls,\n user_id: str\n ) -> Dict[str, Union[bool, List[str], None]]:\n rights_model = cls.get_by_id(user_id)\n\n if rights_model is None:\n return {}\n\n return {\n 'can_review_translation_for_language_codes': (\n rights_model.can_review_translation_for_language_codes),\n 'can_review_voiceover_for_language_codes': (\n rights_model.can_review_voiceover_for_language_codes),\n 'can_review_questions': rights_model.can_review_questions,\n 'can_submit_questions': rights_model.can_submit_questions\n }",
"def show_student_records(user, is_me):\n records = StudentRecord.objects.filter(user=user)\n return {\"user\": user, \"records\": records, \"is_me\": is_me}",
"def generate_report(self, docs, threshold, report_all=False):\n\n bad_eid_reporters = defaultdict(list)\n other_eid_reporters = defaultdict(list)\n bad_eid_student = dict()\n\n # get all bad students\n for dd in docs:\n if not dd.group:\n logging.warning(\"%s listed no group members\" % dd.student)\n for s in dd.group:\n if self.poorly_ranked(s, threshold):\n bad_eid_student[s.eid] = s\n bad_eid_reporters[s.eid].append((dd.student, s.ranking))\n\n # second pass to get all ratings for bad students\n if report_all:\n for dd in docs:\n for s in dd.group:\n if s.eid in bad_eid_student and not self.poorly_ranked(s, threshold):\n other_eid_reporters[s.eid].append((dd.student, s.ranking))\n\n res = {}\n RREntry = namedtuple('RREntry', ['student', 'bad_rankers', 'pos_rankers'])\n for eid in bad_eid_student:\n res[eid] = RREntry(bad_eid_student[eid], bad_eid_reporters[eid], other_eid_reporters[eid])\n\n return res",
"def filter_granted(self, queryset):\n return Dataset.filter_by_user(self.request.user)",
"def query_reports(self, report_requests=None, **kwargs):\n\n if report_requests is None:\n report_requests = self.get_report_provider_method_names()\n\n report_key_names = None\n\n all_reports_requested_as_strings = True\n for r in report_requests:\n if not isinstance(r, str):\n all_reports_requested_as_strings = False\n break\n\n if all_reports_requested_as_strings:\n report_key_names = report_requests\n else:\n # THIS DEPENDS ON CPYTHON TO WORK. PyPy or Jython = no go.\n caller_method = None\n try:\n curframe = inspect.currentframe()\n caller_frame = inspect.getouterframes(curframe, 2)\n caller_method = caller_frame[1][3]\n except:\n pass\n if isinstance(caller_method, str) and hasattr(self, caller_method):\n report_key_names = [caller_method]\n\n if report_key_names is None:\n raise Exception(\"Cant determine report key names.\")\n\n def process_report_request_type(report_request, **kwargs):\n if isinstance(report_request, str): # Convert string to dict by executing AnalyticsAPI[report_request](**kwargs)\n report_request = getattr(self, report_request)(execute=False, **{ k:v for k,v in kwargs.items() if k in ('start_date', 'end_date') })\n\n return dict(report_request, # Add required common key/vals, see https://developers.google.com/analytics/devguides/reporting/core/v4/basics.\n viewId=self.view_id,\n pageSize=report_request.get('pageSize', self.owner.extra_config.get('analytics_page_size', DEFAULT_GOOGLE_API_CONFIG['analytics_page_size']))\n )\n\n formatted_report_requests = [ process_report_request_type(r, **kwargs) for r in report_requests ]\n\n # Google only permits 5 requests max within a batchRequest, so we need to chunk it up if over this -\n report_request_count = len(formatted_report_requests)\n if report_request_count > 5:\n raw_result = { \"reports\" : [] }\n for chunk_num in range(report_request_count // 5 + 1):\n chunk_num_start = chunk_num * 5\n chunk_num_end = min([chunk_num_start + 5, report_request_count])\n for chunk_raw_res in self._api.reports().batchGet(body={ \"reportRequests\" : formatted_report_requests[chunk_num_start:chunk_num_end] }).execute().get('reports', []):\n raw_result['reports'].append(chunk_raw_res)\n else:\n raw_result = self._api.reports().batchGet(body={ \"reportRequests\" : formatted_report_requests }).execute()\n\n # We get back as raw_result:\n # { \"reports\" : [{ \"columnHeader\" : { \"dimensions\" : [Xh, Yh, Zh], \"metricHeaderEntries\" : [{ \"name\" : 1h, \"type\" : \"INTEGER\" }, ...] }, \"data\" : { \"rows\": [{ \"dimensions\" : [X,Y,Z], \"metrics\" : [1,2,3,4] }] } }, { .. }, ....] }\n raw_result['requests'] = formatted_report_requests\n raw_result['report_key_names'] = report_key_names\n # This transforms raw_result[\"reports\"] into more usable data structure for ES and aggregation\n # e.g. list of JSON items instead of multi-dimensional table representation\n return self.transform_report_result(\n raw_result,\n date_increment=kwargs.get('increment')\n )",
"def reports(self, interval='yearly'):\n return (account.report.create_report(interval) for account in self._accounts)"
] |
{
    "objective": {
        "paired": [],
        "self": [],
        "triplet": [
            ["query", "document", "negatives"]
        ]
    }
}
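The objective metadata above marks each row as a (query, document, negatives) triplet. A hedged sketch of expanding one such row into training triplets; the field types are an assumption based only on the column names shown in this preview:

from typing import Dict, Iterator, List, Tuple

def row_to_triplets(row: Dict) -> Iterator[Tuple[str, str, str]]:
    # Assumes the row carries 'query' (str), 'document' (str) and
    # 'negatives' (list of str), matching the columns in this preview.
    query: str = row["query"]
    positive: str = row["document"]
    negatives: List[str] = row["negatives"]
    for negative in negatives:
        yield (query, positive, negative)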
|
Checks if a report exists.
|
def report_exists(report_id):
    return report_id in _registry
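A brief usage sketch, reusing the hypothetical `_registry` from the first row; it simply guards a lookup with the membership test above:

# With report_exists and the hypothetical _registry in scope:
report_id = "sales-report"
if report_exists(report_id):
    print(f"{report_id} is registered")
else:
    print(f"unknown report id: {report_id}")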
|
[
"def report_exists(account_id: str, profile_id: str, report_name: str) -> object:\n service = _get_service()\n request = service.reports().list(profileId=profile_id)\n response = request.execute()\n if logging.get_verbosity() == 1: # Debug.\n pp = pprint.PrettyPrinter(indent=2)\n logging.debug(pp.pformat(response))\n for report in response['items']:\n if report['name'] == report_name:\n logging.info('Found report id: %s with report name: %s', report['id'],\n report_name)\n return report\n return None",
"def has_report_step( self , report_step ):\n return cfunc.has_report_step( self , report_step )",
"def hasReport(cfgInterface):\n if not hasService(cfgInterface):\n return False\n loggerSvc = cfgInterface.cmsConfig.service(\"MessageLogger\")\n\n if not loggerSvc.has_key(\"fwkJobReports\"):\n return False\n \n reports = loggerSvc['fwkJobReports']\n reportNames = []\n for item in reports[2]:\n reportNames.append(unQuote(item))\n\n return \"FrameworkJobReport.xml\" in reportNames",
"def HasAnalysisReports(self):\n return self._store.HasAnalysisReports()",
"def checkUnusedReports(arguments, reportName):\n Reports = ''\n #reports we dont use in production\n if arguments.unusedreports:\n Reports = arguments.unusedreports\n\n if Reports!='':\n if reportName in Reports:\n return 1",
"def checkJobReport(cfgInterface):\n if hasReport(cfgInterface):\n return\n insertReport(cfgInterface)\n insertEventLogger(cfgInterface)\n return",
"def test_report_not_found(builddir):\n runner = CliRunner()\n result = runner.invoke(\n main.cli, [\"--path\", builddir, \"report\", \"test1.py\", \"raw.loc\"]\n )\n assert result.exit_code == 0, result.stdout\n assert \"Not found\" in result.stdout",
"def test_no_reports(self):\n Report.objects.all().delete()\n response = self._get()\n self._check_report(response)",
"def test_view_nonexistent_report_page(self):\n c = Client()\n response = c.get(reverse('reports_view_report',\n kwargs={'display_name': self.up.display_name,\n 'year': '2011',\n 'month': 'January'}))\n self.assertTemplateUsed(response, '404.html')",
"def sample_sheet_exists(self) -> bool:\n LOG.info(\"Check if sample sheet exists\")\n return self.sample_sheet_path.exists()",
"def test_report(self):\n # request\n request_body = {\n 'customer': self.customer.id,\n 'start_date': '2019-01-01',\n 'end_date': '2019-01-31'\n }\n self.client.post(reverse(self.view_name), request_body)\n # test database\n report = models.Report.objects.first()\n self.assertTrue(models.SubReport.objects.filter(report=report).exists())",
"def test_report_not_existing_format(builddir):\n runner = CliRunner()\n result = runner.invoke(\n main.cli, [\"--path\", builddir, \"report\", _path, \"--format\", \"non-existing\"]\n )\n assert result.exit_code == 2, result.stdout\n assert \"Not found\" not in result.stdout",
"def isRecordExistSummary(self):\n self.createConn()\n sql = \"SELECT * FROM Summary WHERE book1='{b1}' AND book2='{b2}' \".format(b1=self.book1, b2=self.book2)\n self.c.execute(sql)\n data = self.c.fetchall()\n self.conn.close()\n if len(data) > 0:\n print('Record exist already, skip.')\n return True\n return False",
"def check_expiry_report(report_data):\n errors = []\n warnings = ['\\nExpiry Report Warnings:\\n']\n for student in report_data:\n if student[1] in (None, ''):\n warnings.append('Name is missing for student with Student ID '\n '{}'.format(student[0]))\n if student[2] in (None, ''):\n warnings.append('Email is missing for student with Student '\n 'ID {}'.format(student[0]))\n if student[3] in (None, ''):\n warnings.append('Course is missing for student with Student '\n 'ID {}'.format(student[0]))\n if student[4] in (None, ''):\n errors.append('Expiry is missing for student with '\n 'Student ID {}'.format(student[0]))\n # Check if any errors have been identified, save error log if they have\n if len(errors) > 0:\n ft.process_error_log(errors, 'Expiry Report')\n # Check if any warnings have been identified, save error log if they have\n if len(warnings) > 1:\n return True, warnings\n else:\n return False, warnings",
"def test_enable_enabled_report(self):\n self.client.get(f'{self.url}/InstallReport/')\n # There are two included by default.\n self.assertEqual(Report.objects.count(), 2)",
"def exists(self, identifier):\n return False",
"def check_registry_exists(self, registry):\n check = Registry.query(Registry.value == registry).fetch()\n if len(check) > 0:\n return True\n return False",
"def isRecordExist(self):\n self.createConn()\n sql = \"SELECT * FROM Story WHERE book1='{b1}' AND book2='{b2}' AND title ='{t}'\".format(b1=self.book1, b2=self.book2, t=self.title)\n self.c.execute(sql)\n data = self.c.fetchall()\n self.conn.close()\n if len(data) > 0:\n print('Record exist already, skip.')\n return True\n return False",
"def reports_dir():\n return _mkifnotexists(\"web/reports\")"
] |
{
    "objective": {
        "paired": [],
        "self": [],
        "triplet": [
            ["query", "document", "negatives"]
        ]
    }
}
|
Gets a report instance by its ID. If the user does not have the correct permission for the report, PermissionDenied is raised.
|
def get_report_by_id(report_id, user):
    report = _registry[report_id]
    if not report.check_permission(user):
        raise PermissionDenied
    return report
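A sketch of how the error paths might be exercised; `PermissionDenied` is assumed to come from the surrounding framework (for example django.core.exceptions) and is stubbed here only for illustration:

class PermissionDenied(Exception):
    # Stand-in for the framework exception raised above.
    pass

# With get_report_by_id and the hypothetical _registry in scope:
# try:
#     report = get_report_by_id("sales-report", user)
# except KeyError:
#     ...  # report_id was never registered
# except PermissionDenied:
#     ...  # user fails report.check_permission(user)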
|
[
"def get_report_instance(report_id):\n for cls in Report.__subclasses__():\n if cls.get_report_id() == report_id:\n return cls()\n return None",
"def get(self, crash_report_id):\n report = db.getCrashReport(crash_report_id)\n if report:\n return report\n else:\n abort(404)",
"def get_report(self, report_id):\n req = self.get(f'/reports/{report_id}')\n resp = req.json()\n raw = resp.pop('raw_report')\n report = self._process_report_dict(resp)\n if raw is not None:\n raw_report = deserialize_raw_report(raw)\n processed_fxobs = self.get_raw_report_processed_data(\n report_id, raw_report, resp['values'])\n report = report.replace(raw_report=raw_report.replace(\n processed_forecasts_observations=processed_fxobs))\n return report",
"def get(self, id):\n logged_user = h.default_user() \n incident = model.get_specific_incident(id)\n if incident[7] == logged_user:\n if incident:\n incidents = h.incident_serializer(incident)\n return({\"incident\":incidents})\n return({\"message\":\"incident not found\"}) \n return({\"message\":\"Access denied\"})",
"def view(ctx, report_id):\n if report_id > 0:\n session = ctx.obj['session']\n r = session.get('http://127.0.0.1:8000/standalone/viewreport/' + ctx.obj['username'] + '/' + str(report_id) + '/')\n click.echo(r.text)\n else:\n click.echo(\"Incorrect syntax. Use: secwit secure view <report_id>\")",
"def detail(request, report_id):\n report = get_object_or_404(Report, pk=report_id)\n\n if (report.pk in request.session.get(\"report_ids\", []) and\n report.created_by.is_active and\n report.created_by_id != request.user.pk and\n not request.user.is_active):\n # if the report was created by an active user and they aren't logged in\n # as that user, force them to re-login\n return login_required(lambda request: None)(request)\n\n if (report.pk in request.session.get(\"report_ids\", []) and\n not report.created_by.is_active and\n report.created_by_id != request.user.pk and\n not request.user.is_active):\n # if the user submitted the report, allow them to masquerade as that\n # user for the life of this request\n request.user = report.created_by\n\n if not report.is_public:\n if request.user.is_anonymous:\n messages.info(request, \"If this is your report, please use the login system below to authenticate yourself.\")\n return login_required(lambda request: None)(request)\n elif not can_view_private_report(request.user, report):\n raise PermissionDenied()\n\n # there are a bunch of forms that can be filled out on this page, by\n # default, they can't be filled out\n comment_form = None\n image_formset = None\n invite_form = None\n management_form = None\n # this tells us which form was filled out since there are many on the page\n submit_flag = request.POST.get(\"submit_flag\")\n\n # process the comment form only if they are allowed to leave comments\n if can_create_comment(request.user, report):\n ImageFormSet = get_image_formset(user=request.user)\n PartialCommentForm = functools.partial(CommentForm, user=request.user, report=report)\n\n if request.POST and submit_flag == CommentForm.SUBMIT_FLAG:\n image_formset = ImageFormSet(request.POST, request.FILES, queryset=Image.objects.none())\n comment_form = PartialCommentForm(request.POST, request.FILES)\n if comment_form.is_valid() and image_formset.is_valid():\n comment = comment_form.save()\n image_formset.save(user=comment.created_by, fk=comment)\n messages.success(request, \"Comment Added!\")\n if can_claim_report(request.user, report):\n if report.claimed_by is None:\n report.claimed_by = request.user\n report.save()\n messages.success(request, \"Report claimed!\")\n return safe_redirect(request, request.get_full_path())\n\n else:\n comment_form = PartialCommentForm()\n image_formset = ImageFormSet(queryset=Image.objects.none())\n\n # handle all the management forms\n if can_manage_report(request.user, report):\n # Confirming the report form...\n if request.POST and submit_flag == ManagementForm.SUBMIT_FLAG:\n management_form = ManagementForm(request.POST, instance=report)\n if management_form.is_valid():\n management_form.save()\n messages.success(request, \"Updated!\")\n return safe_redirect(request, request.get_full_path())\n else:\n management_form = ManagementForm(instance=report)\n\n # Inviting experts...\n if request.POST and submit_flag == InviteForm.SUBMIT_FLAG:\n invite_form = InviteForm(request.POST)\n if invite_form.is_valid():\n invite_report = invite_form.save(request.user, report)\n message = \"%d invited\" % (len(invite_report.invited))\n if invite_report.already_invited:\n message += \" (%d already invited)\" % len(invite_report.already_invited)\n messages.success(request, message)\n return safe_redirect(request, request.get_full_path())\n else:\n invite_form = InviteForm()\n\n # filter down the comments based on the user's permissions\n comments = Comment.objects.filter(report=report)\n images = 
Image.objects.filter(Q(report=report) | Q(comment__report=report))\n if request.user.is_anonymous:\n comments = comments.filter(visibility=Comment.PUBLIC)\n images = images.filter(visibility=Image.PUBLIC)\n elif request.user.is_active or Invite.objects.filter(user=request.user, report=report).exists():\n # no need to filter for these folks\n pass\n else:\n # the logged in user is the person who reported\n comments = comments.filter(Q(visibility=Comment.PUBLIC) | Q(visibility=Comment.PROTECTED))\n images = images.filter(Q(visibility=Image.PUBLIC) | Q(visibility=Image.PROTECTED))\n\n invites = list(i.user.email for i in Invite.objects.filter(report=report).select_related(\"user\"))\n\n return render(request, \"reports/detail.html\", {\n \"report\": report,\n \"comments\": comments,\n \"images\": list(images),\n \"category_id_to_species_id\": category_id_to_species_id_json(),\n \"invites\": invites,\n # all the forms\n \"image_formset\": image_formset,\n \"comment_form\": comment_form,\n \"invite_form\": invite_form,\n \"management_form\": management_form,\n })",
"def report_definition(connection, report_id):\n connection._validate_application_selected()\n response = connection.session.get(url=connection.base_url + '/api/v2/reports/' + report_id)\n if not response.ok:\n response_handler(response, \"Error getting report definition. Check report ID.\")\n return response",
"def get(*, db_session, status_report_id: int) -> Optional[StatusReport]:\n return db_session.query(StatusReport).filter(StatusReport.id == status_report_id).first()",
"def _get_report_from_name(self, report_name):\n res = super(ReportXML, self)._get_report_from_name(report_name)\n\n if res:\n return res\n\n report_obj = self.env['ir.actions.report']\n qwebtypes = ['qweb-pdf', 'qweb-html','pentaho']\n conditions = [('report_type', 'in', qwebtypes), ('report_name', '=', report_name)]\n context = self.env['res.users'].context_get()\n return report_obj.with_context(context).search(conditions, limit=1)",
"def permissionById(self, id: str) -> Permission:",
"def retrieve(self, request, pk=None):\n # Check if user is admin of this org\n try:\n user = request.user\n user_org = UserOrganizationAccess.objects.filter(user=user.profile).get(is_admin=True)\n organization = user_org.organization\n if user_org :\n patient = models.Patient.objects.get(uuid=pk)\n logger.info('%s : %s %s viewed patient record with %s for Organization %s' % (PHI_ADMIN, str(user.first_name + ' ' + user.last_name),\n {'userID': request.user.profile.uuid, 'email': request.user.username},\n {'UUID': patient.uuid}, str(user_org.organization)))\n\n # Assumption: A patient can only have 1 active episode at a time\n # Get the active episodes for that patient\n episode = patient.episodes.get(is_active=True)\n physician_id = None\n if episode.primary_physician:\n physician_id = episode.primary_physician.uuid\n\n # Org has access to patient\n org_has_access = models.OrganizationPatientsMapping.objects.filter(organization=organization).filter(patient=patient)\n if org_has_access.exists():\n user_profile_ids = models.UserEpisodeAccess.objects.filter(episode_id=episode.uuid).filter(organization=organization).values_list('user_id', flat=True)\n serializer = PatientWithUsersAndPhysiciansSerializer({'id': patient.uuid, 'patient': patient, 'userIds': list(user_profile_ids), 'physicianId': physician_id})\n return Response(serializer.data)\n return Response(status=status.HTTP_401_UNAUTHORIZED, data={'success': False, 'error': errors.ACCESS_DENIED})\n except Exception as e:\n logger.error(str(e))\n return Response(status=status.HTTP_400_BAD_REQUEST, data={'success': False, 'error': errors.UNKNOWN_ERROR})",
"def get_by_id(exporter_id):\n return Exporter.get_by_id(exporter_id)",
"def find_record_with_id(self, id, **kwargs):\r\n return self.get_scoped_query(**kwargs).filter_by(id=id).first_or_404()",
"def getReport(self,reportId):\n self.__expectString(reportId)\n if len(reportId) != 15 and len(reportId) != 18:\n raise Exception('Expected 15 character or 18 character string, received {} character string'.format(len(reportId)))\n elif len(sub('[a-zA-z0-9]','',reportId)) > 0:\n raise Exception('Passed string cannot contain any special characters (i.e. \"!\",\"@\",\"#\")')\n with requests.session() as s:\n response = s.get(\"https://{}/{}?export\".format(self.Org.sf_instance,reportId), headers=self.Org.headers, cookies={'sid': self.Org.session_id})\n \n def parseReponse(responseObject):\n # Separate trailing report data from regular data\n # then split remaining data by '\\n'\n bigList = responseObject.text.split('\\n\\n\\n')[0].split('\\n')\n\n # Pull headers from first split group\n headers = bigList[0].split(',')\n\n #Crop off extra \"\"\n for i in range(0,len(headers)):\n headers[i] = headers[i][1:-1]\n\n # Initialize dictionary\n bigDict = {}\n for i in headers:\n bigDict[i] = []\n\n indexKeyMatcher = {}\n for i in range(0,len(headers)):\n indexKeyMatcher[i] = headers[i]\n\n # Separate header data from bigList\n bigList = bigList[1:]\n\n # Comma separate each sub-list\n # and add to dictionary\n for i in range(0,len(bigList)):\n data = bigList[i].split('\",')\n #Crop off extra \"\"\n for subIndex in range(0,len(data)):\n if subIndex == len(data)-1:\n data[subIndex] = data[subIndex][1:-1]\n else:\n data[subIndex] = data[subIndex][1:]\n for col in range(0,len(data)):\n bigDict[indexKeyMatcher[col]].append(data[col])\n # bigDict[i] = data\n return bigDict\n \n return pd.DataFrame(parseReponse(response))",
"async def get_permission_by_id(self,id):\r\n async with self._db.acquire() as conn:\r\n result= await conn.execute(Permission.select().where((Permission.c.permission_id == id)))\r\n permission= await result.fetchone()\r\n if permission is not None:\r\n return permission\r\n else:\r\n return None",
"async def read_report(self, uid, ifrom=None):\n _iform = ifrom or self.id\n report = await self.memory.store.get(\n key=self.memory.key(_iform, 'workflows', 'instances', uid)\n )\n if not report:\n raise KeyError(\"Can't find workflow id context %s in memory\", uid)\n return pickle.loads(report)",
"def retrieveByID (self, id):\n return self.getOne (\"where departmentID = %d\" % id)",
"def get_protection_job_by_id(self, id):\n try:\n self.logger.info('get_protection_job_by_id called.')\n\n # Validate required parameters\n self.logger.info(\n 'Validating required parameters for get_protection_job_by_id.')\n self.validate_parameters(id=id)\n\n # Prepare query URL\n self.logger.info(\n 'Preparing query URL for get_protection_job_by_id.')\n _url_path = '/public/protectionJobs/{id}'\n _url_path = APIHelper.append_url_with_template_parameters(\n _url_path, {'id': id})\n _query_builder = self.config.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n self.logger.info('Preparing headers for get_protection_job_by_id.')\n _headers = {'accept': 'application/json'}\n\n # Prepare and execute request\n self.logger.info(\n 'Preparing and executing request for get_protection_job_by_id.'\n )\n _request = self.http_client.get(_query_url, headers=_headers)\n AuthManager.apply(_request, self.config)\n _context = self.execute_request(_request,\n name='get_protection_job_by_id')\n\n # Endpoint and global error handling using HTTP status codes.\n self.logger.info(\n 'Validating response for get_protection_job_by_id.')\n if _context.response.status_code == 0:\n raise RequestErrorErrorException('Error', _context)\n self.validate_response(_context)\n\n # Return appropriate type\n return APIHelper.json_deserialize(_context.response.raw_body,\n ProtectionJob.from_dictionary)\n\n except Exception as e:\n self.logger.error(e, exc_info=True)\n raise",
"def authorized_for_reports(self):\n if self.userobject is None:\n return False\n return self.userobject.may_run_reports or self.userobject.superuser"
] |
{
    "objective": {
        "paired": [],
        "self": [],
        "triplet": [
            ["query", "document", "negatives"]
        ]
    }
}
|
A fast implementation of the forward pass for a convolutional layer based on im2col and col2im.
|
def conv_forward_im2col(x, w, b, conv_param):
    N, C, H, W = x.shape
    num_filters, _, filter_height, filter_width = w.shape
    stride, pad = conv_param['stride'], conv_param['pad']

    # Check dimensions
    assert (W + 2 * pad - filter_width) % stride == 0, 'width does not work'
    assert (H + 2 * pad - filter_height) % stride == 0, 'height does not work'

    # Create output
    out_height = (H + 2 * pad - filter_height) // stride + 1
    out_width = (W + 2 * pad - filter_width) // stride + 1
    out = np.zeros((N, num_filters, out_height, out_width), dtype=x.dtype)

    # x_cols = im2col_indices(x, w.shape[2], w.shape[3], pad, stride)
    x_cols = im2col_cython(x, w.shape[2], w.shape[3], pad, stride)
    res = w.reshape((w.shape[0], -1)).dot(x_cols) + b.reshape(-1, 1)

    out = res.reshape(w.shape[0], out.shape[2], out.shape[3], x.shape[0])
    out = out.transpose(3, 0, 1, 2)

    cache = (x, w, b, conv_param, x_cols)
    return out, cache
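The compiled `im2col_cython` helper is not shown in this row. Purely to illustrate the column layout and the output-shape arithmetic the function relies on, here is a naive NumPy stand-in (an assumption, not the original helper; its column ordering may differ):

import numpy as np

def im2col_naive(x, filter_height, filter_width, pad, stride):
    # Rearrange (N, C, H, W) patches into columns of shape
    # (C * filter_height * filter_width, N * out_h * out_w).
    N, C, H, W = x.shape
    out_h = (H + 2 * pad - filter_height) // stride + 1
    out_w = (W + 2 * pad - filter_width) // stride + 1
    x_pad = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode="constant")
    cols = np.zeros((C * filter_height * filter_width, N * out_h * out_w), dtype=x.dtype)
    col = 0
    for n in range(N):
        for i in range(out_h):
            for j in range(out_w):
                patch = x_pad[n, :, i * stride:i * stride + filter_height,
                              j * stride:j * stride + filter_width]
                cols[:, col] = patch.reshape(-1)
                col += 1
    return cols

# Shape check: 2 images, 3 channels, 8x8 inputs, four 3x3 filters, stride 1, pad 1.
x = np.random.randn(2, 3, 8, 8)
w = np.random.randn(4, 3, 3, 3)
b = np.zeros(4)
x_cols = im2col_naive(x, 3, 3, pad=1, stride=1)
print(x_cols.shape)  # (27, 128) = (C*HH*WW, N*out_h*out_w)
res = w.reshape((w.shape[0], -1)).dot(x_cols) + b.reshape(-1, 1)
print(res.shape)     # (4, 128): one row per filter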
|
[
"def forward_convolution(conv_W, conv_b, data):\n\n conv_channels, _, conv_width, conv_height = conv_W.shape\n\n input_channels, input_width, input_height = data.shape\n\n output = np.zeros((conv_channels, input_width - conv_width + 1, input_height - conv_height + 1))\n\n for x in range(input_width - conv_width + 1):\n for y in range(input_height - conv_height + 1):\n for output_channel in range(conv_channels):\n output[output_channel, x, y] = np.sum(\n np.multiply(data[:, x:(x + conv_width), y:(y + conv_height)], conv_W[output_channel, :, :, :])) + conv_b[output_channel]\n\n\n return output",
"def conv2d_forward(X, F, P, S):\n N, In_H, In_W, In_C = X.shape\n Out_C, In_C, Fil_H, Fil_W = F.shape\n\n Out_H = compute_out_dimension(In_H, Fil_H, P, S)\n Out_W = compute_out_dimension(In_W, Fil_W, P, S)\n\n padded_input = np.pad(X, pad_width=((0, 0), (P, P), (P, P), (0, 0)),\n mode='constant', constant_values=0)\n\n out = np.zeros((N, Out_H, Out_W, Out_C))\n\n for b in range(N):\n for f in range(Out_C):\n for i in range(Out_H):\n for j in range(Out_W):\n for c in range(In_C):\n for f_i in range(Fil_H):\n for f_j in range(Fil_W):\n out[b, i, j, f] += padded_input[b, i + f_i, j + f_j, c] * F[f, c, f_i, f_j]\n\n return out",
"def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None):",
"def conv_backward_im2col(dout, cache):\n x, w, b, conv_param, x_cols = cache\n stride, pad = conv_param['stride'], conv_param['pad']\n\n db = np.sum(dout, axis=(0, 2, 3))\n\n num_filters, _, filter_height, filter_width = w.shape\n dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(num_filters, -1)\n dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)\n\n dx_cols = w.reshape(num_filters, -1).T.dot(dout_reshaped)\n # dx = col2im_indices(dx_cols, x.shape, filter_height, filter_width, pad, stride)\n dx = col2im_cython(dx_cols, x.shape[0], x.shape[1], x.shape[2], x.shape[3],\n filter_height, filter_width, pad, stride)\n\n return dx, dw, db",
"def convolve(self, img):",
"def Convolution(image, convFilter):\r\n startDistance = math.sqrt(len(convFilter))//2\r\n # convFilter = convFilter.reverse()\r\n if math.sqrt(len(convFilter))%2 != 1:\r\n print(\"Not Valide filter size\")\r\n return\r\n length = int(math.sqrt(len(convFilter)))\r\n arr = np.asarray(image.shape)\r\n out = np.zeros(arr, dtype=np.uint8)\r\n for k in range(arr[2]): # Channels\r\n for i in range(arr[0]): # Columns/x\r\n for j in range(arr[1]): # Rows/y\r\n sx = i-startDistance\r\n sy = j-startDistance\r\n temp = 0\r\n for y in range(length):\r\n for x in range(length):\r\n # if i < 0 or j < 0 or i\r\n deltaX = sx + x\r\n deltaY = sy + y\r\n if deltaX < 0 or deltaY < 0 or deltaX >= arr[0] or deltaY >= arr[1]:\r\n pixel = image[i, j, k]\r\n filter = convFilter[length * y + x]\r\n value = pixel * filter\r\n else:\r\n pixel = image[int(deltaX), int(deltaY), k]\r\n filter = convFilter[length * y + x]\r\n value = pixel * filter\r\n # print(\"pixel: \", pixel, \" fileter: \", filter)\r\n temp = temp + value / len(convFilter)\r\n # print(\"+\", value)\r\n # print(\"===\", temp)\r\n out[i, j, k] = temp\r\n return out",
"def convolution(image, kernel):\n kh = kernel.shape[0] #kernel height\n kw = kernel.shape[1] #kernel width\n khm = math.floor(kh/2) #half of kernel height\n kwm = math.floor(kw/2) #half of kernel width\n ih = image.shape[0] #image height\n iw = image.shape[1] #image width\n #make an image frameless\n im_temp = np.zeros((ih+kh, iw+kw))\n im_temp[khm:ih+khm, kwm:iw+kwm] = image\n im_temp[0:khm, kwm:iw+kwm] = image[0:khm, :]\n im_temp[ih+khm:ih+2*khm, kwm:iw+kwm] = image[ih-khm:ih, :]\n im_temp[khm:ih+khm:, 0:kwm] = image[:, 0:kwm]\n im_temp[khm:ih+khm, iw+kwm:iw+2*kwm] = image[:, iw-kwm:iw]\n #create a new image to store the convoluted image\n convoluted = np.zeros((ih, iw))\n #convolute an image with a flipped kernel\n for i in range(ih):\n for j in range(iw):\n weights = 0\n for k in range(kh):\n for l in range(kw):\n kk = kh - 1 - k\n ll = kw - 1 - l\n weights = weights + im_temp[i+k, j+l] * kernel[kk,ll] \n convoluted[i,j] = weights\n return convoluted",
"def convolution(prev_layer, n_filters, hype_space, force_ksize=None):\n if force_ksize is not None:\n k = force_ksize\n else:\n k = int(round(hype_space['conv_kernel_size']))\n return tensorflow.keras.layers.Conv2D(\n filters=n_filters, kernel_size=(k, k), strides=(1, 1),\n padding='same', activation=hype_space['activation'],\n kernel_regularizer=tensorflow.keras.regularizers.l2(\n STARTING_L2_REG * hype_space['l2_weight_reg_mult'])\n )(prev_layer)",
"def forward(self, inp):\n # pdb.set_trace()\n batch_size = inp.size(0)\n # running #num_units conv2d layers on input; unit_list is a list of size 8, each containing [64, 32x6x6] sized tensor. \n unit_list = [conv2d(inp).view((batch_size, -1, 1)) for conv2d in self.conv2d_list]\n # convert unit_list to torch array of size: [64, 32x6x6, 8] (batch_size, out_channels x patch_height x patch_width, num_units)\n s = torch.cat(unit_list, dim=-1)\n # squash each 32x6x6 capsule unit on the last dimension (num_units:8) \n v = self.squash(s, dim=-1)\n # v is of shape [64, 1152, 8]\n return v",
"def convolve(im, kernel):\n if (len(im.shape)==2):\n im = np.expand_dims(im, 2)\n H, W, B = im.shape\n imc = np.zeros((H, W, B))\n for band in range(B):\n imc[:, :, band] = sps.correlate2d(im[:, :, band], kernel, mode='same')\n return imc",
"def forward(self, t):\r\n\r\n t = F.relu(self.conv1(t))\r\n t = self.pool(t)\r\n t = F.relu(self.conv2(t))\r\n #t = self.pool(t)\r\n t = F.relu(self.conv3(t))\r\n #t = F.relu(self.conv4(t))\r\n t = t.flatten(start_dim = 1)\r\n t = F.relu(self.fc(t))\r\n t = self.out(t)\r\n return t",
"def conv_backward_naive(dout, cache):\n #############################################################################\n # TODO: Implement the convolutional backward pass. #\n #############################################################################\n x, w, b, conv_param = cache\n\n S, pad = conv_param['stride'], conv_param['pad']\n\n N, C, H, W = x.shape\n N, F, H_, W_ = dout.shape\n F, C, HH, WW = w.shape\n\n # Padding\n H += 2*pad\n W += 2*pad\n\n dx, dw, db = np.zeros((N, C, H, W)), np.zeros((F, C, HH, WW)), np.zeros(F)\n #\n # Loop over pairs of (image, activation) gradient pairs\n #\n for k, (img, da) in enumerate(zip(x, dout)):\n #\n # Compute gradients for this pair\n #\n x_padded = np.pad(img, ([0], [1], [1]), mode='constant', constant_values=0)\n for i in range(H_):\n for j in range(W_):\n da_ = da[:, i:i+1, j:j+1] # activations by all the filters for this little segment\n idx, jdx = S*i, S*j # retrive coordinates back in the image\n x_ = x_padded[:, idx:idx+HH, jdx:jdx+WW] # slice of original image\n\n db += da_.flatten()\n full_da = np.ones((F, C, HH, WW)) * da_.reshape(F, 1, 1, 1) # broadcast to achieve dim of scores\n dx[k, :, idx:idx+HH, jdx:jdx+WW] += np.sum(w*full_da, axis=0)\n dw += x_ * full_da # x_padded broadcasted to multiply all filters\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return dx[:, :, pad:H-pad, pad:W-pad], dw, db # remove padding",
"def forward(self, data):\n \n digits = []\n rep_features = []\n #iterate through the input data( in our case we have 2 channel data)\n for i in range(2):\n x = data[:,i].view(data[:,0].shape[0],1,14,14)\n # convolution 1, pooling, relu\n \n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n \n # convolution 2, droupout, pooling, relu\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n # Resize\n x = x.view(-1, 20*2*2)\n \n # store the representative features of each image before further processing \n rep_features.append(self.fc1(x))\n \n # Linear function1, relu\n x = F.relu(self.fc1(x))\n \n # Linear function 2\n x = self.fc2(x)\n \n # append the [0-1] scale of the last linear layer \n digits.append(F.log_softmax(x))\n \n \n # concatinate the features \n y = torch.cat((rep_features[0], rep_features[1]), dim=1)\n \n # Linear function3, relu\n y = F.relu(self.fc3(y))\n # Linear function4, relu\n y = F.relu(self.fc4(y))\n # Linear function5, relu\n y = F.relu(self.fc5(y))\n # Linear function6\n y = self.fc6(y)\n # rescale the into [0-1] interval\n targets = F.log_softmax(y)\n return digits, targets",
"def conv_forward_numpy_1D(x, w, b, conv_param):\n pad = conv_param.get('pad')\n stride = conv_param.get('stride')\n if stride != 1:\n raise ValueError(\"numpy requires stride = 1, but given: \", stride)\n N, C, W = x.shape\n F, C, WW = w.shape\n out_W = W + 2 * pad - WW + 1\n out = np.zeros([N, F, out_W])\n padded_x = (np.pad(x, ((0, 0), (0, 0), (pad, pad)), 'constant'))\n for nn in range(N): # For each time-series in the input batch.\n for ff in range(F): # For each filter in w\n for cc in range(C):\n out[nn, ff] += np.correlate(padded_x[nn, cc], w[ff, cc],\n mode=\"valid\")\n # we have a single bias per filter\n # at the end - sum all the values in the obtained tensor\n out[nn, ff] += b[ff]\n\n cache = (x, w, b, conv_param)\n return out, cache",
"def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n # START YOUR CODE HERE ### (You can change anything inside this block)\n\n conv_result = im\n\n if verbose:\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(20, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 5, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 5, 2)\n # Visualize FFT\n plt.subplot(1, 5, 3)\n # Visualize FFT kernel\n plt.subplot(1, 5, 4)\n # Visualize filtered FFT image\n plt.subplot(1, 5, 5)\n # Visualize filtered spatial image\n plt.imshow(conv_result, cmap=\"gray\")\n\n ### END YOUR CODE HERE ###\n return conv_result",
"def convolve_im(im: np.array, kernel: np.array, verbose=True):\n H, W = im.shape[0], im.shape[1]\n K = kernel.shape[0]\n k_padded = np.pad(kernel, ((0, H - K), (0, W - K)), mode=\"constant\")\n if k_padded.shape != im.shape:\n raise Exception(\"Padded kernel does not match image dimensions {} != {}\", k_padded.shape, im.shape)\n fk = np.fft.fft2(k_padded)\n\n f = np.fft.fft2(im)\n fapplied = np.multiply(f, fk)\n conv_result = np.real(np.fft.ifft2(fapplied))\n\n if verbose:\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(20, 4))\n plt.subplot(1, 5, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.title(\"Original\")\n plt.subplot(1, 5, 2)\n plt.imshow(np.abs(np.fft.fftshift(fk)), cmap=\"gray\")\n plt.title(\"Filter\")\n plt.subplot(1, 5, 3)\n plt.imshow(20*np.log(np.abs(np.fft.fftshift(f)) + 0.01), cmap=\"gray\")\n plt.title(\"FT (log)\")\n plt.subplot(1, 5, 4)\n plt.imshow(20*np.log(np.abs(np.fft.fftshift(fapplied)) + 0.01), cmap=\"gray\")\n plt.title(\"FT filtered (log)\")\n plt.subplot(1, 5, 5)\n plt.imshow(conv_result, cmap=\"gray\")\n plt.title(\"Result\")\n ### END YOUR CODE HERE ###\n return conv_result",
"def forward(self, x):\n\n ############################################################################\n # TODO: Chain our previously initialized convolutional neural network #\n # layers to resemble the architecture drafted in the class docstring. #\n # Have a look at the Variable.view function to make the transition from #\n # convolutional to fully connected layers. #\n ############################################################################\n\n x = self.conv(x)\n x = F.relu(F.max_pool2d(x, kernel_size=self.pool))\n (_, C, H, W) = x.data.size()\n x = x.view(-1, C * H * W)\n x = F.relu(F.dropout(self.fc1(x), p=self.dropout))\n x = self.fc2(x)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return x",
"def _conv_layers(self):\n layers, activations = [], []\n\n # The first out_channels should be the second to last filter size\n tmp = self.filters.pop()\n\n # self.output_shape[0] Needs to be the last out_channels to match the input matrix\n for i, (filter_, kernel, stride) in enumerate(\n zip(\n (*self.filters, self.output_shape[0]),\n self.kernels,\n self.strides,\n )\n ):\n shape = self.encoder_shapes[-1 * i - 1]\n\n # TODO: this is a quick fix but might not generalize to some architectures\n if stride == 1:\n padding = same_padding(shape[1:], kernel, stride)\n else:\n padding = tuple(\n int(dim % 2 == 0) for dim in self.encoder_shapes[-1 * i - 2][1:]\n )\n\n layers.append(\n nn.ConvTranspose2d(\n in_channels=shape[0],\n out_channels=filter_,\n kernel_size=kernel,\n stride=stride,\n padding=padding,\n )\n )\n\n # TODO: revist padding, output_padding, see github issue.\n # This code may not generalize to other examples. Needs testing.\n # this also needs to be addressed in conv_output_dim\n\n activations.append(get_activation(self.activation))\n\n # Overwrite output activation\n activations[-1] = get_activation(self.output_activation)\n\n # Restore invariant state\n self.filters.append(tmp)\n\n return nn.ModuleList(layers), activations",
"def _conv7x7(\n in_channel: int,\n out_channel: int,\n stride: int = 1,\n) -> nn.Conv2d:\n return nn.Conv2d(\n in_channel,\n out_channel,\n kernel_size=7,\n stride=stride,\n padding=3,\n pad_mode='pad',\n )"
] |
{
    "objective": {
        "paired": [],
        "self": [],
        "triplet": [
            ["query", "document", "negatives"]
        ]
    }
}
|
A fast implementation of the backward pass for a convolutional layer based on im2col and col2im.
|
def conv_backward_im2col(dout, cache):
    x, w, b, conv_param, x_cols = cache
    stride, pad = conv_param['stride'], conv_param['pad']

    db = np.sum(dout, axis=(0, 2, 3))

    num_filters, _, filter_height, filter_width = w.shape
    dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(num_filters, -1)
    dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)

    dx_cols = w.reshape(num_filters, -1).T.dot(dout_reshaped)
    # dx = col2im_indices(dx_cols, x.shape, filter_height, filter_width, pad, stride)
    dx = col2im_cython(dx_cols, x.shape[0], x.shape[1], x.shape[2], x.shape[3],
                       filter_height, filter_width, pad, stride)

    return dx, dw, db
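For reference, col2im is the adjoint of im2col: each column is scattered back into its patch location and overlapping contributions are summed, which is what accumulating `dx` from `dx_cols` requires. A naive NumPy sketch matching the layout of the im2col sketch above (again an assumption; the compiled `col2im_cython` helper may order its arguments and columns differently):

import numpy as np

def col2im_naive(cols, N, C, H, W, filter_height, filter_width, pad, stride):
    # Scatter (C*filter_height*filter_width, N*out_h*out_w) columns back into
    # an (N, C, H, W) array, summing wherever patches overlap.
    out_h = (H + 2 * pad - filter_height) // stride + 1
    out_w = (W + 2 * pad - filter_width) // stride + 1
    x_pad = np.zeros((N, C, H + 2 * pad, W + 2 * pad), dtype=cols.dtype)
    col = 0
    for n in range(N):
        for i in range(out_h):
            for j in range(out_w):
                patch = cols[:, col].reshape(C, filter_height, filter_width)
                x_pad[n, :, i * stride:i * stride + filter_height,
                      j * stride:j * stride + filter_width] += patch
                col += 1
    # Strip the zero padding to recover the original spatial size.
    return x_pad[:, :, pad:pad + H, pad:pad + W]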
|
[
"def conv_backward_naive(dout, cache):\n #############################################################################\n # TODO: Implement the convolutional backward pass. #\n #############################################################################\n x, w, b, conv_param = cache\n\n S, pad = conv_param['stride'], conv_param['pad']\n\n N, C, H, W = x.shape\n N, F, H_, W_ = dout.shape\n F, C, HH, WW = w.shape\n\n # Padding\n H += 2*pad\n W += 2*pad\n\n dx, dw, db = np.zeros((N, C, H, W)), np.zeros((F, C, HH, WW)), np.zeros(F)\n #\n # Loop over pairs of (image, activation) gradient pairs\n #\n for k, (img, da) in enumerate(zip(x, dout)):\n #\n # Compute gradients for this pair\n #\n x_padded = np.pad(img, ([0], [1], [1]), mode='constant', constant_values=0)\n for i in range(H_):\n for j in range(W_):\n da_ = da[:, i:i+1, j:j+1] # activations by all the filters for this little segment\n idx, jdx = S*i, S*j # retrive coordinates back in the image\n x_ = x_padded[:, idx:idx+HH, jdx:jdx+WW] # slice of original image\n\n db += da_.flatten()\n full_da = np.ones((F, C, HH, WW)) * da_.reshape(F, 1, 1, 1) # broadcast to achieve dim of scores\n dx[k, :, idx:idx+HH, jdx:jdx+WW] += np.sum(w*full_da, axis=0)\n dw += x_ * full_da # x_padded broadcasted to multiply all filters\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return dx[:, :, pad:H-pad, pad:W-pad], dw, db # remove padding",
"def conv_forward_im2col(x, w, b, conv_param):\n N, C, H, W = x.shape\n num_filters, _, filter_height, filter_width = w.shape\n stride, pad = conv_param['stride'], conv_param['pad']\n\n # Check dimensions\n assert (W + 2 * pad - filter_width) % stride == 0, 'width does not work'\n assert (H + 2 * pad - filter_height) % stride == 0, 'height does not work'\n\n # Create output\n out_height = (H + 2 * pad - filter_height) // stride + 1\n out_width = (W + 2 * pad - filter_width) // stride + 1\n out = np.zeros((N, num_filters, out_height, out_width), dtype=x.dtype)\n\n # x_cols = im2col_indices(x, w.shape[2], w.shape[3], pad, stride)\n x_cols = im2col_cython(x, w.shape[2], w.shape[3], pad, stride)\n res = w.reshape((w.shape[0], -1)).dot(x_cols) + b.reshape(-1, 1)\n\n out = res.reshape(w.shape[0], out.shape[2], out.shape[3], x.shape[0])\n out = out.transpose(3, 0, 1, 2)\n\n cache = (x, w, b, conv_param, x_cols)\n return out, cache",
"def conv_backward_numpy_1D(dout, cache):\n dx, dw, db = None, None, None\n x, w, b, conv_param = cache\n stride = conv_param.get('stride')\n if stride != 1:\n raise ValueError(\"numpy requires stride = 1, but given: \", stride)\n pad = conv_param.get('pad')\n\n N, C, W = x.shape\n F, C, WW = w.shape\n N, F, W_out = dout.shape\n\n padded_x = np.pad(x, ((0, 0), (0, 0), (pad, pad)), mode='constant')\n\n # W = padded_out_W - WW + 1; padded_out_W = W + WW - 1; pad_out = W + WW - 1 // 2\n pad_out = (W + WW - 1 - W_out) // 2\n # print(\"pad_out: \", pad_out)\n if pad_out < 0:\n padded_dout = dout[:, :, abs(pad_out):pad_out]\n else:\n padded_dout = np.pad(dout, ((0, 0), (0, 0), (pad_out, pad_out)),\n mode='constant')\n\n # Initialise gradient output tensors.\n dx = np.zeros_like(x) # the x used for convolution was with padding\n dw = np.zeros_like(w)\n db = np.zeros_like(b)\n\n # Calculate dB.\n # Just like in the affine layer we sum up all the incoming gradients for each filters bias.\n for ff in range(F):\n db[ff] += np.sum(dout[:, ff, :])\n\n # print(\"padded x: \", padded_x)\n # print(\"dout: \", dout)\n # Calculate dw.\n # By chain rule dw is dout*x\n for nn in range(N):\n for ff in range(F):\n for cc in range(C):\n # accumulate gradient for a filter from each channel\n dw[ff, cc] += np.correlate(padded_x[nn, cc], dout[nn, ff],\n mode=\"valid\")\n # print(\"dw numpy: \", dw[ff, cc])\n\n # Calculate dx.\n # By chain rule dx is dout*w. We need to make dx same shape as padded x for the gradient calculation.\n for nn in range(N):\n for ff in range(F):\n for cc in range(C):\n # print(\"dout[nn, ff]: \", dout[nn, ff])\n # print(\"dout[nn, ff] shape: \", dout[nn, ff].shape)\n # print(\"w[ff, cc]: \", w[ff, cc])\n # print(\"w[ff, cc] shape: \", w[ff, cc].shape)\n dx[nn, cc] += np.correlate(padded_dout[nn, ff],\n np.flip(w[ff, cc], axis=0),\n mode=\"valid\")\n # print(\"dx fft: \", dx[nn, cc])\n return dx, dw, db",
"def color_deconvolution(img):\n\n\t#Note: I am simply copying the naming conventions used in the matlab script\n\t\n\timg = img.copy()\n\n\t#STAIN VECTORS FOR H&E DECONVOLUTION (can add support for more later)\n\tMODx = [0.644211, 0.092789, 0]\n\tMODy = [0.716556, 0.954111, 0]\n\tMODz = [0.266844, 0.283111, 0]\n\n\t#Normalize columns to length 1 in 3D space\n\tleng = [0, 0, 0]\n\tcosx = [0, 0, 0]\n\tcosy = [0, 0, 0]\n\tcosz = [0, 0, 0]\n\tfor i in range(3):\n\t\tleng[i] = sqrt(MODx[i]*MODx[i] + MODy[i]*MODy[i] + MODz[i]*MODz[i])\n\t\tif not (leng[i] == 0):\n\t\t\tcosx[i] = MODx[i]/leng[i]\n\t\t\tcosy[i] = MODy[i]/leng[i]\n\t\t\tcosz[i] = MODz[i]/leng[i]\n\n\t#translation matrix\n\tif cosx[1] == 0:\n\t\tif cosy[1] == 0:\n\t\t\tif cosz[1] == 0: #2nd color is unspecified\n\t\t\t\tcosx[1] = cosz[0]\n\t\t\t\tcosy[1] = cosx[0]\n\t\t\t\tcosz[1] = cosy[0]\n\n\tif cosx[2] == 0:\n\t\tif cosy[2] == 0:\n\t\t\tif cosz[2] == 0: #3rd color is unspecified\n\t\t\t\t#3rd column will be cross product of first 2\n\t\t\t\t#fiji implementation allows for computation of 3rd color via Ruifroks method\n\t\t\t\t# but this is unnecessary for extracting just H&E \n\t\t\t\tcosx[2] = cosy[0] * cosz[1] - cosz[0] * cosy[1];\n\t\t\t\tcosy[2] = cosz[0] * cosx[1] - cosx[0] * cosz[1];\n\t\t\t\tcosz[2] = cosx[0] * cosy[1] - cosy[0] * cosx[1];\n\n\t#renormalize 3rd column\n\tleng = sqrt(cosx[2]*cosx[2] + cosy[2]*cosy[2] + cosz[2]*cosz[2])\n\tif leng != 0 and leng != 1:\n\t\tcosx[2] = cosx[2]/leng\n\t\tcosy[2] = cosy[2]/leng\n\t\tcosz[2] = cosz[2]/leng\n\n\tCOS3x3Mat = np.matrix([\n\t\t\t\t[cosx[0], cosy[0], cosz[0]], \n\t\t\t\t[cosx[1], cosy[1], cosz[1]],\n\t\t\t\t[cosx[2], cosy[2], cosz[2]]\n\t\t\t\t])\n\n\t#Note: I am skipping lines 390-459 of the matlab code, since\n\t# the determinant of the COS3x3Mat matrix is > 0 (~0.5). 
I think that\n\t# bit of code is trying to make the matrix invertible, but it already is\n\t# for H&E stain matrix \n\t#print(np.linalg.det(COS3x3Mat))\n\n\t#Invert the matrix\n\t# Note that this is done manually in the matlab code.\n\tQ3x3Mat = np.linalg.inv(COS3x3Mat)\n\tQ3x3MatInverted = COS3x3Mat #Just following the matlab code...\n\n\t#Compute transmittance \n\trowR = img.shape[0]\n\tcolR = img.shape[1]\n\n\t#These are the 1 channel transmittances of each dye \n\tDye1_transmittance = np.zeros([rowR, colR])\n\tDye2_transmittance = np.zeros([rowR, colR])\n\tDye3_transmittance = np.zeros([rowR, colR])\n\n\tfor r in range(rowR):\n\t\tfor c in range(colR):\n\t\t\tRGB1 = img[r, c]\n\t\t\tRGB1[RGB1==0] = 1 #Avoid log0\n\t\t\tACC = -np.log(RGB1 / 255)\n\t\t\ttransmittances = 255 * np.exp(-ACC*Q3x3Mat)\n\t\t\ttransmittances = transmittances[0,:]\n\t\t\ttransmittances[transmittances>255] = 255\n\n\t\t\tDye1_transmittance[r,c] = transmittances[0,0]\n\t\t\tDye2_transmittance[r,c] = transmittances[0,1]\n\t\t\tDye3_transmittance[r,c] = transmittances[0,2]\n\n\t#Construct lookup tables to convert 1 channel dye images to \n\t# \t3 channel RGB representations \n\trLUT = np.zeros([256,3])\n\tgLUT = np.zeros([256,3])\n\tbLUT = np.zeros([256,3])\n\n\tfor i in range(3):\n\t\tfor j in range(256):\n\t\t\tif cosx[i] < 0:\n\t\t\t\trLUT[255-j, i] = 255 + (j * cosx[i])\n\t\t\telse:\n\t\t\t\trLUT[255-j, i] = 255 - (j * cosx[i])\n\n\t\t\tif cosy[i] < 0:\n\t\t\t\tgLUT[255-j, i] = 255 + (j * cosy[i])\n\t\t\telse:\n\t\t\t\tgLUT[255-j, i] = 255 - (j * cosy[i])\n\n\t\t\tif cosz[i] < 0:\n\t\t\t\tbLUT[255-j, i] = 255 + (j * cosz[i])\n\t\t\telse:\n\t\t\t\tbLUT[255-j, i] = 255 - (j * cosz[i])\n\n\t#Apply the lookup table to first dye (Hematoxilin)\n\tDye1_color_im = np.zeros(img.shape)\n\tfor r in range(rowR):\n\t\tfor c in range(colR):\n\t\t\t#print(floor(Dye1_transmittance[r,c]))\n\t\t\tDye1_color_im[r,c,0] = rLUT[floor(Dye1_transmittance[r,c]),0]\n\t\t\tDye1_color_im[r,c,1] = gLUT[floor(Dye1_transmittance[r,c]),0]\n\t\t\tDye1_color_im[r,c,2] = bLUT[floor(Dye1_transmittance[r,c]),0]\n\n\tDye1_color_im = Dye1_color_im.astype(np.uint8)\n\n\treturn Dye1_transmittance, Dye1_color_im",
"def max_pool_forward_im2col(x, pool_param):\n\tN, C, H, W = x.shape\n\tpool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']\n\tstride = pool_param['stride']\n\n\tassert (H - pool_height) % stride == 0, 'Invalid height'\n\tassert (W - pool_width) % stride == 0, 'Invalid width'\n\n\tout_height = (H - pool_height) / stride + 1\n\tout_width = (W - pool_width) / stride + 1\n\n\tx_split = x.reshape(N * C, 1, H, W)\n\tx_cols = im2col_indices(x_split, pool_height, pool_width, padding=0, stride=stride)\n\tx_cols_argmax = np.argmax(x_cols, axis=0)\n\tx_cols_max = x_cols[x_cols_argmax, np.arange(x_cols.shape[1])]\n\tout = x_cols_max.reshape(out_height, out_width, N, C).transpose(2, 3, 0, 1)\n\n\tcache = (x, x_cols, x_cols_argmax, pool_param)\n\treturn out, cache",
"def backward_max_pool(data, pool_width, pool_height, output_grad):\n\n # *** START CODE HERE ***\n input_channels = data.shape[0]\n input_width = data.shape[1]\n input_height = data.shape[2]\n partial = np.zeros((input_channels, input_width , input_height))\n for x in range(0, input_width, pool_width):\n for y in range(0, input_height, pool_height):\n #25 iterations\n window_max = np.amax(data[:, x:(x + pool_width), y:(y + pool_height)], axis=(1, 2))\n for c in range(input_channels):\n found_it = False\n for dx in range(pool_width):\n for dy in range(pool_height):\n if (data[c,x + dx,y + dy] == window_max[c]) and (found_it == False):\n partial[c,x + dx, y + dy] = output_grad[c, x // pool_width, y // pool_height]\n found_it = True\n return partial \n # *** END CODE HERE ***",
"def get_convolution_backward_data_algorithm(\n self, filter_desc, diff_desc, conv_desc, grad_desc,\n preference=CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST,\n memory_limit=0):\n algo = ffi.new(\"cudnnConvolutionBwdDataAlgo_t *\")\n err = self._lib.cudnnGetConvolutionBackwardDataAlgorithm(\n self.handle, filter_desc, diff_desc, conv_desc, grad_desc,\n preference, memory_limit, algo)\n if err:\n raise CU.error(\"cudnnGetConvolutionBackwardDataAlgorithm\", err)\n return int(algo[0])",
"def get_convolution_backward_filter_algorithm(\n self, src_desc, diff_desc, conv_dec, grad_desc,\n preference=CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST,\n memory_limit=0):\n algo = ffi.new(\"cudnnConvolutionBwdFilterAlgo_t *\")\n err = self._lib.cudnnGetConvolutionBackwardFilterAlgorithm(\n self.handle, src_desc, diff_desc, conv_dec, grad_desc,\n preference, memory_limit, algo)\n if err:\n raise CU.error(\"cudnnGetConvolutionBackwardFilterAlgorithm\", err)\n return int(algo[0])",
"def _conv2d_relu(prev_layer, layer, layer_name):\r\n W, b = _weights(layer, layer_name)\r\n W = tf.constant(W)\r\n b = tf.constant(np.reshape(b, (b.size)))\r\n out = tf.nn.conv2d(prev_layer, filter=W, strides=[1, 1, 1, 1], padding='SAME') + b\r\n return tf.nn.relu(out)",
"def backward_pass(self, error, learn_rate, activation_name='none'):\n \n output_delta = np.zeros( (self.__output_shape[0], self.__output_shape[1], self.__channel))\n \n # step 1: expand the error matrix to the output_delta tensor\n for channel_index in range(self.__channel):\n output_delta[:, :, channel_index] = error\n \n # step 2: init the shape of error tensor after padding\n X = np.zeros(shape=(self.__input_shape[0] + self.__kernel_height - 1, self.__input_shape[1] + self.__kernel_wide - 1, self.__channel))\n\n # step 2: padding 0 \n for y_index in range(output_delta.shape[0]):\n for x_index in range(output_delta.shape[1]):\n X[ self.__kernel_height - 1 + y_index * self.__strides[0],\n self.__kernel_wide - 1 + x_index * self.__strides[1], :] = output_delta[ y_index, x_index, :]\n \n # step 3: calculate delta of pre layer\n\n # 'error_{cur_layer}' conv 'rot180(W)' \n flip_conv_weights = self.__conv( input_data = X, weights = self.__flip_weights(), strides = (1, 1), _axis = 0)\n \n # 'error_{cur_layer-1}' = 'error_{cur_layer}' conv 'rot180(W)' dot-multi 'activation_prime'\n delta = flip_conv_weights * np.reshape( ActivationFunction.activation_prime(activation_name = activation_name, input_data = self.__input), flip_conv_weights.shape)\n \n temp_weights = X[self.__kernel_height-1 : 1-self.__kernel_height, self.__kernel_height-1 : 1-self.__kernel_height, :]\n # step 4 : update weights and bias\n weights_delta = self.__conv(input_data = self.__input, weights = temp_weights , strides=(1, 1), _axis = 0)\n \n self.__update_params(w_delta = weights_delta, b_delta = np.sum(error), learn_rate = learn_rate)\n \n return delta",
"def convolve(self, img):",
"def _schedule_im2col_conv2d(s, op):\n\n # get ops and tensors\n output = op.output(0)\n C = op.input_tensors[0]\n A, B = C.op.input_tensors\n kernel = A.op.input_tensors[0]\n data = B.op.input_tensors[0]\n\n # tuning parameter config\n tune_config = getattr(tvm.target.current_target(), \"tune_config\", None)\n if tune_config is None: # use rule\n bn = 4\n unroll_step = 16\n\n total_work = util.get_const_int(C.shape[0] * C.shape[1])\n reduce_work = util.get_const_int(A.shape[1])\n if total_work > 200000:\n last_work = util.get_const_int(C.shape[1])\n if last_work > 10000:\n num_thread = 16\n elif last_work > 3000:\n num_thread = 8\n elif reduce_work > 100:\n num_thread = 4\n else:\n num_thread = 2\n\n if reduce_work < 50 and last_work < 30000:\n num_thread = 4\n elif total_work > 150000:\n num_thread = 8\n elif total_work > 50000:\n num_thread = 4\n else:\n num_thread = 2\n\n if num_thread == 4:\n unroll_step = 2\n else:\n bn = tune_config[\"bn\"]\n num_thread = tune_config[\"num_thread\"]\n unroll_step = tune_config[\"unroll_step\"]\n\n bna = bnb = bn\n num_thread1 = num_thread2 = num_thread\n if data.dtype == 'float16':\n bnb *= 2\n last_work = util.get_const_int(C.shape[1])\n if last_work % (bnb * num_thread2) != 0:\n num_thread1 = num_thread * 2\n num_thread2 = num_thread // 2\n\n # schedule dilation\n if isinstance(kernel.op, tvm.tensor.ComputeOp) and \"dilate\" in kernel.op.tag:\n s[kernel].compute_inline()\n\n # schedule padding\n if isinstance(data.op, tvm.tensor.ComputeOp) and \"pad\" in data.op.tag:\n data_pad = data\n s[data_pad].compute_inline()\n\n ##### SCHEDULE A #####\n if util.get_const_int(kernel.shape[2]) == 1 and util.get_const_int(kernel.shape[3]) == 1:\n s[A].compute_inline()\n else:\n y, x = s[A].op.axis\n yo, xo, yi, xi = s[A].tile(y, x, bna, util.get_const_int(kernel.shape[3]))\n s[A].vectorize(xi)\n fuse_and_bind(s, A, [yo, xo])\n\n # pack to vector form\n packedA = pack_tensor(s, A, bna, [C])\n\n # vectorize load\n y, x = s[packedA].op.axis[:2]\n tmp = s.cache_write(packedA, \"local\")\n x, xt = s[packedA].split(x, bna)\n _, _, _, xi = tile_and_bind(s, packedA, y, x, num_thread)\n s[tmp].compute_at(s[packedA], xi)\n s[tmp].vectorize(s[tmp].op.axis[1])\n s[tmp].unroll(s[tmp].op.axis[2])\n s[packedA].vectorize(s[packedA].op.axis[2])\n s[packedA].unroll(xt)\n\n ##### SCHEDULE B #####\n y, x = s[B].op.axis\n yo, xo, yi, xi = s[B].tile(y, x, 1, 1 * bnb)\n fuse_and_bind(s, B, [yo, xo])\n\n # transpose and pack to vector form\n B_transpose, B_tmp = transpose(s, B, [C])\n s[B_transpose].compute_inline()\n packedB = pack_tensor(s, B_transpose, bnb, [B_tmp])\n\n # vectorize load\n s[packedB].vectorize(s[packedB].op.axis[2])\n y, x = s[packedB].op.axis[:2]\n tile_and_bind(s, packedB, y, x, num_thread)\n\n ##### SCHEDULE C #####\n # vectorize and unroll dot\n y, x = s[C].op.axis\n y, x, yt, xt = s[C].tile(y, x, bna, bnb)\n\n k = s[C].op.reduce_axis[0]\n s[C].reorder(k, yt, xt)\n if unroll_step != 1:\n k, k_unroll = s[C].split(k, unroll_step)\n s[C].unroll(k_unroll)\n s[C].unroll(yt)\n s[C].vectorize(xt)\n\n tile_and_bind(s, C, y, x, num_thread1, num_thread2)\n\n ##### COPY TO OUTPUT #####\n if output.op in s.outputs: # no bias\n output = output\n else: # has bias\n s[output].compute_inline()\n output = s.outputs[0]\n\n n, co, h, w = s[output].op.axis\n h, w, vh, vw = s[output].tile(h, w, 1, bnb)\n s[output].unroll(vh)\n if util.get_const_int(s[output].op.output(0).shape[3]) % bnb != 0:\n pass\n else:\n s[output].vectorize(vw)\n fuse_and_bind(s, output, [n, co, h, w])",
"def Convo_Relu_Pool_Conv_Relu_Backward(self,dout,cache):\n #pdb.set_trace()\n l_obj = layers()\n #spectral_conv_cache, relu_cache = cache\n spectral_conv_cache_1,relu_cache_1,pool_cache,spectral_conv_cache_2,relu_cache_2=cache \n da = l_obj.relu_backward_cuda(dout,relu_cache_2)\n dx_2,dw_2,db_2 = l_obj.spectralConvolutionBackprop_Final_cuda(da,spectral_conv_cache_2)\n da_pool=l_obj.max_pool_backward_cuda(dx_2, pool_cache)\n da = l_obj.relu_backward_cuda(da_pool,relu_cache_1)\n dx_1,dw_1,db_1 = l_obj.spectralConvolutionBackprop_Final_cuda(da,spectral_conv_cache_1)\n \n\n return dx_1.get(),dw_1,db_1,dw_2,db_2",
"def backward(ctx, grad_output):\n print(\"MYrelu\")\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n #grad_input[input < 0] = 0\n #grad_input[input < 0] = 0\n return grad_input",
"def forward_convolution(conv_W, conv_b, data):\n\n conv_channels, _, conv_width, conv_height = conv_W.shape\n\n input_channels, input_width, input_height = data.shape\n\n output = np.zeros((conv_channels, input_width - conv_width + 1, input_height - conv_height + 1))\n\n for x in range(input_width - conv_width + 1):\n for y in range(input_height - conv_height + 1):\n for output_channel in range(conv_channels):\n output[output_channel, x, y] = np.sum(\n np.multiply(data[:, x:(x + conv_width), y:(y + conv_height)], conv_W[output_channel, :, :, :])) + conv_b[output_channel]\n\n\n return output",
"def conv_backward_fft_1D(dout, cache):\n x, w, b, conv_param, fftsize = cache\n preserve_energy_rate = conv_param.get('preserve_energy_rate', None)\n index_back = conv_param.get('compress_rate', None)\n pad = conv_param.get('pad')\n stride = conv_param.get('stride')\n if stride != 1:\n raise ValueError(\"fft requires stride = 1, but given: \", stride)\n\n N, C, W = x.shape\n F, C, WW = w.shape\n N, F, W_out = dout.shape\n\n padded_x = (np.pad(x, ((0, 0), (0, 0), (pad, pad)), 'constant'))\n\n # W = padded_out_W - WW + 1; padded_out_W = W + WW - 1; pad_out = W + WW - 1 // 2\n pad_out = (W + WW - 1 - W_out) // 2\n # print(\"pad_out: \", pad_out)\n if pad_out < 0:\n padded_dout = dout[:, :, abs(pad_out):pad_out]\n else:\n padded_dout = np.pad(dout, ((0, 0), (0, 0), (pad_out, pad_out)),\n mode='constant')\n\n # Initialize gradient output tensors.\n dx = np.zeros_like(x) # the x used for convolution was with padding\n dw = np.zeros_like(w)\n db = np.zeros_like(b)\n\n # Calculate dB (the gradient for the bias term).\n # We sum up all the incoming gradients for each filters bias (as in the affine layer).\n for ff in range(F):\n db[ff] += np.sum(dout[:, ff, :])\n\n # print(\"padded x: \", padded_x)\n # print(\"dout: \", dout)\n # Calculate dw - the gradient for the filters w.\n # By chain rule dw is computed as: dout*x\n fftsize = next_power2(W + W_out - 1)\n for nn in range(N):\n for ff in range(F):\n for cc in range(C):\n # accumulate gradient for a filter from each channel\n # dw[ff, cc] += convolve1D_fft(padded_x[nn, cc], np.flip(dout[nn, ff], axis=0), fftsize, WW,\n # preserve_energy_rate=preserve_energy_rate)\n dw[ff, cc] += correlate_signals(padded_x[nn, cc], dout[nn, ff],\n fftsize, WW,\n preserve_energy_rate=preserve_energy_rate,\n index_back=index_back)\n # print(\"dw fft: \", dw[ff, cc])\n\n # Calculate dx - the gradient for the input x.\n # By chain rule dx is dout*w. We need to make dx same shape as padded x for the gradient calculation.\n # fftsize = next_power2(W_out + WW - 1)\n # print(\"padded_dout len: \", padded_dout.shape[-1])\n # print(\"W_out len: \", W_out)\n # fftsize = W\n fftsize = next_power2(padded_dout.shape[-1] + WW - 1)\n # print(\"fftsize: \", fftsize)\n for nn in range(N):\n for ff in range(F):\n for cc in range(C):\n # print(\"dout[nn, ff]: \", dout[nn, ff])\n # print(\"dout[nn, ff] shape: \", dout[nn, ff].shape)\n # print(\"padded_dout[nn, ff] shape: \", padded_dout[nn, ff].shape)\n # print(\"w[ff, cc]: \", w[ff, cc])\n # print(\"w[ff, cc] shape: \", w[ff, cc].shape)\n # dx[nn, cc] += correlate_signals(padded_dout[nn, ff], np.flip(w[ff, cc], axis=0), fftsize, W,\n # preserve_energy_rate=preserve_energy_rate, compress_rate=compress_rate)\n dx[nn, cc] += correlate_signals(padded_dout[nn, ff],\n np.flip(w[ff, cc], axis=0),\n fftsize, W,\n preserve_energy_rate=preserve_energy_rate,\n index_back=index_back)\n # print(\"dx fft: \", dx[nn, cc])\n\n return dx, dw, db",
"def Convolution(image, convFilter):\r\n startDistance = math.sqrt(len(convFilter))//2\r\n # convFilter = convFilter.reverse()\r\n if math.sqrt(len(convFilter))%2 != 1:\r\n print(\"Not Valide filter size\")\r\n return\r\n length = int(math.sqrt(len(convFilter)))\r\n arr = np.asarray(image.shape)\r\n out = np.zeros(arr, dtype=np.uint8)\r\n for k in range(arr[2]): # Channels\r\n for i in range(arr[0]): # Columns/x\r\n for j in range(arr[1]): # Rows/y\r\n sx = i-startDistance\r\n sy = j-startDistance\r\n temp = 0\r\n for y in range(length):\r\n for x in range(length):\r\n # if i < 0 or j < 0 or i\r\n deltaX = sx + x\r\n deltaY = sy + y\r\n if deltaX < 0 or deltaY < 0 or deltaX >= arr[0] or deltaY >= arr[1]:\r\n pixel = image[i, j, k]\r\n filter = convFilter[length * y + x]\r\n value = pixel * filter\r\n else:\r\n pixel = image[int(deltaX), int(deltaY), k]\r\n filter = convFilter[length * y + x]\r\n value = pixel * filter\r\n # print(\"pixel: \", pixel, \" fileter: \", filter)\r\n temp = temp + value / len(convFilter)\r\n # print(\"+\", value)\r\n # print(\"===\", temp)\r\n out[i, j, k] = temp\r\n return out",
"def conv_batchnorm_relu_backward(dout, cache):\n conv_cache, batch_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n db, dgamma, dbeta = spatial_batchnorm_backward(da, batch_cache)\n dx, dw, db = conv_backward_fast(db, conv_cache)\n return dx, dw, db, dgamma, dbeta",
"def convolution(image, kernel):\n kh = kernel.shape[0] #kernel height\n kw = kernel.shape[1] #kernel width\n khm = math.floor(kh/2) #half of kernel height\n kwm = math.floor(kw/2) #half of kernel width\n ih = image.shape[0] #image height\n iw = image.shape[1] #image width\n #make an image frameless\n im_temp = np.zeros((ih+kh, iw+kw))\n im_temp[khm:ih+khm, kwm:iw+kwm] = image\n im_temp[0:khm, kwm:iw+kwm] = image[0:khm, :]\n im_temp[ih+khm:ih+2*khm, kwm:iw+kwm] = image[ih-khm:ih, :]\n im_temp[khm:ih+khm:, 0:kwm] = image[:, 0:kwm]\n im_temp[khm:ih+khm, iw+kwm:iw+2*kwm] = image[:, iw-kwm:iw]\n #create a new image to store the convoluted image\n convoluted = np.zeros((ih, iw))\n #convolute an image with a flipped kernel\n for i in range(ih):\n for j in range(iw):\n weights = 0\n for k in range(kh):\n for l in range(kw):\n kk = kh - 1 - k\n ll = kw - 1 - l\n weights = weights + im_temp[i+k, j+l] * kernel[kk,ll] \n convoluted[i,j] = weights\n return convoluted"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a mock, shared Driftwood object
|
def driftwood():
d = mock.Mock()
d.config = {
'database': {
'root': 'db.test',
'name': 'test.db'
}
}
d.log.msg.side_effect = Exception('log.msg called')
return d
|
[
"def mock(self):\r\n return self._mock",
"def __init__(self, mock):\n\n self.mock = mock\n self.response = None",
"def get_deployment_mock():",
"def _MockInsideChroot(self):\n mic = self.mox.CreateMock(cgt.InsideChroot)\n\n mic.creds = self.mox.CreateMock(gdata_lib.Creds)\n mic.gd_client = self.mox.CreateMock(gdata_ss_service.SpreadsheetsService)\n mic.it_client = self.mox.CreateMock(gdata_ph_client.ProjectHostingClient)\n\n return mic",
"def default_mocks(slack_mock):",
"def setup_mock_api(cls):\n\n mock_scheduler = Mock()\n mock_scheduler.url = \"http://something_or_other\"\n mock_scheduler_client = Mock()\n mock_scheduler_client.scheduler.return_value = mock_scheduler\n mock_scheduler_client.url = \"http://something_or_other\"\n mock_api = Mock(spec=HookedAuroraClientAPI)\n mock_api.scheduler = mock_scheduler_client\n return (mock_api, mock_scheduler_client)",
"def mock_db_session():\n class DB(object):\n def add(self, obj):\n pass\n def query(self, cls):\n pass\n return mock.Mock(spec=DB())",
"def createOperationsMock():\n opModuleMock = Mock()\n opModuleMock.name = \"OperationsModule Mock\"\n opMock = Mock()\n opModuleMock.return_value = opMock\n opMock.name = \"OperationsClass Mock\"\n\n opMock.getOptionsDict.side_effect = getOptionsDictMock\n return opModuleMock",
"def setup(object_path: str | None = None) -> DBusServiceMock:\n return Systemd()",
"def mock_registry(data_path):\n return MockRegistry(data_path / \"registry\")",
"def test_init(self):\n # create a mock object for wrapped class\n cls = mock.MagicMock()\n # call the method\n decorator._SingletonWrapper.__init__(self.wrapper, cls)\n # check whether the object holds wrapped class\n self.assertEqual(self.wrapper.__wrapped__, cls)",
"def setUp(self):\n self.blink = blinkpy.Blink(username=USERNAME,\n password=PASSWORD,\n motion_interval=0)\n # pylint: disable=protected-access\n self.blink._auth_header = {\n 'Host': 'test.url.tld',\n 'TOKEN_AUTH': 'foobar123'\n }\n self.blink.last_refresh = 0\n self.blink.urls = blinkpy.BlinkURLHandler('test')\n self.blink.sync['test'] = BlinkSyncModule(self.blink,\n 'test',\n '1234',\n [])\n self.camera = BlinkCamera(self.blink.sync)\n self.mock_start = [\n {'syncmodule': {\n 'id': 1234,\n 'network_id': 5678,\n 'serial': '12345678',\n 'status': 'foobar'}},\n {'event': True},\n {},\n {},\n None,\n {'devicestatus': {}},\n ]",
"def service():\r\n\r\n mock_soco = mock.MagicMock()\r\n mock_soco.ip_address = \"192.168.1.101\"\r\n return Service(mock_soco)",
"def mock_mouse():\n return MockMouse()",
"def test_Xdawn_init():\n _ = Xdawn()",
"def mocked_reddit():\n with patch('praw.reddit.Reddit', autospec=True) as reddit:\n with patch('shotbot.bots.watcher.praw.Reddit', reddit):\n with patch('shotbot.bots.commenter.praw.Reddit', reddit):\n reddit = reddit.return_value\n\n subreddit = MagicMock(name='MockSubreddit()',\n spec=praw.models.Subreddit)\n subreddit.__str__.return_value = \"fakesub\"\n subreddit.display_name = \"fakesub\"\n\n reddit.subreddit = Mock()\n reddit.subreddit.return_value = subreddit\n\n reddit.config = Mock(name='MockReddit().config')\n reddit.config.username = \"username\"\n\n yield reddit",
"def client():\n return TestClient()",
"def open_local_factory(mocker, stringio):\n opener = mocker.Mock(return_value=stringio)\n return opener",
"def __init__(self, mock_arg: object):\n\n super(MockProvider, self).__init__()\n self.mock_arg = mock_arg"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DatabaseManager should create the directory db.test if it doesn't exist already.
|
def test_create_db_dir_if_not_exist(self):
databasemanager.DatabaseManager(driftwood())
|
[
"def test_create_db_file_if_not_exist(self):\n databasemanager.DatabaseManager(driftwood())",
"def test_database(self):\n tester = os.path.exists(\"lingualizer_alchemy.db\")\n self.assertEqual(tester, True)",
"def init_db(self):\n\n # is there a DB already?\n if os.path.exists(self.db_base_path):\n raise DatabaseExists\n \n # create the dir\n os.mkdir(self.db_base_path)",
"def test_database_setup():\n\n TEST_DB_NAME = 'test_db.sqlite3'\n with contextlib.suppress(FileNotFoundError):\n TIMEOUT_SECONDS = 60\n cutoff_time = datetime.now() + timedelta(seconds=TIMEOUT_SECONDS)\n db_deleted = False\n while not db_deleted and datetime.now() < cutoff_time:\n try:\n os.remove(TEST_DB_NAME)\n db_deleted = True\n except PermissionError:\n # DB file lock is probably still held by last Django server instance.\n # Let's give it a moment to release it.\n pass\n\n if not db_deleted:\n raise TimeoutError(f\"Could not delete {TEST_DB_NAME}\")\n\n # Just doing:\n # `subprocess.call(f'sqlite3 db.sqlite3 .schema | sqlite3 {self.TEST_DB_NAME}', shell=True)`\n # would be nicer, but unfortunately sqlite creates a default table (sqlite_sequence) that we need to\n # remove from the schema before passing it back in again\n schema_byte_string = subprocess.check_output('sqlite3 db.sqlite3 .schema', shell=True)\n schema_string = str(schema_byte_string, 'utf-8')\n schema_one_line = schema_string.replace('\\r','').replace('\\n','')\n schema_without_sqlite_sequence = schema_one_line.replace('CREATE TABLE sqlite_sequence(name,seq);','')\n subprocess.call(f'echo {schema_without_sqlite_sequence} | sqlite3 {TEST_DB_NAME}', shell=True)\n\n # populate new database as is needed for testing\n with open('logs/test_setup_log.txt', 'a') as log:\n subprocess.call(\n ['py', 'manage.py', 'test_setup', '--settings=charity_configuration.test_settings'],\n stdout=log,\n )",
"def create_test_db(self):\n self.engine = sqlalchemy.create_engine(\"sqlite:///:memory:\")\n self.slave = self.engine\n self.metadata = Metadata()\n self.create_db()\n self.reset_db()",
"def make_database_dir(project_name):\n for db in get_data_db(6):\n create_dir(project_name, db)",
"def SQL_Check_DB_Directory():\n database_folder = 'Database'\n # checks if the directory already exists, if it does not, it will throw an exception. (Which will usually be because\n # of insufficent permissions)\n if not os.path.exists(database_folder):\n try:\n os.makedirs(database_folder)\n except PermissionError:\n print(\"Cannot create required directory, Aborting!\")",
"def testCreateDatabaseFile(self):\n from conf import DATABASE_NAME, DATABASE_FILE\n import sqlite3\n from sqlite3 import Error \n \n db_file = DATABASE_FILE\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n finally:\n conn.close()",
"def check_db(self):\n if not os.path.exists(self.db_base_path):\n raise DatabaseDoesNotExist",
"def setUp(self):\n db.create_all()\n self.db = db",
"def dbSetUp(self):\n pass",
"def test_db_create():\n _test_call(\n mysql.db_create,\n \"CREATE DATABASE IF NOT EXISTS `test``'\\\" db`;\",\n \"test`'\\\" db\",\n )",
"def test_create_database(self):\n\n # Setup the tables\n CreateDatabase.run(app=self.app)\n engine = create_engine(TestManagePy.postgresql_url)\n connection = engine.connect()\n\n for model in [User, Library, Permissions]:\n exists = engine.dialect.has_table(connection, model.__tablename__)\n self.assertTrue(exists)\n\n # Clean up the tables\n Base.metadata.drop_all(bind=self.app.db.engine)",
"def create_test_database(dbname):\n present = db_def.exists(dbname)\n if present:\n logger.warning('create_test_database: Database \"{0}\" already existed.'.format(dbname))\n return True\n\n cmnd = 'createdb {0}'.format(dbname)\n execute = subprocess.Popen(cmnd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout = execute.stdout.read().rstrip('\\n')\n stderr = execute.stderr.read()\n err = execute.returncode\n if err is not None:\n logger.error('create_test_database: stdout: {0}'.format(stdout))\n logger.error('create_test_database: stderr: {0}'.format(stderr))\n logger.error('create_test_database: failed to create the database')\n return False\n else:\n logger.info('create_test_database: database \"{0}\" created'.format(dbname))\n return True",
"def create_testdata_db(self):\n\n try:\n dsn = CommandlineTool.get_input_option('yoda-db-testdata-dsn')\n force = CommandlineTool.get_input_option('force')\n if (not dsn):\n dsn = self._mh.ext_cfg['Yoda']['db_testdata_dsn'].format(var_dir=syscfg.HTK_VAR_DIR)\n db = DBO(dsn)._dbo_driver\n db._parse_dsn(dsn)\n\n result = True\n if (not db.database_exists() or force):\n if (force):\n dmsg(self._mh._trn.msg('yoda_remove_testdata_db', dsn))\n db.remove_database()\n\n print(self._mh._trn.msg('yoda_create_testdata_db', dsn))\n db.connect()\n dbdir = os.path.join(self._mh.ext_cfg['Yoda']['test_repo_root'].format(var_dir=syscfg.HTK_VAR_DIR), 'db_testdata')\n script = file_get_contents(\n os.path.join(dbdir, 'db_struct.sql'))\n db._cursor.executescript(script)\n script = file_get_contents(os.path.join(dbdir, 'db_data.sql'))\n db._cursor.executescript(script)\n print(self._mh._trn.msg('yoda_testdata_db_created'))\n else:\n print(self._mh._trn.msg('yoda_testdata_db_exists', dsn))\n result = False\n\n return result\n except Error as ex:\n print(self._mh._trn.msg('yoda_testdata_db_error', ex))\n return False",
"def setupAllDB():\n createDatabase(CONFIG_DB['db_name'])\n runMigrations()\n setupJobTrackerDB()\n setupErrorDB()\n setupUserDB()\n setupJobQueueDB()\n setupValidationDB()",
"def setup_db():\n create_service_db()",
"def default_create_test_data(self, db_name):\n pass",
"def startupCheck():\n\n # ------------------------------------------------\n # Creating directories:\n Path(DB_PATH).mkdir(parents=True, exist_ok=True)\n Path(SCAN_PATH).mkdir(parents=True, exist_ok=True)\n\n # ------------------------------------------------\n # Creating database:\n createDatabase(DB_FULLPATH)\n\n # ------------------------------------------------"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DatabaseManager should create the file test.db if it doesn't exist already.
|
def test_create_db_file_if_not_exist(self):
databasemanager.DatabaseManager(driftwood())
|
[
"def test_create_db_dir_if_not_exist(self):\n databasemanager.DatabaseManager(driftwood())",
"def test_database(self):\n tester = os.path.exists(\"lingualizer_alchemy.db\")\n self.assertEqual(tester, True)",
"def init_db(self):\n\n # is there a DB already?\n if os.path.exists(self.db_base_path):\n raise DatabaseExists\n \n # create the dir\n os.mkdir(self.db_base_path)",
"def testCreateDatabaseFile(self):\n from conf import DATABASE_NAME, DATABASE_FILE\n import sqlite3\n from sqlite3 import Error \n \n db_file = DATABASE_FILE\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n finally:\n conn.close()",
"def test_database_setup():\n\n TEST_DB_NAME = 'test_db.sqlite3'\n with contextlib.suppress(FileNotFoundError):\n TIMEOUT_SECONDS = 60\n cutoff_time = datetime.now() + timedelta(seconds=TIMEOUT_SECONDS)\n db_deleted = False\n while not db_deleted and datetime.now() < cutoff_time:\n try:\n os.remove(TEST_DB_NAME)\n db_deleted = True\n except PermissionError:\n # DB file lock is probably still held by last Django server instance.\n # Let's give it a moment to release it.\n pass\n\n if not db_deleted:\n raise TimeoutError(f\"Could not delete {TEST_DB_NAME}\")\n\n # Just doing:\n # `subprocess.call(f'sqlite3 db.sqlite3 .schema | sqlite3 {self.TEST_DB_NAME}', shell=True)`\n # would be nicer, but unfortunately sqlite creates a default table (sqlite_sequence) that we need to\n # remove from the schema before passing it back in again\n schema_byte_string = subprocess.check_output('sqlite3 db.sqlite3 .schema', shell=True)\n schema_string = str(schema_byte_string, 'utf-8')\n schema_one_line = schema_string.replace('\\r','').replace('\\n','')\n schema_without_sqlite_sequence = schema_one_line.replace('CREATE TABLE sqlite_sequence(name,seq);','')\n subprocess.call(f'echo {schema_without_sqlite_sequence} | sqlite3 {TEST_DB_NAME}', shell=True)\n\n # populate new database as is needed for testing\n with open('logs/test_setup_log.txt', 'a') as log:\n subprocess.call(\n ['py', 'manage.py', 'test_setup', '--settings=charity_configuration.test_settings'],\n stdout=log,\n )",
"def create_test_db(self):\n self.engine = sqlalchemy.create_engine(\"sqlite:///:memory:\")\n self.slave = self.engine\n self.metadata = Metadata()\n self.create_db()\n self.reset_db()",
"def check_db(self):\n if not os.path.exists(self.db_base_path):\n raise DatabaseDoesNotExist",
"def test_db_create():\n _test_call(\n mysql.db_create,\n \"CREATE DATABASE IF NOT EXISTS `test``'\\\" db`;\",\n \"test`'\\\" db\",\n )",
"def create_test_database(dbname):\n present = db_def.exists(dbname)\n if present:\n logger.warning('create_test_database: Database \"{0}\" already existed.'.format(dbname))\n return True\n\n cmnd = 'createdb {0}'.format(dbname)\n execute = subprocess.Popen(cmnd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout = execute.stdout.read().rstrip('\\n')\n stderr = execute.stderr.read()\n err = execute.returncode\n if err is not None:\n logger.error('create_test_database: stdout: {0}'.format(stdout))\n logger.error('create_test_database: stderr: {0}'.format(stderr))\n logger.error('create_test_database: failed to create the database')\n return False\n else:\n logger.info('create_test_database: database \"{0}\" created'.format(dbname))\n return True",
"def touch_db_file(self):\n if not self.db_file_exist():\n # create empty database file\n open(self.db_file, 'a').close()\n\n if not self.db_file_exist():\n prinw(\"\"\"Offline database file is not present, even after creation attempt.\"\"\"\n \"\"\"\\nPlease create empty file in this path: <%s>\"\"\"\n \"\"\"\\nWorking without offline sql database can be slow - for every request there is internet connection needed\"\"\"\n , str(Path(self.db_file)))\n self.use_sql = False\n return None",
"def setUp(self):\n db.create_all()\n self.db = db",
"def create_testdata_db(self):\n\n try:\n dsn = CommandlineTool.get_input_option('yoda-db-testdata-dsn')\n force = CommandlineTool.get_input_option('force')\n if (not dsn):\n dsn = self._mh.ext_cfg['Yoda']['db_testdata_dsn'].format(var_dir=syscfg.HTK_VAR_DIR)\n db = DBO(dsn)._dbo_driver\n db._parse_dsn(dsn)\n\n result = True\n if (not db.database_exists() or force):\n if (force):\n dmsg(self._mh._trn.msg('yoda_remove_testdata_db', dsn))\n db.remove_database()\n\n print(self._mh._trn.msg('yoda_create_testdata_db', dsn))\n db.connect()\n dbdir = os.path.join(self._mh.ext_cfg['Yoda']['test_repo_root'].format(var_dir=syscfg.HTK_VAR_DIR), 'db_testdata')\n script = file_get_contents(\n os.path.join(dbdir, 'db_struct.sql'))\n db._cursor.executescript(script)\n script = file_get_contents(os.path.join(dbdir, 'db_data.sql'))\n db._cursor.executescript(script)\n print(self._mh._trn.msg('yoda_testdata_db_created'))\n else:\n print(self._mh._trn.msg('yoda_testdata_db_exists', dsn))\n result = False\n\n return result\n except Error as ex:\n print(self._mh._trn.msg('yoda_testdata_db_error', ex))\n return False",
"def create_db(path_to_HuGaDB_folder, dbname='HuGaDB.db'):\n if not isdir(path_to_HuGaDB_folder):\n print(\"No such folder \" + path_to_HuGaDB_folder)\n return\n files = glob.glob(join(path_to_HuGaDB_folder, 'HuGaDB*.txt'))\n length = len(files)\n if length == 0:\n print(\"No HuGaDB files in folder\")\n return\n\t\n if not create_tabels(dbname=dbname):\n print(\"DB is already exist\")\n return \n for i, filename in enumerate(files):\n sys.stdout.write('\\r')\n sys.stdout.write(\"Creating database: file {0}/{1}\".format(i+1, length))\n add_file_to_db(filename, dbname=dbname)\n sys.stdout.flush()",
"def setup_db():\n create_service_db()",
"def _create_database(self, last_upgrade_to_run):\n # Create the tables in the database\n conn = self._connect()\n try:\n with conn:\n self._create_tables(conn, last_upgrade_to_run)\n finally:\n conn.close()\n\n # Set the file permissions\n os.chmod(self.filename, stat.S_IRUSR | stat.S_IWUSR)",
"def create_db():\n\n cursor = get_db_connection()\n cursor.execute(\"commit\")\n cursor.execute(\"SELECT 1 FROM pg_catalog.pg_database \\\n WHERE datname = '{}'\".format(db_name))\n exists = cursor.fetchone()\n if not exists:\n cursor.execute('CREATE DATABASE {}'.format(db_name))",
"def SQL_Check_DB_Directory():\n database_folder = 'Database'\n # checks if the directory already exists, if it does not, it will throw an exception. (Which will usually be because\n # of insufficent permissions)\n if not os.path.exists(database_folder):\n try:\n os.makedirs(database_folder)\n except PermissionError:\n print(\"Cannot create required directory, Aborting!\")",
"def __call__(self):\n self.create_database()",
"def test_create_database(self):\n\n # Setup the tables\n CreateDatabase.run(app=self.app)\n engine = create_engine(TestManagePy.postgresql_url)\n connection = engine.connect()\n\n for model in [User, Library, Permissions]:\n exists = engine.dialect.has_table(connection, model.__tablename__)\n self.assertTrue(exists)\n\n # Clean up the tables\n Base.metadata.drop_all(bind=self.app.db.engine)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return data for day. Assumes data files are in ../data/
|
def get_data(day_num: int) -> Generator[str, None, None]:
data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..', 'data')
with open(os.path.join(data_dir, f'day_{day_num}.txt'), 'r') as fobj:
yield from fobj
|
[
"def data(ignore_date=False):",
"def get_day_predictions(day):\n ldir = glob.glob(status_path + day + '/*-dadestram.data')\n if not ldir:\n raise Exception('Day does not exists')\n ldata = []\n for f in sorted(ldir):\n ldata.append(DataTram(f))\n return ldata",
"def get_data_files(full_data_path, one_per_day=0):\n files = []\n for day_dir in os.listdir(full_data_path):\n # print(\"In folder \" + full_data_path+ os.sep + day_dir )\n day_dir_fpath = os.path.join(full_data_path, day_dir)\n if not os.path.isdir(day_dir_fpath):\n continue # Only look at directories\n\n if one_per_day: # Then only look at the first 2 files (hopefully a .graph and .netinfo pair)\n day_data = [x for x in os.listdir(day_dir_fpath) if\n x.endswith(\".netinfo\") or x.endswith(\".graph\")][0:2]\n files += [day_dir_fpath + os.sep + x for x in day_data if x.endswith(\".graph\")]\n else:\n day_data = os.listdir(day_dir_fpath)\n files += [day_dir_fpath + os.sep + x for x in day_data if x.endswith(\".graph\")]\n\n return files",
"def read_data_by_days(sc, path, start, end):\n day_list = get_day_range(start, end)\n print \"get data from {0} to {1}\".format(day_list[0], day_list[-1])\n day_paths = map(lambda x: \"{0}/day={1}\".format(path, x), day_list)\n day_paths_str = \",\".join(day_paths)\n rdd = sc.textFile(day_paths_str)\n return rdd",
"def get_dat(dir):\n os.chdir(dir)\n all_dat = []\n for file in glob.glob(\"*.dat\"):\n all_dat.append(file)\n return all_dat",
"def load_daily_data(self, date):\n # date_str = str(date.year)+str(date.month).zfill(2)+str(date.day).zfill(2)\n date_str = date.strftime('%Y%m%d')\n if path.exists(self.savepath+date_str):\n pass\n else:\n self.download_file(\"INTL.IVYDB.{}D.zip\".format(date_str))",
"def cfht_weather_data(year, month, day, hour, minute,\n dir='/u/ghezgroup/code/python/keckdar/'):\n\n temperature = np.zeros(len(year), dtype=float)\n pressure = np.zeros(len(year), dtype=float)\n humidity = np.zeros(len(year), dtype=float)\n wind_speed = np.zeros(len(year), dtype=float)\n wind_dir = np.zeros(len(year), dtype=float)\n\n\n cfht_file = None\n\n for ii in range(len(year)):\n cfht_file_new = dir + 'cfht-wx.' + str(year[ii]) + '.' + \\\n str(month[ii]).zfill(2) + '.dat'\n\n if (cfht_file != cfht_file_new):\n cfht_file = cfht_file_new\n cfht = asciidata.open(cfht_file)\n\n atmYear = cfht[0].tonumpy()\n atmMonth = cfht[1].tonumpy()\n atmDay = cfht[2].tonumpy()\n atmHour = cfht[3].tonumpy()\n atmMin = cfht[4].tonumpy() # HST times\n atmWindSpeed = cfht[5].tonumpy() # km/h\n atmWindDir = cfht[6].tonumpy() # degrees\n atmTemp = cfht[7].tonumpy() # Celsius\n atmHumidity = cfht[8].tonumpy() # percent\n atmPressure = cfht[9].tonumpy() # mb pressure\n\n\n # Find the exact time match for year, month, day, hour\n idx = (np.where((atmDay == day[ii]) & (atmHour == hour[ii])))[0]\n \n if (len(idx) == 0):\n print 'Could not find DAR data for %4d-%2d-%2d %2d:%2d in %s' % \\\n (year, month, day, hour, minute, logFile)\n\n # Find the closest minute\n mdx = abs(atmMin[idx] - minute[ii]).argmin()\n match = idx[ mdx ]\n\n # Ambient Temperature (Celsius)\n temperature[ii] = atmTemp[match]\n\n # Pressure at the observer (millibar)\n # Should be around 760.0 millibars\n pressure[ii] = atmPressure[match]\n\n # Relative humidity (%)\n # Should be around 0.1 %\n humidity[ii] = atmHumidity[match]\n\n # Wind speed (km/h)\n wind_speed[ii] = atmWindSpeed[match]\n\n # Wind direction (degrees)\n wind_dir[ii] = atmWindDir[match]\n\n return temperature, pressure, humidity, wind_speed, wind_dir",
"def sommeerDailyData(dailyfilename,arrDates,strPeriod):\n\tif blnDebug:\n\t\tprint \"Functie: sommeerDailyData(dailyfilename,arrDates,strPeriod)\"\n\t\tprint \"filename: \" + dailyfilename\n\t\tprint \"arrDates: \" + str(arrDates)\n\t\tprint \"strPeriod: \" + strPeriod\n\t\tprint \"\"\n\tif strPeriod == \"daily\":\n\t\tPeriodRecord = collections.namedtuple('PeriodRecord', 'timestamp, datetime, totaal, temp, pv_out, fout')\n\telif strPeriod == \"weekly\" or strPeriod == \"monthly\":\n\t\tPeriodRecord = collections.namedtuple('PeriodRecord', 'datum, totaal, gem_temp, gem_pv_out, totaal_pv_out')\n\telif strPeriod == \"yearly\":\n\t\tPeriodRecord = collections.namedtuple('PeriodRecord', 'jaar, maand, totaal, gem_temp, gem_pv_out, totaal_pv_out')\n\ttotaal = 0\n\tgem_temp = 0\n\tgem_pv_out = 0\n\ttotaal_pv_out = 0\n\ti = 0\n\tj = 0\n\t\n\tif os.path.exists(dailyfilename):\n\t\tfor loc in map(PeriodRecord._make, csv.reader(open(dailyfilename,\"r\"), delimiter=',')):\n\t\t\ti = i + 1\n\t\t\tfor date in arrDates:\n\t\t\t\tif strPeriod == \"daily\":\n\t\t\t\t\tif i >= 2 and time.strftime(\"%Y%m%d\", time.strptime(str(loc.datetime), '%Y-%m-%d %H:%M:%S')) == time.strftime(\"%Y%m%d\", time.strptime(str(date), '%Y%m%d')):\n\t\t\t\t\t\t# controle toevoegen of de datum in de array met data zit\n\t\t\t\t\t\ttotaal = loc.totaal\n\t\t\t\t\t\tgem_temp = gem_temp + int(loc.temp)\n\t\t\t\t\t\tgem_pv_out = gem_pv_out + int(loc.pv_out)\n\t\t\t\t\t\ttotaal_pv_out = totaal_pv_out + int(loc.pv_out)\n\t\t\t\t\t\tj = j + 1\n\t\t\t\telse:\n\t\t\t\t\tif i >= 2 and time.strftime(\"%Y%m%d\", time.strptime(str(loc.datum), '%Y%m%d')) == time.strftime(\"%Y%m%d\", time.strptime(str(date), '%Y%m%d')):\n\t\t\t\t\t\t# controle toevoegen of de datum in de array met data zit\n\t\t\t\t\t\ttotaal = loc.totaal\n\t\t\t\t\t\tgem_temp = gem_temp + int(loc.gem_temp)\n\t\t\t\t\t\tgem_pv_out = gem_pv_out + int(loc.gem_pv_out)\n\t\t\t\t\t\ttotaal_pv_out = totaal_pv_out + int(loc.totaal_pv_out)\n\t\t\t\t\t\tj = j + 1\n\t\ttotaal = int(totaal)\n\t\tgem_temp = gem_temp/j-1\n\t\tgem_pv_out = gem_pv_out/j-1\n\t\tif blnDebug:\n\t\t\tprint 'Totaal: %i' % (totaal)\n\t\t\tprint 'Gemiddelde temperatuur: %i' % (gem_temp)\n\t\t\tprint 'Gemiddelde output: %i' % (gem_pv_out)\n\t\t\tprint 'Totaal output: %i' % (totaal_pv_out)\n\t\t\tprint \"\"\n\treturn [totaal,gem_temp,gem_pv_out,totaal_pv_out]",
"def _get_data_file(self):\n data = glob.glob(os.path.join(self.md_dir, '*.data'))[0]\n with open(os.path.join(self.md_dir, data), 'r') as data_file:\n return data_file.readlines()",
"def get_data_dict(news_path=\"data/news\"):\n\tdata = defaultdict(list)\n\tfor root, subFolders, files in os.walk(news_path):\n\t\tif len(root.split(\"/\")) != 4: continue # only work on root end with subfolder with date\n\t\tdate = root.split(\"/\")[-1]\n\t\tif \"bloomberg\" in root: # bloomberg format\n\t\t\t# print \"bloomberg\"\n\t\t\tdate = date.replace(\"-\", \"\")\n\t\t\tfor file in files:\n\t\t\t\tfilepath = root + \"/\" + file\n\t\t\t\twith open(filepath) as fin:\n\t\t\t\t\tnews_tuple = []\n\t\t\t\t\tfor i, line in enumerate(fin):\n\t\t\t\t\t\ttry: # original file format not consistent\n\t\t\t\t\t\t\tif i == 0: # title\n\t\t\t\t\t\t\t\ttitle = line[3:-1]\n\t\t\t\t\t\t\t\tnews_tuple.append(title)\n\t\t\t\t\t\t\telif i == 2: # time\n\t\t\t\t\t\t\t\ttime = line[14:-2]\n\t\t\t\t\t\t\t\thour = int(time[:2].replace(\" \", \"\")) + 9\n\t\t\t\t\t\t\t\tdate_add_one = hour > 24\n\t\t\t\t\t\t\t\tif date_add_one:\n\t\t\t\t\t\t\t\t\thour %= 24\n\t\t\t\t\t\t\t\t\tdate_one_added = add_one_day(date)\n\t\t\t\t\t\t\t\t\ttime = str(hour) + time[2:]\n\t\t\t\t\t\t\t\tnews_tuple.append(time)\n\t\t\t\t\t\t\t\tdata[date_one_added if date_add_one else date].append(news_tuple)\n\t\t\t\t\t\t\t\tbreak # stop iterate current file\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tif i == 1: # title\n\t\t\t\t\t\t\t\ttitle = line[:-1]\n\t\t\t\t\t\t\t\tnews_tuple.append(title)\n\t\t\t\t\t\t\telif i == 5: # time\n\t\t\t\t\t\t\t\ttime = line[11:-2]\n\t\t\t\t\t\t\t\thour = int(time[:2]) + 9\n\t\t\t\t\t\t\t\tdate_add_one = hour > 24\n\t\t\t\t\t\t\t\tif date_add_one:\n\t\t\t\t\t\t\t\t\thour %= 24\n\t\t\t\t\t\t\t\t\tdate_one_added = add_one_day(date)\n\t\t\t\t\t\t\t\t\ttime = str(hour) + time[2:]\n\t\t\t\t\t\t\t\tnews_tuple.append(time)\n\t\t\t\t\t\t\t\tdata[date_one_added if date_add_one else date].append(news_tuple)\n\t\t\t\t\t\t\t\tbreak # stop iterate current file\n\n\t\tif \"ReutersNews\" in root: # reuter format\n\t\t\t# print \"reuter\"\n\t\t\tfor file in files:\n\t\t\t\tfilepath = root + \"/\" + file\n\t\t\t\twith open(filepath) as fin:\n\t\t\t\t\tnews_tuple = []\n\t\t\t\t\tfor i, line in enumerate(fin):\n\t\t\t\t\t\tif i == 0: # title\n\t\t\t\t\t\t\ttitle = line[3:-1]\n\t\t\t\t\t\t\t# print title\n\t\t\t\t\t\t\tnews_tuple.append(title)\n\t\t\t\t\t\telif i == 2: # time\n\t\t\t\t\t\t\ttime = line.split(\" \")[-2]\n\t\t\t\t\t\t\ttime = hour_12_to_24(time)\n\t\t\t\t\t\t\tnews_tuple.append(time)\n\t\t\t\t\t\t\tdata[date].append(news_tuple)\n\t\t\t\t\t\t\tbreak # stop iterate current file\n\t# within a day sort result\n\tfor k in data:\n\t\tdata[k].sort(key=lambda tup: tup[1])\n\tsave_pickle(data, \"training_dir\", \"data\")",
"def _get_data_post2006(date):\r\n \r\n # build the url based on date & create data container\r\n url = '{}/{}/{}/'.format(BASE_URL, date.year, str(date).replace('-','_'))\r\n data = dict(Air_Temp = [], Barometric_Press = [], Wind_Speed = [])\r\n\r\n print('Fetching online data for {}'.format(date)) \r\n for key in data.keys():\r\n try:\r\n data[key] = request.urlopen('{}{}'.format(url, key)).read().decode(encoding='utf_8').split('\\r\\n')\r\n except:\r\n raise ValueError(date) # error accessing website\r\n else:\r\n data[key].pop() # remove last item which will be an empty string \r\n\r\n # verify lengths of 3 files are equal\r\n lengths = []\r\n for k in data.keys():\r\n lengths.append(len(data[k]))\r\n if lengths[1:] != lengths[:-1]:\r\n raise ValueError(date) # file lengths do not match\r\n \r\n for i in range(len(data['Air_Temp'])):\r\n \r\n # verify timestamps are equal for every related entry in 3 files\r\n timestamps = []\r\n for k in data.keys():\r\n timestamps.append(data[k][i].split()[1])\r\n if timestamps[1:] != timestamps[:-1]:\r\n raise ValueError(date) # timestamps for fields do not line up\r\n \r\n yield dict(Date = data['Air_Temp'][i].split()[0],\r\n Time = data['Air_Temp'][i].split()[1],\r\n Status = 'PARTIAL' if date == date.today() else 'COMPLETE', # assume data from today is incomplete\r\n Air_Temp = data['Air_Temp'][i].split()[2],\r\n Barometric_Press = data['Barometric_Press'][i].split()[2],\r\n Wind_Speed = data['Wind_Speed'][i].split()[2])",
"def _read_dwd(date, timezone, longitude, latitude, path):\n \n # initialize variables \n dwdpath = os.path.join(os.path.join(path, \"dwd\"))\n fields = [\"aswdifd_s\", \"aswdir_s\", \"t_2m\", \"t_g\"]\n \n lastForecast = None\n for f in range(len(fields)):\n # get date of latest forecast\n dirList = os.listdir(os.path.join(dwdpath, fields[f]))\n dirList.sort(reverse = True)\n if dirList[0].rsplit(\"_\", 2)[0] == 120:\n lastForecast = dirList[0].rsplit(\"_\", 2)[1]\n \n if lastForecast != None:\n # unpack compressed, latest forecast\n os.system(\"bunzip2 --keep `find \" + dwdpath + \" -name '*\" + lastForecast + \"*.bz2'`\")\n \n dates = []\n data = []\n for f in range(len(fields)):\n # list all extracted grib files\n dirList = glob.glob(os.path.join(dwdpath, fields[f], \"*\" + lastForecast + \"*.grib2\"))\n dirList.sort()\n \n lastValue = 0\n data.append([])\n \n if len(dirList) >= 48:\n for i in range(24):\n grb = pygrib.open(dirList[i])\n grb.seek(0)\n \n lat, lon = grb.latlons()\n i, j = _get_location_nearest(lat, lon, latitude, longitude)\n \n lastTimestamp = False\n firstTimestamp = False\n for g in grb:\n timestamp = datetime.datetime.strptime(str(g['validityDate']) + \" \" + '%0.0f'%(g['validityTime']/100.0), \"%Y%m%d %H\")\n \n if lastTimestamp:\n if f == 0:\n datestr = datetime.datetime.strftime(lastTimestamp, \"%Y-%m-%d %H\")\n dates.append(datestr)\n \n if fields[f] == \"aswdifd_s\" or fields[f] == \"aswdir_s\":\n diff = (timestamp - lastTimestamp).total_seconds() / 3600.0\n value = (1 / diff) * ((timestamp - firstTimestamp).total_seconds() / 3600 * g['values'][i, j] - (lastTimestamp - firstTimestamp).total_seconds() / 3600 * lastValue)\n else:\n value = g['values'][i, j]\n \n data[f].append(value)\n \n else:\n firstTimestamp = timestamp\n \n lastTimestamp = timestamp\n lastValue = g['values'][i, j]\n \n grb.close()\n \n if len(dates) > 0:\n csvpath = os.path.join(os.path.join(path, \"csv\"))\n with open(os.path.join(csvpath, \"DWD_\" + lastForecast + \".csv\"), 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter = \",\")\n line = [\"time\"]\n line.extend(fields)\n writer.writerow(line)\n for i in range(len(dates)):\n line = [dates[i] + \":00:00\"]\n for j in range(len(fields)):\n line.append(data[j][i])\n writer.writerow(line)\n \n # clean up\n os.system(\"find \" + dwdpath + \" -name '*\" + lastForecast + \"*.grib2' -exec rm -f {} \\;\")\n \n return None;",
"def get_raw_data_descriptors(self):\n def is_date(_d, _fmt='%Y%m%d'):\n \"\"\"\n Check if string (folder name) matches datetime format fmt\n :param _d:\n :param _fmt:\n :return:\n \"\"\"\n try:\n datetime.datetime.strptime(_d, _fmt)\n except Exception as e:\n self.logger.error(e)\n return False\n return True\n\n # get all dates with some raw data from all input sources\n dates = dict()\n # Robo-AO's NAS archive contains folders named as YYYYMMDD.\n # Only consider data taken starting from archiving_start_date\n archiving_start_date = datetime.datetime.strptime(self.config['misc']['archiving_start_date'], '%Y/%m/%d')\n for _p in self.config['path']['path_raw']:\n dates[_p] = sorted([d for d in os.listdir(_p)\n if os.path.isdir(os.path.join(_p, d))\n and is_date(d, _fmt='%Y%m%d')\n and datetime.datetime.strptime(d, '%Y%m%d') >= archiving_start_date\n ])\n return dates",
"def load_station_data(self, filename, dataset='ECA-station', print_prog=True, offset_in_file=0):\n\n if dataset == 'Klem_day':\n raw_data = np.loadtxt(self.data_folder + filename) # first column is continous year and second is actual data\n self.data = np.array(raw_data[:, 1])\n time = []\n\n # use time iterator to go through the dates\n y = int(np.modf(raw_data[0, 0])[1])\n if np.modf(raw_data[0, 0])[0] == 0:\n start_date = date(y, 1, 1)\n delta = timedelta(days = 1)\n d = start_date\n while len(time) < raw_data.shape[0]:\n time.append(d.toordinal())\n d += delta\n self.time = np.array(time)\n self.location = 'Praha-Klementinum, Czech Republic'\n print(\"Station data from %s saved to structure. Shape of the data is %s\" % (self.location, str(self.data.shape)))\n print(\"Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1\")\n\n if dataset == 'ECA-station':\n with open(self.data_folder + filename, 'rb') as f:\n time = []\n data = []\n missing = []\n i = 0 # line-counter\n reader = csv.reader(f)\n for row in reader:\n i += 1\n if i == 16 + offset_in_file: # line with location\n c_list = filter(None, row[1].split(\" \"))\n del c_list[-2:]\n country = ' '.join(c_list).lower()\n station = ' '.join(row[0].split(\" \")[7:]).lower()\n self.location = station.title() + ', ' + country.title()\n if i > 20 + offset_in_file: # actual data - len(row) = 5 as STAID, SOUID, DATE, TG, Q_TG\n staid = int(row[0])\n value = float(row[3])\n year = int(row[2][:4])\n month = int(row[2][4:6])\n day = int(row[2][6:])\n time.append(date(year, month, day).toordinal())\n if value == -9999.:\n missing.append(date(year, month, day).toordinal())\n data.append(np.nan)\n else:\n data.append(value/10.)\n self.station_id = staid\n self.data = np.array(data)\n self.time = np.array(time)\n self.missing = np.array(missing)\n if print_prog:\n print(\"Station data from %s saved to structure. Shape of the data is %s\" % (self.location, str(self.data.shape)))\n print(\"Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1\")\n if self.missing.shape[0] != 0 and self.verbose:\n print(\"** WARNING: There were some missing values! To be precise, %d missing values were found!\" % (self.missing.shape[0]))",
"def load_cached_isd_daily_temp_data(self):\n return load_cached_isd_daily_temp_data(self.usaf_id)",
"def daily_cruncher(days=1, save=False):\n day = []\n with open('server/JSON/dogetipdata2.json') as f:\n dogetipdata = json.load(f)\n for a in dogetipdata:\n if a[0] >= (dogetipdata[-1][0] - (86400*days)): # 86400 seconds in a day\n day.append([a[0],a[1]])\n if save == True and days == 1:\n with open('server/JSON/24h.json', 'wb') as f:\n json.dump(day, f)\n return day",
"def get_data(tstart,tstop,binsperdec = 4,data_dir = '/phys/groups/tev/scratch1/users/Fermi/data'):\n start_date = MyDate(*MET(tstart).time.timetuple()[:6])\n stop_date = MyDate(*MET(tstop).time.timetuple()[:6])\n files = dict(monthly=dict(bpd=None,lt=None),weekly=dict(bpd=None,lt=None),daily=dict(bpd=None,lt=None))\n for t in ['monthly','weekly','daily']:\n files[t]['bpd'] = np.asarray(sorted(glob(os.path.join(data_dir,t,'bpd','%s_*_%ibpd.fits'%(t[:-2].replace('i','y'),binsperdec)))))\n files[t]['lt'] = np.asarray(sorted(glob(os.path.join(data_dir,t,'lt','%s_*_lt.fits'%(t[:-2].replace('i','y'))))))\n month_mask,gti = accept_files(files['monthly']['bpd'],start_date,stop_date,months = True)\n week_mask,gti = accept_files(files['weekly']['bpd'],start_date,stop_date,gti=gti)\n day_mask,gti = accept_files(files['daily']['bpd'],start_date,stop_date,gti=gti)\n bpds = np.append(files['monthly']['bpd'][month_mask],np.append(files['weekly']['bpd'][week_mask],files['daily']['bpd'][day_mask]))\n lts = np.append(files['monthly']['lt'][month_mask],np.append(files['weekly']['lt'][week_mask],files['daily']['lt'][day_mask]))\n return bpds,lts",
"def load_isd_daily_temp_data(\n self, start, end, read_from_cache=True, write_to_cache=True, fetch_from_web=True\n ):\n return load_isd_daily_temp_data(\n self.usaf_id,\n start,\n end,\n read_from_cache=read_from_cache,\n write_to_cache=write_to_cache,\n fetch_from_web=fetch_from_web,\n )",
"def get_daily_data(varid, plev, years, datafiles, data, daymin=1,\n daymax=366, yearnm='year'):\n\n years = atm.makelist(years)\n datafiles = atm.makelist(datafiles)\n\n if isinstance(plev, int) or isinstance(plev, float):\n pres = atm.pres_convert(plev, 'hPa', 'Pa')\n elif plev == 'LML' and 'PS' in data:\n pres = data['PS']\n else:\n pres = None\n\n def get_var(data, varnm, plev=None):\n if plev is None:\n plev = ''\n elif plev == 'LML' and varnm == 'QV':\n varnm = 'Q'\n return data[varnm + str(plev)]\n\n if var_type(varid) == 'calc':\n print('Computing ' + varid)\n if varid == 'THETA':\n var = atm.potential_temp(get_var(data, 'T', plev), pres)\n elif varid == 'THETA_E':\n var = atm.equiv_potential_temp(get_var(data, 'T', plev), pres,\n get_var(data, 'QV', plev))\n elif varid == 'DSE':\n var = atm.dry_static_energy(get_var(data, 'T', plev),\n get_var(data, 'H', plev))\n elif varid == 'MSE':\n var = atm.moist_static_energy(get_var(data, 'T', plev),\n get_var(data, 'H', plev),\n get_var(data, 'QV', plev))\n elif varid == 'VFLXMSE':\n Lv = atm.constants.Lv.values\n var = data['VFLXCPT'] + data['VFLXPHI'] + data['VFLXQV'] * Lv\n var.attrs['units'] = data['VFLXCPT'].attrs['units']\n var.attrs['long_name'] = 'Vertically integrated MSE meridional flux'\n else:\n with xray.open_dataset(datafiles[0]) as ds:\n if varid not in ds.data_vars:\n varid = varid + str(plev)\n var = atm.combine_daily_years(varid, datafiles, years, yearname=yearnm,\n subset_dict={'day' : (daymin, daymax)})\n var = atm.squeeze(var)\n\n # Make sure year dimension is included for single year\n if len(years) == 1 and 'year' not in var.dims:\n var = atm.expand_dims(var, yearnm, years[0], axis=0)\n\n # Wrap years for extended day ranges\n if daymin < 1 or daymax > 366:\n var = wrapyear_all(var, daymin, daymax)\n\n # Convert precip and evap to mm/day\n if varid in ['precip', 'PRECTOT', 'EVAP']:\n var = atm.precip_convert(var, var.attrs['units'], 'mm/day')\n\n return var"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Split a line by tabs and convert each element
|
def tab_split(line: str, converter: Callable[[str], Any]=str) -> List[Any]:
return [converter(x) for x in line.split('\t')]
|
[
"def __convert_tabs_to_spaces(self, spaces, tabs, line):\n\n line = line.replace('\\t', ' ')\n spaces += tabs * 4\n return spaces, line",
"def read_ptsv(line):\n return map(read_ptsv_element, line.rstrip().split('\\t'))",
"def line_to_tokens(line):\n if len(line) < 1:\n yield from[]\n cleaned_line = clean_line(line)\n if len(cleaned_line) < 1:\n yield from[]\n for token in cleaned_line.split(\" \"):\n if len(token) > 0:\n yield token",
"def _read_torchtext_tabular(cls, input_file):\n return open_split(input_file, lower_case=False)",
"def parse_tsv(line, field_names=None, field_delim='\\t', field_columns=None):\n field_names = field_names or ['inputs', 'targets']\n field_columns = field_columns or list(range(len(field_names)))\n return dict(\n zip(field_names,\n tf.io.decode_csv(\n line,\n record_defaults=[''] * len(field_names),\n field_delim=field_delim,\n use_quote_delim=False,\n select_cols=field_columns)))",
"def convert_line(line, features_to_int):\n features_start = [m.start() for m in re.finditer('\\t', line)] # finds positions of all tabs\n main_features = line[:features_start[COUNTRY_CODES]].split('\\t') # splits the line all up to tab after country codes\n result = [np.nan] * (len(features_to_int) + len(main_features)) # fill the array with nan values\n \n for i in range(len(main_features)): # fill the first values\n result[i] = main_features[i]\n \n\n features = line[features_start[COUNTRY_CODES]+1:].split('|') # split the features by |\n for feature in features:\n equal_symbol = feature.find('=') # finds the position of = in features\n key, value = feature[:equal_symbol], feature[equal_symbol+1:] # splits the feature into key, value\n index = len(main_features) + features_to_int[key] # moves the index so it doesn't overlap with previously filled values such as wals code, etc...\n result[index] = value\n return result",
"def _processline (self, line) :\n uttid, text = re.split (r\"\\t\", line.strip ())\n spkrid = uttid[0:5]\n gender = uttid[0].lower ()\n text = self._cleantext (text)\n\n return uttid, text, gender, spkrid",
"def __getWords(self,line):\n\n l=string.expandtabs(string.lower(line),1)\n words=string.split(string.lstrip(l))\n \n return words",
"def _line_parser(self, line):\n line = (line.rstrip()).split(' ')\n return line",
"def tsv_string_to_listoflists(s, func=lambda x : x, sep1='|^|', sep2='|~|'):\n return tsv_string_to_list(s, func=lambda x : tsv_string_to_list(x, func=func, sep=sep1), sep=sep2)",
"def _read_format_line(line, format):\r\n rows = line.strip().split(\"\\t\")\r\n return _read_format_rows(rows, format)",
"def tsv_line(value_list):\n return '\\t'.join([str(x) for x in value_list])",
"def split_sample(line):\n line = line.strip()\n letter_id, letter, next_id, word_id, possition, fold, x = line.split('\\t', 6)\n x = np.fromstring(x, dtype=int, sep='\\t')\n # return letter_id, letter, next_id, fold, x\n label = ENG2I[letter]\n return x, label, next_id",
"def preprocess_tsv(line,\n field_delim='\\t',\n num_fields=2,\n inputs_format='{0}',\n targets_format='{1}',\n field_names=None,\n use_quote_delim=False):\n def _format_part_with_field_numbers(part, field_values):\n found = re.findall(r'{(\\d+)}', part)\n if found:\n return field_values[int(found[0])]\n else:\n return part\n\n def _format_part_with_field_names(part, field_names, field_values):\n field_names_re = '|'.join(['{{({})}}'.format(x) for x in field_names])\n found = re.findall(field_names_re, part)\n if found:\n pos = field_names.index(''.join(found[0]))\n return field_values[int(pos)]\n else:\n return part\n\n def _format(format_string, field_names, field_values):\n if field_names is None:\n parts = [\n _format_part_with_field_numbers(p, field_values)\n for p in re.split(r'({\\d+})', format_string)\n ]\n else:\n field_names_re = '(' + '|'.join(['{{{}}}'.format(x) for x in field_names\n ]) + ')'\n parts = [\n _format_part_with_field_names(p, field_names, field_values)\n for p in re.split(field_names_re, format_string)\n ]\n return tf.strings.join(parts)\n\n field_values = tf.io.decode_csv(\n line,\n record_defaults=[''] *\n (num_fields if field_names is None else len(field_names)),\n field_delim=field_delim,\n use_quote_delim=use_quote_delim)\n return {\n 'inputs': _format(inputs_format, field_names, field_values),\n 'targets': _format(targets_format, field_names, field_values)\n }",
"def stresses_for_line(line):\n\n\tparts = line.split('\\t')\n\n\tif len(parts) == 2:\n\t\ttext, info = parts\n\t\tstresses_string = get_property(info, 'stress')\n\t\tstresses = ''.join(stresses_string.split())\n\t\treturn list(stresses)\n\telif len(parts) == 1:\n\t\treturn stresses_for_text(parts[0])",
"def find_tabs(line: str, start: int = 0) -> int:\n\twhile line[start] == \"\\t\":\n\t\tstart += 1\n\treturn start",
"def segment_by_lines(text: str):\n\treturn text.splitlines()",
"def filter_spaces_tabs(text):\n\n return re.sub(\" |\\t\", \"\", text)",
"def SplitLines(*args):\n return _snap.TStrUtil_SplitLines(*args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a variable in the Plan. This method is used to create the variables that are needed in the Plan in order to add an entry for the outbound connection pool for the new data source.
|
def makeDeploymentPlanVariable(wlstPlan, name, value, xpath, origin='planbased'):
try:
variableAssignment = wlstPlan.createVariableAssignment(name, moduleOverrideName, moduleDescriptorName)
variableAssignment.setXpath(xpath)
variableAssignment.setOrigin(origin)
wlstPlan.createVariable(name, value)
except:
print('--> was not able to create deployment plan variables successfully')
|
[
"def _create_variables(self) -> None:\n if self.relaxed:\n kind = LpContinuous\n else:\n kind = LpInteger\n\n # List all combinations of apps and instances and workloads\n comb_res = cartesian_product(self.system.apps, self.cooked.instances_res)\n comb_dem = cartesian_product(\n self.system.apps, self.cooked.instances_dem, self.load_hist.keys()\n )\n map_res = LpVariable.dicts(\"Y\", comb_res, 0, None, kind)\n map_dem = LpVariable.dicts(\"X\", comb_dem, 0, None, kind)\n self.cooked = self.cooked._replace(map_res=map_res, map_dem=map_dem)",
"def CreateOptimizerSlotVariable(self, var_name, var_params, slot_var_dict):\n self.CreateVariable(var_name, var_params, trainable=False)\n var = self.vars[var_name]\n slot_var_dict[var.name] = var\n return var",
"def create_statement(self):\n var = VariableDeclaration(\n # Create int\n type='int',\n\n # Declarate a variable with name counter and initialise with value counter\n variable_declarators=[VariableDeclarator(\n variable=Variable(\n name='var' + str(self.counter)\n ),\n initializer=Literal(self.counter)\n )]\n )\n self.counter += 1\n return var",
"def newvar(self):\n self._typevars += 1\n return types.variable('a' + str(self._typevars))",
"def Variable(self, variableUI , globalVariables):\n pass",
"def __init__(self,\n variable_pool,\n apply_to,\n modifier = None):\n self._variable_pool = variable_pool\n super(VariableFactory, self).__init__(apply_to, modifier=modifier)",
"def create_new_variable(next_creator, **kwargs):\n canonical_name = _canonicalize_variable_name(kwargs.get(\"name\"))\n v = next_creator(**kwargs)\n\n if canonical_name not in shared_variable_store:\n shared_variable_store[canonical_name] = []\n shared_variable_store[canonical_name].append(v)\n return v",
"def makeVar(v) :\n return [\"var\", v]",
"def create_variable(self, asset_body):\n request = self.workspaces_service.variables().create(\n parent=self.path, body=asset_body\n )\n return gtm_manager.variable.GTMVariable(\n variable=request.execute(), parent=self.path, service=self.service\n )",
"def register_var(self, name, bc_object):\n\n if self.initialized == 1:\n msg.fail(\"ERROR: grid already initialized\")\n\n self.vars.append(name)\n self.nvar += 1\n\n self.BCs[name] = bc_object",
"def Creatvarriables(self): # for creating decision varriables q,x,y,p\r\n self.F = self.source + self.VNFS\r\n \r\n self.r_template = \"r_{:s}_{:s}_{:s}\"\r\n self.lembda_template = \"lambda_{:s}_{:s}_{:s}_{:s}\"\r\n self.T_template = \"T_{:s}_{:s}_{:s}\"\r\n self.o_template = \"o_{:s}_{:s}\"\r\n self.lambda2_template = \"lambda2_{:s}_{:s}_{:s}_{:s}\"\r\n self.r1_template = \"r_{:s}_{:s}_{:s}\"\r\n \r\n for d in self.destination:\r\n for f in G.nodes():\r\n for u in G.nodes():\r\n name = self.r_template.format(d, f, u)\r\n self.Vars[name] = self.model.addVar(\r\n lb=0, vtype=GRB.BINARY, name = name)\r\n \r\n for u,v in G.edges():\r\n name_lembda = self.lembda_template.format(d, f, u, v)\r\n self.Vars[name_lembda] = self.model.addVar(\r\n lb=0, vtype=GRB.BINARY, name= name_lembda)\r\n for u,v in G.edges():\r\n name_T = self.T_template.format(f, u, v)\r\n self.Vars[name_T] = self.model.addVar(\r\n lb=0, vtype=GRB.BINARY, name= name_T)\r\n for u in G.nodes():\r\n name_o = self.o_template.format(f, u) \r\n self.Vars[name_o] = self.model.addVar(\r\n lb=0, vtype=GRB.BINARY, name= name_o)\r\n for u,v in G.edges():\r\n name_lambda2 = self.lambda2_template.format(d, f, v, u)\r\n self.Vars[name_lambda2] = self.model.addVar(\r\n lb=0, vtype=GRB.BINARY, name= name_lambda2)\r\n \r\n for d in self.destination:\r\n for fn in self.VNFS:\r\n for u in G.nodes():\r\n name_r1 = self.r1_template.format(d, f, u)\r\n self.Vars[name_r1] = self.model.addVar(\r\n lb=0, vtype=GRB.BINARY, name= name_r1)\r\n\r\n self.model.update()",
"def _code_setvar_allocate(self, lines, spacer):\n #This only works if the value they specified includes a specific allocate dimension\n #or we can easily determine what it needs to be.\n if self.allocate is None:\n return\n\n variable = self.variable if self.globaldecl is None else self.globaldecl\n if (variable.dimension is not None and\n self.allocatable and variable.D >= 1):\n lines.append(\"{}allocate({}({}))\".format(spacer, self.name, self.allocate))\n\n if (\"pointer\" in self.global_attr(\"modifiers\", \"\") and\n (\"class\" in self.global_attr(\"type\", \"\") or \"type\" in self.global_attr(\"type\", \"\"))):\n if self.allocate == True or not self.allocate:\n lines.append(\"{}allocate({})\".format(spacer, self.name))\n else:\n lines.append(\"{}allocate({}({}))\".format(spacer, self.name, self.allocate))",
"def create_tmp(self, name, size):\n name = f'_v_tmp_{name}'\n if name not in self.c.variables:\n # First time defining it ever for this compiler state,\n # add the newly declared variable to it\n self.c.add_variable(Variable(\n name,\n 'byte' if size == 8 else 'short',\n value='?'\n ))\n\n return name",
"def create_flow(sn, tn, dn):\n return f\"{FLOW_VAR}{sn}{tn}{dn}\"",
"def add_variable(self, name, domain):\n name = str(name)\n if name in self.vs:\n raise RuntimeError(\"Variable '{0}' already defined\".format(name))\n v = Variable(name, domain, None, None)\n self.vs[name] = v",
"def createTrainVariable(self,name,min,max):\r\n\t\tself.addTrainVariable(TrainingVariable(name,min,max))",
"def nc_create(self, output_dataset, name, dimensions, standard_name=\"\"):\n if standard_name == \"\":\n standard_name = name\n\n metadata = self.lookup(name)\n var = output_dataset.createVariable(standard_name, metadata[\"netcdf\"][\"var_type\"], dimensions, zlib=True)\n var.units = metadata[\"units\"]\n var.long_name = metadata[\"name\"][\"default\"]\n var.comment = metadata[\"description\"]\n # var.accuracy = metadata[\"accuracy\"]\n # var.accuracy_info = metadata[\"accuracy_info\"]\n\n ### add other, variable specific attributes\n if metadata[\"netcdf\"][\"other\"] != \"\":\n ## use csv reader as it handles well quotations\n other_args = csv.reader(StringIO(metadata[\"netcdf\"][\"other\"]), delimiter=',')\n # print(other_args)\n for row in other_args:\n for arg in row:\n # print(arg)\n key, val = arg.split('=')\n key = key.strip()\n val = val.strip().strip('\"')\n setattr(var, key, val)\n return var",
"def _generate_variables(self, names, domain):\n for i, name in enumerate(names):\n constraints = self.constraints.get_constraints_by_variable(name)\n var = Variable(name, domain[i], constraints)\n if var not in self.variables:\n self.variables[name] = var\n # adding neighbours to a variable:\n neighbours_names = set()\n for constraint in constraints:\n self.__add_neighbours_to_var(name, constraint.get_variables())\n # for neighbour in constraint.variables:\n # neighbours_names.add(neighbour)\n # if name in neighbours_names:\n # neighbours_names.remove(name) # remove self from neighbours.\n # self.variables[name].set_neighbours(neighbours_names) # give a reference to the set.\n else:\n raise Exception(\"Variable name repeats twice!\")",
"def __init__(self):\n self.variables = {}\n self.values = {}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
To make sure city and country names like 'London, UK' work.
|
def test_city_country_names(self):
city = formatted_city_country('london', 'united kingdom')
self.assertEqual(city, 'London, United Kingdom')
|
[
"def test_city_country(self):\n formatted_city_country = city_country('santiago', 'chile')\n self.assertEqual(formatted_city_country, 'Santiago, Chile')",
"def test_city_country_name_and_population(self):\r\n city_information = formatted_city_country('london', 'united kingdom', 8900000)\r\n self.assertEqual(city_information,\r\n 'London, United Kingdom - Population 8900000')",
"def test_get_city(self):\n self.assertTrue(get_city(\"Sydney, Australia\")==\"Sydney\")",
"def city_country(city, country):\n return city + \", \" + country",
"def city_country(city, country):\n return (city.title() + \", \" + country.title())",
"def check_city(self, token):\n shortened_cities = {'saint': 'st.'}\n if self.city is None and self.state is not None and self.street_suffix is None:\n if token.lower() in self.parser.cities:\n self.city = self._clean(token.capitalize())\n return True\n return False\n # Check that we're in the correct location, and that we have at least one comma in the address\n if self.city is None and self.apartment is None and self.street_suffix is None and len(\n self.comma_separated_address) > 1:\n if token.lower() in self.parser.cities:\n self.city = self._clean(token.capitalize())\n return True\n return False\n # Multi word cities\n if self.city is not None and self.street_suffix is None and self.street is None:\n print(\"Checking for multi part city\", token.lower(), token.lower() in list(shortened_cities.keys()))\n if token.lower() + ' ' + self.city in self.parser.cities:\n self.city = self._clean((token.lower() + ' ' + self.city).capitalize())\n return True\n if token.lower() in list(shortened_cities.keys()):\n token = shortened_cities[token.lower()]\n print(\"Checking for shorted multi part city\", token.lower() + ' ' + self.city)\n if token.lower() + ' ' + self.city.lower() in self.parser.cities:\n self.city = self._clean(token.capitalize() + ' ' + self.city.capitalize())\n return True",
"def test_invalid_city(self):\n\n invalid_cities_to_test = [\" Rosevill3 \", \"W@rr3n\", \"St. Cl@!r Sh0r3s\", \" \", \"_Tr0y\", \" W3st Br@nch\", \" !D3tr0!t\"]\n option = \"city\"\n\n for city in invalid_cities_to_test:\n self.database.city = city\n self.assertFalse(self.database.validate_cityInfo(option, self.database.city))",
"def city_country(city, country, population=''):\n location = city.title() + \", \" + country.title() \n if population:\n location += \" Population - \" + str(population)\n return location",
"def test_str_City(self):\n kansas = City()\n string = \"[City] ({}) {}\".format(kansas.id, kansas.__dict__)\n self.assertEqual(string, str(kansas))",
"def test_city_country_population(self):\n formatted_city_country_population = city_country(\n 'santiago', 'chile', 500000)\n self.assertEqual(formatted_city_country_population,\n 'Santiago, Chile, Population = 500000')",
"def test_extract_city():\n assert extract_city(\"123 W Main, Rexburg, ID 83440\") == \"Rexburg\"\n assert extract_city(\"78 Pine St, Avon Park, FL 33825\") == \"Avon Park\"",
"def describe_city(city, country):\n print(city.title(), \"is in\", country.title() + '.')",
"def test_valid_county(self):\n\n valid_county_to_test = [\"Macomb\", \"Saginaw\", \" Clinton\", \"Gratiot\", \"Ogemaw\", \"Huron\", \"Gladwin\"]\n option = \"county\"\n\n for county in valid_county_to_test:\n self.database.county = county\n self.assertTrue(self.database.validate_cityInfo(option, self.database.county))",
"def test_str_cityid(self):\n self.assertEqual(str, type(Place().city_id))",
"def format_city(city_str: str) -> str:\n city_str = unidecode.unidecode(city_str)\n\n if len(city_str.split()) == 2:\n composed_str = city_str.split()\n first_str = composed_str[0]\n sec_str = composed_str[1]\n\n if first_str == 'St' or first_str == 'Saint' or first_str == 'Sankt':\n return 'St. ' + sec_str\n # specific cases - frequent mistakes\n if city_str == 'Geneva':\n return 'Geneve'\n elif city_str == 'Lucerne':\n return 'Luzern'\n elif city_str == 'Biel' or city_str == 'Bienne':\n return 'Biel/Bienne'\n elif city_str == 'Berne':\n return 'Bern'\n elif city_str == 'Schlatt (Zurich)':\n return 'Zurich'\n else:\n return city_str",
"def validate_city(self, field):\n cities = mongo.db.cities.find({'country': self.country.data})\n choices = [c['name'] for c in cities]\n if field.data not in choices:\n raise ValidationError('Invalid choice')",
"def test_build_abbreviation(self):\n\n sub_agency_name = \"Administrative Conference of the United States\"\n self.assertEqual(\"ACUS\", build_abbreviation(sub_agency_name))\n\n sub_agency_name = \"U.S. Customs & Border Protection\"\n self.assertEqual(\"USCBP\", build_abbreviation(sub_agency_name))",
"def location_combiner(city_name, country_name):\n\n # Initialize variables\n location = city_name + ', ' + country_name\n\n return location",
"def location_cap(location):\r\n if not location:\r\n return None\r\n\r\n #Split on comma - Fort Collins, CO, USA\r\n tokens = location.split(',')\r\n for token in tokens:\r\n #Split on spaces so we can case every word\r\n #If the word len > 2 (ie, not a state prefix) and not USA\r\n #Titlecase the string\r\n #Else uppercase the string\r\n t = [i.title() if len(i) > 2 and i.upper() != \"USA\" else i.upper() for i in token.split(' ')]\r\n tokens[tokens.index(token)] = ' '.join(t)\r\n return ','.join(tokens)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
To make sure city information in the form 'London, United Kingdom - Population 8900000' works.
|
def test_city_country_name_and_population(self):
city_information = formatted_city_country('london', 'united kingdom', 8900000)
self.assertEqual(city_information,
'London, United Kingdom - Population 8900000')
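
# A minimal sketch (added for illustration; not part of this record) of the
# formatted_city_country(...) helper that the test above exercises. The name and
# signature are inferred from the test call; this is only one plausible
# implementation that satisfies the assertion.
def formatted_city_country(city, country, population=None):
    """Return 'City, Country', optionally followed by ' - Population N'."""
    location = f"{city.title()}, {country.title()}"
    if population:
        location += f" - Population {population}"
    return location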
|
[
"def test_city_country_names(self):\r\n city = formatted_city_country('london', 'united kingdom')\r\n self.assertEqual(city, 'London, United Kingdom')",
"def test_city_country(self):\n formatted_city_country = city_country('santiago', 'chile')\n self.assertEqual(formatted_city_country, 'Santiago, Chile')",
"def test_extract_city():\n assert extract_city(\"123 W Main, Rexburg, ID 83440\") == \"Rexburg\"\n assert extract_city(\"78 Pine St, Avon Park, FL 33825\") == \"Avon Park\"",
"def test_get_city(self):\n self.assertTrue(get_city(\"Sydney, Australia\")==\"Sydney\")",
"def test_city_country_population(self):\n formatted_city_country_population = city_country(\n 'santiago', 'chile', 500000)\n self.assertEqual(formatted_city_country_population,\n 'Santiago, Chile, Population = 500000')",
"def test_str_City(self):\n kansas = City()\n string = \"[City] ({}) {}\".format(kansas.id, kansas.__dict__)\n self.assertEqual(string, str(kansas))",
"def test_geocode_city_state(self):\n self._select_geocoder()\n resource = GeocoderResource()\n req = HttpRequest()\n req.method = 'GET'\n req.GET['q'] = \"golden, co\"\n bundle = resource.build_bundle(request=req)\n results = resource.obj_get_list(bundle)\n self.assertApxEqual(results[0].lat, 39.756655, .001) \n self.assertApxEqual(results[0].lng, -105.224949, .001)",
"def test_geocode_city(self):\n self._select_geocoder()\n resource = GeocoderResource()\n req = HttpRequest()\n req.method = 'GET'\n req.GET['q'] = \"Denver\"\n bundle = resource.build_bundle(request=req)\n results = resource.obj_get_list(bundle)\n self.assertApxEqual(results[0].lat, 39.737567, .01)\n self.assertApxEqual(results[0].lng, -104.9847179, .01)",
"def city_country(city, country, population=''):\n location = city.title() + \", \" + country.title() \n if population:\n location += \" Population - \" + str(population)\n return location",
"def format_city(city_str: str) -> str:\n city_str = unidecode.unidecode(city_str)\n\n if len(city_str.split()) == 2:\n composed_str = city_str.split()\n first_str = composed_str[0]\n sec_str = composed_str[1]\n\n if first_str == 'St' or first_str == 'Saint' or first_str == 'Sankt':\n return 'St. ' + sec_str\n # specific cases - frequent mistakes\n if city_str == 'Geneva':\n return 'Geneve'\n elif city_str == 'Lucerne':\n return 'Luzern'\n elif city_str == 'Biel' or city_str == 'Bienne':\n return 'Biel/Bienne'\n elif city_str == 'Berne':\n return 'Bern'\n elif city_str == 'Schlatt (Zurich)':\n return 'Zurich'\n else:\n return city_str",
"def check_city(self, token):\n shortened_cities = {'saint': 'st.'}\n if self.city is None and self.state is not None and self.street_suffix is None:\n if token.lower() in self.parser.cities:\n self.city = self._clean(token.capitalize())\n return True\n return False\n # Check that we're in the correct location, and that we have at least one comma in the address\n if self.city is None and self.apartment is None and self.street_suffix is None and len(\n self.comma_separated_address) > 1:\n if token.lower() in self.parser.cities:\n self.city = self._clean(token.capitalize())\n return True\n return False\n # Multi word cities\n if self.city is not None and self.street_suffix is None and self.street is None:\n print(\"Checking for multi part city\", token.lower(), token.lower() in list(shortened_cities.keys()))\n if token.lower() + ' ' + self.city in self.parser.cities:\n self.city = self._clean((token.lower() + ' ' + self.city).capitalize())\n return True\n if token.lower() in list(shortened_cities.keys()):\n token = shortened_cities[token.lower()]\n print(\"Checking for shorted multi part city\", token.lower() + ' ' + self.city)\n if token.lower() + ' ' + self.city.lower() in self.parser.cities:\n self.city = self._clean(token.capitalize() + ' ' + self.city.capitalize())\n return True",
"def getCity (address):\n city = \"\"\n postalCode = getPostalCode(address)\n if postalCode:\n address = address.split(postalCode)\n for word in address[1].split(\" \")[::-1]:\n if not word.isdigit() and word not in [country.alpha_2 for country in pc.countries]:\n city = word +\" \"+city\n return city.lstrip().rstrip()\n else:\n for i, word in enumerate(address.split(\" \")[::-1]):\n if not word.isdigit() and word.lower() not in [country.alpha_2.lower() for country in pc.countries]:\n city = word +\"\"+city\n if i == 3:\n return city.lstrip().rstrip()",
"def test_invalid_city(self):\n\n invalid_cities_to_test = [\" Rosevill3 \", \"W@rr3n\", \"St. Cl@!r Sh0r3s\", \" \", \"_Tr0y\", \" W3st Br@nch\", \" !D3tr0!t\"]\n option = \"city\"\n\n for city in invalid_cities_to_test:\n self.database.city = city\n self.assertFalse(self.database.validate_cityInfo(option, self.database.city))",
"def describe_city(city, country):\n print(city.title(), \"is in\", country.title() + '.')",
"def test_str_cityid(self):\n self.assertEqual(str, type(Place().city_id))",
"def city_country(city, country):\n return city + \", \" + country",
"def city_country(city, country):\n return (city.title() + \", \" + country.title())",
"def get_city_name(zipcode):\n try:\n city = geocode(zipcode)\n city = find_between(city, '\"', '\"') # remove json formatting\n city = city.split(', ') # separate into parts\n city[1] = remove_numbers(city[1])\n return ', '.join(city).strip() # return final value\n except:\n print 'Your city was not found, resorting to default.'\n return 'Austin, TX, USA' # show sample on break",
"def test_sunnyvale_geo():\n dataframe = get_final_geocodes_dataframe()\n sunnyvale = get_city_state_row(dataframe, 'sunnyvale', 'california')\n assert len(sunnyvale) == 1\n assert float(sunnyvale.get('latitude')) == 37.36883\n assert float(sunnyvale.get('longitude')) == -122.0363496\n sunnyvale_address = 'El Camino & Mathilda, Sunnyvale, CA 94087, USA'\n assert sunnyvale.get('reverse_address').iloc[0] == sunnyvale_address"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Unit test for 'predict(...)'.
|
def test_predict(self):
assert 2 == 2
|
[
"def test_predict():\n\t\n\t# Create a row of data and run prediction.\n\thome = 'Arsenal'\n\taway = 'Chelsea'\n\tstats = pd.read_sql_query(\"select * from stats;\", engine)\n\tmodel = joblib.load('./model.pkl')\n\tresult = prediction.prediction(home, away, stats, model)\n\n\t# Check type of output.\n\tassert isinstance(result, np.ndarray)\n\n\t# Check array length.\n\tassert len(result) == 3",
"def predict(model, X_test):",
"def mock_predict(model_id):\t\n\n\tmodel_path = \"{}/m{}.pkl\".format(model_db_path, model_id)\n\n\n\t##################\n\t# demo predict input\n\tfor i in range(10):\n\t\tsample_idx = randint(0,len(test_dataset)-1)\n\t\tsample_test = test_dataset[sample_idx]\n\n\t\tif i == 0:\n\t\t\tsample_test_data = mx.nd.expand_dims(sample_test[0], axis = 0)\t# ndarray [[data1] [data2] ...]\n\t\t\tsample_test_label = mx.nd.array([sample_test[1]])\t\t\t# ndarray [label1 label2 ... ]\n\t\telse:\n\t\t\tsample_test_data = mx.nd.concat(sample_test_data, mx.nd.expand_dims(sample_test[0], axis = 0))\t# ndarray [[data1] [data2] ...]\n\t\t\tsample_test_label = mx.nd.concat(sample_test_label, mx.nd.array([sample_test[1]]), dim = 0)\t\t\t\t# ndarray [label1 label2 ... ]\n\t##################\n\n\ttry: \n\t\toutput = model.predict(sample_test_data, model_path)\n\n\n\t\t# Cast each output to int\n\t\tresults = []\n\t\tresult_labels = []\n\t\tfor i in range(output.size):\n\t\t\tresults.append(str(mx.nd.cast(output[i], dtype='int32').asscalar()))\n\t\t\tresult_labels.append(str(mx.nd.cast(sample_test_label[i], dtype='int32').asscalar()))\n\t\t\n\t\tresponse = {\"results\": results, \"labels\": result_labels}\n\n\t\treturn make_response(jsonify(response), 200)\n\n\texcept FileNotFoundError:\n\t\tresponse = {\"error\": \"Model not found. Make sure you have trained the model\"}\n\t\treturn make_response(jsonify(response), 404)",
"def test_predict(self, get_model):\n data, data_val = make_data()\n model = get_model()\n model.compile(optimizer='sgd', loss='categorical_crossentropy')\n\n expe = Experiment(model)\n expe.fit([data], [data_val])\n KTB.predict(expe.model_dict, [data['X']])\n print(self)",
"def test_predict():\n recommender = SLIM(alpha=0.1, l1_ratio=1e-3, seed=0)\n utils.test_binary_recommend_ml100k(recommender, 0.1)",
"def testModelFnInPredictMode(self):\n configs = _get_configs_for_model(MODEL_NAME_FOR_TEST)\n self._assert_outputs_for_predict(configs)",
"def test_predict_tool(working_dir):\n out_dir = os.path.join(working_dir, 'learn_output')\n model_path = os.path.join(out_dir, 'model.txt')\n predictions_path = os.path.join(out_dir, 'predictions.txt')\n\n # Mock up some input data\n prob_path, prob = mock_input(working_dir)\n os.mkdir(out_dir)\n\n # Train a model and save it to a file\n classifier = SelectAndClassify(SelectKBest(k=5), GaussianNB(), name='test model').fit(prob)\n model = ClassificationModel(classifier, prob)\n model.write(model_path)\n\n # Run the predict tool with the model using the training data loaded from a file, and validate that\n # the returned predictions match\n predict.main([model_path, prob_path, predictions_path, '--index_col', 'sample_id'])\n\n expected_predictions = pd.DataFrame({'sample_id': prob.sample_ids, 'score': classifier.apply(prob)})\n actual_predictions = pd.read_csv(predictions_path, sep='\\t')\n\n np.testing.assert_allclose(actual_predictions['score'].values, expected_predictions['score'].values)",
"def predict(self, test_data, predict_proba = False, pred_class_and_proba = False):\n pass",
"def test_model_prediction(self):\n self.assertTrue(type(self.pred) is dict)",
"def test1(self):\n # arrange\n model_task = MLModelPredictionTask(module_name=\"iris_model.iris_predict\", class_name=\"IrisModel\")\n\n # act\n exception_raised = False\n result = None\n try:\n result = model_task.run(data={\"sepal_length\": 4.4, \"sepal_width\": 2.9, \"petal_length\": 1.4, \"petal_width\": 0.2})\n except Exception as e:\n exception_raised = True\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(type(result) is dict)",
"def test_model_inference_success(test_client, input_line):\n response = test_client.post('/predict',\n data=dumps(input_line),\n content_type=\"application/json\")\n assert response.status_code == 200",
"def test_prediction_input():\n\twith pytest.raises(ValueError) as excinfo1:\n\t\t# test data input\n\t\tpath_to_model = 'test/bad_test_model.pkl'\n\t\tsample_data='test/one_sample.csv'\n\t\tprediction(path_to_model, sample_data)\n\n\tassert str(excinfo1.value) == 'Number of features of the model must match the input. Model n_features is 9 and input n_features is 8 '",
"def _predict(payload: Dict[str, Any]):\n client_options = {\"api_endpoint\": payload[\"model_api_endpoint\"]}\n client = automl.TablesClient(\n project=payload[\"model_project\"],\n region=payload[\"model_region\"],\n client_options=client_options)\n\n return client.predict(\n model_display_name=payload[\"model_name\"], inputs=payload[\"row\"])",
"def test_multiple_predict_candidates():\n\n inputs = [{\"SMILES\": \"c1(C=O)cc(OC)c(O)cc1\"}, {\"SMILES\": \"C=C\"}]\n vid = 177\n\n prediction_results = client.predict(vid, inputs, method=\"scalar\")\n assert len(prediction_results) == 2\n assert type(prediction_results[0]) == PredictionResult\n assert type(prediction_results[1]) == PredictionResult",
"def _mocked_predict(a_rel, a_data, a_ret, a_i):\n a_ret[a_i] *= 0\n a_ret[a_i][PRED_IDX] = 1.",
"def test_run_experiment_predict_expected_scores_non_probablistic_svc():\n source = 'predict-expected-scores-non-probabilistic-svc'\n config_file = join(rsmtool_test_dir,\n 'data',\n 'experiments',\n source,\n 'rsmpredict.json')\n do_run_prediction(source, config_file)",
"def do_predictions(self):\n\n self.train_preds = self.tfmodel.predict(self.Data.X_train)\n self.test_preds = self.tfmodel.predict(self.Data.X_test)\n\n self.Helpers.logger.info(\n \"Training predictions: \" + str(self.train_preds))\n self.Helpers.logger.info(\n \"Testing predictions: \" + str(self.test_preds))\n print(\"\")",
"def predict(self):\n raise NotImplementedError(\"Child class must implement this method\")",
"def classifier_predict(samples, classifier_model):\n return classifier_model.predict(samples)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Unit test for 'reverse_encode(...)'.
|
def test_reverse_encode(self):
reverse = self.test_wbn.reverse_encode([0, 1])
assert isinstance(reverse, list)
|
[
"def test_decode():\n assert one.decode(one.encode(\"Hello World\")) == \"hello world\"",
"def test_encode():\n assert one.encode(\n \"Hello World\") == \"11481249678067805698 10695698668367809533\"",
"def test_encode():\n\n assert ceaser.encode(\"bbb\", 3) == \"eee\"\n\n assert ceaser.encode(\"ccccc\", 2) == \"eeeee\"\n\n assert ceaser.encode(\"blake\", 4) == \"fpeoi\"\n \n assert ceaser.encode(\"\", 4) == \"\"",
"def test_URLEncode_encoded_input():\n res = main({'value': 'https%3A//www.google.com/'})\n assert res == 'https%3A//www.google.com/'",
"def test_URLEncode_partial_encoded_input():\n res = main({'value': 'https%3A//www.google.com/url@to@encode'})\n assert res == 'https%3A//www.google.com/url%40to%40encode'",
"def test_reverse_text() -> None:\n string_to_reverse = 'qwerty'\n reversed_text = text.reverse_string(string_to_reverse)\n assert reversed_text == 'ytrewq'",
"def test_decode():\n assert three.decode(\"test3_keycode\",\n three.encode(\"test3_keycode\",\n \"Hello World!\")) == \"Hello World!\"",
"def encodeString():\n pass",
"def test_encode_byte_string(self):\n self.assertEqual(\n encode(b'Hello, IOTA!', 'trytes'),\n b'RBTC9D9DCDQAEASBYBCCKBFA',\n )",
"def reverse_str() -> None:\n r = requests.post(\"http://challenge.code2040.org/api/reverse\",\n data={'token': token})\n\n if (type(r.text) is str): # Making sure it is a string\n reverse_str = str(r.text[::-1])\n\n r = requests.post(\"http://challenge.code2040.org/api/reverse/validate\",\n data={'token': token, 'string': reverse_str})\n print(r.status_code, r.reason)",
"def test_get_reverse():\n assert get_reverse(1234) == 4321\n assert get_reverse(3445) == 5443",
"def test_URLEncode():\n res = main({'value': 'https://www.google.com/'})\n assert res == 'https%3A//www.google.com/'",
"def test_reversal(self):\n assert self.example_one.string_reversal() == \"Madam,I'm Adam\"[::-1]\n assert self.example_two.string_reversal() == \"howdy\"[::-1]\n assert self.example_three.string_reversal() == \"Third Times The ChArM\"[::-1]",
"def test_9():\n str_1 = 'Introduction'\n reversed_str = str_1[::-1]\n assert (reversed_str == 'noitcudortnI')",
"def test_unescape(fb, fb_secure):\n\n assert fb.unescape('This has \\\\\"quotes\\\\\"') == 'This has \"quotes\"'\n assert fb.unescape('This has a backslash \\\\\\\\') == 'This has a backslash \\\\'\n assert fb.unescape('This has \\\\\\\\\\\\\"both\\\\\\\\\\\\\"') == 'This has \\\\\"both\\\\\"'",
"def test_encoding(self):\n b = self.locator.get_model('b').model\n self.assertEqual(b['unicode'],\n self.unicode_data)",
"def test_encode():\n assert three.encode(\n \"test3_keycode\", \"Hello World!\"\n ) == \"867329260437960514960514180364036524194068180364438195960514789640104968\"",
"def test_reverse_wordmap():\n wordmap = {'C':0,'#':1,'!':2}\n rev_wordmap = DataUtils.reverse_wordmap(wordmap)\n assert len(rev_wordmap) == 3\n return",
"def test_decode_invalid_pair_errors_replace(self):\n self.assertEqual(\n decode(b'ZJVYUGTDRPDYFGFXMK', 'trytes', 'replace'),\n b'??\\xd2\\x80??\\xc3??',\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Hook into nosetests or other unittest-based frameworks. The hook re-raises test exceptions so that a debugger such as PyCharm can inspect them easily. This is only done when a single test case is being run. The code is somewhat experimental; it should work, but if it does not, installing the hook can simply be skipped. Currently, however, any exception raised here is fatal, as we expect this to work.
|
def _try_hook_into_tests():
# Check if this is run inside a debugger. Skip if this is not the case.
import sys
get_trace = getattr(sys, "gettrace", None)
if not get_trace:
return
if get_trace() is None:
return
# get TestProgram instance from stack...
from unittest import TestProgram
from returnn.util.better_exchook import get_current_frame
frame = get_current_frame()
if not frame:
# This will not always work. Just silently accept this. This should be rare.
return
test_program = None
while frame:
local_self = frame.f_locals.get("self")
if isinstance(local_self, TestProgram):
test_program = local_self
break
frame = frame.f_back
if not test_program:
# Ok, this is not run as test, so fine, nothing to do then.
return
test_names = getattr(test_program, "testNames")
if not test_names:
# Unexpected, but just silently ignore.
return
if len(test_names) >= 2 or ":" not in test_names[0]:
# Multiple tests are being run. Do not hook into this.
# We only want to install the hook if there is only a single test case.
return
# Ok, try to install our plugin.
class _ReraiseExceptionTestHookPlugin:
@staticmethod
def _reraise_exception(test, err):
exc_class, exc, tb = err
print("Test %s, exception %s %s, reraise now." % (test, exc_class.__name__, exc))
raise exc
handleFailure = _reraise_exception
handleError = _reraise_exception
config = getattr(test_program, "config")
config.plugins.addPlugin(_ReraiseExceptionTestHookPlugin())
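
# Hypothetical usage sketch (an assumption, not shown in the snippet above): the
# hook walks the call stack looking for a running TestProgram, so it is meant to
# be invoked while nose is importing/collecting the test module, e.g. near the
# top of a test file:
#
#     _try_hook_into_tests()  # assumed call site; only active under a debugger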
|
[
"def fail (self):\n \n import leoGlobals as g\n \n g.app.unitTestDict[\"fail\"] = g.callerName(2)",
"def run_tests(self):\n import pytest\n\n errno = pytest.main([])\n sys.exit(errno)",
"def skip_this_extension_module():\n if not run_end_to_end:\n raise unittest.SkipTest('this module is skipped because it is an extension module')",
"def test_pytest_broken_setup_will_be_reported_as_error(self):\n py_file = self.testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def my_fixture():\n raise Exception('will fail in setup')\n yield\n\n def test_will_fail_in_setup(my_fixture):\n assert 1 == 1\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 4\n test_span = spans[0]\n\n assert test_span.get_tag(test.STATUS) == test.Status.FAIL.value\n assert test_span.get_tag(\"error.type\").endswith(\"Exception\") is True\n assert test_span.get_tag(ERROR_MSG) == \"will fail in setup\"\n assert test_span.get_tag(\"error.stack\") is not None\n assert test_span.get_tag(\"component\") == \"pytest\"",
"def test_skip(self):\n LOG.info('About to skip...')\n self.skipTest('No reason.')",
"def run_failed_tests(self):\n self.reset_rollback_importer()\n test_suite = unittest.TestSuite()\n for node in self.model.node_lookup.values():\n if isinstance(node.test, unittest.TestCase) and node.get_status() in {\n TestStatus.fail,\n TestStatus.error,\n }:\n mayaunittest.get_tests(test=node.path(), test_suite=test_suite)\n self.output_console.clear()\n self.model.run_tests(self.stream, test_suite)",
"def main(num_threads, test_subdir, test_runner_name, results_formatter):\n\n # Do not shut down on sighup.\n if hasattr(signal, 'SIGHUP'):\n signal.signal(signal.SIGHUP, signal.SIG_IGN)\n\n dotest_argv = sys.argv[1:]\n\n global RESULTS_FORMATTER\n RESULTS_FORMATTER = results_formatter\n\n # We can't use sys.path[0] to determine the script directory\n # because it doesn't work under a debugger\n parser = dotest_args.create_parser()\n global dotest_options\n dotest_options = dotest_args.parse_args(parser, dotest_argv)\n\n adjust_inferior_options(dotest_argv)\n\n session_dir = os.path.join(os.getcwd(), dotest_options.s)\n\n # The root directory was specified on the command line\n test_directory = os.path.dirname(os.path.realpath(__file__))\n if test_subdir and len(test_subdir) > 0:\n test_subdir = os.path.join(test_directory, test_subdir)\n else:\n test_subdir = test_directory\n\n # clean core files in test tree from previous runs (Linux)\n cores = find('core.*', test_subdir)\n for core in cores:\n os.unlink(core)\n\n system_info = \" \".join(platform.uname())\n\n # Figure out which test files should be enabled for expected\n # timeout\n expected_timeout = getExpectedTimeouts(dotest_options.lldb_platform_name)\n if results_formatter is not None:\n results_formatter.set_expected_timeouts_by_basename(expected_timeout)\n\n # Figure out which testrunner strategy we'll use.\n runner_strategies_by_name = get_test_runner_strategies(num_threads)\n\n # If the user didn't specify a test runner strategy, determine\n # the default now based on number of threads and OS type.\n if not test_runner_name:\n test_runner_name = default_test_runner_name(num_threads)\n\n if test_runner_name not in runner_strategies_by_name:\n raise Exception(\n \"specified testrunner name '{}' unknown. Valid choices: {}\".format(\n test_runner_name,\n list(runner_strategies_by_name.keys())))\n test_runner_func = runner_strategies_by_name[test_runner_name]\n\n # Collect the files on which we'll run the first test run phase.\n test_files = []\n find_test_files_in_dir_tree(\n test_subdir, lambda tdir, tfiles: test_files.append(\n (test_subdir, tfiles)))\n\n # Do the first test run phase.\n summary_results = walk_and_invoke(\n test_files,\n dotest_argv,\n num_threads,\n test_runner_func)\n\n (timed_out, passed, failed, unexpected_successes, pass_count,\n fail_count) = summary_results\n\n # Check if we have any tests to rerun as phase 2.\n if results_formatter is not None:\n tests_for_rerun = results_formatter.tests_for_rerun\n results_formatter.tests_for_rerun = {}\n\n if tests_for_rerun is not None and len(tests_for_rerun) > 0:\n rerun_file_count = len(tests_for_rerun)\n print(\"\\n{} test files marked for rerun\\n\".format(\n rerun_file_count))\n\n # Check if the number of files exceeds the max cutoff. If so,\n # we skip the rerun step.\n if rerun_file_count > configuration.rerun_max_file_threshold:\n print(\"Skipping rerun: max rerun file threshold ({}) \"\n \"exceeded\".format(\n configuration.rerun_max_file_threshold))\n else:\n rerun_tests(test_subdir, tests_for_rerun, dotest_argv)\n\n # The results formatter - if present - is done now. 
Tell it to\n # terminate.\n if results_formatter is not None:\n results_formatter.send_terminate_as_needed()\n\n timed_out = set(timed_out)\n num_test_files = len(passed) + len(failed)\n num_test_cases = pass_count + fail_count\n\n # move core files into session dir\n cores = find('core.*', test_subdir)\n for core in cores:\n dst = core.replace(test_directory, \"\")[1:]\n dst = dst.replace(os.path.sep, \"-\")\n os.rename(core, os.path.join(session_dir, dst))\n\n # remove expected timeouts from failures\n for xtime in expected_timeout:\n if xtime in timed_out:\n timed_out.remove(xtime)\n failed.remove(xtime)\n result = \"ExpectedTimeout\"\n elif xtime in passed:\n result = \"UnexpectedCompletion\"\n else:\n result = None # failed\n\n if result:\n test_name = os.path.splitext(xtime)[0]\n touch(os.path.join(session_dir, \"{}-{}\".format(result, test_name)))\n\n # Only run the old summary logic if we don't have a results formatter\n # that already prints the summary.\n print_legacy_summary = results_formatter is None\n if not print_legacy_summary:\n # Print summary results. Summarized results at the end always\n # get printed to stdout, even if --results-file specifies a different\n # file for, say, xUnit output.\n results_formatter.print_results(sys.stdout)\n\n # Figure out exit code by count of test result types.\n issue_count = 0\n for issue_status in EventBuilder.TESTRUN_ERROR_STATUS_VALUES:\n issue_count += results_formatter.counts_by_test_result_status(\n issue_status)\n\n # Return with appropriate result code\n if issue_count > 0:\n sys.exit(1)\n else:\n sys.exit(0)\n else:\n # Print the legacy test results summary.\n print()\n sys.stdout.write(\"Ran %d test suites\" % num_test_files)\n if num_test_files > 0:\n sys.stdout.write(\" (%d failed) (%f%%)\" % (\n len(failed), 100.0 * len(failed) / num_test_files))\n print()\n sys.stdout.write(\"Ran %d test cases\" % num_test_cases)\n if num_test_cases > 0:\n sys.stdout.write(\" (%d failed) (%f%%)\" % (\n fail_count, 100.0 * fail_count / num_test_cases))\n print()\n exit_code = 0\n\n if len(failed) > 0:\n failed.sort()\n print(\"Failing Tests (%d)\" % len(failed))\n for f in failed:\n print(\"%s: LLDB (suite) :: %s (%s)\" % (\n \"TIMEOUT\" if f in timed_out else \"FAIL\", f, system_info\n ))\n exit_code = 1\n\n if len(unexpected_successes) > 0:\n unexpected_successes.sort()\n print(\"\\nUnexpected Successes (%d)\" % len(unexpected_successes))\n for u in unexpected_successes:\n print(\"UNEXPECTED SUCCESS: LLDB (suite) :: %s (%s)\" % (u, system_info))\n\n sys.exit(exit_code)",
"def setUp(self):\n self.errors_found = []\n self.standard_stat = {'uid': 0, 'gid': 80, 'mode': '0775'}\n self.application_exceptions = {}\n self.application_exceptions['System Preferences'] = {}\n self.application_exceptions['System Preferences']['gid'] = 0\n self.application_exceptions['System Preferences']['mode'] = '0775'\n self.application_exceptions['System Preferences']['uid'] = 0\n self.utilities_exceptions = {}\n # Here are a couple of examples of making exceptions for stuff we\n # symlink into Applications or Applications/Utilities\n self.utilities_exceptions['Kerberos'] = {}\n self.utilities_exceptions['Kerberos']['gid'] = 0\n self.utilities_exceptions['Kerberos']['mode'] = '0755'\n self.utilities_exceptions['Kerberos']['symlink_ok'] = True\n self.utilities_exceptions['Kerberos']['uid'] = 0\n self.utilities_exceptions['Screen Sharing'] = {}\n self.utilities_exceptions['Screen Sharing']['gid'] = 0\n self.utilities_exceptions['Screen Sharing']['mode'] = '0755'\n self.utilities_exceptions['Screen Sharing']['symlink_ok'] = True\n self.utilities_exceptions['Screen Sharing']['uid'] = 0",
"def test_patch_run(self):\n pass",
"def setup_exceptionhook():\n\n def _pdb_excepthook(type, value, tb):\n if is_interactive():\n import traceback\n import pdb\n traceback.print_exception(type, value, tb)\n print()\n pdb.post_mortem(tb)\n else:\n lgr.warn(\"We cannot setup exception hook since not in interactive mode\")\n _sys_excepthook(type, value, tb)\n\n sys.excepthook = _pdb_excepthook",
"def run_iptest():\n # Apply our monkeypatch to Xunit\n if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'):\n monkeypatch_xunit()\n\n warnings.filterwarnings('ignore',\n 'This will be removed soon. Use IPython.testing.util instead')\n \n if sys.argv[1] in special_test_suites:\n sys.argv[1:2] = special_test_suites[sys.argv[1]]\n special_suite = True\n else:\n special_suite = False\n\n argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks\n\n '--with-ipdoctest',\n '--ipdoctest-tests','--ipdoctest-extension=txt',\n\n # We add --exe because of setuptools' imbecility (it\n # blindly does chmod +x on ALL files). Nose does the\n # right thing and it tries to avoid executables,\n # setuptools unfortunately forces our hand here. This\n # has been discussed on the distutils list and the\n # setuptools devs refuse to fix this problem!\n '--exe',\n ]\n if '-a' not in argv and '-A' not in argv:\n argv = argv + ['-a', '!crash']\n\n if nose.__version__ >= '0.11':\n # I don't fully understand why we need this one, but depending on what\n # directory the test suite is run from, if we don't give it, 0 tests\n # get run. Specifically, if the test suite is run from the source dir\n # with an argument (like 'iptest.py IPython.core', 0 tests are run,\n # even if the same call done in this directory works fine). It appears\n # that if the requested package is in the current dir, nose bails early\n # by default. Since it's otherwise harmless, leave it in by default\n # for nose >= 0.11, though unfortunately nose 0.10 doesn't support it.\n argv.append('--traverse-namespace')\n\n # use our plugin for doctesting. It will remove the standard doctest plugin\n # if it finds it enabled\n ipdt = IPythonDoctest() if special_suite else IPythonDoctest(make_exclude())\n plugins = [ipdt, KnownFailure()]\n \n # We need a global ipython running in this process, but the special\n # in-process group spawns its own IPython kernels, so for *that* group we\n # must avoid also opening the global one (otherwise there's a conflict of\n # singletons). Ultimately the solution to this problem is to refactor our\n # assumptions about what needs to be a singleton and what doesn't (app\n # objects should, individual shells shouldn't). But for now, this\n # workaround allows the test suite for the inprocess module to complete.\n if not 'IPython.kernel.inprocess' in sys.argv:\n globalipapp.start_ipython()\n\n # Now nose can run\n TestProgram(argv=argv, addplugins=plugins)",
"def test_without_plugin():\n pass",
"def skip_on_exception(exp):\n\n from pytest import skip\n\n @wrapt.decorator\n def wrapper(wrapped, instance, args, kwargs):\n try:\n return wrapped(*args, **kwargs)\n except exp as e:\n skip(str(e))\n\n return wrapper",
"def test_handle_tests_core(self):\n args = parse_args(u\"tests core --dry-run\", use_shlex=True)\n self.assertEqual(Main.handle_tests(args), 0)",
"def startTestRun(self, event):\n try:\n # Django >= 1.6\n from django.test.runner import reorder_suite\n except ImportError:\n # Django < 1.6\n from django.test.simple import reorder_suite\n from django.test.simple import DjangoTestSuiteRunner\n from django.test.utils import setup_test_environment\n # Init the django default runner so we can call it's functions as needed\n self.dtsr = DjangoTestSuiteRunner()\n\n setup_test_environment()\n\n event.suite = reorder_suite(event.suite, (unittest.TestCase,))\n\n self.old_config = self.dtsr.setup_databases()\n \n if self.session.verbosity > 1:\n # ensure that deprecation warnings are displayed during testing\n # the following state is assumed:\n # logging.capturewarnings is true\n # a \"default\" level warnings filter has been added for\n # DeprecationWarning. See django.conf.LazySettings._configure_logging\n self.logger = logging.getLogger('py.warnings')\n handler = logging.StreamHandler()\n self.logger.addHandler(handler)",
"def test_09_debug(self):\n self.app = self.create_application()\n self.app.debug = True\n exceptional = Exceptional(self.app)\n self.app.config[\"EXCEPTIONAL_ENVIRONMENT_FILTER\"].append(\"os.*\")\n self.app.config[\"PROPAGATE_EXCEPTIONS\"] = None\n assert exceptional.url == self.app.config[\"EXCEPTIONAL_DEBUG_URL\"]\n\n with self.app.test_client() as client:\n self.assertRaises(ZeroDivisionError, client.get, \"/error\")\n json.loads(g.exceptional)\n print \"See %s for HTTP request details.\" % exceptional.url",
"def test_pytest_all_tests_skipped_propagates(self):\n py_file = self.testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.skip(reason=\"Because\")\n def test_not_ok_but_skipped():\n assert 0\n\n @pytest.mark.skip(reason=\"Because\")\n def test_also_not_ok_but_skipped():\n assert 0\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n rec = self.inline_run(\"--ddtrace\", file_name)\n rec.assertoutcome(skipped=2)\n spans = self.pop_spans()\n for span in spans:\n assert span.get_tag(\"test.status\") == \"skip\"",
"def skip_or_run_test_tarantool(func, REQUIRED_TNT_VERSION, msg):\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n if func.__name__ == 'setUp':\n func(self, *args, **kwargs)\n\n skip_or_run_test_tarantool_impl(self, REQUIRED_TNT_VERSION, msg)\n\n if func.__name__ != 'setUp':\n func(self, *args, **kwargs)\n\n return wrapper",
"def visit_default_test_case(self, test_case) -> None:"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
General function for creating an expression for a smooth minimum or maximum. Uses the smooth_abs operator.
|
def smooth_minmax(a, b, eps=1e-4, sense='max'):
# Check type of eps
if not (isinstance(eps, (float, int, Param))):
raise TypeError("Smooth {} eps argument must be a float, int or "
"Pyomo Param".format(sense))
# Set sense of expression
if sense == 'max':
mm = 1
elif sense == 'min':
mm = -1
else:
raise ValueError("Unrecognised sense argument to smooth_minmax. "
"Must be 'min' or 'max'.")
# Create expression
try:
expr = 0.5*(a+b+mm*smooth_abs(a-b, eps))
except TypeError:
raise TypeError("Unsupported argument type for smooth_{}. Must be "
"a Pyomo Var, Param or Expression, or a float or int."
.format(sense))
return expr
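
# A small numeric sanity check (added for illustration; not part of the original
# record). It assumes smooth_abs is the usual smooth approximation
# sqrt(x**2 + eps**2) and that Param is pyomo.environ.Param; with plain floats
# the expression reduces to roughly max(a, b) or min(a, b) once |a - b| >> eps.
if __name__ == "__main__":
    from pyomo.environ import Param  # assumed source of the Param type checked above

    def smooth_abs(x, eps=1e-4):  # assumed stand-in for the real smooth_abs operator
        return (x ** 2 + eps ** 2) ** 0.5

    print(smooth_minmax(3.0, 7.0, sense='max'))  # ~= 7.0
    print(smooth_minmax(3.0, 7.0, sense='min'))  # ~= 3.0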
|
[
"def smooth_minmax(a, b, eps=1e-4, sense=\"max\"):\n # Check type of eps\n if not isinstance(eps, (float, int, Param)):\n raise TypeError(\n \"Smooth {} eps argument must be a float, int or \"\n \"Pyomo Param\".format(sense)\n )\n\n # Set sense of expression\n if sense == \"max\":\n mm = 1\n elif sense == \"min\":\n mm = -1\n else:\n raise ValueError(\n \"Unrecognised sense argument to smooth_minmax. \" \"Must be 'min' or 'max'.\"\n )\n\n # Create expression\n try:\n expr = 0.5 * (a + b + mm * smooth_abs(a - b, eps))\n except TypeError:\n raise TypeError(\n \"Unsupported argument type for smooth_{}. Must be \"\n \"a Pyomo Var, Param or Expression, or a float or int.\".format(sense)\n )\n\n return expr",
"def smooth_max(a, b, eps=1e-4):\n expr = smooth_minmax(a, b, eps, sense=\"max\")\n return expr",
"def smooth_max(a, b, eps=1e-4):\n expr = smooth_minmax(a, b, eps, sense='max')\n return expr",
"def smooth_min(a, b, eps=1e-4):\n expr = smooth_minmax(a, b, eps, sense=\"min\")\n return expr",
"def smooth_min(a, b, eps=1e-4):\n expr = smooth_minmax(a, b, eps, sense='min')\n return expr",
"def smooth_bound(val, lb, ub, eps=1e-4, eps_lb=None, eps_ub=None):\n if eps_lb is None:\n eps_lb = eps\n if eps_ub is None:\n eps_ub = eps\n return smooth_min(smooth_max(val, lb, eps_lb), ub, eps_ub)",
"def smoothmap(min, max, x):\n\n pass",
"def smoothstep(min, max, x):\n\n pass",
"def sp_maximum_2D ( fun ,\n xmin , xmax ,\n ymin , ymax , x0 = () , *args ) :\n funmin = lambda x , y , *a : -1.0 * ( float ( fun ( x , y , *a ) ) )\n return sp_minimum_2D ( funmin ,\n xmin , xmax ,\n ymin , ymax , x0 , *args )",
"def closest_obs_blender(low_value: Any, high_value: Any, coeff: float):\n return low_value if coeff < 0.5 else high_value",
"def clip_min(self, min_val: int | float) -> Series:",
"def abs(self) -> LinearOperator:\n return self.__class__(self._tensor.abs(), upper=self.upper)",
"def sp_maximum_3D ( fun ,\n xmin , xmax ,\n ymin , ymax ,\n zmin , zmax , x0 = () , *args ) :\n funmin = lambda x , y , z , *a : -1.0 * ( float ( fun ( x , y , z , *a ) ) )\n return sp_minimum_3D ( funmin ,\n xmin , xmax ,\n ymin , ymax ,\n zmin , zmax , x0 , *args )",
"def get_loss_scale_by_abs_range(self, g, W):\n raise NotImplementedError(\"get_loss_scale_by_abs_range should be implemented\")",
"def smooth(smooth_atom):\n proximal_atom = zero_cone(smooth_atom.primal_shape)\n return simple_problem(smooth_atom, proximal_atom)",
"def clip(self, min_val: int | float, max_val: int | float) -> Series:",
"def rescale_score_by_abs(score, max_score, min_score):\r\n\r\n # CASE 1: positive AND negative scores occur --------------------\r\n if max_score > 0 and min_score < 0:\r\n\r\n if max_score >= abs(min_score): # deepest color is positive\r\n if score >= 0:\r\n return 0.5 + 0.5 * (score / max_score)\r\n else:\r\n return 0.5 - 0.5 * (abs(score) / max_score)\r\n\r\n else: # deepest color is negative\r\n if score >= 0:\r\n return 0.5 + 0.5 * (score / abs(min_score))\r\n else:\r\n return 0.5 - 0.5 * (score / min_score)\r\n\r\n # CASE 2: ONLY positive scores occur -----------------------------\r\n elif max_score > 0 and min_score >= 0:\r\n if max_score == min_score:\r\n return 1.0\r\n else:\r\n return 0.5 + 0.5 * (score / max_score)\r\n\r\n # CASE 3: ONLY negative scores occur -----------------------------\r\n elif max_score <= 0 and min_score < 0:\r\n if max_score == min_score:\r\n return 0.0\r\n else:\r\n return 0.5 - 0.5 * (score / min_score)",
"def linear_obs_blender(low_value: Any, high_value: Any, coeff: float):\n return low_value * (1 - coeff) + high_value * coeff",
"def fwhmpos(halfmax, maxarray, ascending=True):\n\n values = maxarray.values\n positions = np.array(maxarray.index, dtype=np.float32)\n if ascending:\n s = interp1d(values[:values.argmax()],\n positions[:values.argmax()])\n else:\n s = interp1d(values[values.argmax():],\n positions[values.argmax():])\n\n return s(halfmax)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Smooth maximum operator, using the smooth_abs operator.
|
def smooth_max(a, b, eps=1e-4):
expr = smooth_minmax(a, b, eps, sense='max')
return expr
|
[
"def smooth_max(a, b, eps=1e-4):\n expr = smooth_minmax(a, b, eps, sense=\"max\")\n return expr",
"def max_abs(\n self,\n a,\n axis=None,\n keepdims=False,\n mtol=None,\n split_every=None,\n chunk_function=None,\n ):\n return self.max(\n abs(a),\n axis=axis,\n keepdims=keepdims,\n mtol=mtol,\n split_every=split_every,\n )",
"def clip_max(self, max_val: int | float) -> Series:",
"def argrelmax(wls, amps):\n\n return ss.argrelmax(rewrite_equal(wls, amps))[0]",
"def getmax(v):\n max = np.max(v)\n v[v < -max] = -max\n return max",
"def fwhmpos(halfmax, maxarray, ascending=True):\n\n values = maxarray.values\n positions = np.array(maxarray.index, dtype=np.float32)\n if ascending:\n s = interp1d(values[:values.argmax()],\n positions[:values.argmax()])\n else:\n s = interp1d(values[values.argmax():],\n positions[values.argmax():])\n\n return s(halfmax)",
"def normalize_maxabs(feature, feature_scale=None):\n scale = abs(feature_scale) if feature_scale is not None else feature.abs().max()\n t = feature/scale\n return t, scale",
"def smoothness(self):\n from numpy import abs,average,array\n avg=average( abs(array(self.y[1:])-array(self.y[0:-1])) )\n return self.max_deviation_from_linear()/avg",
"def reverse_maxabs(data_scaled, interest_vars, stats_df):\n data_unscaled = np.copy(data_scaled)\n k = 0\n for i in interest_vars:\n coefs = stats_df[\"maxabs\"].iloc[i]\n if len(data_unscaled.shape) > 1:\n data_unscaled[:, k] = coefs * data_unscaled[:, k]\n else:\n data_unscaled = coefs * data_unscaled\n k = k + 1\n return data_unscaled",
"def max_abs_weight(self):\n return max(np.abs(w).max() if w.size > 0 else -np.inf for w in self.weights)",
"def _roll_orig_max_squared(x, window=2000):\n x_rolled = np.lib.stride_tricks.sliding_window_view(x, window, axis=0)\n # https://stackoverflow.com/questions/61703879/in-numpy-how-to-select-elements-based-on-the-maximum-of-their-absolute-values\n shape = np.array(x_rolled.shape)\n shape[-1] = -1\n return np.take_along_axis(x_rolled, np.square(x_rolled).argmax(-1).reshape(shape), axis=-1)",
"def sp_maximum_2D ( fun ,\n xmin , xmax ,\n ymin , ymax , x0 = () , *args ) :\n funmin = lambda x , y , *a : -1.0 * ( float ( fun ( x , y , *a ) ) )\n return sp_minimum_2D ( funmin ,\n xmin , xmax ,\n ymin , ymax , x0 , *args )",
"def maximum_basic(a: float, b: float) -> float:",
"def smooth_minmax(a, b, eps=1e-4, sense='max'):\n # Check type of eps\n if not (isinstance(eps, (float, int, Param))):\n raise TypeError(\"Smooth {} eps argument must be a float, int or \"\n \"Pyomo Param\".format(sense))\n\n # Set sense of expression\n if sense == 'max':\n mm = 1\n elif sense == 'min':\n mm = -1\n else:\n raise ValueError(\"Unrecognised sense argument to smooth_minmax. \"\n \"Must be 'min' or 'max'.\")\n\n # Create expression\n try:\n expr = 0.5*(a+b+mm*smooth_abs(a-b, eps))\n except TypeError:\n raise TypeError(\"Unsupported argument type for smooth_{}. Must be \"\n \"a Pyomo Var, Param or Expression, or a float or int.\"\n .format(sense))\n\n return expr",
"def cummax(self, reverse: bool = False) -> Series:",
"def smooth_minmax(a, b, eps=1e-4, sense=\"max\"):\n # Check type of eps\n if not isinstance(eps, (float, int, Param)):\n raise TypeError(\n \"Smooth {} eps argument must be a float, int or \"\n \"Pyomo Param\".format(sense)\n )\n\n # Set sense of expression\n if sense == \"max\":\n mm = 1\n elif sense == \"min\":\n mm = -1\n else:\n raise ValueError(\n \"Unrecognised sense argument to smooth_minmax. \" \"Must be 'min' or 'max'.\"\n )\n\n # Create expression\n try:\n expr = 0.5 * (a + b + mm * smooth_abs(a - b, eps))\n except TypeError:\n raise TypeError(\n \"Unsupported argument type for smooth_{}. Must be \"\n \"a Pyomo Var, Param or Expression, or a float or int.\".format(sense)\n )\n\n return expr",
"def max_abs(self):\n # Find result for tensor stored in memory\n if self.in_mem:\n\n # Find result for Dense tensor\n if self.sym is None:\n maxval = max(self.backend.max(self.ten),self.backend.min(self.ten))\n\n # Find result for symtensor\n else:\n maxval = max(self.backend.max(self.ten.array),self.backend.min(self.ten.array))\n\n # Find result for tensor not in memory\n else:\n maxval = max(self._max_val, self._min_val, key=abs)\n\n # Return result\n return float(maxval)",
"def limit_maximum_flux(model, new_limit):\n\n if new_limit < 0:\n new_limit = new_limit * -1\n\n old_limit = model.maximum_flux\n\n if old_limit > new_limit:\n for rr in model.reactions.values():\n\n if abs(rr.upper_bound) > new_limit:\n sign = 1 if rr.upper_bound >= 0 else -1\n rr.upper_bound = new_limit*sign\n\n if abs(rr.lower_bound) > new_limit:\n sign = 1 if rr.lower_bound >= 0 else -1\n rr.lower_bound = new_limit*sign\n else:\n for rr in model.reactions.values():\n\n if abs(rr.upper_bound) == old_limit:\n sign = 1 if rr.upper_bound >= 0 else -1\n rr.upper_bound = new_limit*sign\n\n if abs(rr.lower_bound) > old_limit:\n sign = 1 if rr.lower_bound >= 0 else -1\n rr.lower_bound = new_limit*sign\n\n model._calc_max_flux()",
"def sp_maximum_3D ( fun ,\n xmin , xmax ,\n ymin , ymax ,\n zmin , zmax , x0 = () , *args ) :\n funmin = lambda x , y , z , *a : -1.0 * ( float ( fun ( x , y , z , *a ) ) )\n return sp_minimum_3D ( funmin ,\n xmin , xmax ,\n ymin , ymax ,\n zmin , zmax , x0 , *args )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Smooth minimum operator, using the smooth_abs operator.
|
def smooth_min(a, b, eps=1e-4):
expr = smooth_minmax(a, b, eps, sense='min')
return expr
|
[
"def smooth_min(a, b, eps=1e-4):\n expr = smooth_minmax(a, b, eps, sense=\"min\")\n return expr",
"def min_abs(\n self,\n a,\n axis=None,\n keepdims=False,\n mtol=None,\n split_every=None,\n chunk_function=None,\n ):\n return self.min(\n abs(a),\n axis=axis,\n keepdims=keepdims,\n mtol=mtol,\n split_every=split_every,\n )",
"def argrelmin(wls, amps):\n\n return ss.argrelmin(rewrite_equal(wls, amps))[0]",
"def smoothstep(min, max, x):\n\n pass",
"def smooth(smooth_atom):\n proximal_atom = zero_cone(smooth_atom.primal_shape)\n return simple_problem(smooth_atom, proximal_atom)",
"def clip_min(self, min_val: int | float) -> Series:",
"def minAbs(self):\n return min(abs(self.x), abs(self.y), abs(self.z), abs(self.w))",
"def _calc_min(self):\n return np.min(self.get_points()) - 1",
"def softclip(tensor, min):\n result_tensor = min + F.softplus(tensor - min)\n \n return result_tensor",
"def minimal_step(X):\n return min(abs(X[i+1] - X[i]) for i in range(len(X)-1))",
"def smooth_minmax(a, b, eps=1e-4, sense='max'):\n # Check type of eps\n if not (isinstance(eps, (float, int, Param))):\n raise TypeError(\"Smooth {} eps argument must be a float, int or \"\n \"Pyomo Param\".format(sense))\n\n # Set sense of expression\n if sense == 'max':\n mm = 1\n elif sense == 'min':\n mm = -1\n else:\n raise ValueError(\"Unrecognised sense argument to smooth_minmax. \"\n \"Must be 'min' or 'max'.\")\n\n # Create expression\n try:\n expr = 0.5*(a+b+mm*smooth_abs(a-b, eps))\n except TypeError:\n raise TypeError(\"Unsupported argument type for smooth_{}. Must be \"\n \"a Pyomo Var, Param or Expression, or a float or int.\"\n .format(sense))\n\n return expr",
"def smooth_minmax(a, b, eps=1e-4, sense=\"max\"):\n # Check type of eps\n if not isinstance(eps, (float, int, Param)):\n raise TypeError(\n \"Smooth {} eps argument must be a float, int or \"\n \"Pyomo Param\".format(sense)\n )\n\n # Set sense of expression\n if sense == \"max\":\n mm = 1\n elif sense == \"min\":\n mm = -1\n else:\n raise ValueError(\n \"Unrecognised sense argument to smooth_minmax. \" \"Must be 'min' or 'max'.\"\n )\n\n # Create expression\n try:\n expr = 0.5 * (a + b + mm * smooth_abs(a - b, eps))\n except TypeError:\n raise TypeError(\n \"Unsupported argument type for smooth_{}. Must be \"\n \"a Pyomo Var, Param or Expression, or a float or int.\".format(sense)\n )\n\n return expr",
"def nullmin(fun,x0,args,**kwargs):\n\treturn scipy.optimize.OptimizeResult({'x':x0,'success':True,'fun':fun(x0)})",
"def nonsmooth(proximal_atom):\n smooth_atom = zero_smooth(proximal_atom.primal_shape)\n return simple_problem(smooth_atom, proximal_atom)",
"def test_abs_function(self):\n minimum = np.array([0.0, 0.0, 0.0])\n def abs_func(x):\n return tf.reduce_sum(tf.abs(x), axis=-1)\n\n start = tf.constant([0.6, 1.8, -4.3], dtype=tf.float64)\n results = self.evaluate(\n differential_evolution.minimize(\n abs_func,\n initial_position=start,\n func_tolerance=1e-12,\n max_iterations=200,\n seed=1212))\n self.assertTrue(results.converged)\n self.assertArrayNear(results.position, minimum, 1e-5)",
"def cummin(self, reverse: bool = False) -> Series:",
"def smooth2min(self, repeattimes):\n for i in xrange(repeattimes):\n #pool = Pool(THREADNUM)\n #pool.map(self.smoothrow, xrange(self.rows))\n\t\t\t\n #debug:\n for rowidx in xrange(self.rows):\n self.smoothrow(rowidx)\n self.matrix = np.minimum(self.smoothedmap, self.matrix)\n self.smoothedmap = self.matrix",
"def move_min(a, window, min_count=None, axis=-1): # real signature unknown; restored from __doc__\n pass",
"def smooth_step(a, b, x):\n if x < a: return 0.0\n if x > b: return 1.0\n x = (x - a) / (b - a)\n return x * x * x * (x * (x * 6 - 15) + 10)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the log of max(a, eps) using the smooth_max expression. This can be used to avoid transient evaluation errors when changing a model from one state to another, and is appropriate when, at the solution, a >> eps.
|
def safe_log(a, eps=1e-4):
return log(smooth_max(a, eps, eps=eps))
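
# Illustration only (not part of the original record): with plain floats and the
# usual sqrt-based smooth_abs, safe_log(a) behaves essentially like log(a) for
# a >> eps, but stays finite (on the order of log(eps)) as a approaches zero,
# instead of raising a log-of-zero evaluation error.
#
#     safe_log(10.0)   # ~= log(10.0)
#     safe_log(0.0)    # finite, roughly log(eps), rather than a domain error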
|
[
"def smooth_max(a, b, eps=1e-4):\n expr = smooth_minmax(a, b, eps, sense=\"max\")\n return expr",
"def smooth_max(a, b, eps=1e-4):\n expr = smooth_minmax(a, b, eps, sense='max')\n return expr",
"def apply_ada_max(var, m, v, grad, lr, beta1, beta1_power, beta2, epsilon, target=utils.CCE):\n\n _check_inputs(var, m, v, grad, lr, beta1, beta1_power, beta2, epsilon)\n\n out_var, out_m, out_v = _apply_ada_max_compute(var, m, v, grad, lr, beta1, beta1_power, beta2, epsilon)\n\n # reuse var, m and v\n out_var, binds_info = TensorUtils.inplace_set(var, out_var, \"var_buf\")\n out_m, binds_info2 = TensorUtils.inplace_set(m, out_m, \"m_buf\")\n out_v, binds_info3 = TensorUtils.inplace_set(v, out_v, \"v_buf\")\n binds_info.update(binds_info2)\n binds_info.update(binds_info3)\n attrs = {utils.BINDS: binds_info}\n return out_var, out_m, out_v, attrs",
"def smooth_minmax(a, b, eps=1e-4, sense=\"max\"):\n # Check type of eps\n if not isinstance(eps, (float, int, Param)):\n raise TypeError(\n \"Smooth {} eps argument must be a float, int or \"\n \"Pyomo Param\".format(sense)\n )\n\n # Set sense of expression\n if sense == \"max\":\n mm = 1\n elif sense == \"min\":\n mm = -1\n else:\n raise ValueError(\n \"Unrecognised sense argument to smooth_minmax. \" \"Must be 'min' or 'max'.\"\n )\n\n # Create expression\n try:\n expr = 0.5 * (a + b + mm * smooth_abs(a - b, eps))\n except TypeError:\n raise TypeError(\n \"Unsupported argument type for smooth_{}. Must be \"\n \"a Pyomo Var, Param or Expression, or a float or int.\".format(sense)\n )\n\n return expr",
"def smooth_minmax(a, b, eps=1e-4, sense='max'):\n # Check type of eps\n if not (isinstance(eps, (float, int, Param))):\n raise TypeError(\"Smooth {} eps argument must be a float, int or \"\n \"Pyomo Param\".format(sense))\n\n # Set sense of expression\n if sense == 'max':\n mm = 1\n elif sense == 'min':\n mm = -1\n else:\n raise ValueError(\"Unrecognised sense argument to smooth_minmax. \"\n \"Must be 'min' or 'max'.\")\n\n # Create expression\n try:\n expr = 0.5*(a+b+mm*smooth_abs(a-b, eps))\n except TypeError:\n raise TypeError(\"Unsupported argument type for smooth_{}. Must be \"\n \"a Pyomo Var, Param or Expression, or a float or int.\"\n .format(sense))\n\n return expr",
"def powerflow_max_rule(_m, l):\r\n\r\n if l in m.L_I:\r\n return float(self.data.powerflow_limits[l]['forward'])\r\n else:\r\n # Set arbitrarily loose bound if not an interconnector\r\n return float(1e5)",
"def _maxL_waveform(self, func, *args, **kwargs):\n ind = np.argmax(self[\"log_likelihood\"])\n kwargs[\"ind\"] = ind\n return func(*args, **kwargs)",
"def max_to_sage(expr):\n global op_sage_to_max, op_max_to_sage\n global sym_sage_to_max, sym_max_to_sage\n if expr.consp():\n op_max=caar(expr)\n if op_max in special_max_to_sage:\n return special_max_to_sage[op_max](expr)\n if not(op_max in op_max_to_sage):\n op=sageop.next()\n op_max_to_sage[op_max]=op\n op_sage_to_max[op]=op_max\n op=op_max_to_sage[op_max]\n max_args=cdr(expr)\n args=[]\n while not(max_args.nullp()):\n args.append(max_to_sage(car(max_args)))\n max_args=cdr(max_args)\n return op(*args)\n elif expr.symbolp():\n if not(expr in sym_max_to_sage):\n sym=sagesym.next()\n sym_max_to_sage[expr]=sym\n sym_sage_to_max[sym]=expr\n sym=sym_max_to_sage[expr]\n return sym\n else:\n return expr.python()",
"def find_max_beta(beta_0,tol = 1e-6):\n\n beta = beta_0\n\n I = 1e-4\n R = 0\n S = 1 - I - R\n\n v_0 = np.array([S,I,R])\n \n TN = 180\n dt = 0.05\n \n sir = SIRSolver(0,v_0,TN,dt,beta_0,10)\n _,v = sir()\n\n Imax = np.max(v[:,1])\n Ilim = 0.2\n\n err = np.abs(Imax - Ilim)\n \n while err > tol:\n if Ilim > Imax :\n beta = beta * 2**(err)\n else:\n beta = beta * 2**(-err)\n\n sir = SIRSolver(0,v_0,TN,dt,beta,10)\n _,v = sir()\n\n err = np.abs(np.max(v[:,1]) - Ilim)\n\n return beta, err",
"def clip_max(self, max_val: int | float) -> Series:",
"def max(x):\n\treturn np.max(x)",
"def LogSoftmax(axis=-1):\n return Fn('LogSoftmax', lambda x: log_softmax(x, axis=axis))",
"def max_log2(x):\n d = min(30, -np.log2(np.finfo(float).eps) - np.ceil(np.log2(x)) - 1)\n if (x + 2 ** -d) - x != 2 ** -d:\n raise ValueError('max_log2 failed')\n return d",
"def apply_ada_max_d(var,\n m,\n v,\n beta1_power,\n lr,\n beta1,\n beta2,\n epsilon,\n grad,\n var_out,\n m_out,\n v_out,\n kernel_name='apply_ada_max_d'):\n\n input_dict = (var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad)\n\n args = ApplyOpConfig.TensorArgs(input_dict, apply_ada_max_d_compute,\n [var_out, m_out, v_out], 14)\n name = ApplyOpConfig.TensorName(all=('var', 'm', 'v', 'beta1_power', 'lr',\n 'beta1', 'beta2', 'epsilon', 'grad'),\n scalar=('lr', 'beta1_power', 'beta1',\n 'beta2', 'epsilon'),\n reuse=('m', 'v', 'var'))\n\n common_apply_op_process(ApplyOpConfig(args, name), kernel_name)",
"def get_max_grad(self, params=None):\n if params is None:\n params = self.params\n\n gmax = 0\n for param in params:\n if self._bias:\n assert isinstance(param, list)\n for p in param:\n if p.grad is not None and p.max() > gmax:\n gmax = p.grad.max()\n else:\n if param.grad is not None and param.max() > gmax:\n gmax = param.grad.max()\n return gmax",
"def maximum_basic(a: float, b: float) -> float:",
"def max_gradient_score(example_trace,x_vals, thresh=0.2,offset=0,maxval=6e-10,smoothing = 3):\n\n #Normalise trace between 0 and 1 and remove noise level\n vrange = abs(x_vals[0]-x_vals[-1])\n \n example_trace=example_trace\n example_trace_norm=example_trace.copy()-offset\n example_trace_norm[example_trace_norm<0]=0\n example_trace_norm = (example_trace_norm)/((maxval-offset))\n \n smoothing = np.convolve(example_trace_norm, np.ones((smoothing,))/smoothing, mode='valid')\n smoothgrad = np.gradient(smoothing,(vrange/example_trace.size))\n \n if(len(example_trace_norm[(example_trace_norm)>thresh])>0) and (len(example_trace_norm[(example_trace_norm)<thresh])>0):\n gradmax=np.max(smoothgrad)\n pinchscore=gradmax\n else:\n pinchscore=0\n \n return pinchscore",
"def argrelmax(wls, amps):\n\n return ss.argrelmax(rewrite_equal(wls, amps))[0]",
"def _roll_orig_max_squared(x, window=2000):\n x_rolled = np.lib.stride_tricks.sliding_window_view(x, window, axis=0)\n # https://stackoverflow.com/questions/61703879/in-numpy-how-to-select-elements-based-on-the-maximum-of-their-absolute-values\n shape = np.array(x_rolled.shape)\n shape[-1] = -1\n return np.take_along_axis(x_rolled, np.square(x_rolled).argmax(-1).reshape(shape), axis=-1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the hash portion using the base object method, but with no network_id included.
|
def test_users_hash_no_network_id(self):
test_hash = SAMPLE_USER_HASH.copy()
test_hash.pop('network_id')
self.base_test_hash(test_hash)
|
[
"def test_hash_id(self):\n self.assertEqual(hash_id(self.id1, self.id2, self.salt, self.length), \"2Y7W5d\")",
"def test_hash(self):\n with pytest.raises(TypeError, match=r\"unhashable\"):\n hash(DataElement(0x00100010, 'PN', 'ANON'))",
"def test_get_xrp__ripple_block_details_by_block_hash(self):\n pass",
"def get_hash(self, descriptor):",
"def __hash__(self):\n return hash(self.__dn__)",
"def __hash__(self):\n return hash((self.bike.public_key, self.remote))",
"def __check_hash__(self) -> None:\n state = self.__dict__.copy()\n event_hash = state.pop(\"__event_hash__\")\n method_name = state.get(\"__event_hash_method_name__\", \"__hash_object_v1__\")\n hash_method = getattr(self, method_name)\n if event_hash != hash_method(state):\n raise EventHashError()",
"def hash_field(self):\n return None",
"def __hash__(self) -> int:\n return hash((self.__class__, self.address))",
"def hashring(self):\n return",
"def test_hash_table_hash_key_single():\n hash = HT()\n assert hash._hash_key('b') == 98",
"def test_hash():\n \n # Create a Dealer\n dealer = Dealer(p256, n_participants, s_secrets, access_structures)\n\n # test hash function - it should be repeatable for the same Dealer object\n hash1 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n hash2 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n assert_equal(hash1, hash2)",
"def __hash__(self):",
"def test_ipv4network_hash(self):\n n = 10**6\n net = ip.IPv4Network('1.2.3.0/24')\n time1, result1 = timefn(n, net.__hash__)\n enet = eip.IPv4Network('1.2.3.0/24')\n time2, result2 = timefn(n, enet.__hash__)\n # results will differ, so don't compare them\n results = (time1, None), (time2, None)\n self.report_4n.report(fn_name(), n, results, net)",
"def __hash__(self):\n return hash(self.piece_identification)",
"def _hash(self, flow):\n r = flow.request\n\n _, _, path, _, query, _ = urlparse.urlparse(r.url)\n queriesArray = urlparse.parse_qsl(query, keep_blank_values=True)\n\n key = [\n str(r.port),\n str(r.scheme),\n str(r.method),\n str(path),\n ]\n\n if not self.ignore_content:\n form_contents = r.urlencoded_form or r.multipart_form\n if self.ignore_payload_params and form_contents:\n key.extend(\n p for p in form_contents\n if p[0] not in self.ignore_payload_params\n )\n else:\n key.append(str(r.content))\n\n if not self.ignore_host:\n key.append(r.host)\n\n filtered = []\n ignore_params = self.ignore_params or []\n for p in queriesArray:\n if p[0] not in ignore_params:\n filtered.append(p)\n for p in filtered:\n key.append(p[0])\n key.append(p[1])\n\n if self.headers:\n headers = []\n for i in self.headers:\n v = r.headers.get(i)\n headers.append((i, v))\n key.append(headers)\n return hashlib.sha256(repr(key)).digest()",
"def __hash__(self):\n # see if there is an available hash value\n # if you are seeing cache bugs this is the thing\n # to try eliminating because it is very likely that\n # someone somewhere is modifying the data without\n # setting `self._hash = None`\n hashed = getattr(self, '_hash', None)\n if hashed is not None:\n return hashed\n\n hashed = hash_fast(\n (''.join(str(hash(k)) + v.get('geometry', '')\n for k, v in self.edge_data.items()) +\n ''.join(str(k) + v.get('geometry', '')\n for k, v in self.node_data.items())).encode('utf-8') +\n b''.join(v['matrix'].tobytes()\n for v in self.edge_data.values()\n if 'matrix' in v))\n self._hash = hashed\n return hashed",
"def test__AutoModerationActionMetadataBase__hash():\n metadata = AutoModerationActionMetadataBase()\n \n vampytest.assert_instance(hash(metadata), int)",
"def test__GuildProfile__hash():\n avatar = Icon(IconType.static, 12)\n boosts_since = DateTime(2016, 5, 14)\n flags = GuildProfileFlag(3)\n joined_at = DateTime(2016, 5, 15)\n nick = 'Ayumi'\n pending = False\n role_ids = [2022100009, 2022100010]\n timed_out_until = DateTime(2016, 5, 20)\n \n \n guild_profile = GuildProfile(\n avatar = avatar,\n boosts_since = boosts_since,\n flags = flags,\n joined_at = joined_at,\n nick = nick,\n pending = pending,\n role_ids = role_ids,\n timed_out_until = timed_out_until,\n )\n \n vampytest.assert_instance(hash(guild_profile), int)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a new Vocab with extra tokens prepended.
|
def add_extra_vocab(self, extra_vocab):
extra_tok_counts = [(w, float('inf')) for w in extra_vocab]
return Vocab(extra_tok_counts + self.tok_counts,
unk_tok=self.unk_tok)
|
[
"def get_vocab(self) -> torchtext.vocab.Vocab:\n if self.vocab is not None:\n return self.vocab\n else:\n tok_to_idx = list(self.vectorizer.vocabulary_.items())\n tok_to_idx.sort(key = lambda x: x[1])\n ordered_vocab = [ (k,1) for (k,_) in tok_to_idx ]\n if self.additional_feature_keys:\n if isinstance(self.additional_feature_keys, list):\n for f in self.additional_feature_keys:\n ordered_vocab.append((f,1))\n else:\n ## assume it's a dictionary\n for k in self.additional_feature_keys:\n for v in self.additional_feature_keys[k]:\n ordered_vocab.append((k+':'+v, 1))\n cv_vocab = OrderedDict(ordered_vocab) \n vb = build_vocab(cv_vocab)\n self.vocab = vb\n return vb",
"def _create_vocab():\n # Create vocabulary dictionary\n vocab_dict = {}\n\n # Blank token\n idx = 0\n vocab_dict['-'] = idx\n\n # 0-9\n for i in range(ord('9') - ord('0') + 1):\n idx += 1\n vocab_dict[chr(ord('0') + i)] = idx\n\n # a-z\n for i in range(ord('z') - ord('a') + 1):\n idx += 1\n vocab_dict[chr(ord('a') + i)] = idx\n\n # Create vocabulary object\n vocab = Vocabulary(vocab_dict)\n\n return vocab",
"def createVocab(self, entry):\n uri = \"/vocab/\" + self.username + \"/\"\n return self.Post(entry, uri= uri)",
"def augment_vocab_with_kmer(cls, values): # pylint: disable=no-self-argument,no-self-use\n vocab = values[\"vocab\"]\n alphabet = values[\"alphabet\"]\n kmer_length = values[\"kmer_length\"]\n\n values[\"vocab\"] = _create_kmer_vocab_from_token(vocab, alphabet, kmer_length)\n return values",
"def get_fake_vocab():\n\n @dataclasses.dataclass\n class DummyVocab:\n vocab_size: int = 128\n eos_id: int = 1\n\n vocab = DummyVocab()\n return (vocab, vocab)",
"def __init__(self, vocab):\n self.vocab = vocab",
"def _add_vocab_xml(self, elem):\n self.__vocabs.append(ControlledVocab(elem))",
"def extend_vocab(self, path):\n for words, _, _ in Tokenizer().generate_samples(path):\n for word in words:\n if word not in self.vocab_input:\n self.vocab_input[word] = self.ru_model.get_word_vector(word)",
"def make_vocab(word_list):\n vocab = {}\n for i, word in enumerate(word_list):\n vocab[word] = Vocab(count=1, index=i)\n return vocab",
"def _create_vocab(self, data: List[str], **vocab_kwargs) -> vocab.Vocab:\n\n assert isinstance(data, list), f\"data should be a list, got {type(data)}\"\n\n self.vocab = vocab.build_vocab_from_iterator(data, **vocab_kwargs)\n\n return self.vocab",
"def getVocabInstance(self, name):\n uri = \"/vocab/\" + self.username + \"/\" + name + \"/\"\n return self.Get(uri = uri)",
"def build_vocab(self, *args, **kwargs):\n counter = Counter()\n sources = []\n for arg in args:\n if isinstance(arg, Dataset):\n sources += [getattr(arg, name) for name, field in\n arg.fields.items() if field is self]\n else:\n sources.append(arg)\n for data in sources:\n for x in data:\n if not self.sequential:\n x = [x]\n try:\n counter.update(x)\n except TypeError:\n counter.update(chain.from_iterable(x))\n specials = list(OrderedDict.fromkeys(\n tok for tok in [self.pad_token, self.unk_token, self.init_token,\n self.eos_token] + kwargs.pop('specials', [])\n if tok is not None))\n self.vocab = self.vocab_cls(counter, specials=specials, **kwargs)",
"def _custom_tokenizer(self, text):\n normalized_string = self._pre_tokenizer.pre_tokenize_str(text)\n words = [string[0] for string in normalized_string]\n offsets = [string[1] for string in normalized_string]\n spaces = []\n for i in range(len(words)):\n if i == len(words) - 1:\n spaces.append(False)\n break\n spaces.append(True if offsets[i][1] != offsets[i+1][0] else False)\n # default is None\n spaces = None if not spaces else spaces\n return Doc(self.spacy_tokenizer.vocab, words=words, spaces=spaces)",
"def getVocabList(self, query = None):\n uri = \"/vocab/\" + self.username + \"/\"\n if query:\n query.feed = uri\n uri = query.ToUri()\n return self.Get(uri = uri)",
"def create_load_vocab(arg,\n file_name,\n out_file_name,\n pad=True,\n unk=True,\n sos_eos=False):\n\n full_path = os.path.join('./top_data', arg.train_data_path, file_name)\n output_path = os.path.join(arg.vocab_path, out_file_name)\n\n create_vocabulary(full_path, output_path, pad, unk, sos_eos)\n vocab = load_vocabulary(output_path)\n\n return vocab",
"def create_vocabulary(self, sentence):\n for term in sentence:\n if term not in self.vocabulary:\n self.vocabulary = np.append(self.vocabulary, term)",
"def EmbedVocabInMetaTable(\n session: sqlutil.Session, vocabulary: typing.Dict[str, int]\n):\n q = session.query(encoded.Meta).filter(encoded.Meta.key.like(\"vocab_%\"))\n q.delete(synchronize_session=False)\n\n session.add(encoded.Meta(key=\"vocab_size\", value=str(len(vocabulary))))\n session.add_all(\n [encoded.Meta(key=f\"vocab_{v}\", value=k) for k, v in vocabulary.items()]\n )",
"def build_vocabulary(instances: List[Dict],\n vocab_size: 10000,\n add_tokens: List[str] = None) -> Tuple[Dict, Dict]:\n print(\"\\nBuilding Vocabulary.\")\n\n # make sure pad_token is on index 0\n UNK_TOKEN = \"@UNK@\"\n PAD_TOKEN = \"@PAD@\"\n token_to_id = {PAD_TOKEN: 0, UNK_TOKEN: 1}\n\n # First add tokens which were explicitly passed.\n add_tokens = add_tokens or []\n for token in add_tokens:\n if not token.lower() in token_to_id:\n token_to_id[token] = len(token_to_id)\n\n # Add remaining tokens from the instances as the space permits\n words = []\n for instance in instances:\n words.extend(instance[\"text_tokens\"])\n token_counts = dict(Counter(words).most_common(vocab_size))\n for token, _ in token_counts.items():\n if token not in token_to_id:\n token_to_id[token] = len(token_to_id)\n if len(token_to_id) == vocab_size:\n break\n # Make reverse vocabulary lookup\n id_to_token = dict(zip(token_to_id.values(), token_to_id.keys()))\n return (token_to_id, id_to_token)",
"def add_special_tokens_(model, tokenizer):\n orig_num_tokens = tokenizer.vocab_size\n num_added_tokens = tokenizer.add_special_tokens(ATTR_TO_SPECIAL_TOKEN) # doesn't add if they are already there\n if num_added_tokens > 0:\n model.resize_token_embeddings(new_num_tokens=orig_num_tokens + num_added_tokens)",
"def add_reserved_vocab(embed_path, vocab_path, reserved_path, embed_size):\n # Get reserved words.\n print(\"Getting total reserved words...\")\n reversed_words = []\n with open(reserved_path, \"r\", encoding=\"utf-8\") as fr:\n for index, line in enumerate(fr):\n line = line.strip()\n if line != \"\": reversed_words.append(line)\n \n # Add reserved vocab to the vacab file.\n print(\"Building new vocab file...\")\n reversed_words_str = \"\\n\".join(reversed_words) + \"\\n\"\n with open(vocab_path, \"r\", encoding=\"utf-8\") as fv:\n reversed_words_str += fv.read()\n with open(vocab_path, \"w\", encoding=\"utf-8\") as fv:\n fv.write(reversed_words_str)\n\n # Add reserved vecters to the embed file.\n idx_embed = []\n idx_embed.extend([[0.0] * embed_size for _ in range(len(reversed_words))])\n print(\"Getting total Glove vectors...\")\n with open(embed_path, \"r\", encoding=\"utf-8\") as fe:\n for index, line in enumerate(fe):\n line = line.strip().split()\n word, vector = line[0], line[1:]\n vector = [float(item) for item in vector]\n assert len(vector) == embed_size\n idx_embed.append(vector)\n # For <unk> -> [UNK]\n idx_embed[1] = idx_embed[-1]\n idx_embed = idx_embed[0:-1]\n print(\"Building new embed file...\")\n np.savetxt(embed_path, idx_embed)\n print(\"Add successfully!\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a new Vocab containing the top `size` tokens.
|
def truncate(self, size):
return Vocab(self.tok_counts[:size], unk_tok=self.unk_tok)
|
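The `truncate` method above and the previous entry's `add_extra_vocab` both operate on the same assumed `Vocab` layout: a `tok_counts` list of `(token, count)` pairs already ordered by descending frequency, plus an `unk_tok`. The snippet below is a minimal, self-contained sketch of that assumed class (its constructor and the "<unk>" default are not shown in the originals), illustrating how prepending extra tokens with an infinite count keeps them safe from a later `truncate`:

class Vocab:
    """Minimal sketch; assumes tok_counts is ordered by descending frequency."""

    def __init__(self, tok_counts, unk_tok="<unk>"):
        self.tok_counts = list(tok_counts)
        self.unk_tok = unk_tok

    def add_extra_vocab(self, extra_vocab):
        # Prepended tokens get an infinite count, so any later truncation keeps them.
        extra_tok_counts = [(w, float('inf')) for w in extra_vocab]
        return Vocab(extra_tok_counts + self.tok_counts, unk_tok=self.unk_tok)

    def truncate(self, size):
        # Keep only the first (most frequent) `size` tokens.
        return Vocab(self.tok_counts[:size], unk_tok=self.unk_tok)


vocab = Vocab([("the", 100), ("cat", 3), ("sat", 2)])
small = vocab.add_extra_vocab(["<pad>", "<bos>"]).truncate(3)
print([tok for tok, _ in small.tok_counts])  # ['<pad>', '<bos>', 'the']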
[
"def limitVocab(self, max_size):\n if self.VOCAB_SIZE <= max_size:\n print(f'Current vocab size is {self.VOCAB_SIZE}, no need to decrease size')\n return\n# self.word2index = {}\n# # self.word2count = {}\n# self.index2word = {}\n self.VOCAB_SIZE = max_size\n \n# self.SOS = '<s>'\n# self.EOS = '</s>'\n# self.UNK = '<unk>'\n# self.iSOS = 0\n# self.iEOS = 1\n# self.iUNK = 2\n \n c = Counter(self.word2count)\n m = c.most_common(1)[0][1]\n c[self.PAD] = m + 4\n c[self.SOS] = m + 3\n c[self.EOS] = m + 2\n c[self.UNK] = m + 1\n \n list_of_wc = c.most_common(max_size)\n self.index2word = {i:w for i, (w, _) in enumerate(list_of_wc)}\n self.word2index = {w:i for i, (w, _) in enumerate(list_of_wc)}",
"def get_top_verbs_in_path(path, top_size=10):\n all_words = get_all_words_in_path(path)\n verbs = flat([get_verbs_from_name(word) for word in all_words])\n return collections.Counter(verbs).most_common(top_size)",
"def vocab_size(self) -> int:",
"def print_top_s(filename):\n word_count = words_count(filename)\n\n # Each item is a (word, count) tuple.\n # Sort them so the big counts are first using key=get_count() to extract count.\n items = sorted(word_count.items(), key= lambda w : w[1], reverse=True)\n\n # Print the first 20\n for item in items[:20]:\n print (item[0], item[1])",
"def get_top_tags(tags):\n tag_count = Counter(tags)\n return tag_count.most_common(10)",
"def _get_bm_top(self, query: List[str]) -> List[List[str]]:\n # sort titles according to score and return indices\n scores = [(score, title) for score, title in zip(self.bm25.get_scores(query), self.corpus)]\n scores = sorted(scores, key=itemgetter(0), reverse=True)\n\n # Return top 2048 for evaluation purpose, cut to half for recommendations to prevent memory errors\n if self.eval:\n try:\n return [title for score, title in scores][:256]\n except IndexError:\n return [title for score, title in scores]\n else:\n try:\n return [title for score, title in scores if score > 0][:1028]\n except IndexError:\n return [title for score, title in scores if score > 0]",
"def make_vocabulary(input_file, max_vocab_size, output_file):\n\n # count all the tokens\n freq_dict = {}\n with open(input_file, 'r') as fin:\n line = fin.readline()\n while line:\n line = line.rstrip()\n tokens = line.split()\n for token in tokens:\n if token in freq_dict:\n freq_dict[token] = freq_dict[token] + 1\n else:\n freq_dict[token] = 1\n # end token-in-ifelse\n # end token-for\n line = fin.readline()\n # end line-while\n # end fin-with\n\n # sort by frequency. write to a text file\n numElement = 0\n vocab_dict = {}\n with open(output_file, \"w\") as fout:\n for k, v in sorted(freq_dict.items(), key=lambda x: -x[1]):\n fout.write(str(k) + \"\\t\" + str(v) + \"\\n\")\n vocab_dict[k] = numElement\n\n numElement = numElement + 1\n\n if numElement >= max_vocab_size:\n break\n # end if\n # end sort-for\n\n # add special tokens\n fout.write('<BOS>\" + \"\\t\" + \"0\" + \\n')\n fout.write('<EOS>\" + \"\\t\" + \"0\" + \\n')\n fout.write('<UNK>\" + \"\\t\" + \"0\" + \\n')\n\n vocab_dict[\"<BOS>\"] = numElement\n vocab_dict[\"<EOS>\"] = numElement + 1\n vocab_dict[\"<UNK>\"] = numElement + 2\n\n print(output_file + \" created, vocabulary size=\" + str(numElement+2))\n\n # end opne-with\n\n return vocab_dict",
"def top_word(word_count):\n\n top_words = []\n last_count = 0\n for each in word_count:\n if len(each.split()) < 2:\n # we compress spaces in our word count but sometimes they still\n # show up as a count, we won't list them as a top word though.\n continue\n word = each.split()[1]\n count = int(each.split()[0])\n if count >= last_count:\n top_words.append(word)\n last_count = count\n else:\n break\n return top_words, last_count",
"def top_ten(self):\n return heapq.nlargest(10, self.read_by_visitor_dict, key=self.read_by_visitor_dict.get)",
"def build_vocab(self, words, vocab_size):\n count = [(\"UNK\", 0)]\n most_frequent_words = Counter(words).most_common(vocab_size - 1)\n count.extend(most_frequent_words)\n word2index = {}\n index = 0\n\n if self.write_vocab:\n path = os.path.dirname(__file__)\n path = os.path.join(path, 'vocab_1000.tsv')\n f = open(path, \"w\")\n\n for word, _ in count:\n word2index[word] = index\n\n if index < 1000 and self.write_vocab:\n f.write(word + \"\\n\")\n\n index += 1\n\n if self.write_vocab:\n f.close()\n\n index2word = dict(zip(word2index.values(), word2index.keys()))\n return count, word2index, index2word",
"def top_10_words(hist, num = 10):\n t = most_common(hist)\n for freq, word in t[:num]:\n print(word,\"\\t\", freq)",
"def top_k_word2vec(self,word2vec_file_name,top_k_words,word2vec_dimension,new_file_name):\n #word2vec = pd.read_csv(\"../../temp_results/a.txt\",sep=' ', header=None, skiprows=range(1))\n model = models.KeyedVectors.load_word2vec_format(word2vec_file_name, binary=False)\n filtered_vectors = model[top_k_words]\n word2vec_frame = pd.DataFrame({'name':top_k_words})\n for i in range(word2vec_dimension):\n word2vec_frame[i] = filtered_vectors[:,i]\n word2vec_frame.to_csv(new_file_name,sep=\" \",encoding='utf-8',index=False)",
"def get_top_five():\n\n # this is simply a placeholder until I create the logic to query top movies based on num reviews and star ratings...\n t1 = Movie.objects.get(name__icontains='out of the past')\n t2 = Movie.objects.get(name__icontains='double indem')\n t3 = Movie.objects.get(name__icontains='big sleep')\n t4 = Movie.objects.get(name__icontains='scarlet street')\n t5 = Movie.objects.get(name__icontains='maltese falcon')\n\n top_five = [t1, t2, t3, t4, t5]\n\n return top_five",
"def _vocab_size_with_padding(orig_vocab_size, args):\n\n after = orig_vocab_size\n multiple = args.make_vocab_size_divisible_by * \\\n args.tensor_model_parallel_size\n while (after % multiple) != 0:\n after += 1\n if args.rank == 0:\n print(' > padded vocab (size: {}) with {} dummy tokens '\n '(new size: {})'.format(\n orig_vocab_size, after - orig_vocab_size, after), flush=True)\n return after",
"def top10_bagofwords(data, output_name, title):\n bagofwords = CountVectorizer()\n # Output will be a sparse matrix\n inbound = bagofwords.fit_transform(data)\n # Inspecting of often contractions and colloquial language is used\n word_counts = np.array(np.sum(inbound, axis=0)).reshape((-1,))\n words = np.array(bagofwords.get_feature_names())\n words_df = pd.DataFrame({\"word\": words, \"count\": word_counts})\n words_rank = words_df.sort_values(by=\"count\", ascending=False)\n wordranks[output_name] = words_rank\n # words_rank.to_csv('words_rank.csv') # Storing it in a csv so I can inspect and go through it myself\n # Visualizing top 10 words\n plt.figure(figsize=(12, 6))\n sns.barplot(\n words_rank[\"word\"][:10],\n words_rank[\"count\"][:10].astype(str),\n palette=\"inferno\",\n )\n plt.title(title)\n\n # Saving\n # plt.savefig(f'visualizations/next_ver/{output_name}.png')\n st.pyplot()",
"def getTopWords(self):\n\n # First, preprocess the article text\n text = self.article_text\n text = self.preprocessor.changeToLower(text)\n text = self.preprocessor.replaceNewline(text, ' ')\n text = self.preprocessor.removeStopWords(text)\n text = self.preprocessor.stripAccents(text)\n text = self.preprocessor.removeSpecialChars(text)\n words = self.preprocessor.tokenizeWords(text)\n preprocessed_text = self.preprocessor.useOriginalWords(words)\n\n # Then, vectorize, and get the top 20 words (word frequency)\n vectorizer = CountVectorizer(ngram_range=(1,2))\n vectors = vectorizer.fit_transform([preprocessed_text])\n feature_names = vectorizer.get_feature_names()\n dense = vectors.todense()\n denselist = dense.tolist()\n df = pd.DataFrame(denselist, columns=feature_names)\n top_words = df.iloc[[0]].sum(axis=0).sort_values(ascending=False)\n return top_words[0:20]",
"def get_top_words(data_list, n_top_words=160):\n top_words = []\n \n d = Counter(concatenate_all_text(data_list))\n d_sorted = sorted(d.items(), key=itemgetter(1), reverse=True)\n \n assert len(d_sorted) >= n_top_words, 'Too many top words'\n \n for i in range(n_top_words):\n top_words.append(d_sorted[i][0])\n \n return top_words",
"def new_text_top_words(new_text, corpus_word_counts):\n newtext_scores = new_text_word_score(new_text, corpus_word_counts)\n sorted_list = sorted(newtext_scores.items(), key=lambda x: x[1], reverse=True)\n \n #now remove the scores\n top_words_list = [item[0] for item in sorted_list]\n return list_to_string(top_words_list[:10])",
"def fetch_top_k_words(k):\n\treturn redis_wcloud_cli.zrange(WORD_CLOUD_SET,0,k,desc=True,withscores=True)",
"def GetTopKeywords(self, num_keywords):\n pairs = [(key,val) for key,val in self.all_word_freq_dict.iteritems()]\n pairs = sorted(pairs, cmp=self.decrease_sort)\n\n #return keywords that aren't too common; those that aren't stop words\n return filter(lambda word: word not in Profile.stop_words, map(lambda (k,v): k, pairs))[:num_keywords]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate a password hash.
|
def generate_password_hash(self, password):
hash = generate_password_hash(password)
return hash
|
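The method above simply delegates to a module-level `generate_password_hash`; in Flask-style code that helper typically comes from `werkzeug.security` (an assumption here, since the import is not shown). A minimal usage sketch with Werkzeug's helpers, including verification via `check_password_hash`:

from werkzeug.security import generate_password_hash, check_password_hash

# Hashing embeds the method and a random salt, so hashing the same
# password twice yields different strings.
pw_hash = generate_password_hash("s3cret")

# Verification compares a candidate password against the stored hash.
assert check_password_hash(pw_hash, "s3cret")
assert not check_password_hash(pw_hash, "wrong-password")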
[
"def gen_hash_password(password):\n import random\n letters = 'abcdefghijklmnopqrstuvwxyz0123456789'\n p = ''\n random.seed()\n for x in range(32):\n p += letters[random.randint(0, len(letters)-1)]\n return p",
"def hash_password(self, password):\n password = hashpw(password.encode('utf-8'), gensalt()).decode('utf-8')\n return password",
"def gen_hash(name, password):\n if name in __GEN_HASH_METHODS:\n return __GEN_HASH_METHODS[name](password)\n else:\n raise ValueError(\n \"Hash-method '{}' does not exists\"\n .format(name))",
"def generate_password():\n return Credential.generate_password()",
"def lm_password_hash(password):\n ucase_password=password.upper()[:14]\n while len(ucase_password)<14:\n\tucase_password+=\"\\0\"\n password_hash=des_hash(ucase_password[:7])\n password_hash+=des_hash(ucase_password[7:])\n return password_hash",
"def create_hash(password, salt=uuid.uuid4().hex):\n\n hash_pass = hashlib.sha512((password + salt).encode('utf-8')).hexdigest()\n\n return hash_pass, salt",
"def hash_password(self, password):\n self.password_hash = pswd_context.hash(password)",
"def make_pwhash(algo, password, iterations):\n salt = binascii.hexlify(os.urandom(16))\n hsh = pbkdf2_hmac(algo, password.encode(), salt, iterations)\n hsh = binascii.hexlify(hsh)\n hsh = \"%s$%s$%s\" % (algo, salt.decode(), hsh.decode())\n return hsh",
"def hash_password(self):\n self.password = get_password_hash(self.password)",
"def test_hashpw(self):\n salt = '$2a$04$e9aKlXB7x0Uacbi7tRyKA.'\n should_be = ('$2a$04$e9aKlXB7x0Uacbi7tRyKA.ZE1qLWlAKZiMV9P2q8bI.'\n 'azUYj2EcFS')\n\n hashed = bcrypt.hashpw('abc', salt)\n\n eq_(hashed, should_be)",
"def hash_nt_password_hash(password_hash):\n md4_context = md4.new()\n md4_context.update(password_hash)\n\n res = md4_context.digest()\n return res",
"def gen_pass():\n length = random.randint(7, 9)*5\n alphabet = string.ascii_letters + string.digits\n password = ''.join(secrets.choice(alphabet) for i in range(length))\n return password",
"def hash_password(raw_password):\n return bcrypt.hashpw(raw_password, bcrypt.gensalt())",
"def gen_pass(self):\n\n length = int(self.mainwindow_gui.length_slider.value())\n password = \"\"\n\n if (self.mainwindow_gui.include_numbers.isChecked()):\n password = functions.generate_password(length=length, include_numbers=True)\n else:\n password = functions.generate_password(length=length, include_numbers=False)\n\n self.update_status(\"status\", \"Password Generated\")\n self.mainwindow_gui.output_edit.setText(password)",
"def generate_password():\n return \"\".join(\n [\n secrets.choice(\n string.ascii_letters +\n string.digits +\n string.punctuation\n )\n for _ in range(32)\n ]\n )",
"def make_password(password, salt=None, hasher='default'):\n if password is None:\n return UNUSABLE_PASSWORD_PREFIX + get_random_string(\n UNUSABLE_PASSWORD_SUFFIX_LENGTH)\n hasher = Argon2PasswordHasher()\n salt = salt or hasher.salt()\n return hasher.encode(password, salt)",
"def make_password(password, salt=None, hasher='default'):\n if password is None:\n return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)\n \n hasher = get_hasher()\n\n if not salt:\n salt = hasher.salt()\n\n return hasher.encode(password, salt)",
"def gen_hexdigest(raw_password, algorithm=BCRYPT, salt=None):\n if raw_password is None:\n raise ValueError('No empty passwords, fool')\n if algorithm == BCRYPT:\n # bcrypt has a special salt\n if salt is None:\n salt = bcrypt.gensalt()\n return (algorithm, salt, bcrypt.hashpw(raw_password, salt))\n raise ValueError('Unknown password algorithm')",
"def hash_password(password):\n password = password.encode('utf-8')\n salt = app.config['SECRET_KEY']\n return hashlib.md5(salt + password).hexdigest()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a runnable test suite from the given datasources and settings.
|
def TestSuite(datasources, settings):
datasources = [utils.abspath(path) for path in datasources]
suite = _get_suite(datasources, settings['SuiteNames'], settings['WarnOnSkipped'])
suite.set_options(settings)
_check_suite_contains_tests(suite, settings['RunEmptySuite'])
return suite
|
[
"def construct_test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(FRAMConnection))\n suite.addTest(unittest.makeSuite(FRAMActions))\n return suite",
"def _create_test_suite(test_cases: [unittest.TestCase]) -> unittest.TestSuite:\n suite = unittest.TestSuite()\n\n # Add each test case to suite\n for case in test_cases:\n # Load tests of current test case\n case_tests = unittest.defaultTestLoader.loadTestsFromTestCase(case)\n # Add tests to suite\n suite.addTest(case_tests)\n\n return suite",
"def construct_test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(UserInputOneTests))\n suite.addTest(unittest.makeSuite(UserInputTwoTests))\n suite.addTest(unittest.makeSuite(UserInputThreeTests))\n return suite",
"def make(name, tests, environment=None, default=True):\n suite = _TestSuite(name, tests, environment, default)\n testsuites.append(suite)\n return suite",
"def create_suite(self, test_types, with_crypto=False,\n run_known_fails=False):\n suite = unittest.TestSuite()\n\n for _type in test_types:\n tests = self._detect_tests(_type.test_dir)\n # Create test cases for a specific type.\n for test_name in tests:\n suite.addTest(_type(test_name, with_crypto=with_crypto,\n run_known_fails=run_known_fails))\n\n return suite",
"def suite():\n print(\"Inside suite()...\")\n\n # Create a test suite by collecting all test cases defined\n # in MyUnitTestA. By default it only looks for methods starting\n # with test*\n suite_a = unittest.makeSuite(MyUnitTestA)\n\n # Similarly, create suite_b using testcases from MyUnitTestB\n suite_b = unittest.makeSuite(MyUnitTestB)\n\n # Add a new testcase to suite_b.\n suite_b.addTest(MyUnitTestB(\"not_called_by_default\"))\n\n # Return a composite test suite containing suite_a and suite_b\n return unittest.TestSuite((suite_a, suite_b))",
"def makeSuite():\n result = unittest.TestSuite()\n test_modules = pysupport.getPackageModules(__file__)\n # Sort test modules names by case insensitive compare to get nicer output\n caseInsensitiveCompare = lambda a, b: cmp(a.lower(), b.lower())\n test_modules.sort(caseInsensitiveCompare)\n\n for mod_name in test_modules:\n if not mod_name.startswith('test_'): continue\n\n # Import module\n module = __import__(__name__ + '.' + mod_name, \n \tglobals(), locals(), ['__file__'])\n # Collect TestCase and create suite for each one\n test_cases = [unittest.makeSuite(obj, 'test') \n for name, obj in module.__dict__.items()\n if name.endswith('TestCase')]\n \n # Add tests suites\n if test_cases:\n suite = unittest.TestSuite(test_cases)\n result.addTest(suite)\n\n return result",
"def run_tests(self):\n settings.configure(\n DEBUG = True,\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': ':memory:',\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n },\n INSTALLED_APPS = self.INSTALLED_APPS + self.apps,\n )\n from django.test.simple import DjangoTestSuiteRunner\n failures = DjangoTestSuiteRunner().run_tests(self.apps, verbosity=1)\n if failures:\n sys.exit(failures)",
"def suite():\n suite = unittest.TestSuite()\n for filename in glob.glob('test_*.py'):\n module = __import__(os.path.splitext(filename)[0])\n suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(module))\n return suite",
"def makeTestSuite (c,p):\n\n h = p.headString()\n script = g.getScript(c,p).strip()\n if not script:\n print \"no script in %s\" % h\n return None\n\n try:\n exec script + '\\n' in {}\n suite = g.app.scriptDict.get(\"suite\")\n if not suite:\n print \"%s script did not set g.app.scriptDict\" % h\n return suite\n except:\n g.es_exception()\n return None",
"def suite():\n suite = unittest.TestSuite()\n suite.addTest(EnrollmentAccessTest.suite());\n return suite",
"def run_selected_tests():\n tests = ['test_something']\n suite = unittest.TestSuite(list(map(MyTestCase, tests)))\n return suite",
"def makeSuite(testCaseClass, prefix='test'):\r\n testFnNames = filter(lambda n,p=prefix: n[:len(p)] == p, \\\r\n dir(testCaseClass))\r\n cases = map(testCaseClass, testFnNames)\r\n return TestSuite(cases)",
"def build_suites_list(lang, include, exclude, application):\n defined_suites = {\n 'app_identity': app_identity_tests.suite(lang, application),\n 'blobstore' : blobstore_tests.suite(lang, application),\n 'channel': channel_tests.suite(lang, application),\n 'datastore' : datastore_tests.suite(lang, application),\n 'async_datastore' : async_datastore_tests.suite(lang, application),\n 'env_var' : environment_variable_tests.suite(lang, application),\n 'images' : images_tests.suite(lang, application),\n 'memcache' : memcache_tests.suite(lang, application),\n 'ndb' : ndb_tests.suite(lang, application),\n 'secure_url' : secure_url_tests.suite(lang, application),\n 'taskqueue' : taskqueue_tests.suite(lang, application),\n 'urlfetch': urlfetch_tests.suite(lang, application),\n 'users' : user_tests.suite(lang, application),\n 'xmpp' : xmpp_tests.suite(lang, application),\n 'cron' : cron_tests.suite(lang, application),\n 'logservice': logservice_tests.suite(lang, application),\n 'modules' : modules_tests.suite(lang, application),\n 'runtime': runtime_tests.suite(lang, application),\n 'search': search_tests.suite(lang, application),\n 'sessions': sessions_tests.suite(lang, application)\n }\n # Validation include and exclude lists\n for suite_name in include + exclude:\n if suite_name not in defined_suites:\n print_usage_and_exit(\"Unknown suite '{}'. Suite can be one of {}\"\n .format(suite_name, defined_suites.keys()))\n\n if include:\n suites = [suite for suite_name, suite in defined_suites.iteritems()\n if suite_name in include]\n if 'warmup' in include and 'warmup' not in exclude:\n warmup = warmup_tests.suite(lang, application)\n suites.insert(0, warmup)\n else:\n suites = [suite for suite_name, suite in defined_suites.iteritems()\n if suite_name not in exclude]\n if 'warmup' not in exclude:\n warmup = warmup_tests.suite(lang, application)\n suites.insert(0, warmup)\n if not suites:\n print_usage_and_exit('Must specify at least one suite to execute')\n return suites",
"def build_test_cases(name_prefix, test_mixins, browsers, resolutions, module_name, use_splinter=False):\n test_cases = []\n for browser in browsers:\n for resolution in resolutions:\n base_classes = [AcceptanceTestCase]\n test_parents = tuple(test_mixins + base_classes)\n test_case_name = ' '.join([browser, resolution, 'Test Case'])\n test_case_name = name_prefix + convert_to_camelcase(test_case_name)\n class_attrs = {\n 'browser_name': browser,\n 'resolution': RESOLUTIONS[resolution],\n }\n if use_splinter:\n class_attrs['use_splinter'] = True\n test_case = type(test_case_name, test_parents, class_attrs)\n test_cases.append(test_case)\n # Skip non-major test cases if run_full_suite() evaluates to false\n browser_resolution_is_major = browser == 'chrome' and resolution == 'desktop'\n if not browser_resolution_is_major:\n test_case = unittest.skipUnless(*run_full_suite())(test_case)\n module = sys.modules[module_name]\n for test_case in test_cases:\n setattr(module, test_case.__name__, test_case)",
"def __init__(self, tests=(), indexer=count(),\n base_work_dir=ROTEST_WORK_DIR, save_state=True, config=None,\n parent=None, run_data=None, enable_debug=False,\n skip_init=False, resource_manager=None):\n super(TestSuite, self).__init__()\n\n self.parent = parent\n name = self.get_name()\n self.identifier = next(indexer)\n self.resource_manager = resource_manager\n self.parents_count = self._get_parents_count()\n self.config = config\n\n if parent is not None:\n parent.addTest(self)\n\n core_log.debug(\"Initializing %r test-suite\", name)\n if len(self.components) == 0 and len(tests) == 0:\n raise AttributeError(\"%s: Components tuple can't be empty\" % name)\n\n core_log.debug(\"Creating database entry for %r test-suite\", name)\n self.work_dir = get_work_dir(base_work_dir, name, self)\n self.data = SuiteData(name=name, run_data=run_data)\n\n for test_component in chain(self.components, tests):\n\n if issubclass(test_component, TestCase):\n for method_name in test_component.load_test_method_names():\n test_item = test_component(parent=self,\n config=config,\n indexer=indexer,\n run_data=run_data,\n skip_init=skip_init,\n save_state=save_state,\n methodName=method_name,\n enable_debug=enable_debug,\n base_work_dir=self.work_dir,\n resource_manager=resource_manager)\n\n core_log.debug(\"Adding %r to %r\", test_item, self.data)\n\n elif issubclass(test_component, TestFlow):\n test_item = test_component(parent=self,\n config=config,\n indexer=indexer,\n run_data=run_data,\n skip_init=skip_init,\n save_state=save_state,\n enable_debug=enable_debug,\n base_work_dir=self.work_dir,\n resource_manager=resource_manager)\n\n core_log.debug(\"Adding %r to %r\", test_item, self.data)\n\n elif issubclass(test_component, TestSuite):\n test_item = test_component(parent=self,\n config=config,\n indexer=indexer,\n run_data=run_data,\n skip_init=skip_init,\n save_state=save_state,\n enable_debug=enable_debug,\n base_work_dir=self.work_dir,\n resource_manager=resource_manager)\n\n core_log.debug(\"Adding %r to %r\", test_item, self.data)\n\n else:\n raise TypeError(\"Components under TestSuite must be classes \"\n \"inheriting from TestCase or TestSuite, \"\n \"got %r\" % test_component)\n\n core_log.debug(\"Initialized %r test-suite successfully\", self.data)",
"def gen_pytest_xmls(args):\n if args.testcases and args.testsuites:\n return\n\n if not args.testrun_id:\n raise TestcasesException('The testrun id was not specified')\n gen_xmls.run_pytest(args.testrun_id)",
"def execute_random_suite(project):\n test_name = TestUtils.random_string()\n tests = [test_name]\n for t in tests:\n TestUtils.create_test(project, name=t)\n suite_name = TestUtils.random_string()\n TestUtils.create_suite(project, name=suite_name, tests=tests)\n execution = TestUtils.execute_suite(project, suite_name)\n execution['tests'] = tests\n return execution",
"def build_tests(self):\n from django.core.exceptions import ImproperlyConfigured\n from django.test.simple import build_suite, build_test\n try:\n from django.apps import apps\n get_app = apps.get_app_config\n except ImportError:\n from django.db.models import get_app\n tests = []\n packages = [self.options['label'], ] if \\\n self.options['label'] else self.packages\n for package in packages:\n try:\n if not self.options['autoreload']:\n if self.options['label']:\n try:\n tests.append(build_test(package))\n except (ImproperlyConfigured, ValueError) as e:\n self.handle_label_exception(e)\n else:\n app = get_app(package)\n tests.append(build_suite(app))\n else:\n # Wait for exceptions to be resolved.\n exception = None\n while True:\n try:\n if self.options['label']:\n try:\n tests.append(build_test(package))\n except (ImproperlyConfigured, ValueError) as e:\n self.handle_label_exception(e)\n else:\n app = get_app(package)\n tests.append(build_suite(app))\n break\n except LabelException:\n raise\n except Exception as e:\n if exception != str(e):\n traceback.print_exc()\n exception = str(e)\n time.sleep(1)\n except ImproperlyConfigured as e:\n log.info(\"Warning: %s\" % traceback.format_exc())\n except ImportError as e:\n log.info(\"Warning: %s\" % traceback.format_exc())\n\n return tests"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that if you create a card with rank 12, its rank_name will be "Queen"
|
def test_1_queen(self):
card = cards.Card(0, 12)
self.assertEqual(card.rank_name, "Queen")
|
[
"def testRankNames(self):\n RN = ['Two', 'Three', 'Four', 'Five', 'Six',\n 'Seven', 'Eight', 'Nine', 'Ten', \n 'Jack', 'Queen', 'King', 'Ace']\n s = \"c\" #testing rank not suit\n for r in range(2,14):\n myCard = Card(r,s)\n self.assertEqual(myCard.rankName(),RN[r-2]) #index of rank - 2 ",
"def testRanks(self): #GIVEN\n \n for i in range(2,15):\n myCard = Card(i,'c')\n self.assertEqual(myCard.rank(),i) # verifies that the card's rank is 'i'",
"def testRanks(self): # unit test for ranks 1-13\r\n \r\n for i in range(1,14):\r\n myCard = Card(i,'c') # create i of clubs\r\n self.assertEqual(myCard.rank(),i) # verifies that the card's rank is i\r",
"def testCreation(self):\n kickers = Cards.fromString(\"JC 8D 4S\")\n rank = PokerRank(PokerRank.PAIR, primaryCard=Rank.KING,\n kickers=kickers)\n self.assertIsNotNone(rank)\n self.assertNotEqual(rank, 0)\n type = rank.getType()\n self.assertEqual(type, PokerRank.PAIR,\n \"rank = (%s) %d != PAIR\" % (str(type), type))\n primaryRank = rank.getPrimaryCardRank()\n self.assertEqual(primaryRank, Rank.KING,\n \"primary rank = %s (%d) != KING\" % (str(primaryRank),\n primaryRank))\n secondaryRank = rank.getSecondaryCardRank()\n # Manual test here, otherwise string creation fails on sucess\n if secondaryRank is not None:\n self.fail(\"rank = %s (%d) != None\" % (str(secondaryRank),\n secondaryRank))\n kickerRanks = rank.getKickerRanks()\n for i, card in enumerate(kickers):\n self.assertEqual(card.rank, kickerRanks[i])",
"def test_card_ranks(self):\n sf = \"6C 7C 8C 9C TC\".split()\n fk = \"9D 9H 9S 9C 7D\".split()\n fh = \"TD TC TH 7C 7D\".split()\n\n self.assertEqual(poker_game.card_ranks(sf), [10, 9, 8, 7, 6])\n self.assertEqual(poker_game.card_ranks(fk), [9, 9, 9, 9, 7])\n self.assertEqual(poker_game.card_ranks(fh), [10, 10, 10, 7, 7])",
"def test_hand_rank(self):\n\n sf = \"6C 7C 8C 9C TC\".split()\n fk = \"9D 9H 9S 9C 7D\".split()\n fh = \"TD TC TH 7C 7D\".split()\n\n self.assertEqual(poker_game.hand_rank(sf), (8, 10))\n self.assertEqual(poker_game.hand_rank(fk), (7, 9, 7))\n self.assertEqual(poker_game.hand_rank(fh), (6, 10, 7))",
"def test_2_club(self):\n card = cards.Card(1, 2)\n self.assertEqual(card.suit_name, \"Clubs\")",
"def test_card_value(rank, suit, expected_value):\n my_card = card_game.Card(rank, suit)\n assert my_card.card_value == expected_value",
"def rank(card):\n\n if card % 100 == 1:\n return ' A'\n elif card % 100 == 11:\n return ' J'\n elif card % 100 == 12:\n return ' Q'\n elif card % 100 == 13:\n return ' K'\n else:\n return card % 100",
"def test_player_add_card(player):\n new_card = card_game.Card(14, \"Clubs\")\n assert (len(player.cards) == 1) == (player.score == 56\n )",
"def card(rank, suit):\n return card_game.Card(rank, suit)",
"def rank_card(card):\n return RANKS[card[0]]",
"def test_Serialize(self):\n #Confirm its number than suit\n test_card = Card(3, 'Hearts')\n self.assertEqual(test_card.serialize(), (3, 'Hearts'))",
"def name(card):\n suit = card[1]\n val = card[0]\n if val == \"A\":\n val = \"Ace\"\n elif val == \"K\":\n val = \"King\"\n elif val == \"Q\":\n val = \"Queen\"\n elif val == \"J\":\n val = \"Jack\"\n elif val == \"0\":\n val = \"10\"\n\n if suit == \"H\":\n suit = \"Hearts\"\n elif suit == \"D\":\n suit = \"Diamonds\"\n elif suit == \"C\":\n suit = \"Clubs\"\n elif suit == \"S\":\n suit = \"Spades\"\n return f\"{val} of {suit}\"",
"def test_rank_challenge_within_2(self):\n self.opponent.userprofile.rank = 1\n self.challenger.userprofile.rank = 2\n self.opponent.save()\n self.challenger.save()\n self.assertTrue(self.opponent.userprofile.can_challenge(self.challenger))",
"def test_rank_challenge_greater_then_2(self):\n self.opponent.userprofile.rank = 1\n self.challenger.userprofile.rank = 4\n self.opponent.save()\n self.challenger.save()\n self.assertFalse(self.opponent.userprofile.can_challenge(self.challenger))",
"def __init__(self, suit, nb):\n self.suit = suit\n self.rank = nb\n if nb in [11, 12, 13]:\n # assigns the rank and value for jack, queen and king\n self.rank = Card.heads[nb-10]\n self.value = 10\n elif nb == 1:\n # assigns the rank and value for ace\n self.rank = Card.heads[nb-1]\n self.value = 11\n else:\n # assigns the value for all other cards\n self.value = nb",
"def test_5_deal_card_return(self):\n deck = cards.Deck()\n card = cards.Card(3, 13).__str__()\n dealt = deck.deal_card(i=-1).__str__()\n self.assertEqual(dealt, card)",
"def test_show_winner(game):\n # Assumes sorted deck\n game.play_card_game()\n assert game.show_winner() == \"Player 2\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that if you create a card instance with suit 1, its suit_name will be "Clubs"
|
def test_2_club(self):
card = cards.Card(1, 2)
self.assertEqual(card.suit_name, "Clubs")
|
[
"def test_1_queen(self):\n card = cards.Card(0, 12)\n self.assertEqual(card.rank_name, \"Queen\")",
"def test_Construction(self):\n #Can make jokers (suit None)\n test_card = Card(0, None)\n #Any number given for a joker is set to 0\n test_card = Card(9999, None)\n self.assertEqual(test_card.number, 0)\n #All suits are options, '' or \"\" works for strings\n test_card = Card(1, 'Spades')\n test_card = Card(2, \"Hearts\")\n test_card = Card(13, \"Diamonds\")\n test_card = Card(10, 'Clubs')\n #Non-suit strings and non-plural suitnames are invalid\n with self.assertRaises(ValueError):\n test_card = Card(1, 'fakityFake')\n with self.assertRaises(ValueError):\n test_card = Card(1, 'Spade')\n #0 and numbers over 13 are invalid for non-Joker cards\n with self.assertRaises(ValueError):\n test_card = Card(0, 'Spades')\n with self.assertRaises(ValueError):\n test_card = Card(14, 'Spades')",
"def test_player_add_card(player):\n new_card = card_game.Card(14, \"Clubs\")\n assert (len(player.cards) == 1) == (player.score == 56\n )",
"def test_build_deck(self):\n suits = [\"Clubs\", \"Spades\", \"Hearts\", \"Diamonds\"]\n\n self.deck.build_deck()\n res_list = self.deck.deck\n exp_list = []\n for suit in suits:\n for value in range(2, 15):\n exp_list.append(card.Card(suit, value))\n index = 0\n\n for i in exp_list:\n self.assertEqual(i.show(), res_list[index].show())\n index += 1\n\n exp = 52\n res = len(res_list)\n self.assertEqual(res, exp)",
"def testRanks(self): #GIVEN\n \n for i in range(2,15):\n myCard = Card(i,'c')\n self.assertEqual(myCard.rank(),i) # verifies that the card's rank is 'i'",
"def test_card_value(rank, suit, expected_value):\n my_card = card_game.Card(rank, suit)\n assert my_card.card_value == expected_value",
"def testRankNames(self):\n RN = ['Two', 'Three', 'Four', 'Five', 'Six',\n 'Seven', 'Eight', 'Nine', 'Ten', \n 'Jack', 'Queen', 'King', 'Ace']\n s = \"c\" #testing rank not suit\n for r in range(2,14):\n myCard = Card(r,s)\n self.assertEqual(myCard.rankName(),RN[r-2]) #index of rank - 2 ",
"def testRanks(self): # unit test for ranks 1-13\r\n \r\n for i in range(1,14):\r\n myCard = Card(i,'c') # create i of clubs\r\n self.assertEqual(myCard.rank(),i) # verifies that the card's rank is i\r",
"def test_5_deal_card_return(self):\n deck = cards.Deck()\n card = cards.Card(3, 13).__str__()\n dealt = deck.deal_card(i=-1).__str__()\n self.assertEqual(dealt, card)",
"def test_GetColor(self):\n #Jokers have no color\n test_card = Card(0, None)\n self.assertEqual(test_card.getColor(), None)\n #Spades are \"Black\"\n test_card = Card(2, 'Spades')\n self.assertEqual(test_card.getColor(), \"Black\")\n #Clubs are \"Black\"\n test_card = Card(3, 'Clubs')\n self.assertEqual(test_card.getColor(), 'Black')\n #Hearts are \"Red\"\n test_card = Card(5, 'Hearts')\n self.assertEqual(test_card.getColor(), 'Red')\n #Diamonds are \"Red\"\n test_card = Card(6, 'Diamonds')\n self.assertEqual(test_card.getColor(), \"Red\")",
"def suit(card):\n\n # Clubs\n if card in range(100,114):\n return \" \\u2663\"\n # Diamonds\n elif card in range(200,214):\n return f\" {color.red}\\u2666{color.blk}\"\n # Hearts\n elif card in range(300,314):\n return f\" {color.red}\\u2665{color.blk}\"\n # Spades\n else:\n return \" \\u2660\"",
"def test_Serialize(self):\n #Confirm its number than suit\n test_card = Card(3, 'Hearts')\n self.assertEqual(test_card.serialize(), (3, 'Hearts'))",
"def test_check_cards(self):\n self.game.set_player(1, \"Wille\")\n self.game.set_player(2, \"Timmy\")\n p_1, p_2 = self.game.get_players()\n cards = []\n card1 = card.Card(\"Diamonds\", 5)\n cards.append(card1)\n p_1.cardhand.recieve_cards(cards)\n p_2.cardhand.recieve_cards(cards)\n self.assertTrue(self.game.check_cards())\n\n dck = deck.Deck()\n dck.build_deck()\n dck1 = dck.get_deck()\n p_1.cardhand.recieve_cards(dck1)\n self.assertTrue(self.game.check_cards())\n\n self.game.start()\n self.assertFalse(self.game.check_cards())",
"def test_take_card_from_market(self):\n wheat1 = cards.CardWheat(self.game)\n wheat2 = cards.CardWheat(self.game)\n market = markets.MarketBase(self.game, name='Test Market', deck=[wheat1, wheat2])\n got_wheat = market.take_card(wheat1)\n self.assertEqual(type(got_wheat), cards.CardWheat)\n available = market.cards_available()\n self.assertEqual(len(available), 1)\n for (card, count) in available.items():\n self.assertEqual(type(card), cards.CardWheat)\n self.assertEqual(count, 1)",
"def name(card):\n suit = card[1]\n val = card[0]\n if val == \"A\":\n val = \"Ace\"\n elif val == \"K\":\n val = \"King\"\n elif val == \"Q\":\n val = \"Queen\"\n elif val == \"J\":\n val = \"Jack\"\n elif val == \"0\":\n val = \"10\"\n\n if suit == \"H\":\n suit = \"Hearts\"\n elif suit == \"D\":\n suit = \"Diamonds\"\n elif suit == \"C\":\n suit = \"Clubs\"\n elif suit == \"S\":\n suit = \"Spades\"\n return f\"{val} of {suit}\"",
"def test_card_holder_name_is_provided(self):\n\t\tself.assertTrue(self.payment.get_card_holder())",
"def card(rank, suit):\n return card_game.Card(rank, suit)",
"def testCreation(self):\n kickers = Cards.fromString(\"JC 8D 4S\")\n rank = PokerRank(PokerRank.PAIR, primaryCard=Rank.KING,\n kickers=kickers)\n self.assertIsNotNone(rank)\n self.assertNotEqual(rank, 0)\n type = rank.getType()\n self.assertEqual(type, PokerRank.PAIR,\n \"rank = (%s) %d != PAIR\" % (str(type), type))\n primaryRank = rank.getPrimaryCardRank()\n self.assertEqual(primaryRank, Rank.KING,\n \"primary rank = %s (%d) != KING\" % (str(primaryRank),\n primaryRank))\n secondaryRank = rank.getSecondaryCardRank()\n # Manual test here, otherwise string creation fails on sucess\n if secondaryRank is not None:\n self.fail(\"rank = %s (%d) != None\" % (str(secondaryRank),\n secondaryRank))\n kickerRanks = rank.getKickerRanks()\n for i, card in enumerate(kickers):\n self.assertEqual(card.rank, kickerRanks[i])",
"def test_deal_card(self):\n card = self.deck.cards[-1]\n dealt_card = self.deck.deal_cards()\n self.assertEqual(card, dealt_card)\n self.assertEqual(self.deck.count(), 51)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that if you invoke the deal_card method on a deck, it will return a card instance.
|
def test_5_deal_card_return(self):
deck = cards.Deck()
card = cards.Card(3, 13).__str__()
dealt = deck.deal_card(i=-1).__str__()
self.assertEqual(dealt, card)
|
[
"def test_deal_card(self):\n card = self.deck.cards[-1]\n dealt_card = self.deck.deal_cards()\n self.assertEqual(card, dealt_card)\n self.assertEqual(self.deck.count(), 51)",
"def test_deal(deck):\n # get a copy of the top card\n # make sure deal delivers top card of deck\n card_count = len(deck.the_deck)\n top_card = deck.the_deck[0]\n dealt_card = deck.deal()\n assert (top_card == dealt_card) == (len(deck.the_deck) == (card_count - 1))",
"def deal_card(self):\r\n deal = Deck()._deal()\r\n return deal[0]",
"def test_deal_hand(self):\n cards = self.deck.deal_hand(5)\n self.assertEqual(len(cards), 5)\n self.assertEqual(self.deck.count(), 47)",
"def deal_card(self):\n self.deal_deck=self.deck_list[-1]\n self.deck_list.pop(-1)\n # Deal a card object from the deck\n return self.deal_deck",
"def deal_card():\n new_card = random.choice(cards)\n return new_card",
"def test_deal_sufficient_cards(self):\n cards = self.deck._deal(5)\n self.assertEqual(len(cards), 5)\n self.assertEqual(self.deck.count(), 47)",
"def give_card(deck):\n while deck:\n return deck.pop()",
"def deal(deck, hand):\n hand.add_card(deck.draw_card())",
"def test_deal_deck(self):\n self.deck.build_deck()\n self.deck.shuffle_deck()\n res = self.deck.deal_deck()\n player1 = res[0]\n player2 = res[1]\n self.assertEqual(len(player1), len(player2))",
"def card(rank, suit):\n return card_game.Card(rank, suit)",
"def test_show_deck(self):\n dealer = Dealer()\n self.assertEqual(dealer.show_deck(), self.__class__.fifty_two_cards_hidden)\n self.assertEqual(dealer.show_deck(True), self.__class__.fifty_two_cards_visible)",
"def dealDealerCard(self):\n newCard = self.dealCard()\n if self.verbose:\n vPrint(\"Dealer dealt {}\".format(newCard.getPrettyStr()), self.verbose)\n self.dealerHand.receiveCard(newCard)",
"def test_deal_insufficient_cards(self):\n cards = self.deck._deal(65)\n self.assertEqual(len(cards), 52)\n self.assertEqual(self.deck.count(), 0)",
"def test_from_deck(self):\n self.assertRaises(\n InvalidDeckError,\n ExtendedDeckManager.from_deck,\n None\n )\n self.assertRaises(\n InvalidDeckError,\n ExtendedDeckManager.from_deck,\n \"A,K,Q,J,10\"\n )\n\n # now build a valid deck\n deck = ExtendedDeckManager.make_deck(\n Helper.normal_deck_suits(),\n Helper.normal_deck_values()\n )\n\n self.assertRaises(\n InvalidDeckError,\n ExtendedDeckManager.from_deck,\n deck,\n \"1,2,3,4\",\n None\n )\n self.assertRaises(\n InvalidDeckError,\n ExtendedDeckManager.from_deck,\n deck,\n None,\n \"1,2,3,4,5,6,7\"\n )\n deck_mgr = ExtendedDeckManager.from_deck(\n deck\n )\n # should be 52 card deck right now\n self.assertEqual(52, len(deck_mgr.deck()))\n # top card\n self.assertEqual(\n Card(\"Spades\", \"2\", 1, 2),\n deck_mgr.peek_card(52)\n )\n deck_mgr.empty_deck()\n # now should be 0\n self.assertEqual(0, len(deck_mgr.deck()))\n # no card\n self.assertEqual(\n None,\n deck_mgr.peek_card(1)\n )",
"def test_6_deal_card_fewer(self):\n deck = cards.Deck()\n original_card = len(deck.cards)\n deck.deal_card()\n dealt_card = len(deck.cards)\n self.assertGreater(original_card, dealt_card)",
"def test_deal_no_cards(self):\n self.deck._deal(self.deck.count())\n # The \"with\" statement checks for errors\n with self.assertRaises(ValueError):\n self.deck._deal(1)",
"def test_take_card_from_market(self):\n wheat1 = cards.CardWheat(self.game)\n wheat2 = cards.CardWheat(self.game)\n market = markets.MarketBase(self.game, name='Test Market', deck=[wheat1, wheat2])\n got_wheat = market.take_card(wheat1)\n self.assertEqual(type(got_wheat), cards.CardWheat)\n available = market.cards_available()\n self.assertEqual(len(available), 1)\n for (card, count) in available.items():\n self.assertEqual(type(card), cards.CardWheat)\n self.assertEqual(count, 1)",
"def test_take_card_from_market_2(self):\n wheat = cards.CardWheat(self.game)\n bakery = cards.CardBakery(self.game)\n market = markets.MarketBase(self.game, name='Test Market', deck=[wheat, bakery])\n got_wheat = market.take_card(wheat)\n self.assertEqual(type(got_wheat), cards.CardWheat)\n available = market.cards_available()\n self.assertEqual(len(available), 1)\n for (card, count) in available.items():\n self.assertEqual(type(card), cards.CardBakery)\n self.assertEqual(count, 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that if you invoke the deal_card method on a deck, the deck has one fewer card in it afterwards.
|
def test_6_deal_card_fewer(self):
deck = cards.Deck()
original_card = len(deck.cards)
deck.deal_card()
dealt_card = len(deck.cards)
self.assertGreater(original_card, dealt_card)
|
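The card-game tests in the last few entries (rank names, suit names, dealing) all exercise the same unseen `cards` module. Below is a minimal, self-contained stand-in consistent with those tests; the suit and rank orderings beyond the indices the tests actually pin down (suit 1 -> "Clubs", rank 12 -> "Queen", the last card of a fresh deck being Card(3, 13)) are assumptions:

class Card:
    suit_names = ["Diamonds", "Clubs", "Hearts", "Spades"]            # only index 1 is confirmed
    rank_names = [None, "Ace", "2", "3", "4", "5", "6", "7", "8",
                  "9", "10", "Jack", "Queen", "King"]                 # only index 12 is confirmed

    def __init__(self, suit, rank):
        self.suit, self.rank = suit, rank
        self.suit_name = Card.suit_names[suit]
        self.rank_name = Card.rank_names[rank]

    def __str__(self):
        return f"{self.rank_name} of {self.suit_name}"


class Deck:
    def __init__(self):
        # Suits 0..3, ranks 1..13, so the last card built is Card(3, 13).
        self.cards = [Card(s, r) for s in range(4) for r in range(1, 14)]

    def deal_card(self, i=-1):
        # Remove and return one card; the deck shrinks by one.
        return self.cards.pop(i)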
[
"def test_deal_card(self):\n card = self.deck.cards[-1]\n dealt_card = self.deck.deal_cards()\n self.assertEqual(card, dealt_card)\n self.assertEqual(self.deck.count(), 51)",
"def test_deal_sufficient_cards(self):\n cards = self.deck._deal(5)\n self.assertEqual(len(cards), 5)\n self.assertEqual(self.deck.count(), 47)",
"def test_deal(deck):\n # get a copy of the top card\n # make sure deal delivers top card of deck\n card_count = len(deck.the_deck)\n top_card = deck.the_deck[0]\n dealt_card = deck.deal()\n assert (top_card == dealt_card) == (len(deck.the_deck) == (card_count - 1))",
"def test_deal_insufficient_cards(self):\n cards = self.deck._deal(65)\n self.assertEqual(len(cards), 52)\n self.assertEqual(self.deck.count(), 0)",
"def test_deal_hand(self):\n cards = self.deck.deal_hand(5)\n self.assertEqual(len(cards), 5)\n self.assertEqual(self.deck.count(), 47)",
"def test_deal_deck(self):\n self.deck.build_deck()\n self.deck.shuffle_deck()\n res = self.deck.deal_deck()\n player1 = res[0]\n player2 = res[1]\n self.assertEqual(len(player1), len(player2))",
"def test_deal_no_cards(self):\n self.deck._deal(self.deck.count())\n # The \"with\" statement checks for errors\n with self.assertRaises(ValueError):\n self.deck._deal(1)",
"def test_5_deal_card_return(self):\n deck = cards.Deck()\n card = cards.Card(3, 13).__str__()\n dealt = deck.deal_card(i=-1).__str__()\n self.assertEqual(dealt, card)",
"def test_7_replace_card_more(self):\n deck = cards.Deck()\n removed = deck.deal_card()\n removed_list = len(deck.cards)\n deck.replace_card(removed)\n replaced_list = len(deck.cards)\n self.assertGreater(replaced_list, removed_list)",
"def verify_deck(self):\n if len(self.deck) == 5:\n self.deck.extend(self.discarded_cards)\n self.discarded_cards = []\n else:\n pass",
"def test_deplete_high_cards(self):\n card_to_take = cards.CardMine(self.game)\n deck = [\n # 1-6 Regular\n cards.CardWheat(self.game),\n cards.CardRanch(self.game),\n cards.CardBakery(self.game),\n cards.CardCafe(self.game),\n cards.CardConvenienceStore(self.game),\n cards.CardForest(self.game),\n \n # Major Establishments\n cards.CardStadium(self.game),\n cards.CardTVStation(self.game),\n cards.CardBusinessCenter(self.game),\n\n # 7+ Regular\n cards.CardCheeseFactory(self.game),\n card_to_take,\n cards.CardMackerelBoat(self.game),\n cards.CardTunaBoat(self.game),\n cards.CardAppleOrchard(self.game),\n ]\n market = markets.MarketBrightLights(self.game, deck=deck)\n self.assertEqual(len(market.cards_available()), 12)\n self.assertEqual(len(market.stock_low.cards_available()), 5)\n self.assertEqual(len(market.stock_major.cards_available()), 2)\n self.assertEqual(len(market.stock_high.cards_available()), 5)\n self.assertEqual(len(market.stock_low.deck), 1)\n self.assertEqual(len(market.stock_major.deck), 1)\n self.assertEqual(len(market.stock_high.deck), 0)\n card = market.take_card(card_to_take)\n self.assertEqual(len(market.cards_available()), 11)\n self.assertEqual(len(market.stock_low.cards_available()), 5)\n self.assertEqual(len(market.stock_major.cards_available()), 2)\n self.assertEqual(len(market.stock_high.cards_available()), 4)\n self.assertEqual(len(market.stock_low.deck), 1)\n self.assertEqual(len(market.stock_major.deck), 1)\n self.assertEqual(len(market.stock_high.deck), 0)",
"def test_cli_cut_deck(engine):\n assert len(engine.deck) == 52 * 6 - 65",
"def test_get_total_cards(self):\n assert_equals(Deck.objects.get(ID=1).get_total_cards(), 3)",
"def test_deplete_low_cards(self):\n card_to_take = cards.CardWheat(self.game)\n deck = [\n # 1-6 Regular\n card_to_take,\n cards.CardRanch(self.game),\n cards.CardBakery(self.game),\n cards.CardCafe(self.game),\n cards.CardConvenienceStore(self.game),\n \n # Major Establishments\n cards.CardStadium(self.game),\n cards.CardTVStation(self.game),\n cards.CardBusinessCenter(self.game),\n\n # 7+ Regular\n cards.CardCheeseFactory(self.game),\n cards.CardMine(self.game),\n cards.CardMackerelBoat(self.game),\n cards.CardTunaBoat(self.game),\n cards.CardAppleOrchard(self.game),\n cards.CardFruitAndVeg(self.game),\n ]\n market = markets.MarketBrightLights(self.game, deck=deck)\n self.assertEqual(len(market.cards_available()), 12)\n self.assertEqual(len(market.stock_low.cards_available()), 5)\n self.assertEqual(len(market.stock_major.cards_available()), 2)\n self.assertEqual(len(market.stock_high.cards_available()), 5)\n self.assertEqual(len(market.stock_low.deck), 0)\n self.assertEqual(len(market.stock_major.deck), 1)\n self.assertEqual(len(market.stock_high.deck), 1)\n card = market.take_card(card_to_take)\n self.assertEqual(len(market.cards_available()), 11)\n self.assertEqual(len(market.stock_low.cards_available()), 4)\n self.assertEqual(len(market.stock_major.cards_available()), 2)\n self.assertEqual(len(market.stock_high.cards_available()), 5)\n self.assertEqual(len(market.stock_low.deck), 0)\n self.assertEqual(len(market.stock_major.deck), 1)\n self.assertEqual(len(market.stock_high.deck), 1)",
"def test_card_piles_full(self):\n self.assertTrue(len(self.game.library)>5)\n self.assertEqual(len(self.game.jacks), 6)",
"def test_deplete_major_cards(self):\n card_to_take = cards.CardStadium(self.game)\n deck = [\n # 1-6 Regular\n cards.CardWheat(self.game),\n cards.CardRanch(self.game),\n cards.CardBakery(self.game),\n cards.CardCafe(self.game),\n cards.CardConvenienceStore(self.game),\n cards.CardForest(self.game),\n \n # Major Establishments\n card_to_take,\n cards.CardTVStation(self.game),\n\n # 7+ Regular\n cards.CardCheeseFactory(self.game),\n cards.CardMine(self.game),\n cards.CardMackerelBoat(self.game),\n cards.CardTunaBoat(self.game),\n cards.CardAppleOrchard(self.game),\n cards.CardFruitAndVeg(self.game),\n ]\n market = markets.MarketBrightLights(self.game, deck=deck)\n self.assertEqual(len(market.cards_available()), 12)\n self.assertEqual(len(market.stock_low.cards_available()), 5)\n self.assertEqual(len(market.stock_major.cards_available()), 2)\n self.assertEqual(len(market.stock_high.cards_available()), 5)\n self.assertEqual(len(market.stock_low.deck), 1)\n self.assertEqual(len(market.stock_major.deck), 0)\n self.assertEqual(len(market.stock_high.deck), 1)\n card = market.take_card(card_to_take)\n self.assertEqual(len(market.cards_available()), 11)\n self.assertEqual(len(market.stock_low.cards_available()), 5)\n self.assertEqual(len(market.stock_major.cards_available()), 1)\n self.assertEqual(len(market.stock_high.cards_available()), 5)\n self.assertEqual(len(market.stock_low.deck), 1)\n self.assertEqual(len(market.stock_major.deck), 0)\n self.assertEqual(len(market.stock_high.deck), 1)",
"def test_build_deck(self):\n suits = [\"Clubs\", \"Spades\", \"Hearts\", \"Diamonds\"]\n\n self.deck.build_deck()\n res_list = self.deck.deck\n exp_list = []\n for suit in suits:\n for value in range(2, 15):\n exp_list.append(card.Card(suit, value))\n index = 0\n\n for i in exp_list:\n self.assertEqual(i.show(), res_list[index].show())\n index += 1\n\n exp = 52\n res = len(res_list)\n self.assertEqual(res, exp)",
"def test_play_card_game(game):\n game.play_card_game()\n assert (len(game.players[0].cards) >= 3) == (len(game.players[1].cards) >= 3)",
"def dealer_action(cards, deck):\n while cards_value(cards) < 16:\n cards = draw(cards, deck)\n return cards"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
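For orientation, here is a minimal, hypothetical sketch of the `cards` module interface that the deal_card/replace_card tests above appear to exercise; the real Card and Deck classes are not part of this dataset, so every name and default below is an assumption inferred from the test code.

class Card:
    SUITS = {1: "Diamonds", 2: "Clubs", 3: "Hearts", 4: "Spades"}
    RANKS = {1: "Ace", 11: "Jack", 12: "Queen", 13: "King"}

    def __init__(self, suit=1, rank=2):
        self.suit = suit
        self.rank = rank

    def __str__(self):
        rank_name = self.RANKS.get(self.rank, str(self.rank))
        return "{} of {}".format(rank_name, self.SUITS[self.suit])


class Deck:
    def __init__(self):
        # 52 cards; the suit ordering is chosen so that Card(3, 13) sits on
        # top, matching what test_5_deal_card_return expects from deal_card(i=-1).
        self.cards = [Card(suit, rank) for suit in (1, 2, 4, 3)
                      for rank in range(1, 14)]

    def deal_card(self, i=-1):
        # Remove and return the card at index i; the deck shrinks by one.
        return self.cards.pop(i)

    def replace_card(self, card):
        # Put a previously dealt card back; the deck grows by one.
        self.cards.append(card)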
Test that if you invoke the replace_card method, the deck has one more card in it afterwards. (Use the deal_card function first to remove a card from the deck, then add the same card back in.)
|
def test_7_replace_card_more(self):
deck = cards.Deck()
removed = deck.deal_card()
removed_list = len(deck.cards)
deck.replace_card(removed)
replaced_list = len(deck.cards)
self.assertGreater(replaced_list, removed_list)
|
[
"def test_deal_card(self):\n card = self.deck.cards[-1]\n dealt_card = self.deck.deal_cards()\n self.assertEqual(card, dealt_card)\n self.assertEqual(self.deck.count(), 51)",
"def test_deal(deck):\n # get a copy of the top card\n # make sure deal delivers top card of deck\n card_count = len(deck.the_deck)\n top_card = deck.the_deck[0]\n dealt_card = deck.deal()\n assert (top_card == dealt_card) == (len(deck.the_deck) == (card_count - 1))",
"def test_6_deal_card_fewer(self):\n deck = cards.Deck()\n original_card = len(deck.cards)\n deck.deal_card()\n dealt_card = len(deck.cards)\n self.assertGreater(original_card, dealt_card)",
"def deal_card():\n new_card = random.choice(cards)\n return new_card",
"def test_5_deal_card_return(self):\n deck = cards.Deck()\n card = cards.Card(3, 13).__str__()\n dealt = deck.deal_card(i=-1).__str__()\n self.assertEqual(dealt, card)",
"def verify_deck(self):\n if len(self.deck) == 5:\n self.deck.extend(self.discarded_cards)\n self.discarded_cards = []\n else:\n pass",
"def test_market_replace_with_new_pile(self):\n deck = [\n cards.CardWheat(self.game),\n cards.CardRanch(self.game),\n cards.CardBakery(self.game),\n ]\n market = markets.MarketHarbor(self.game, deck=deck, pile_limit=2)\n available = market.cards_available()\n self.assertEqual(len(available), 2)\n initial_cardlist = list(sorted(available.keys()))\n for card in initial_cardlist:\n deck.remove(card)\n got_card = market.take_card(initial_cardlist[0])\n available = market.cards_available()\n self.assertEqual(len(available), 2)\n initial_cardlist.remove(got_card)\n in_market = sorted([deck[0], initial_cardlist[0]])\n new_cardlist = list(sorted(available.keys()))\n self.assertEqual(new_cardlist, in_market)",
"def test_market_replace_with_new_pile(self):\n deck = [\n cards.CardWheat(self.game),\n cards.CardRanch(self.game),\n cards.CardBakery(self.game),\n cards.CardCafe(self.game),\n cards.CardConvenienceStore(self.game),\n cards.CardForest(self.game),\n ]\n market = markets.MarketBrightLights(self.game, deck=deck)\n available = market.cards_available()\n self.assertEqual(len(available), 5)\n got_card = market.take_card(list(available.keys())[0])\n available = market.cards_available()\n self.assertEqual(len(available), 5)",
"def interact_with(deck: List[Card], card: Card, add: bool = False) -> None:\n if add:\n deck.append(card)\n else:\n for i, c in enumerate(deck):\n if c is card:\n deck.pop(i)",
"def test_deal_hand(self):\n cards = self.deck.deal_hand(5)\n self.assertEqual(len(cards), 5)\n self.assertEqual(self.deck.count(), 47)",
"def give_card(deck):\n while deck:\n return deck.pop()",
"def test_remove_card(self) -> None:\r\n self.localisation.apply_user_change(3, self.user)\r\n ownership = self.localisation.ownerships.get(owner=self.user)\r\n self.assertEqual(ownership.count, 3)\r\n self.localisation.apply_user_change(-3, self.user)\r\n self.assertFalse(self.localisation.ownerships.filter(owner=self.user).exists())",
"def deal_cards(player_decks: List[Deck], deck: Deck) -> None:\n\n for card in range(len(deck)):\n player_decks[card %len(player_decks)].append(deck.pop())",
"def deal_card(self):\n self.deal_deck=self.deck_list[-1]\n self.deck_list.pop(-1)\n # Deal a card object from the deck\n return self.deal_deck",
"def test_deal_deck(self):\n self.deck.build_deck()\n self.deck.shuffle_deck()\n res = self.deck.deal_deck()\n player1 = res[0]\n player2 = res[1]\n self.assertEqual(len(player1), len(player2))",
"def remove_card(self, card):\n if card not in self._cards:\n print('you dont have that card')\n self._cards.remove(card) # O(n)",
"def deal(deck, hand):\n hand.add_card(deck.draw_card())",
"def test_deal_sufficient_cards(self):\n cards = self.deck._deal(5)\n self.assertEqual(len(cards), 5)\n self.assertEqual(self.deck.count(), 47)",
"def test_add_card(self):\n self.hand.clear()\n\n # try adding a non-card object\n self.assertRaises(ValueError, self.hand.add_card, \"2C\")\n\n # try adding a valid card\n valid_card = card.Card(2, \"C\")\n self.assertTrue(self.hand.add_card(valid_card))\n\n # try adding an invalid card\n invalid_card_detected = False\n try:\n card.Card(15, \"C\")\n except card.InvalidCardError:\n invalid_card_detected = True\n\n self.assertTrue(invalid_card_detected)\n\n # try adding too many cards - populate four more in besides 2C\n for card_value in range(3, 7):\n self.hand.add_card(card.Card(card_value, \"C\"))\n\n extra_card = card.Card(7, \"C\")\n self.assertRaises(hand.MaximumCardError, self.hand.add_card, extra_card)\n\n # try adding a duplicate card and checking for exception\n self.hand.clear()\n self.hand.add_card(card.Card(2, \"C\"))\n self.assertRaises(hand.DuplicateCardError, self.hand.add_card, card.Card(2, \"C\"))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns X, y for all images in img_paths (a list of full paths to jpg images). Same parameters as utils.img_to_dataset().
|
def imgs_to_dataset(img_paths, window_size=10, squeeze=True, resize=100, padding=10):
X = list()
y = list()
for img_path in img_paths:
img = Image(img_path, resize=resize, padding=padding)
X_, y_ = img_to_dataset(img, window_size=window_size, squeeze=squeeze)
X.extend(X_)
y.extend(y_)
return np.array(X), np.array(y)
|
[
"def extract_images(paths):\n images = []\n for path in paths:\n ds = cv2.imread(path)\n ds = cv2.cvtColor(ds, cv2.COLOR_BGR2GRAY)\n images.append(ds)\n return images",
"def gather_images(datasets, batch_img_paths):\r\n n_batch = len(batch_img_paths)\r\n\r\n images = [[] for d in datasets]\r\n image_idx = [[] for d in datasets]\r\n\r\n for img_path in batch_img_paths:\r\n\r\n img_path_idx = index_by_path(datasets, img_path) \r\n\r\n for j, path_idx in enumerate(img_path_idx):\r\n\r\n images[j].extend(load_dataset_images(datasets[j][path_idx[0]], path_idx[1], 1))\r\n image_idx[j].append(path_idx[0]) # the model/dataset that the image is mapped to\r\n\r\n return images, image_idx",
"def load_images(path: str, file_ending: str=\".png\") -> (list, int, int):\n\n paths = []\n images = []\n\n # read each image in path as numpy.ndarray and append to images\n for img in os.listdir(path):\n if img.endswith(file_ending):\n paths.append(img)\n paths.sort()\n\n for img in paths:\n images.append(np.asarray(mpl.image.imread(path + \"/\" + img), np.float64))\n\n # set dimensions according to first image in images\n dimension_y = images[0].shape[0]\n dimension_x = images[0].shape[1]\n\n return images, dimension_x, dimension_y",
"def obtainDataAsTensors(im_path, im_label):\n\ttransformations = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])\n\timg = Image.open(im_path)\n\timg = img.convert('RGB')\n\timg = transformations(img)\n\n\tlabel = torch.from_numpy(np.asarray(im_label).reshape([1,1]))\n\n\treturn (img, label)",
"def read_images(img_paths):\n imgs = np.empty([len(img_paths), 160, 320, 3])\n\n for i, path in enumerate(img_paths):\n imgs[i] = imread(path)\n #image = load_img(path, target_size=(160, 320))\n #imgs[i] = img_to_array(image)\n\n return imgs",
"def load_ocr_data(path):\r\n \r\n# create list of all files ending in .jpg\r\n imlist = [os.path.join(path,f) for f in os.listdir(path) if f.endswith('.jpg')]\r\n# create labels\r\n labels = [int(imfile.split('/')[-1][0]) for imfile in imlist]\r\n \r\n# create features from the images\r\n features = []\r\n for imname in imlist:\r\n im = array(Image.open(imname).convert('L'))\r\n features.append(compute_feature(im))\r\n return array(features),labels",
"def extract_img_features(img_paths, model, device): \n start = time()\n img_features = []\n\n for image_path in img_paths:\n img_features.append(\n encode_image(model, image_path, device).cpu().data.numpy()\n )\n \n print(f\"Extracting image features took: {hms_string(time()-start)}\")\n\n return img_features",
"def load_images(image_folder_path):\n\n image_list = []\n for img in os.listdir(image_folder_path):\n image = plt.imread(image_folder_path + img)\n image = image.astype(np.float32)\n image = image / 255.\n image_list.append(image)\n\n return image_list",
"def find_images(self, path):\n paths = []\n for file in os.listdir(path):\n if not path.endswith('b.png'):\n paths.append(file)\n encoder = self.generate_encoder(paths)\n return np.array(paths), encoder",
"def loadData(path = \"../data/\"):\n\n I = None\n L = None\n s = None \n images = None\n \n for i in range(7):\n j = i+1\n temp = imread(path + 'input_' + str(j) + '.tif')\n temp = rgb2xyz(temp)\n fors = np.copy(temp)\n temp = temp[:,:,1] #Just take luminance (Y)\n ipyn = np.copy(temp)\n print(ipyn.shape)\n temp = np.reshape(temp, (temp.shape[0]*temp.shape[1]))\n \n \n if i == 0:\n I = np.copy(temp)\n images = np.copy(ipyn)\n else:\n I = np.vstack((I, temp))\n images = np.vstack((images, ipyn))\n \n sources = np.load(path + 'sources.npy')\n L = np.copy(sources)\n L = L.T\n \n # s = (431, 369, 3)\n s = (fors.shape[0], fors.shape[1])\n \n print(L.shape, temp.shape, I.shape, s)\n \n return I, L, s, images",
"def get_images(path):\n image_types = ['.jpg', '.png']\n images = [os.path.join(path, i) for i in os.listdir(path) if i[-4:]\n in image_types]\n return np.array(images)",
"def ReadTrainImages(input_dir):\n data_list = list()\n label_list = list()\n class_map = ClassMapping(\"./train_label\")\n for key in class_map.keys():\n path = input_dir + \"/\" + key + \"/images\"\n for fi in os.listdir(path):\n img = misc.imread(path + \"/\" + fi, mode='RGB')\n data_list.append(img)\n label_list.append(class_map[key])\n return (np.array(data_list, dtype=np.float32), \n np.array(label_list, dtype=np.int32))",
"def load():\n images = []\n for p in Path(DATASET_PATH).rglob('*' + PNG):\n images.append(str(p))\n return images",
"def find_images(self, path):\n paths = []\n for file in os.listdir(path):\n if not file.endswith('b.png'):\n paths.append(file)\n paths = np.array(paths)\n encoder, bases = self.generate_encoder(paths)\n return paths, encoder, bases",
"def prepare_dataset():\n dataset = []\n for img in os.listdir(dataset_path):\n label = label_image(img)\n path = os.path.join(dataset_path, img)\n print(path)\n\n try:\n # load image from the path\n img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n\n # resize images\n img = cv2.resize(img, (img_size, img_size))\n\n # append img and its label to dataset\n dataset.append([np.array(img), label])\n\n except Exception as e:\n logging.error(traceback.format_exc())\n\n shuffle(dataset)\n return dataset",
"def load_img(path: str) -> np.ndarray:\n return np.array(Image.open(path))",
"def __loadimagesAndLabelsToArrays(self, image_file_content, images_number: int, columns_number: int, rows_number: int, images_offset: int, label_file_content, labels_offset: int):\n images = np.frombuffer(image_file_content[images_offset:], dtype=np.uint8).reshape(\n images_number, columns_number*rows_number)/255\n labels = np.frombuffer(\n label_file_content[labels_offset:], dtype=np.uint8)\n return images, labels",
"def get_classifier_train_images():\n images, labels = get_images_labels_path((doors_path + \"*.jpg\", indoors_path + \"*.jpg\"), (1,2))\n \n x_train = numpy.array(images)\n y_train = numpy.array(labels)\n \n return x_train, y_train",
"def create_input(path):\n folder = path\n files = os.listdir(folder)\n x = []\n y = []\n image_paths = []\n scaler = MinMaxScaler(feature_range=(-0.1, 1.175))\n #noramlized as in LeCun, makes the mean input roughly 0 and the variance roughly 1.\n #This accelerates learning.\n for i, images in sorted(enumerate(files)):\n label = images[0:2] #class identifier is in these positions\n image_path = folder + '/' + images\n image_paths.append(image_path)\n image_read = cv2.imread(image_path, 0)\n resize = cv2.resize(image_read, (32, 32), interpolation=cv2.INTER_CUBIC)\n X_new = scaler.fit_transform(resize)\n x.append(X_new)\n y.append(int(label))\n X = np.array(x)\n n, m, p = X.shape\n x_aux = []\n for example in X:\n for row in example:\n for element in row:\n x_aux.append([element])\n x_aux = np.array(x_aux)\n x_aux = np.reshape(x_aux, (n, 32, 32, 1))\n return x_aux, y, image_paths"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
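As a hedged illustration of how the X, y arrays built above might be consumed, the sketch below fits a plain least-squares colorization model with NumPy; the glob pattern is made up, and it assumes y stacks the per-pixel B, G, R targets as rows, matching the values that the companion get_dataset_for_pixel document returns.

import glob
import numpy as np

# Hypothetical usage of imgs_to_dataset(): collect features from a folder of
# JPEGs and fit ordinary least squares via np.linalg.lstsq.
img_paths = sorted(glob.glob("photos/*.jpg"))        # assumed location
X, y = imgs_to_dataset(img_paths, window_size=11)

X_aug = np.hstack([X, np.ones((X.shape[0], 1))])     # add a bias column
coef, *_ = np.linalg.lstsq(X_aug, y, rcond=None)     # (n_features + 1, 3) weights
y_hat = X_aug @ coef                                 # predicted B, G, R per pixel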
Get 0-255 pixel values for an input vector. Returns a uint8 vector.
|
def to_255_scale(vector):
return np.round(vector * 255).astype('uint8')
|
[
"def vectorizar(self):\n img = cv2.cvtColor(self.img,cv2.COLOR_BGR2GRAY)\n self.vector = img.T.flatten().T\n return None",
"def getValue (self, row, column):\n value = 0\n try:\n value = __image__ [row, column]\n if value > 255 or value < 0:\n value = 0\n except:\n value = 0\n return value",
"def cast_residual_integer(rvecs):\n # maybe don't round?\n # return np.clip(rvecs * 255.0, -127, 127).astype(np.int8)\n # TODO: -128, 127\n return np.clip(np.round(rvecs * 255.0), -127, 127).astype(np.int8)",
"def convert_from_uint8(img):\n return img.astype(np.float32) / 255.0",
"def to_uint8(img):\n return img.astype(numpy.uint8)",
"def GetMaskValue(self) -> \"unsigned char\":\n return _itkHistogramThresholdImageFilterPython.itkHistogramThresholdImageFilterIUC2IUC2_GetMaskValue(self)",
"def orange():\n\n return color2float(Uint8Tensor([[253, 135, 86]]))",
"def negative_image(image):\n max_pixel = np.max(image)\n return np.uint8(max_pixel - image)",
"def GetPixel(self, *args) -> \"unsigned char &\":\n return _itkImagePython.itkImageUC2_GetPixel(self, *args)",
"def pink():\n\n return color2float(Uint8Tensor([[254, 194, 194]]))",
"def GetPixel(self, *args) -> \"itkCovariantVectorF2 &\":\n return _itkImagePython.itkImageCVF22_GetPixel(self, *args)",
"def GetPixel(self, *args) -> \"itkCovariantVectorF2 &\":\n return _itkImagePython.itkImageCVF23_GetPixel(self, *args)",
"def GetMaskValue(self) -> \"unsigned char\":\n return _itkHistogramThresholdImageFilterPython.itkHistogramThresholdImageFilterIF2IUC2_GetMaskValue(self)",
"def get_signed8(uint: int) -> int:\n if uint > 127:\n return uint - 256\n return uint",
"def GetPixel(self, *args) -> \"itkCovariantVectorF4 &\":\n return _itkImagePython.itkImageCVF43_GetPixel(self, *args)",
"def GetPixel(self, *args) -> \"itkCovariantVectorF4 &\":\n return _itkImagePython.itkImageCVF42_GetPixel(self, *args)",
"def GetPixel(self, *args) -> \"itkVectorD2 &\":\n return _itkImagePython.itkImageVD22_GetPixel(self, *args)",
"def as_uint8(img, shift_min_to_0=False, scale_max_to_255=False):\n if img.dtype.kind == 'f':\n # It's a floating point data-type.\n if img.min() > -1.0001 and img.max() < 1.0001:\n # Assume it's in range [0.0, 1.0] or [-1.0, 1.0].\n img = img * 255 # Scale it into range [0.0, 255.0].\n else:\n img = img.copy() # Just create a fresh copy for us to edit.\n elif shift_min_to_0 or scale_max_to_255:\n # The image has an integer data-type rather than floating-point.\n # Convert it to floating-point for shifting & scaling.\n img = img.astype(np.float32)\n\n if shift_min_to_0:\n img -= img.min()\n\n # Be wary of division-by-zero and negative maxima.\n img_max = img.max()\n if scale_max_to_255 and img_max > 0.0:\n img *= (255.0 / img_max)\n\n if img.dtype.kind == 'f':\n return np.clip(img, 0.0, 255.0).astype(np.uint8)\n else:\n return np.clip(img, 0, 255).astype(np.uint8)",
"def GetPixel(self, *args) -> \"itkVectorD2 &\":\n return _itkImagePython.itkImageVD23_GetPixel(self, *args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
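A small usage sketch for the to_255_scale helper above; the input values are illustrative and assume the vector is already normalized to the [0, 1] range.

import numpy as np

v = np.array([0.0, 0.25, 0.5, 1.0])
print(to_255_scale(v))   # -> [  0  64 128 255] as uint8

# Inputs outside [0, 1] overflow the uint8 range after rounding, so callers
# are expected to clip or normalize first.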
Adds padding to the image. Modifies .data.
|
def add_padding(self, padding=10, color=0):
self.data = cv2.copyMakeBorder(self.data, padding, padding, padding,
padding, cv2.BORDER_CONSTANT, value=color)
return self.data
|
[
"def AddPadding(data: str) -> str:\n last_block_width = len(data) % 8\n if last_block_width != 0:\n data += (8 - last_block_width) * Base32Const.PADDING_CHAR\n return data",
"def _pad_img(self, results):\n img = results[\"img\"]\n if self.size is not None:\n padded_img = mmcv.impad(img, shape=self.size, pad_val=self.pad_val)\n elif self.size_divisor is not None:\n padded_img = mmcv.impad_to_multiple(\n img, self.size_divisor, pad_val=self.pad_val\n )\n results[\"img\"] = padded_img\n results[\"pad_shape\"] = padded_img.shape\n results[\"pad_fixed_size\"] = self.size\n results[\"pad_size_divisor\"] = self.size_divisor",
"def padding_image_square(image, padd_value=(0,0,0)):\r\n width, height = image.size\r\n long_edge_size = width if width >= height else height\r\n\r\n img_padd = Image.new('RGB', (long_edge_size, long_edge_size), padd_value)\r\n if width > height:\r\n h_st = int((long_edge_size - height)/2)\r\n img_padd.paste(image, (0, h_st))\r\n else:\r\n w_st = int((long_edge_size - width)/2)\r\n img_padd.paste(image, (w_st, 0))\r\n return img_padd",
"def _unpad_img(padded_img, padding):\n if padding[0][1] == 0:\n img = padded_img[padding[0][0]:, padding[1][0]:-padding[1][1],:]\n elif padding[1][0] == 0:\n img = padded_img[padding[0][0]:-padding[0][1], padding[1][0]:,:]\n elif padding[0][1] == 0 and padding[1][0] == 0:\n img = padded_img[padding[0][0]:, padding[1][0]:,:]\n else:\n img = padded_img[padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1],:]\n return img",
"def _pad_img(img, window_size, subdivisions):\n aug = int(round(window_size * (1 - 1.0/subdivisions)))\n more_borders = ((aug, aug), (aug, aug), (0, 0))\n ret = np.pad(img, pad_width=more_borders, mode='reflect')\n # gc.collect()\n\n if PLOT_PROGRESS:\n # For demo purpose, let's look once at the window:\n plt.imshow(ret)\n plt.title(\"Padded Image for Using Tiled Prediction Patches\\n\"\n \"(notice the reflection effect on the padded borders)\")\n plt.show()\n return ret",
"def pad(img, padding, fill=0, padding_mode='constant'):\n if not (\n _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img)\n ):\n raise TypeError(\n 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.format(\n type(img)\n )\n )\n\n if _is_pil_image(img):\n return F_pil.pad(img, padding, fill, padding_mode)\n elif _is_tensor_image(img):\n return F_t.pad(img, padding, fill, padding_mode)\n else:\n return F_cv2.pad(img, padding, fill, padding_mode)",
"def _pad_img(img, window_size, subdivisions):\n aug = int(round(window_size * (1 - 1.0/subdivisions)))\n more_borders = ((aug, aug), (aug, aug), (0, 0))\n ret = np.pad(img, pad_width=more_borders, mode='reflect')\n gc.collect()\n\n return ret",
"def addPadding(self, encodedText: str) -> str:\n padding = 8 - len(encodedText) % 8\n paddingBin = f'{padding:08b}'\n return paddingBin + encodedText + '0'*padding",
"def pad(img, padding, fill=None, **kwargs):\n check_type(img)\n\n if not isinstance(padding, (numbers.Number, tuple)):\n raise TypeError('Got inappropriate padding arg')\n\n if isinstance(padding, collections.Sequence) and len(padding) not in [2, 4]:\n raise ValueError(\"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n if isinstance(padding, int):\n pad_left = pad_right = pad_top = pad_bottom = padding\n if isinstance(padding, collections.Sequence) and len(padding) == 2:\n pad_left = pad_right = padding[0]\n pad_top = pad_bottom = padding[1]\n if isinstance(padding, collections.Sequence) and len(padding) == 4:\n pad_left = padding[0]\n pad_top = padding[1]\n pad_right = padding[2]\n pad_bottom = padding[3]\n\n if fill is not None:\n assert 'constant_values' not in kwargs, \\\n \"Only one argument of `fill` and `constant_values` should be specified\"\n kwargs['constant_values'] = fill\n\n img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), **kwargs)\n return img",
"def _pad_values(image, value, target_height, target_width):\n image -= value\n\n image_shape = tf.shape(image)\n height, width = image_shape[0], image_shape[1]\n\n pad_heights = target_height - height\n pad_widths = target_width - width\n\n height_params = tf.stack([pad_heights // 2, pad_heights - pad_heights // 2])\n width_params = tf.stack([pad_widths // 2, pad_widths - pad_widths // 2])\n channel_params = tf.stack([0, 0])\n # [3, 2]\n paddings = tf.stack([height_params, width_params, channel_params])\n pad_image = tf.pad(image, paddings, constant_values=0)\n pad_image += value\n\n return pad_image",
"def add_padding_to_wcs(wcs_in, pad=(64,256)):\n wcs = wcs_in.deepcopy()\n\n is_new = True\n for attr in ['naxis1', '_naxis1']:\n if hasattr(wcs, attr):\n is_new = False\n value = wcs.__getattribute__(attr)\n if value is not None:\n wcs.__setattr__(attr, value+2*pad[1])\n\n for attr in ['naxis2', '_naxis2']:\n if hasattr(wcs, attr):\n is_new = False\n value = wcs.__getattribute__(attr)\n if value is not None:\n wcs.__setattr__(attr, value+2*pad[0])\n\n # Handle changing astropy.wcs.WCS attributes\n if is_new:\n #for i in range(len(wcs._naxis)):\n # wcs._naxis[i] += 2*pad\n wcs._naxis[0] += 2*pad[1]\n wcs._naxis[1] += 2*pad[0]\n \n wcs.naxis1, wcs.naxis2 = wcs._naxis\n else:\n wcs.naxis1 = wcs._naxis1\n wcs.naxis2 = wcs._naxis2\n\n wcs.wcs.crpix[0] += pad[1]\n wcs.wcs.crpix[1] += pad[0]\n\n # Pad CRPIX for SIP\n for wcs_ext in [wcs.sip]:\n if wcs_ext is not None:\n wcs_ext.crpix[0] += pad[1]\n wcs_ext.crpix[1] += pad[0]\n\n # Pad CRVAL for Lookup Table, if necessary (e.g., ACS)\n for wcs_ext in [wcs.cpdis1, wcs.cpdis2, wcs.det2im1, wcs.det2im2]:\n if wcs_ext is not None:\n wcs_ext.crval[0] += pad[1]\n wcs_ext.crval[1] += pad[0]\n\n return wcs",
"def _pad(self, a: bitarray) -> bitarray:\n pad_len = BLOCKSIZE - (len(a) % BLOCKSIZE) - 1\n padding = bitarray(\"1\" + \"0\" * pad_len)\n return a + padding",
"def pad4(image):\n return np.pad(image, [(0, 0), (4, 4), (4, 4), (0, 0)], mode='reflect')",
"def _dynamic_padding(self, batch_data, pad_id):\n pad_p_len = min(self.max_p_len, max(batch_data['passage_length']))\n pad_q_len = min(self.max_q_len, max(batch_data['question_length']))\n batch_data['passage_token_ids'] = [\n (ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len] for ids in batch_data['passage_token_ids']]\n batch_data['question_token_ids'] = [\n (ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len] for ids in batch_data['question_token_ids']]\n return batch_data, pad_p_len, pad_q_len",
"def _compute_padding(kernel_size, dilation, causal):\n\n if causal:\n return (kernel_size - 1) * dilation\n return ((kernel_size - 1) // 2) * dilation",
"def get_padded_image(original_image,\n patch_size=-1,\n padding={'x': 0, 'y': 0},\n pad_method='symmetric'):\n pad_with_patch_size = patch_size != -1\n\n if pad_with_patch_size:\n # this is the size of the edges around the image\n half_ps = patch_size // 2\n pad_x, pad_y = half_ps, half_ps\n else:\n pad_x, pad_y = padding['x'], padding['y']\n\n if original_image.ndim == 2:\n original_image = original_image[:, :, np.newaxis]\n\n if pad_method == \"symmetric\":\n padded_image = np.lib.pad(original_image,\n ((pad_y, pad_y), (pad_x, pad_x), (0, 0)),\n pad_method\n )\n elif pad_method == \"constant\":\n padded_image = np.lib.pad(original_image,\n ((pad_y, pad_y), (pad_x, pad_x), (0, 0)),\n pad_method,\n **{'constant_values': (\n (255, 255),\n (255, 255),\n (255, 255))\n }\n )\n\n return padded_image",
"def pad_icon(img, pad, fill=0):\n h, w, c = img.shape[:3]\n img_pad = np.full((h + 2 * pad, w + 2 * pad, c), fill, dtype=img.dtype)\n img_pad[pad : (pad + h), pad : (pad + w), :] = img\n return img_pad",
"def pad_img(img, window_size, channels=3, mode='symmetric'):\n height = width = window_size\n print('input shape {}'.format(img.shape))\n pad_shape = return_padding(img, height, width)\n img = np.pad(img,pad_shape,mode=mode)\n print('output shape {}'.format(img.shape))\n if PLOT_PROGRESS:\n # For demo purpose, let's look once at the window:\n plt.imshow(img)\n plt.title(\"Padded Image for Using Tiled Prediction Patches\\n\"\n \"(notice the reflection effect on the padded borders)\")\n plt.show()\n return img, pad_shape",
"def pad(array, height=100, width=100, pad_value=0):\n # Get the initial array size\n array_height, array_width = array.shape\n\n # Compute the top and bottom padding\n # If odd, add extra padding to bottom\n missing_height = (height - array_height)\n padded_height_top = max(0, missing_height//2)\n padded_height_bottom = max(0, missing_height//2 + missing_height%2)\n\n # Compute the left and right padding\n # If odd, add extra padding to right side\n missing_width = (height - array_width)\n padded_width_left = max(0, missing_width//2)\n padded_width_right = max(0, missing_width//2 + missing_width%2)\n\n # padding tuple-tuple\n padding = (\n (padded_height_top, padded_height_bottom),\n (padded_width_left, padded_width_right)\n )\n\n # Return the padded image array\n return np.pad(\n array,\n pad_width=padding,\n mode=\"constant\", # pad a constant value\n constant_values=pad_value, # set the constant pad value\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
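A hedged standalone equivalent of the add_padding method above, showing the same cv2.copyMakeBorder call on a plain array instead of the class's .data attribute.

import cv2
import numpy as np

# Surround a 100x100 BGR image with a 10-pixel black border on every side.
img = np.zeros((100, 100, 3), dtype=np.uint8)
padded = cv2.copyMakeBorder(img, 10, 10, 10, 10, cv2.BORDER_CONSTANT, value=0)
assert padded.shape == (120, 120, 3)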
Returns gray values for a window around the target pixel at location i, j. The window_size will be rounded, e.g. a window size of 11 -> 5 pixels to the left, right, up and down of the target. The gray pixel values of the window are returned, along with the target location's RGB values. The blue, green, and red values represent the response feature, y. The gray array represents the feature array, X. Note that zeta = (window_size-1)/2 SHOULD be less than or equal to the padding of the image, so be mindful of the padding when choosing the window size. DO account for padding in i and j; padding is not accounted for in i, j inside this function.
|
def get_dataset_for_pixel(self, i, j, window_size=10, squeeze=True):
zeta = int((window_size-1)/2)
# The BGR values represent the target features, y
b, g, r = self.data[i, j]
# Gray represents the predictive features, X
gr = self.gray[i - zeta : i + zeta + 1,
j - zeta : j + zeta + 1]
if squeeze:
gray = gr.reshape(-1, 1).squeeze()
else:
gray = gr
return gray, b, g, r
|
[
"def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None], \n xy_window=(64, 64), xy_overlap=(0.5, 0.5)):\n # If x and/or y start/stop positions not defined, set to image size\n x_start_stop[0] = x_start_stop[0] or 0\n x_start_stop[1] = x_start_stop[1] or img.shape[1]\n y_start_stop[0] = y_start_stop[0] or 0\n y_start_stop[1] = y_start_stop[1] or img.shape[0]\n\n # Compute the span of the region to be searched \n xspan = x_start_stop[1] - x_start_stop[0]\n yspan = y_start_stop[1] - y_start_stop[0]\n \n # Compute the number of pixels per step in x/y\n nx_pix_per_step = np.int(xy_window[0]*(1 - xy_overlap[0]))\n ny_pix_per_step = np.int(xy_window[1]*(1 - xy_overlap[1]))\n \n # Compute the number of windows in x/y\n nx_windows = np.int(xspan/nx_pix_per_step) - 1\n ny_windows = np.int(yspan/ny_pix_per_step) - 1\n \n window_list = []\n for ys in range(ny_windows):\n for xs in range(nx_windows):\n # Calculate window position\n startx = xs*nx_pix_per_step + x_start_stop[0]\n endx = startx + xy_window[0]\n starty = ys*ny_pix_per_step + y_start_stop[0]\n endy = starty + xy_window[1]\n \n # Append window position to list\n window_list.append(((startx, starty), (endx, endy)))\n # Return the list of windows\n return window_list",
"def get_window(self):\n self.current_window = np.zeros_like(self.window).astype(np.uint8)\n for col, img in self.current_icons.items():\n self.current_window[:self.vui_part, col[0]:col[1]] = img\n if self.running_mode == \"color\":\n self.set_colors(col=self.cols[self.modes.index(\"color\")])\n if self.current_pointer is not None and self.current_pointer[0]>0:\n cv2.circle(self.current_window, (self.current_pointer[1], self.current_pointer[0]), self.point[0], self.pointer_color, self.point[1])\n \n return self.current_window",
"def get_window(im, center, width):\n if width % 2 == 0:\n i_slice = slice(center[0]-width/2, center[0]+width/2)\n j_slice = slice(center[1]-width/2, center[1]+width/2)\n else:\n i_slice = slice(center[0]-width/2, center[0]+width/2+1)\n j_slice = slice(center[1]-width/2, center[1]+width/2+1)\n \n return im[i_slice, j_slice]",
"def windowed_view(x, window_size):\r\n y = as_strided(x, shape=(x.size - window_size + 1, window_size),\r\n strides=(x.strides[0], x.strides[0]))\r\n return y",
"def convolve_grayscale_padding(images, kernel, padding):\n images = np.pad(images, ((0, 0), (padding[0], padding[0]),\n (padding[1], padding[1])),\n 'constant', constant_values=0)\n rows_im = images.shape[1]\n cols_im = images.shape[2]\n rows_k = kernel.shape[0]\n cols_k = kernel.shape[1]\n new_rows = rows_im - rows_k + 1\n new_cols = cols_im - cols_k + 1\n # print(new_cols, new_rows)\n new = np.ones((images.shape[0], new_rows, new_cols))\n # print(new.shape)\n # print(new)\n for i in range(new.shape[1]):\n for j in range(new.shape[2]):\n ans = images[:, i:rows_k + i, j:cols_k + j] * kernel\n # print(ans.shape)\n # print(ans.T.shape)\n # print(np.sum(ans, axis=2).shape)\n mat = np.sum(np.sum(ans.T, axis=1), axis=0)\n new[:, i, j] = mat\n return new",
"def kuan_filter(img, win_size=3, cu=0.25):\n if win_size < 3: raise Exception('[findpeaks] >ERROR: win size must be at least 3')\n if len(img.shape) > 2: raise Exception('[findpeaks] >ERROR: Image should be 2D. Hint: set the parameter: togray=True')\n if ((win_size % 2) == 0): print('[findpeaks] >It is highly recommended to user odd window sizes. You provided %s, an even number.' % (win_size))\n\n # we process the entire img as float64 to avoid type overflow error\n img = np.float64(img)\n img_filtered = np.zeros_like(img)\n\n N, M = img.shape\n # win_offset = win_size / 2\n win_offset = int(win_size / 2)\n\n for i in np.arange(0, N):\n xleft = i - win_offset\n xright = i + win_offset\n\n if xleft < 0:\n xleft = 0\n if xright >= N:\n xright = N\n\n for j in np.arange(0, M):\n yup = j - win_offset\n ydown = j + win_offset\n\n if yup < 0:\n yup = 0\n if ydown >= M:\n ydown = M\n\n pix_value = img[i, j]\n window = img[xleft:xright, yup:ydown]\n w_t = weighting(window, cu)\n window_mean = window.mean()\n new_pix_value = (pix_value * w_t) + (window_mean * (1.0 - w_t))\n\n if (new_pix_value is None) or np.isnan(new_pix_value):\n new_pix_value = 0\n\n img_filtered[i, j] = round(new_pix_value)\n\n return img_filtered",
"def grey_dilation(input, size=None, footprint=None, structure=None,\n output=None, mode=\"reflect\", cval=0.0, origin=0):\n if size is None and footprint is None and structure is None:\n raise ValueError(\"size, footprint or structure must be specified\")\n if structure is not None:\n structure = numpy.asarray(structure)\n structure = structure[tuple([slice(None, None, -1)] *\n structure.ndim)]\n if footprint is not None:\n footprint = numpy.asarray(footprint)\n footprint = footprint[tuple([slice(None, None, -1)] *\n footprint.ndim)]\n\n input = numpy.asarray(input)\n origin = _ni_support._normalize_sequence(origin, input.ndim)\n for ii in range(len(origin)):\n origin[ii] = -origin[ii]\n if footprint is not None:\n sz = footprint.shape[ii]\n elif structure is not None:\n sz = structure.shape[ii]\n elif numpy.isscalar(size):\n sz = size\n else:\n sz = size[ii]\n if not sz & 1:\n origin[ii] -= 1\n\n return filters._min_or_max_filter(input, size, footprint, structure,\n output, mode, cval, origin, 0)",
"def extract_neighborhood(x, y, arr, radius):\n if x < radius or y < radius or x>=480-radius or y>=640-radius:\n return np.ones((radius*2+1,radius*2+1)).ravel()\n return arr[(x - radius) : (x + radius + 1), (y - radius) : (y + radius + 1)].ravel()",
"def convolve_grayscale_padding(images, kernel, padding):\n m, image_h, image_w = images.shape\n kernel_h, kernel_w = kernel.shape\n\n padding_h, padding_w = padding\n output_h = image_h + (2 * padding_h) - kernel_h + 1\n output_w = image_w + (2 * padding_w) - kernel_w + 1\n\n conv_output = np.zeros((m, output_h, output_w))\n\n img_m = np.arange(0, m)\n images = np.pad(\n images,\n [(0, 0), (padding_h, padding_h), (padding_w, padding_w)],\n mode='constant',\n constant_values=0)\n\n for i in range(output_h):\n for j in range(output_w):\n multiply = images[img_m, i:kernel_h+i, j:kernel_w+j]\n conv_output[img_m, i, j] = np.sum(\n np.multiply(multiply, kernel), axis=(1, 2))\n return conv_output",
"def get_neighbourhood(self, winner):\n\t\tnr_rows = self.W.shape[0]\n\t\tnr_cols = self.W.shape[1]\n\n\t\trow_span = np.arange(winner[0] - self.radius, winner[0] + self.radius + 1)\n\t\tcol_span = np.arange(winner[1] - self.radius, winner[1] + self.radius + 1)\n\n\t\tneighbourhood = []\n\t\tfor i in range((2*self.radius) + 1):\n\t\t\tfor j in range((2*self.radius) + 1):\n\t\t\t\tif((row_span[i] > (nr_rows - 1)) or (row_span[i] < 0) \\\n\t\t\t\t\tor (col_span[j] > (nr_cols - 1)) or (col_span[j] < 0)):\n\t\t\t\t\tcontinue\n\t\t\t\telse: \n\t\t\t\t\tneighbourhood.append([row_span[i], col_span[j]])\n\n\t\treturn neighbourhood",
"def color_thresh(input_img, rgb_thresh=(160, 160, 160),\n low_bound=(75, 130, 130), upp_bound=(255, 255, 255)):\n # Create arrays of zeros same xy size as input_img, but single channel\n nav_img = np.zeros_like(input_img[:, :, 0])\n obs_img = np.zeros_like(input_img[:, :, 0])\n\n # Convert BGR input_img to HSV for rock samples\n hsv_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2HSV)\n\n # Require that each of the R(0), G(1), B(2) pixels be above all three\n # rgb_thresh values such that pixpts_above_thresh will now contain a\n # boolean array with \"True\" where threshold was met\n pixpts_above_thresh = ((input_img[:, :, 0] > rgb_thresh[0])\n & (input_img[:, :, 1] > rgb_thresh[1])\n & (input_img[:, :, 2] > rgb_thresh[2]))\n\n pixpts_nonzero = ((input_img[:, :, 0] > 0)\n & (input_img[:, :, 1] > 0)\n & (input_img[:, :, 2] > 0))\n\n # obstacle pixels are those non-zero pixels where rgb_thresh was not met\n obs_pixpts = np.logical_and(\n pixpts_nonzero, np.logical_not(pixpts_above_thresh)\n )\n # Index the array of zeros with the boolean array and set to 1\n # those pixels where ROI threshold was met\n nav_img[pixpts_above_thresh] = 1\n obs_img[obs_pixpts] = 1\n\n # Threshold the HSV image to get only colors for gold rock samples\n rock_img = cv2.inRange(hsv_img, low_bound, upp_bound)\n\n # Return the threshed binary images\n ThreshedImages = namedtuple('ThreshedImages', 'nav obs rock')\n thresh_imgs = ThreshedImages(nav_img, obs_img, rock_img)\n\n return thresh_imgs",
"def window_for_predict(values: pd.Series, x_size, y_size, step):\r\n last_input = values.size - y_size - 1 # last input right before last output\r\n first_output = x_size # index of first output right after first input\r\n window_x = window(values.loc[0:last_input], x_size, step)\r\n window_y = window(values.loc[first_output:values.size - 1].reset_index(drop=True), y_size, step)\r\n return window_x, window_y",
"def rg_dilation(image,size,origin=0):\n return filters.maximum_filter(image,size,origin=origin)",
"def find_central_window_dimensions(self):\n weight_matrix = self.generate_weights()\n self.write_to_im(weight_matrix, \"weights.png\")\n\n height, width = weight_matrix.shape\n centre_x = int(width / 2)\n centre_y = int(height / 2)\n max_val = weight_matrix[centre_y][centre_x]\n\n startx, stopx = 0, 0\n\n for i in range(width):\n if weight_matrix[centre_y][i] == max_val:\n startx = i\n for j in range(i, width):\n if weight_matrix[centre_y][j] < max_val:\n stopx = j\n break\n break\n\n for i in range(height):\n if weight_matrix[i][centre_y] == max_val:\n starty = i\n for j in range(i, width):\n if weight_matrix[centre_y][j] < max_val:\n stopy = j\n break\n break\n print(\"x1 = \" + str(startx) + \". x2 = \" + str(stopx) + \". y1 = \" + str(starty) + \". y2 = \" + str(stopy))\n\n return [stopx - startx, stopy - starty]",
"def window_sum(x, lag, win_size, win_geom):\n k = create_kernel(n=win_size, geom=win_geom)\n\n #create convolve function with reduced parameters for map_overlap\n pcon = functools.partial(convolve, weights=k)\n \n if isinstance(x, da.core.Array):\n conv_padding = int(win_size//2)\n res = x.map_overlap(pcon, depth={0: conv_padding, 1: conv_padding})\n else:\n res = pcon(x)\n \n #calculate 1/2N part of variogram\n neighbours = num_neighbours(lag)\n \n num_pix = np.sum(k)\n \n factor = 2 * num_pix * neighbours\n\n return res / factor",
"def get_subwindow(im: np.array, src_pos, src_sz, dst_sz,\n avg_chans=(0, 0, 0)) -> np.array:\n\n src_sz = _make_valid_int_pair(src_sz)\n dst_sz = _make_valid_int_pair(dst_sz)\n\n crop_cxywh = np.concatenate([np.array(src_pos), np.array(src_sz)], axis=-1)\n crop_xyxy = cxywh2xyxy(crop_cxywh)\n # warpAffine transform matrix\n M_13 = crop_xyxy[0]\n M_23 = crop_xyxy[1]\n M_11 = (crop_xyxy[2] - M_13) / (dst_sz[0] - 1)\n M_22 = (crop_xyxy[3] - M_23) / (dst_sz[1] - 1)\n mat2x3 = np.array([\n M_11,\n 0,\n M_13,\n 0,\n M_22,\n M_23,\n ]).reshape(2, 3)\n im_patch = cv2.warpAffine(im,\n mat2x3,\n dst_sz,\n flags=(cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP),\n borderMode=cv2.BORDER_CONSTANT,\n borderValue=tuple(map(int, avg_chans)))\n return im_patch",
"def _nd_window(data, filter_function=np.hanning, inversed=False, epsilon=1e-20, rate=2.0):\n for axis, axis_size in enumerate(data.shape):\n # set up shape for numpy broadcasting\n filter_shape = [1, ] * data.ndim\n filter_shape[axis] = axis_size\n window = filter_function(axis_size * rate) + epsilon # Undersampled by ratio\n window = np.power(window, (1.0 / data.ndim))\n length = axis_size\n startx = int(axis_size * rate // 2 - length // 2)\n window = window[startx:startx + length]\n if inversed:\n window = 1 / window\n window = window.reshape(filter_shape)\n\n data *= window\n return data",
"def FindColors(img, min_U, max_U, min_V, max_V, kernel_size):\n img_Area = img.shape[0]*img.shape[1]\n \n (NI_thresh_U, NI_blackAndWhiteImage_U) = cv2.threshold(img[:,:,1], min_U, 255, cv2.THRESH_BINARY)\n (I_thresh_U, I_blackAndWhiteImage_U) = cv2.threshold(img[:,:,1], max_U, 255, cv2.THRESH_BINARY_INV)\n blackAndWhiteImage_U = cv2.bitwise_and(I_blackAndWhiteImage_U, NI_blackAndWhiteImage_U)\n \n (NI_thresh_V, NI_blackAndWhiteImage_V) = cv2.threshold(img[:,:,2], min_V, 255, cv2.THRESH_BINARY)\n (I_thresh_V, I_blackAndWhiteImage_V) = cv2.threshold(img[:,:,2], max_V, 255, cv2.THRESH_BINARY_INV)\n blackAndWhiteImage_V = cv2.bitwise_and(I_blackAndWhiteImage_V, NI_blackAndWhiteImage_V)\n \n # cv2.imshow('U', blackAndWhiteImage_U)\n # cv2.imshow('V', blackAndWhiteImage_V)\n \n blackAndWhiteImage_UV = cv2.bitwise_and(blackAndWhiteImage_V, blackAndWhiteImage_U)\n # cv2.imshow('UV', blackAndWhiteImage_UV)\n \n blackAndWhiteImage_Areas = blackAndWhiteImage_UV.sum()/255\n if (blackAndWhiteImage_Areas == 0):\n ratio = 22\n # print(\"Inf\")\n else:\n ratio = int(round(img_Area/blackAndWhiteImage_Areas))\n # print(ratio)\n if(ratio <= 21):\n #Morphological operations\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(kernel_size,kernel_size))\n closing = cv2.morphologyEx(blackAndWhiteImage_UV, cv2.MORPH_CLOSE, kernel, iterations = 1)\n # cv2.imshow('Closing', closing)\n return(closing)\n else:\n return(np.full((img.shape[0],img.shape[1]),255).astype(np.uint8))",
"def window_size(radius):\n return 2 * radius + 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
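Because the windowing description above hinges on the zeta/padding arithmetic, here is a small illustration of the slicing that get_dataset_for_pixel performs, run on a toy array rather than the Image class itself.

import numpy as np

# For window_size = 11, zeta = (11 - 1) // 2 = 5: five pixels on each side.
gray = np.arange(20 * 20).reshape(20, 20)
i, j, window_size = 10, 10, 11
zeta = (window_size - 1) // 2

window = gray[i - zeta : i + zeta + 1, j - zeta : j + zeta + 1]
assert window.shape == (11, 11)              # 2 * zeta + 1 pixels per axis
features = window.reshape(-1, 1).squeeze()   # flattened X vector
assert features.shape == (121,)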
A lineage between source and target entities.
|
def __init__(__self__, *,
source: 'outputs.GoogleCloudDatacatalogLineageV1EntityReferenceResponse',
target: 'outputs.GoogleCloudDatacatalogLineageV1EntityReferenceResponse'):
pulumi.set(__self__, "source", source)
pulumi.set(__self__, "target", target)
|
[
"def target_lines(self):\n target_lines = self._target_source_lines[:]\n deps_begin, deps_end = self._dependencies_interval\n target_lines[deps_begin:deps_end] = self.dependency_lines()\n if self._provides:\n provides_begin, provides_end = self._provides_interval\n target_lines[provides_begin:provides_end] = self.get_provides_lines()\n return target_lines",
"def add_source_and_line(self, *nodes: List[nodes.Node]):\n location = self.node.source, self.node.line\n for node in nodes:\n node.source, node.line = location\n for child in node.traverse():\n child.source, child.line = location",
"def make_lines(self, old, new):\n c = self.c\n # Calculate all required lines.\n old_private_lines = self.makePrivateLines(old)\n new_private_lines = self.makePrivateLines(new)\n old_public_lines = self.makePublicLines(old_private_lines)\n new_public_lines = self.makePublicLines(new_private_lines)\n expected_private_lines = self.mungePrivateLines(\n new_private_lines, 'node:new', 'node:old')\n # Return the propagated results.\n results = self.shadow_controller.propagate_changed_lines(\n new_public_lines, old_private_lines, self.marker, p=c.p)\n if 0: # To verify that sentinels are as expected.\n print('')\n print(g.callers(1))\n g.printObj(old_private_lines, tag='old_private_lines')\n g.printObj(new_private_lines, tag='new_private_lines')\n g.printObj(old_public_lines, tag='old_public_lines')\n g.printObj(new_public_lines, tag='new_public_lines')\n return results, expected_private_lines",
"def virtual_entities(self) -> Iterator[Union[Line, Arc, Face3d]]:\n for e in virtual_polyline_entities(self):\n e.set_source_of_copy(self)\n yield e",
"def line_walk_edges(self,n1=None,n2=None,v1=None,v2=None,\n include_tangent=False,\n include_coincident=True):\n # this is a bit dicey in terms of numerical robustness - \n # face_in_direction is liable to give bad results when multiple faces are\n # indistinguishable (like a colinear set of points with many degenerate faces\n # basically on top of each other).\n\n # How can this be made more robust?\n # When the query line exactly goes through one or more vertex stuff starts\n # going nuts.\n # So is it possible to handle this more intelligently?\n # there are 3 possibilities for intersecting edges:\n # (1) intersect only at an end point, i.e. endpoint lies on query line\n # (2) intersect in interior of edge - one end point on one side, other endpoint\n # on the other side of the query line\n # (3) edge is coincident with query line\n\n\n # so for a first cut - make sure that we aren't just directly connected:\n if (n2 is not None) and (n1 is not None) and (n2 in self.delaunay_neighbors(n1)):\n return []\n\n if v1 is None:\n v1 = self.vh[n1]\n if v2 is None:\n v2 = self.vh[n2]\n\n # Get the points from the vertices, not self.points, because in some cases\n # (adjust_move_node) we may be probing\n p1 = np.array([ v1.point().x(), v1.point().y()] )\n p2 = np.array([ v2.point().x(), v2.point().y()] )\n\n # print \"Walking the line: \",p1,p2\n\n vec = p2 - p1\n unit_vec = vec / norm(vec)\n\n pnt = p1 \n\n # NB: this can be None - though not sure whether the context can\n # ensure that it never would be.\n f1 = self.face_in_direction(v1,vec)\n f2 = self.face_in_direction(v2,-vec)\n\n # do the search:\n f_trav = f1\n edges = []\n while 1:\n # print \"line_walk_edges: traversing face:\"\n # print [f_trav.vertex(i).point() for i in [0,1,2]]\n\n # Stop condition: we're in a face containing the final vertex\n # check the vertices directly, rather than the face\n still_close = 0\n for i in range(3):\n if f_trav.vertex(i) == v2:\n return edges\n\n if not still_close:\n # Check to see if this vertex is beyond the vertex of interest\n vertex_i_pnt = np.array( [f_trav.vertex(i).point().x(),f_trav.vertex(i).point().y()] )\n if norm(vec) > np.dot( vertex_i_pnt - p1, unit_vec):\n still_close = 1\n\n if not still_close:\n # We didn't find any vertices of this face that were as close to where we started\n # as the destination was, so we must have passed it.\n print(\"BAILING: n1=%s n2=%s v1=%s v2=%s\"%(n1,n2,v1,v2))\n raise Exception(\"Yikes - line_walk_edges exposed its numerical issues. We traversed too far.\")\n return edges\n\n edge,new_face = self.next_face(f_trav,pnt,vec)\n\n edges.append(edge)\n\n f_trav = new_face\n return edges",
"def _walk_line(p0, p1):\n # unpack the point tuples\n x0, y0 = p0\n x1, y1 = p1\n\n dx, dy = x1 - x0, y1 - y0\n yi = 1\n if dy < 0:\n yi = -1\n dy = -dy\n\n D = 2 * dy - dx\n x = np.arange(x0, x1 + 1, dtype=int).T\n y = np.zeros((len(x),), dtype=int)\n\n yy = y0\n for i in np.arange(len(x)):\n y[i] = yy\n if D > 0:\n yy = yy + yi\n D = D - 2 * dx\n\n D = D + 2 * dy\n\n # sort by major axis, and index the cells\n xI = np.argsort(x)\n x = x[xI]\n y = y[xI]\n\n return x, y",
"def copy_alembic_data(cls, source=None, target=None):\n selection = pm.ls(sl=1)\n if not source or not target:\n source = selection[0]\n target = selection[1]\n\n #\n # Move Alembic Data From Source To Target\n #\n # selection = pm.ls(sl=1)\n #\n # source = selection[0]\n # target = selection[1]\n\n source_nodes = source.listRelatives(ad=1, type=(pm.nt.Mesh, pm.nt.NurbsSurface))\n target_nodes = target.listRelatives(ad=1, type=(pm.nt.Mesh, pm.nt.NurbsSurface))\n\n source_node_names = []\n target_node_names = []\n\n for node in source_nodes:\n name = node.name().split(\":\")[-1].split(\"|\")[-1]\n source_node_names.append(name)\n\n for node in target_nodes:\n name = node.name().split(\":\")[-1].split(\"|\")[-1]\n target_node_names.append(name)\n\n lut = []\n\n for i, target_node in enumerate(target_nodes):\n target_node_name = target_node_names[i]\n try:\n index = source_node_names.index(target_node_name)\n except ValueError:\n pass\n else:\n lut.append((source_nodes[index], target_nodes[i]))\n\n for source_node, target_node in lut:\n if isinstance(source_node, pm.nt.Mesh):\n in_attr_name = \"inMesh\"\n out_attr_name = \"outMesh\"\n else:\n in_attr_name = \"create\"\n out_attr_name = \"worldSpace\"\n\n conns = source_node.attr(in_attr_name).inputs(p=1)\n if conns:\n for conn in conns:\n if isinstance(conn.node(), pm.nt.AlembicNode):\n conn >> target_node.attr(in_attr_name)\n break\n else:\n # no connection\n # just connect the shape itself\n source_node.attr(out_attr_name) >> target_node.attr(in_attr_name)",
"def linje(x1, y1, x2, y2): \n pu() # pen up - rita inte\n goto(x1, y1) # flytta markören\n pd() # pen down - rita \n goto(x2, y2) # flytta markören så att en linje ritas",
"def line_origins(origins):\n oiter = iter(origins)\n prev = next(oiter)\n\n for cur in oiter:\n try:\n refs = cur.extra_references\n except AttributeError:\n cur.extra_references = {prev}\n else:\n refs.add(prev)\n prev = cur",
"def add_edges_from(self, ebunch):\n for (source, target, new_attr) in ebunch:\n self.add_edge(source, target, new_attr)",
"def drawLine(tortle, x_start, y_start, x_end, y_end):\n tortle.up()\n tortle.goto(x_start, y_start)\n tortle.down()\n tortle.goto(x_end, y_end)",
"def __model_add_line (self, pos1, pos2, line):\n i1 = 0 # index for pos1\n i2 = 0 # index for pos2\n i2_prev = 0 # index for pos2 in previous pos1\n # [pos1-self.peaksize,pos1+self.peaksize]\n # region\n i1_max = len(pos1)\n i2_max = len(pos2)\n last_p2 = -1\n flag_find_overlap = False\n \n while i1<i1_max and i2<i2_max:\n p1 = pos1[i1]\n p2 = pos2[i2]\n if p1-self.peaksize > p2: # move pos2\n i2 += 1\n elif p1+self.peaksize < p2: # move pos1\n i1 += 1 \n i2 = i2_prev # search minus peaks from previous index\n flag_find_overlap = False\n else: # overlap!\n if not flag_find_overlap:\n flag_find_overlap = True\n i2_prev = i2 # only the first index is recorded\n # project\n for i in range(p2-p1+self.peaksize-self.tsize/2,p2-p1+self.peaksize+self.tsize/2):\n if i>=0 and i<len(line):\n line[i]+=1\n i2+=1\n return line",
"def connect_inline(target, source):\n dependents = source.dependent(nuke.INPUTS | nuke.HIDDEN_INPUTS)\n target.setInput(0, source)\n\n if target.maxOutputs():\n for node in dependents:\n print node.fullName()\n for i in xrange(node.inputs()):\n print \"setting input {0}\".format(i)\n print node.input(i)\n if node.input(i) == source:\n print \"setting that input\"\n node.setInput(i, target)",
"def _assign_links(self, dest_set, source_set, search_range):\n spl, dpl = [], []\n diag = self.diag\n # while there are particles left to link, link\n while len(dest_set) > 0:\n p = dest_set.pop()\n bc_c = len(p.back_cands)\n # no backwards candidates\n if bc_c == 0:\n # particle will get a new track\n dpl.append(p)\n spl.append(None)\n if diag:\n p.diag['search_range'] = search_range\n continue # do next dest_set particle\n if bc_c == 1:\n # one backwards candidate\n b_c_p = p.back_cands[0]\n # and only one forward candidate\n b_c_p_0 = b_c_p[0]\n if len(b_c_p_0.forward_cands) == 1:\n # schedule these particles for linking\n dpl.append(p)\n spl.append(b_c_p_0)\n source_set.discard(b_c_p_0)\n if diag:\n p.diag['search_range'] = search_range\n continue # do next dest_set particle\n # we need to generate the sub networks\n done_flg = False\n s_sn = set() # source sub net\n d_sn = set() # destination sub net\n # add working particle to destination sub-net\n d_sn.add(p)\n while not done_flg:\n d_sn_sz = len(d_sn)\n s_sn_sz = len(s_sn)\n for dp in d_sn:\n for c_sp in dp.back_cands:\n s_sn.add(c_sp[0])\n source_set.discard(c_sp[0])\n for sp in s_sn:\n for c_dp in sp.forward_cands:\n d_sn.add(c_dp[0])\n dest_set.discard(c_dp[0])\n done_flg = (len(d_sn) == d_sn_sz) and (len(s_sn) == s_sn_sz)\n\n # sort and add in penalty for not linking\n for _s in s_sn:\n # If we end up having to recurse for adaptive search, this final\n # element will be dropped and re-added, because search_range is\n # decreasing.\n _s.forward_cands.sort(key=lambda x: x[1])\n _s.forward_cands.append((None, search_range))\n\n try:\n sn_spl, sn_dpl = self.subnet_linker(s_sn, len(d_sn), search_range,\n max_size=self.max_subnet_size,\n diag=diag)\n\n if diag:\n # Record information about this invocation of the subnet linker.\n for dp in d_sn:\n dp.diag['subnet'] = self.subnet_counter\n dp.diag['subnet_size'] = len(s_sn)\n dp.diag['search_range'] = search_range\n for dp in d_sn - set(sn_dpl):\n # Unclaimed destination particle in subnet\n sn_spl.append(None)\n sn_dpl.append(dp)\n self.subnet_counter += 1\n except SubnetOversizeException:\n if self.adaptive_stop is None:\n raise\n # Reduce search_range\n new_range = search_range * self.adaptive_step\n if search_range <= self.adaptive_stop:\n # adaptive_stop is the search_range below which linking\n # is presumed invalid. So we just give up.\n raise\n\n # Prune the candidate lists of s_sn, d_sn; then recurse.\n for sp in s_sn:\n sp.forward_cands = [fc for fc in sp.forward_cands\n if fc[1] <= new_range]\n for dp in d_sn:\n dp.back_cands = [bc for bc in dp.back_cands\n if bc[1] <= new_range]\n sn_spl, sn_dpl = self._assign_links(\n d_sn, s_sn, new_range)\n\n spl.extend(sn_spl)\n dpl.extend(sn_dpl)\n\n # Leftovers\n for pp in source_set:\n spl.append(pp)\n dpl.append(None)\n\n return spl, dpl",
"def add_source(self, source):\n agents = self.nodes(type=Agent)\n for agent in agents:\n source.connect(whom=agent)",
"def tile_line(self, start_tile, end_tile):\n\t\tdef iround(x):\n\t\t\t\"\"\"iround(number) -> integer\n\t\t\tRound a number to the nearest integer.\"\"\"\n\t\t\treturn int(round(x) - .5) + (x > 0)\n\t\tif start_tile.in_range(end_tile, 1):\n\t\t\treturn [end_tile]\n\t\tline_tiles = []\n\n\t\tx_dist = end_tile.x - start_tile.x\n\t\ty_dist = end_tile.y - start_tile.y\n\n\t\tif abs(x_dist) > abs(y_dist): # x is the independent variable\n\t\t\tslope = float( float(y_dist)/float(x_dist) )\n\t\t\tincrement = 1\n\t\t\tif start_tile.x > end_tile.x:\n\t\t\t\tincrement = -1\n\t\t\tcurrent_x = start_tile.x + increment\n\t\t\tstart_y = start_tile.y\n\t\t\twhile current_x != end_tile.x:\n\t\t\t\tx_off = current_x - start_tile.x\n\t\t\t\tcurrent_y = iround(float(x_off)*slope) + start_y\n\t\t\t\tline_tiles.append(self.tile_at(current_x, current_y))\n\t\t\t\tcurrent_x += increment \n\t\telse: # y is the independent variable\n\t\t\tslope = float( float(x_dist)/float(y_dist) )\n\t\t\tincrement = 1\n\t\t\tif start_tile.y > end_tile.y:\n\t\t\t\tincrement = -1\n\t\t\tcurrent_y = start_tile.y + increment\n\t\t\tstart_x = start_tile.x\n\t\t\twhile current_y != end_tile.y:\n\t\t\t\ty_off = current_y - start_tile.y\n\n\t\t\t\tcurrent_x = iround(float(y_off)*slope) + start_x\n\t\t\t\tline_tiles.append(self.tile_at(current_x, current_y))\n\t\t\t\tcurrent_y += increment \n\t\tline_tiles.append(end_tile)\n\t\treturn line_tiles",
"def make_links(self, node0, node1):\r\n Link(node0, node1)\r\n Link(node1, node0)",
"def fused_with(self, other):\n seg = LineSegment(\n a=self.a,\n b=other.b,\n width=self.width,\n color=self.color,\n start_slant=self.start_slant,\n end_slant=other.end_slant,\n )\n seg.a_left = self.a_left\n seg.a_right = self.a_right\n seg.b_left = other.b_left\n seg.b_right = other.b_right\n return seg",
"def trimJoin_Coro(self):\n offsetLines = []\n moveEnd = yield\n moveStart = yield\n while not(moveStart is None):\n _, point = moveEnd.segmentsIntersect(moveStart, c.ALLOW_PROJECTION)\n moveEnd = l.Line(moveEnd.start, point, moveEnd)\n moveStart = l.Line(point, moveStart.end, moveStart)\n offsetLines.append(moveEnd)\n moveEnd = moveStart\n moveStart = yield\n _, point = moveEnd.segmentsIntersect(offsetLines[0], c.ALLOW_PROJECTION)\n moveEnd = l.Line(moveEnd.start, point, moveEnd)\n offsetLines.append(moveEnd)\n offsetLines[0] = l.Line(point, offsetLines[0].end, offsetLines[0])\n yield offsetLines"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Type of the source. Use of a source_type other than `CUSTOM` for process creation or updating is highly discouraged, and may be restricted in the future without notice.
|
def source_type(self) -> str:
return pulumi.get(self, "source_type")
|
[
"def get_type(self, source: Source):\n return source.type_class",
"def source_type(self) -> str:\r\n return SOURCE_TYPE_GPS",
"def SetSourceType(self, source_type):\n if self.source_type is None:\n self.source_type = source_type",
"def get_source_type(source: str) -> str:\n parsed = urlparse(source)\n if parsed.scheme in [\"http\", \"https\"]:\n return \"url\"\n else:\n return \"filepath\"",
"def source_type_name(self, source_type_name):\n\n self._source_type_name = source_type_name",
"def IsSourceTypeFile(self):\n if not self.source_type:\n return None\n\n return self.source_type == definitions.SOURCE_TYPE_FILE",
"def source_icmp_type(self, source_icmp_type):\n\n self._source_icmp_type = source_icmp_type",
"def source_entity_type(self, source_entity_type):\n if self.local_vars_configuration.client_side_validation and source_entity_type is None: # noqa: E501\n raise ValueError(\"Invalid value for `source_entity_type`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n source_entity_type is not None and len(source_entity_type) < 1):\n raise ValueError(\"Invalid value for `source_entity_type`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._source_entity_type = source_entity_type",
"def get_sources_by_type(self, source_type):\r\n\t\tif not source_type:\r\n\t\t\treturn self.sources\r\n\t\telse:\r\n\t\t\tmeth_name = \"get_%s_sources\" % source_type\r\n\t\t\treturn getattr(self, meth_name)()",
"def source(self, source):\n allowed_values = [\"None\", \"Embedded\", \"File\", \"Template\"]\n if source not in allowed_values:\n raise ValueError(\n \"Invalid value for `source` ({0}), must be one of {1}\"\n .format(source, allowed_values)\n )\n\n self._source = source",
"def __init__(self,\n source_type: str,\n *,\n git: 'ExternalSourceGit' = None) -> None:\n self.source_type = source_type\n self.git = git",
"def inputSchemaType(self):\n return self.sourceType + \"_schema\"",
"def value_type(self):\n return self.identifier.source",
"def get_service_type_from_source_type(source_type: str) -> ServiceType:\n\n return _get_service_type_from(_clean(source_type))",
"def is_source(self) -> bool:\n return self.kind is SlotType.SOURCE",
"def target_type(self) -> pulumi.Input['ResourceSpecificLoggingTargetType']:\n return pulumi.get(self, \"target_type\")",
"def create_app_source(cls, request, pipeline):\n\n requested_source = request.get(\"source\", {})\n requested_source_type = requested_source.get(\"type\", None)\n requested_source_class = requested_source.get(\"class\", None)\n source_class = None\n\n if ((requested_source_type == \"application\") and requested_source_class):\n for source_class in AppSource.__subclasses__():\n if (source_class.__name__ == requested_source_class):\n break\n else:\n source_class = None\n\n if source_class:\n try:\n return source_class(request, pipeline)\n except Exception as error:\n raise Exception(\"Error Creating App Source: {},\"\n \"Exception: {} {}\".format(requested_source_class,\n type(error),\n error)) from error\n return None",
"def type(self):\n return self.recipe_settings[\"type\"]",
"def _source(self) -> Source:\n pass",
"def run_type(self) -> Optional[str]:\n return pulumi.get(self, \"run_type\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests whether ``put_referenced_message_into`` works as intended.
|
def test__put_referenced_message_into():
message_id_0 = 202305010029
channel_id_0 = 202305010030
guild_id_0 = 202305010031
content_0 = 'Rot in hell'
message_id_1 = 202305010032
channel_id_1 = 202305010033
guild_id_1 = 202305010034
content_1 = 'Afraid'
message_0 = Message.precreate(
message_id_0,
channel_id = channel_id_0,
guild_id = guild_id_0,
content = content_0,
)
message_1 = Message.precreate(
message_id_1,
channel_id = channel_id_1,
guild_id = guild_id_1,
content = content_1,
referenced_message = message_0
)
for input_value, defaults, recursive, message_type, expected_output in (
(
None,
False,
False,
MessageType.default,
{},
), (
None,
True,
True,
MessageType.default,
{},
), (
message_0,
False,
False,
MessageType.default,
{'message_reference': message_0.to_message_reference_data()},
), (
message_0,
False,
True,
MessageType.inline_reply,
{
'message_reference': message_0.to_message_reference_data(),
'referenced_message': message_0.to_data(include_internals = True, recursive = True),
},
), (
message_1,
False,
False,
MessageType.default,
{'message_reference': message_1.to_message_reference_data()},
), (
message_1,
False,
False,
MessageType.inline_reply,
{'message_reference': message_1.to_message_reference_data()},
), (
message_1,
False,
True,
MessageType.inline_reply,
{
'message_reference': message_1.to_message_reference_data(),
'referenced_message': message_1.to_data(include_internals = True, recursive = True),
},
)
):
output = put_referenced_message_into(
input_value, {}, defaults, recursive = recursive, message_type = message_type
)
vampytest.assert_eq(output, expected_output)
|
[
"def is_referenced(target):",
"def test_publish_message(self):\n pass",
"def test_fk_ref_preservation(self):\n self.login()\n article = TestArticle(\n slug = 'article',\n title = 'Title',\n status = PUBLISHED_STATE\n )\n article.save()\n ref_thing = FKReferencingThing(ref=article)\n ref_thing.save()\n # Make a draft copy.\n response = self.client.post(\n self.get_admin_url('copy', article),\n {'id': article.pk}\n )\n draft_copy = TestArticle.objects.draft_copies()[0]\n # Merge it back.\n response = self.client.post(\n self.get_admin_url('merge', draft_copy),\n {'id': article.pk}\n )\n # Verify refs are preserved.\n articles = TestArticle.objects.all()\n ref_thing = FKReferencingThing.objects.all()[0]\n self.assertEqual(len(articles), 1)\n self.assertEqual(articles[0], ref_thing.ref)",
"def test_m2m_ref_preservation(self):\n \n self.login()\n article = TestArticle(\n slug = 'article',\n title = 'Title',\n status = PUBLISHED_STATE\n )\n article.save()\n ref_thing = M2MReferencingThing()\n ref_thing.save()\n ref_thing.ref.add(article)\n self.assertTrue(article.m2mreferencingthing_set.all()[0] == ref_thing)\n # Make a draft copy.\n response = self.client.post(\n self.get_admin_url('copy', article),\n {'id': article.pk}\n )\n draft_copy = TestArticle.objects.draft_copies()[0]\n self.assertFalse(bool(draft_copy.m2mreferencingthing_set.all()))\n # Merge it back.\n response = self.client.post(\n self.get_admin_url('merge', draft_copy),\n {'id': article.pk}\n )\n # Verify refs are preserved.\n articles = TestArticle.objects.all()\n ref_thing = M2MReferencingThing.objects.all()[0]\n self.assertEqual(len(articles), 1)\n self.assertEqual(articles[0], ref_thing.ref.all()[0])",
"async def test_duplicate_forwardmsg_caching(self):\n with patch_config_options({\"global.minCachedMessageSize\": 0}):\n await self.runtime.start()\n\n client = MockSessionClient()\n session_id = self.runtime.create_session(\n client=client, user_info=MagicMock()\n )\n\n msg1 = create_dataframe_msg([1, 2, 3], 1)\n\n # Send the message, and read it back. It will not have been cached.\n self.enqueue_forward_msg(session_id, msg1)\n await self.tick_runtime_loop()\n\n uncached = client.forward_msgs.pop()\n self.assertEqual(\"delta\", uncached.WhichOneof(\"type\"))\n\n # Send an equivalent message. This time, it should be cached,\n # and a \"hash_reference\" message should be received instead.\n msg2 = create_dataframe_msg([1, 2, 3], 123)\n self.enqueue_forward_msg(session_id, msg2)\n await self.tick_runtime_loop()\n\n cached = client.forward_msgs.pop()\n self.assertEqual(\"ref_hash\", cached.WhichOneof(\"type\"))\n # We should have the *hash* of msg1 and msg2:\n self.assertEqual(msg1.hash, cached.ref_hash)\n self.assertEqual(msg2.hash, cached.ref_hash)\n # And the same *metadata* as msg2:\n self.assertEqual(msg2.metadata, cached.metadata)",
"def test_put_merges_with_payload(self, call_mock, merge_mock):\n client.put(self.req_ctx, self.url, self.payload)\n merge_mock.assert_called_once_with(mock.ANY, mock.ANY, self.payload)",
"def test_generic_ref_preservation(self):\n self.login()\n article = TestArticle(\n slug = 'article',\n title = 'Title',\n status = PUBLISHED_STATE\n )\n article.save()\n ref_thing = GenericReferencingThing(content_object=article)\n ref_thing.save()\n # Make a draft copy.\n response = self.client.post(\n self.get_admin_url('copy', article),\n {'id': article.pk}\n )\n draft_copy = TestArticle.objects.draft_copies()[0]\n # Merge it back.\n response = self.client.post(\n self.get_admin_url('merge', draft_copy),\n {'id': article.pk}\n )\n # Verify refs are preserved.\n articles = TestArticle.objects.all()\n self.assertEqual(len(articles), 1)\n ref_thing = GenericReferencingThing.objects.all()[0]\n self.assertEqual(ref_thing.content_object, articles[0])",
"def test_msg_relationship(self):\n\n self.assertEqual(self.u, self.msg.user)\n self.assertEqual(len(self.u.messages), 1)\n\n msg2 = Message(\n text=\"Some random text again\",\n )\n\n self.u.messages.append(msg2)\n db.session.commit()\n\n # Can you test that self.u.messages is a list containing self.msg and msg2?\n self.assertEqual(len(self.u.messages), 2)\n self.assertIn(self.msg, self.u.messages)\n self.assertIn(msg2, self.u.messages)",
"def test_update_ref():",
"def test_add_jobs_message_idempotency(self):\n message_count = Message.objects.count()\n TRANSACTION_BODY['blockNumber'] += 1 # New unique message\n success_message = {\"type\": \"ETH_TRANSACTION\", \"body\": TRANSACTION_BODY}\n\n async_job = self.shipments[0].asyncjob_set.all()[:1].get()\n url = reverse('job-message', kwargs={'version': 'v1', 'pk': async_job.id})\n\n response = self.client.post(url, json.dumps(success_message), content_type=\"application/json\",\n X_NGINX_SOURCE='internal', X_SSL_CLIENT_VERIFY='SUCCESS',\n X_SSL_CLIENT_DN='/CN=engine.test-internal')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n message_count += 1\n assert Message.objects.count() == message_count\n\n # Send the same message again\n response = self.client.post(url, json.dumps(success_message), content_type=\"application/json\",\n X_NGINX_SOURCE='internal', X_SSL_CLIENT_VERIFY='SUCCESS',\n X_SSL_CLIENT_DN='/CN=engine.test-internal')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n assert Message.objects.count() == message_count",
"def test_put_merges_no_payload(self, call_mock, merge_mock):\n client.put(self.req_ctx, self.url)\n merge_mock.assert_called_once_with(mock.ANY, mock.ANY, None)",
"def test_forwarder(self, message=None):\n pass",
"def test_newReference(self):\n yield self._addAttachment(u\"home1\", \"calendar1\", \"1.4.ics\", \"1.4\", \"attach_1_4.txt\")\n\n txn = self._sqlCalendarStore.newTransaction()\n\n home = (yield txn.calendarHomeWithUID(u\"home1\"))\n calendar = (yield home.calendarWithName(\"calendar1\"))\n event4 = (yield calendar.calendarObjectWithName(\"1.4.ics\"))\n event5 = (yield calendar.calendarObjectWithName(\"1.5.ics\"))\n\n dattachment = (yield DropBoxAttachment.load(txn, \"1.4.dropbox\", \"attach_1_4.txt\"))\n self.assertNotEqual(dattachment, None)\n self.assertTrue(dattachment._path.exists())\n mattachment = (yield dattachment.convertToManaged())\n self.assertNotEqual(mattachment, None)\n self.assertNotEqual(mattachment.managedID(), None)\n\n mnew4 = (yield mattachment.newReference(event4._resourceID))\n self.assertNotEqual(mnew4, None)\n self.assertEqual(mnew4.managedID(), mattachment.managedID())\n\n mnew5 = (yield mattachment.newReference(event5._resourceID))\n self.assertNotEqual(mnew5, None)\n self.assertEqual(mnew5.managedID(), mattachment.managedID())\n\n yield txn.commit()\n\n # Managed attachment present\n txn = self._sqlCalendarStore.newTransaction()\n mtest4 = (yield ManagedAttachment.load(txn, event4._resourceID, mnew4.managedID()))\n self.assertNotEqual(mtest4, None)\n self.assertTrue(mtest4.isManaged())\n self.assertEqual(mtest4._objectResourceID, event4._resourceID)\n yield txn.commit()\n\n # Managed attachment present\n txn = self._sqlCalendarStore.newTransaction()\n mtest5 = (yield ManagedAttachment.load(txn, event5._resourceID, mnew5.managedID()))\n self.assertNotEqual(mtest5, None)\n self.assertTrue(mtest5.isManaged())\n self.assertEqual(mtest5._objectResourceID, event5._resourceID)\n yield txn.commit()",
"def test_upload_reference_is_working_properly_for_image_files(self):\n # upload an image file as a reference\n with open(self.test_image_path) as f:\n link = self.test_media_manager.upload_reference(\n self.test_task2, f, 'test_image.png'\n )\n\n # now expect the return_val to be a Link instance\n self.assertIsInstance(link, Link)\n\n # check if it is in the given tasks references\n self.assertIn(link, self.test_task2.references)\n\n # expect the Link full_path to be:\n # Task.path/References/Stalker_Pyramid/\n self.assertEqual(\n os.path.dirname(link.full_path),\n os.path.join(\n self.test_task2.path,\n 'References/Stalker_Pyramid'\n )\n )\n\n # and the file exists there\n self.assertTrue(\n os.path.exists(\n os.path.join(\n self.test_repository.path,\n link.full_path\n )\n )\n )\n\n # expect the Link.thumbnail.full_path to be in\n # Task.path/References/Stalker_Pyramid/ForWeb/\n self.assertEqual(\n os.path.dirname(link.thumbnail.full_path),\n os.path.join(\n self.test_task2.path,\n 'References/Stalker_Pyramid/ForWeb'\n )\n )\n\n # and the file exists there\n self.assertTrue(\n os.path.exists(\n os.path.join(\n self.test_repository.path,\n link.thumbnail.full_path\n )\n )\n )\n\n # and expect the Link.thumbnail.thumbnail.full_path to be in\n # Task.path/References/Stalker_Pyramid/Thumbnail/\n self.assertEqual(\n os.path.dirname(link.thumbnail.thumbnail.full_path),\n os.path.join(\n self.test_task2.path,\n 'References/Stalker_Pyramid/Thumbnail'\n )\n )\n\n # and the file exists there\n self.assertTrue(\n os.path.exists(\n os.path.join(\n self.test_repository.path,\n link.thumbnail.thumbnail.full_path\n )\n )\n )",
"def _handle_message(self, generic_message):\n info = self._sync_callbacks.get( # pylint: disable=protected-access\n generic_message.correlation_id\n )\n if info is not None:\n info._queue.put(generic_message) # pylint: disable=protected-access\n elif self._local_message_queue is not None: # pylint: disable=protected-access\n self._local_message_queue.put(generic_message) # pylint: disable=protected-access\n else:\n self._process_message(generic_message) # pylint: disable=protected-access",
"def test_assign_existing_reference(self):\n with pulse.build() as sched_x1:\n pulse.play(pulse.Constant(100, 0.1), pulse.DriveChannel(0))\n\n with pulse.build() as sched_y1:\n pulse.play(pulse.Constant(100, 0.2), pulse.DriveChannel(0))\n\n with pulse.build() as sched_z1:\n pulse.call(sched_x1, name=\"conflict_name\")\n\n with self.assertRaises(pulse.exceptions.PulseError):\n sched_z1.assign_references({(\"conflict_name\",): sched_y1})",
"async def test_forwardmsg_hashing(self):\n await self.runtime.start()\n\n client = MockSessionClient()\n session_id = self.runtime.create_session(client=client, user_info=MagicMock())\n\n # Create a message and ensure its hash is unset; we're testing\n # that _send_message adds the hash before it goes out.\n msg = create_dataframe_msg([1, 2, 3])\n msg.ClearField(\"hash\")\n self.enqueue_forward_msg(session_id, msg)\n await self.tick_runtime_loop()\n\n received = client.forward_msgs.pop()\n self.assertEqual(populate_hash_if_needed(msg), received.hash)",
"def at_message_send(self, message, to_object):\r\n pass",
"def publish_message(self, message, queue):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Will navigate to the home screen of the device
|
def navigate_home_mobile(self):
if config.desiredCapabilities_mobile['platformName'] == 'android':
self.mob_conn.press_keycode(187)
elif config.desiredCapabilities_mobile['platformName'] == 'ios':
pressHome = {"name": "home"}
# self.mob_conn.execute_script("mobile: pressButton", pressHome)
self.mob_conn.execute_script("seetest:client.deviceAction(\"Home\")")
return self
|
[
"def click_home(self):\n self.find_element_by_xpath(self.home_xpath).click()",
"def set_home(self):\n print(\"Setting home position.\")\n self._command('2H HERE X Y')",
"def go_home(self):\n if self.home_url is not None:\n self.set_url(self.home_url)",
"def _returnhome(self) -> None:\n call([self.adbpath, \"shell\", \"input\", \"keyevent\", Adbkeycodes.KEYCODE_HOME])",
"def home(self):\n self.window.show_view(Menu())",
"def home_callback(self):\n return self.send_command('/keypress/home')",
"def __load_app_first_screen_welcome(self):\n self.fc.reset_app()\n self.driver.wdvr.start_activity(self.pkg_name, LAUNCH_ACTIVITY.SMART)\n self.driver.wait_for_context(WEBVIEW_CONTEXT.SMART, timeout=20)\n self.web_welcome.verify_welcome_screen()",
"def nav_home(self):\n self.br.open(\"http://news.ycombinator.com/\")",
"def go_to_main(self):\n\n self.model.current_screen = \"Main\"",
"def _bot_go_home():\n bot.ts3conn.clientmove(bots_home, int(bot.ts3conn.whoami()[\"client_id\"])) #bot.default_channel",
"def open_recent_apps(self):\n\n if config.desiredCapabilities_mobile['platformName'] == 'android':\n self.mob_conn.press_keycode(187)\n # elif config.desiredCapabilities_mobile['platformName'] == 'ios':\n # params = {\"element\": element, \"name\": \"back\"}\n # self.mob_conn.execute_script(\"mobile: swipe\", params)",
"def open_menu_mobile(self):\n\n if config.desiredCapabilities_mobile['platformName'] == 'android':\n self.mob_conn.press_keycode(82)\n # elif config.desiredCapabilities_mobile['platformName'] == 'ios':\n # pressBack = {\"name\": \"back\"}\n # self.mob_conn.execute_script(\"mobile: pressButton\", pressBack)\n return self",
"def come_home(self, device):\n self.devices_home.append(device)",
"def refresh_home(self):\r\n\t\tself.home.set_view(self._app_data.users.my_jobs_at_a_glance)",
"def navigate():\n FusionUIBase.navigate_to_section(SectionType.SETTINGS)",
"def _open_homepage(self):\r\n if(self.web_browser_name == \"ie\"):\r\n self.driver = webdriver.Ie()\r\n elif(self.web_browser_name == \"chrome\"):\r\n self.driver = webdriver.Chrome()\r\n elif(self.web_browser_name == \"ff\"):\r\n self.driver = webdriver.Firefox()\r\n \r\n self.driver.maximize_window()\r\n self.driver.get(self.myrta_homepage)\r\n time.sleep(self.action_wait_time)\r\n booking_btn = self.driver.find_element_by_link_text('Manage booking');\r\n booking_btn.click();\r\n time.sleep(self.action_wait_time)",
"async def site_home_alias(self, ctx: Context) -> None:\n await self.invoke(ctx, \"site home\")",
"def back_to_home_gui(self):\n self.forget_non_home_gui()\n self.seeds_path.set(\"\")\n self.initilize_gui()",
"def press_home():\n\tif pygame.key.get_pressed()[pygame.K_HOME]:\n\t\treturn True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Will go to the previous screen on the mobile device
|
def go_back_mobile(self):
if config.desiredCapabilities_mobile['platformName'] == 'android':
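            # keycode 4 is the Android hardware back button (KEYCODE_BACK)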
self.mob_conn.press_keycode(4)
elif config.desiredCapabilities_mobile['platformName'] == 'ios':
self.mob_conn.back()
return self
|
[
"def go_previous_page(self):\n\n self.webView.back()",
"def go_back():\n pyautogui.moveTo(100, 200)\n pyautogui.click()",
"def to_prev_screen(self) -> None:\n if self.game_mode == 'comp' and self.num_players == 2:\n self.reset_num_screen()\n self.parent.current = 'menu'\n elif self.game_mode == 'game' or (self.game_mode == 'comp' and self.num_players > 2):\n self.reset_num_screen()\n self.parent.current = 'number'\n elif self.game_mode == 'solo':\n self.reset_goal_screen()\n self.parent.current = 'goal'\n self.clear_widgets(self.children[:-2])",
"def back( self ):\n super( ProbesScreen, self ).back()\n\n #self._current_option = self._current_option - 1\n #if self._current_option < 0:\n #self._current_option = 0",
"def back(self):\n self.driver.back()",
"def back(self):\n self._command(\"goBack\")",
"def cmd_to_prev_screen(self):\r\n return self.toScreen(\r\n (self.screens.index(self.currentScreen) - 1) % len(self.screens)\r\n )",
"def turn_to_previous_page(self):\r\n if service.help_pages_img.index(self.__current_page) == 0:\r\n pass\r\n else:\r\n self.__current_page = service.help_pages_img[service.help_pages_img.index(self.__current_page) - 1]\r\n change_img(self, self.__current_page)",
"def go_back(self, event):\n self.controller.show_frame(TkMainMenu)",
"def media_previous_track(self) -> None:\n self.send_keypress(KEY_BACKWARD)",
"def media_previous_track(self) -> None:\n self.send_command([\"button\", \"rew\"])",
"def goto_prevpage(self):\n if self.n_page > 0:\n self.n_page -= 1\n self.current_page = self.document.get_page(self.n_page)\n self.Refresh()\n\n else:\n print 'Already at first page'\n return",
"def history_back(state):\n\n state.nav.undo_step()",
"def moveToPrevious(self):\n pass",
"def backWidget(self):\n self.setWidget('start')",
"def __switch_to_prev_page(self):\n if self._ChannelStripController__can_switch_to_prev_page():\n if (self._ChannelStripController__assignment_mode == CSM_PLUGINS):\n self._ChannelStripController__plugin_mode_offsets[self._ChannelStripController__plugin_mode] -= len(self._ChannelStripController__channel_strips)\n if (self._ChannelStripController__plugin_mode == PCM_DEVICES):\n self._ChannelStripController__update_vpot_leds_in_plugins_device_choose_mode()\n elif (self._ChannelStripController__assignment_mode == CSM_SENDS):\n self._ChannelStripController__send_mode_offset -= len(self._ChannelStripController__channel_strips)\n self._ChannelStripController__reassign_channel_strip_parameters(for_display_only=False)\n self._ChannelStripController__update_channel_strip_strings()\n self._ChannelStripController__update_page_switch_leds()\n self.request_rebuild_midi_map()",
"def on_page_up(event: tk.Event) -> None:\n go_prev()",
"def navigate_home_mobile(self):\n\n if config.desiredCapabilities_mobile['platformName'] == 'android':\n self.mob_conn.press_keycode(187)\n elif config.desiredCapabilities_mobile['platformName'] == 'ios':\n pressHome = {\"name\": \"home\"}\n # self.mob_conn.execute_script(\"mobile: pressButton\", pressHome)\n self.mob_conn.execute_script(\"seetest:client.deviceAction(\\\"Home\\\")\")\n return self",
"def test_10_verify_back_btn(self):\n\n self.__load_app_first_screen_welcome()\n self.web_welcome.click_manage_options()\n self.privacy_preference.verify_privacy_preference_screen()\n self.privacy_preference.click_back_btn()\n self.web_welcome.verify_welcome_screen()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Will change the orientation of the screen to LandscapeView
|
def change_orientation_landscape(self):
self.mob_conn.orientation = 'LANDSCAPE'
return self
|
[
"def to_landscape(self) -> None:\n if self.is_portrait:\n self.width, self.height = self.height, self.width",
"def change_orientation_portrait(self):\n\n self.mob_conn.orientation = 'PORTRAIT'\n return self",
"def to_portrait(self) -> None:\n if self.is_landscape:\n self.width, self.height = self.height, self.width",
"def set_horizontal(self) -> None:\n self.orientation = constants.HORIZONTAL",
"def set_orientation(self, value):\n self._selenium_web_driver().orientation = value.upper()",
"def landscape(self):\n lscape = self._lscape_ref()\n return lscape",
"def set_landscape_parameters(self, landscape, params):",
"def switchLayoutDirection(self):\n if self.layoutDirection() == QtCore.Qt.LeftToRight:\n QtGui.qApp.setLayoutDirection(QtCore.Qt.RightToLeft)\n else:\n QtGui.qApp.setLayoutDirection(QtCore.Qt.LeftToRight)",
"def clear_landscape(self):\n self.__landscaped = False\n self.__landscape_graph = None\n self.__landscape_data = LandscapeData()",
"def set_orientation(self, z_axis=(0,0,1), x_axis=(1,0,0)):\n self.antenna.set_orientation(z_axis=z_axis, x_axis=x_axis)",
"def landscape(frame):\n\n if len(frame.shape) == 2:\n return np.flipud( np.fliplr( frame.T ) )\n elif len(frame.shape) == 3:\n return np.transpose(frame, (0, 2, 1) )[:, ::-1, ::-1]\n else:\n print \"landscape: data layout not understood\"\n print \"frame.shape: \", frame.shape\n return frame",
"def get_orientation(self):\n if -4.9 < accelerometer.acceleration[0] < 4.9:\n self.orientation = 0\n else:\n self.orientation = 1",
"def westView(self):\n self.changeStyleDynamic(self.ui.telescopeView, 'running', False)\n self.camera.setViewCenter(QVector3D(0.0, 1.5, 0.0))\n self.camera.setPosition(QVector3D(-5.0, 1.5, 0.0))\n self.camera.setUpVector(QVector3D(0.0, 1.0, 0.0))\n return True",
"def lock_orientation(self, orientation):\n q = Quaternion()\n q.x, q.y, q.z, q.w = orientation.GetQuaternion()\n self.__lock_orientation_pub.publish(q);",
"def set_vertical(self) -> None:\n self.orientation = constants.VERTICAL",
"def toggle_aspect_ratio(self):\n self.pres_aspect_ratio = self.aspect_ratio.isChecked()",
"def on_change_orientation(self):\n detector, _, position = self.get_current_detector()\n if detector is None:\n return\n #Change x coordinate\n x_orientation = self.x_orientation_tcl.GetValue().lstrip().rstrip()\n if x_orientation == \"\" or x_orientation == str(None):\n x_orientation = None\n detector.orientation.x = x_orientation\n else:\n if check_float(self.x_orientation_tcl):\n if detector.orientation.x != float(x_orientation):\n self._notes += \"Change x of orientation from \"\n self._notes += \"%s to %s \\n\" % (detector.orientation.x,\n x_orientation)\n detector.orientation.x = float(x_orientation)\n else:\n self._notes += \"Error: Expected a float for the orientation \"\n self._notes += \"'s x won't changes x orientation from \"\n self._notes += \"%s to %s\" % (detector.orientation.x,\n x_orientation)\n #Change y coordinate\n y_orientation = self.y_orientation_tcl.GetValue().lstrip().rstrip()\n if y_orientation == \"\" or y_orientation == str(None):\n y_orientation = None\n detector.orientation.y = y_orientation\n else:\n if check_float(self.y_orientation_tcl):\n if detector.orientation.y != float(y_orientation):\n self._notes += \"Change y of orientation from \"\n self._notes += \"%s to %s \\n\" % (detector.orientation.y,\n y_orientation)\n detector.orientation.y = float(y_orientation)\n else:\n self._notes += \"Error: Expected a float for the orientation's \"\n self._notes += \" y won't changes y orientation from \"\n self._notes += \"%s to %s\" % (detector.orientation.y,\n y_orientation)\n #Change z coordinate\n z_orientation = self.z_orientation_tcl.GetValue().lstrip().rstrip()\n if z_orientation == \"\" or z_orientation == str(None):\n z_orientation = None\n detector.orientation.z = z_orientation\n else:\n if check_float(self.z_orientation_tcl):\n if detector.orientation.z != float(z_orientation):\n self._notes += \"Change z of offset from \"\n self._notes += \"%s to %s \\n\" % (detector.orientation.z,\n z_orientation)\n detector.orientation.z = float(z_orientation)\n else:\n self._notes += \"Error: Expected a float for the orientation 's\"\n self._notes += \" x won't changes z orientation from \"\n self._notes += \"%s to %s\" % (detector.orientation.z,\n z_orientation)\n #change the orientation unit\n unit = self.orientation_unit_tcl.GetValue().lstrip().rstrip()\n if detector.orientation_unit != unit:\n self._notes += \" Change orientation's unit from \"\n self._notes += \"%s to %s\" % (detector.orientation_unit, unit)\n\n self.detector_cbox.SetString(position, str(detector.name))\n self.detector_cbox.SetClientData(position, detector)\n self.detector_cbox.SetStringSelection(str(detector.name))",
"def setOutwardOrientation(tag):\n ierr = c_int()\n lib.gmshModelMeshSetOutwardOrientation(\n c_int(tag),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshSetOutwardOrientation returned non-zero error code: \",\n ierr.value)",
"def test_change_orientation_W(self):\n\t\torientation = 'S'\n\t\tspin = 'R'\n\t\tres = marsRover.changeOrientation(orientation, spin)\n\t\tself.assertEqual(res, 'W')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Will change the orientation of the screen to PortraitView
|
def change_orientation_portrait(self):
self.mob_conn.orientation = 'PORTRAIT'
return self
|
[
"def change_orientation_landscape(self):\n\n self.mob_conn.orientation = 'LANDSCAPE'\n return self",
"def to_portrait(self) -> None:\n if self.is_landscape:\n self.width, self.height = self.height, self.width",
"def to_landscape(self) -> None:\n if self.is_portrait:\n self.width, self.height = self.height, self.width",
"def set_orientation(self, value):\n self._selenium_web_driver().orientation = value.upper()",
"def set_vertical(self) -> None:\n self.orientation = constants.VERTICAL",
"def decide_orientation(width, height):\n if width < height:\n return Orientation.HORIZONTAL\n elif width > height:\n return Orientation.VERTICAL\n else:\n return Orientation.HORIZONTAL if random.randint(0, 1) == 0 else Orientation.VERTICAL",
"def set_orientation(self, z_axis=(0,0,1), x_axis=(1,0,0)):\n self.antenna.set_orientation(z_axis=z_axis, x_axis=x_axis)",
"def toggle_aspect_ratio(self):\n self.pres_aspect_ratio = self.aspect_ratio.isChecked()",
"def set_landscape_parameters(self, landscape, params):",
"def get_orientation(self):\n if -4.9 < accelerometer.acceleration[0] < 4.9:\n self.orientation = 0\n else:\n self.orientation = 1",
"def on_change_orientation(self):\n detector, _, position = self.get_current_detector()\n if detector is None:\n return\n #Change x coordinate\n x_orientation = self.x_orientation_tcl.GetValue().lstrip().rstrip()\n if x_orientation == \"\" or x_orientation == str(None):\n x_orientation = None\n detector.orientation.x = x_orientation\n else:\n if check_float(self.x_orientation_tcl):\n if detector.orientation.x != float(x_orientation):\n self._notes += \"Change x of orientation from \"\n self._notes += \"%s to %s \\n\" % (detector.orientation.x,\n x_orientation)\n detector.orientation.x = float(x_orientation)\n else:\n self._notes += \"Error: Expected a float for the orientation \"\n self._notes += \"'s x won't changes x orientation from \"\n self._notes += \"%s to %s\" % (detector.orientation.x,\n x_orientation)\n #Change y coordinate\n y_orientation = self.y_orientation_tcl.GetValue().lstrip().rstrip()\n if y_orientation == \"\" or y_orientation == str(None):\n y_orientation = None\n detector.orientation.y = y_orientation\n else:\n if check_float(self.y_orientation_tcl):\n if detector.orientation.y != float(y_orientation):\n self._notes += \"Change y of orientation from \"\n self._notes += \"%s to %s \\n\" % (detector.orientation.y,\n y_orientation)\n detector.orientation.y = float(y_orientation)\n else:\n self._notes += \"Error: Expected a float for the orientation's \"\n self._notes += \" y won't changes y orientation from \"\n self._notes += \"%s to %s\" % (detector.orientation.y,\n y_orientation)\n #Change z coordinate\n z_orientation = self.z_orientation_tcl.GetValue().lstrip().rstrip()\n if z_orientation == \"\" or z_orientation == str(None):\n z_orientation = None\n detector.orientation.z = z_orientation\n else:\n if check_float(self.z_orientation_tcl):\n if detector.orientation.z != float(z_orientation):\n self._notes += \"Change z of offset from \"\n self._notes += \"%s to %s \\n\" % (detector.orientation.z,\n z_orientation)\n detector.orientation.z = float(z_orientation)\n else:\n self._notes += \"Error: Expected a float for the orientation 's\"\n self._notes += \" x won't changes z orientation from \"\n self._notes += \"%s to %s\" % (detector.orientation.z,\n z_orientation)\n #change the orientation unit\n unit = self.orientation_unit_tcl.GetValue().lstrip().rstrip()\n if detector.orientation_unit != unit:\n self._notes += \" Change orientation's unit from \"\n self._notes += \"%s to %s\" % (detector.orientation_unit, unit)\n\n self.detector_cbox.SetString(position, str(detector.name))\n self.detector_cbox.SetClientData(position, detector)\n self.detector_cbox.SetStringSelection(str(detector.name))",
"def yourportrait():\n\n return render_template(\n 'your_portrait.html',\n your_portraitActive='active'\n )",
"def set_horizontal(self) -> None:\n self.orientation = constants.HORIZONTAL",
"def landscape(frame):\n\n if len(frame.shape) == 2:\n return np.flipud( np.fliplr( frame.T ) )\n elif len(frame.shape) == 3:\n return np.transpose(frame, (0, 2, 1) )[:, ::-1, ::-1]\n else:\n print \"landscape: data layout not understood\"\n print \"frame.shape: \", frame.shape\n return frame",
"def setOutwardOrientation(tag):\n ierr = c_int()\n lib.gmshModelMeshSetOutwardOrientation(\n c_int(tag),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshSetOutwardOrientation returned non-zero error code: \",\n ierr.value)",
"def rotate(self):\r\n self.width, self.height = self.height, self.width",
"def orientation(self):\r\n tag=self.readinfo('Image Orientation Patient')\r\n \r\n if tag==None:\r\n name=None\r\n elif tag==[-0,1,0,-0,-0,-1]:\r\n name=1 #Sagittal\r\n elif tag==[-1,-0,0,-0,-1,0]:\r\n name=2 #Axial\r\n elif tag==[1,0,0,0,0,-1]:\r\n name=3 #Coronal\r\n else:\r\n name=4 #Oblique\r\n self.orient=name\r\n return",
"def test_portrait_check():\n portrait_angles = [90, 270, -90]\n landscape_angles = [0, 180, -180, 360]\n\n for angle in portrait_angles:\n compass = orientation.Compass()\n compass.set_angle(angle)\n assert compass.is_portrait_frame()\n assert not compass.is_landscape_frame()\n\n for angle in landscape_angles:\n compass = orientation.Compass()\n compass.set_angle(angle)\n assert compass.is_landscape_frame()\n assert not compass.is_portrait_frame()",
"def set_orient(self, new_orient):\n self[:2, :2] = new_orient"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Will open the menu/app_drawer of the device (only for Android)
|
def open_menu_mobile(self):
if config.desiredCapabilities_mobile['platformName'] == 'android':
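            # keycode 82 is the Android menu key (KEYCODE_MENU)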
self.mob_conn.press_keycode(82)
# elif config.desiredCapabilities_mobile['platformName'] == 'ios':
# pressBack = {"name": "back"}
# self.mob_conn.execute_script("mobile: pressButton", pressBack)
return self
|
[
"def openMenu(self):\n root = tk.Tk()\n menu = Menu(self, master=root)\n menu.mainloop()",
"def open_admin_side_menu(self):\n self.click_on_element_by_css(adpl.ADMIN_SIDE_NAVIGATION_MENU)",
"def menu(self):\n self.parent.switch_screen(\"Menu\")",
"def setMenuMode(string):\n pass",
"def click_menu(self):\n pass",
"def open_recent_apps(self):\n\n if config.desiredCapabilities_mobile['platformName'] == 'android':\n self.mob_conn.press_keycode(187)\n # elif config.desiredCapabilities_mobile['platformName'] == 'ios':\n # params = {\"element\": element, \"name\": \"back\"}\n # self.mob_conn.execute_script(\"mobile: swipe\", params)",
"def accessoriesMenu():\n pref = QtGui.QAction(mw)\n pref.setText(\"Command panel\")\n pref.setObjectName(\"CommandPanel\")\n pref.triggered.connect(onPreferences)\n try:\n import AccessoriesMenu\n AccessoriesMenu.addItem(\"CommandPanel\")\n except ImportError:\n a = mw.findChild(QtGui.QAction, \"AccessoriesMenu\")\n if a:\n a.menu().addAction(pref)\n else:\n mb = mw.menuBar()\n action = QtGui.QAction(mw)\n action.setObjectName(\"AccessoriesMenu\")\n action.setIconText(\"Accessories\")\n menu = QtGui.QMenu()\n action.setMenu(menu)\n menu.addAction(pref)\n\n def addMenu():\n \"\"\"Add accessories menu to the menu bar.\"\"\"\n mb.addAction(action)\n action.setVisible(True)\n\n addMenu()\n mw.workbenchActivated.connect(addMenu)",
"def on_action_open(self, content):\n self.widget().open()",
"def home(self):\n self.window.show_view(Menu())",
"def open_menu(menu, on_open, widgets, args, kwargs, timeout=0):\n\n def internal_on_open(on_open, widgets, windows, args, kwargs):\n dialog = [w for w in Gtk.Window.list_toplevels() if w\n not in windows and w.get_mapped()]\n if not dialog:\n # Will try again after same timeout or idle\n return True\n\n dialog = dialog[0]\n\n for name in widgets:\n if not get_widget_by_name(name, dialog):\n # Wrong dialog\n return True\n\n params = tuple([dialog] + [get_widget_by_name(name, dialog)\n for name in widgets])\n apply(on_open, params + args, kwargs)\n\n windows = Gtk.Window.list_toplevels()\n if timeout == 0:\n GObject.idle_add(lambda: internal_on_open(on_open, widgets,\n windows, args, kwargs))\n else:\n GObject.timeout_add(timeout, lambda: internal_on_open(\n on_open, widgets, windows, args, kwargs))\n GPS.Menu.get(menu).action.execute_if_possible()",
"def mode_start(self, **kwargs):\n self.add_mode_event_handler(\"show_mainmenu\", self.show_menu)",
"def open_menu_section(self, url):\n if url == ADMIN_USERS:\n self.click_on_element_by_css(adpl.DASHBOARD_USERS)\n elif url == ADMIN_USERS_STATUSES:\n self.click_on_element_by_css(adpl.DASHBOARD_USERS_STATUSES)\n elif url == ADMIN_ROLE_REQUESTS:\n self.click_on_element_by_css(adpl.DASHBOARD_ROLE_REQUESTS)\n elif url == ADMIN_LOGS:\n self.click_on_element_by_css(adpl.DASHBOARD_LOGS)\n self.wait.until(base_page.EC.url_to_be(url))",
"def open_door(self):\n if not self.door_open:\n self.do(2, \"Opening door\")\n self.door_open = True",
"def _open_device(self):\n pass",
"def browser_menu():\n def on_setup_menus(browser):\n \"\"\"\n on browser setupMenus was called\n \"\"\"\n # main menu\n menu = browser.form.menubar.addMenu(\"Import Video\")\n\n # import\n action = QAction(APP_ICON, _(\"IMPORT_VIDEO\"), mw)\n action.triggered.connect(lambda: show_dialog())\n menu.addAction(action)\n\n # check update\n action = QAction(_('CHECK_UPDATE'), browser)\n action.triggered.connect(lambda: check_updates(background=False, parent=browser))\n menu.addAction(action)\n\n # About\n action = QAction(_('ABOUT'), browser)\n action.triggered.connect(lambda: show_about_dialog(browser))\n menu.addAction(action)\n\n addHook('browser.setupMenus', on_setup_menus)",
"def show_menu(self, item):\n _item = item.get_item()\n menu_items = [\n {'viewclass': 'MDMenuItem', 'text': 'edit',\n 'callback': lambda x: self.app.goto(SCREENS_TYPE.EDIT, task=_item)},\n {'viewclass': 'MDMenuItem', 'text': 'remove',\n 'callback': lambda x: self.remove_task(_item)},\n {'viewclass': 'MDMenuItem', 'text': 'mark as finished',\n 'callback': lambda x: self.mark_as_finished(_item)},\n ]\n MDDropdownMenu(items=menu_items, width_mult=4).open(item)",
"def render_menu(message=None):\n return render_template('menu.html', menu=mode_manager.menu_config, message=message)",
"def open_adobe(self):\n self.driver.start_activity(const.PACKAGE.ADOBE,const.LAUNCH_ACTIVITY.ADOBE, wait_activity=const.PACKAGE.ADOBE + \"*\")\n if self.driver.wait_for_object(\"welcome_screen_exit_button\", timeout=10, raise_e=False):\n self.driver.click(\"welcome_screen_exit_button\")\n if self.has_overlay_ui():\n self.turn_off_overlay_ui_guide()",
"def _on_click(self):\n Log.debug(\"Pressed sliding menu button - {}\".format(self._name))\n get_manager().screen = getattr(Screen, self._name)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Hold the element for a given duration of time
|
def touch_and_hold_element(self, element, time_duration=3000):
actions = TouchAction(self.mob_conn)
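        # long_press takes the hold duration in milliseconds (3000 ms = 3 s by default)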
actions.long_press(element, duration=time_duration)
actions.perform()
return self
|
[
"def at_repeat(self):\r\n self.obj.blink()",
"def hold_piece(self):\r\n if self.pieces[2]:\r\n self.pieces[0], self.pieces[2] = self.pieces[2], self.pieces[0]\r\n else:\r\n self.pieces[:3] = [self.pieces[1], Piece(), self.pieces[0]]\r\n\r\n self.pieces[0].reset()\r\n self.pieces[2].hold()\r\n self.hold = False",
"def at_repeat(self):\r\n pass",
"def update_delay(self, delay):",
"def delay(self):\n self.status = Status(1)",
"def decrease_time(self):\n pass",
"def update_timer(self, time):\n self.timer += time",
"def enable(self, timeout):",
"def timeout(self, timeout):\n with self._lock:\n old, self._timeout = self._timeout, timeout\n if timeout < old:\n self._shrink(self.size)",
"def count_down(self):\n self.time -= 1",
"def finish_delay(self):",
"def drop_after_pause():\n actions.sleep(\"1000ms\")\n actions.self.drop()",
"def add_time(self, amount):\n self._time += amount",
"def increase_time(self):\n\n self._alive_time += 1\n self.cell_changed.emit(self._value, self._alive_time) # Signal the change of alive time",
"def onClick2(self, event):\n sleep(int(60 * event.x // self.canvas_size[0]) + 1)",
"def advanceTime(self, amount):\n if self.blocked:\n assert self.workTime == 0\n self.timeWaiting += amount\n else:\n assert self.workTime - amount >= -FLOAT_ERR\n self.workTime = max(self.workTime - amount, 0)\n if self.workTime == 0:\n printHandler(\"I\", self.name, \"finishes a - \", self.currentComponent.name)\n \n if self.workTime == 0:\n oldComponent = self.currentComponent\n workstationUsed = self.placeComponentInBuffer()\n if workstationUsed:\n printHandler(\"I\", self.name, \"places a\", oldComponent.name, 'in', workstationUsed.name)\n self.blocked = False\n self.workOnNextComponent()\n else:\n self.blocked = True",
"def timeouted(self, t):\n self._timeouted = t",
"def progress(self):\n self.remaining_duration -= 1",
"def duration(self, duration):\n self._duration = duration"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Will open the background apps section
|
def open_recent_apps(self):
if config.desiredCapabilities_mobile['platformName'] == 'android':
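            # keycode 187 is the Android app switcher / recent apps key (KEYCODE_APP_SWITCH)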
self.mob_conn.press_keycode(187)
# elif config.desiredCapabilities_mobile['platformName'] == 'ios':
# params = {"element": element, "name": "back"}
# self.mob_conn.execute_script("mobile: swipe", params)
|
[
"def the_apps_page_load_open_installed_applications(driver):\n if is_element_present(driver, '//mat-ink-bar[@style=\"visibility: visible; left: 0px; width: 183px;\"]') is False:\n assert wait_on_element(driver, 10, '//div[contains(text(),\"Installed Applications\")]', 'clickable')\n driver.find_element_by_xpath('//div[contains(text(),\"Installed Applications\")]').click()\n assert wait_on_element(driver, 7, '//h3[contains(.,\"No Applications Installed\")]')",
"def is_other_app_active(self):\n return self.device.app_display_name not in ('Backdrop', self.dashboard_app_name)",
"def background_app(self, duration):\n self._selenium_web_driver().background_app(duration / 1000.0)",
"def start_app(self):\n subcmd = self.master_config.Global.subcommand\n if subcmd=='create' or subcmd=='list':\n return\n elif subcmd=='start':\n self.start_app_start()\n elif subcmd=='stop':\n self.start_app_stop()",
"def open_adobe(self):\n self.driver.start_activity(const.PACKAGE.ADOBE,const.LAUNCH_ACTIVITY.ADOBE, wait_activity=const.PACKAGE.ADOBE + \"*\")\n if self.driver.wait_for_object(\"welcome_screen_exit_button\", timeout=10, raise_e=False):\n self.driver.click(\"welcome_screen_exit_button\")\n if self.has_overlay_ui():\n self.turn_off_overlay_ui_guide()",
"def get_enabled_apps():\n return spectator_apps.enabled()",
"def openApp(self, app_name):\n time.sleep(2)\n locatorStr = ('//*[@title=\"' + app_name + '\"]')\n self.double_click_object(By.XPATH, locatorStr)",
"def nav_home(self):\n self.br.open(\"http://news.ycombinator.com/\")",
"def background(self):\n pass",
"def home(self):\n self.window.show_view(Menu())",
"def activate_window_desktop(self, window: wrappers.Window) -> Optional[bool]:\n pass",
"def app_activity(ctx, app_name):\n gigalixir_app_activity.get(ctx.obj['host'], app_name)",
"def _launch_app(self, url):\n raise NotImplementedError()",
"def get_enabled_apps():\n return ditto_apps.enabled()",
"def AreAppsConfigured():\n return False",
"async def display_available_apps(ctx):\n # get all available application files.\n description = ''\n for file in os.listdir('cogs'):\n if file.endswith('.py') and not file.startswith('bot'):\n description += f'- {file.replace(\".py\", \"\")}\\n'\n\n await send_embed(ctx, title=get_dev_title(), text=description)",
"def activate():\n ActivityListener()",
"def open(self):\n\t\tfor each in self.sections:\n\t\t\tif each.open() == True:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tpass\n\t\treturn False",
"def send_app_to_background(self, background_time=100):\n self.mob_conn.background_app(background_time)\n return self"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Will validate whether the app is installed on the mobile device or not
|
def validate_app_installed(self, appPackage):
assert self.mob_conn.is_app_installed(
appPackage), f"The Application {appPackage} is not installed in the device."
return self
|
[
"def on_mobile(self):\n ua = get_user_agent(self.request)\n if ua:\n if detect_mobile_browser(ua):\n return True\n else:\n return False\n return False",
"def check_devices(self) -> bool:\n\t\tpass",
"def _has_widevine(self):\n if self._os() == 'Android': # widevine is built in on android\n return True\n else:\n if self._widevine_path():\n self._log('Found Widevine binary at {0}'.format(self._widevine_path().encode('utf-8')))\n return True\n else:\n self._log('Widevine is not installed.')\n return False",
"def test_installed_apps_are_displayed(self):\n app = self.add_application()\n response = self.install_app(app.uuid)\n self.assertIn(b'/launch_app?app_id=%s' % (str(app.uuid)), response.data)",
"def is_dev_installed(self):\n result = False\n r = urllib2.urlopen(self.ecp_address + \"query/apps\").read()\n xml_root = ElementTree.fromstring(r)\n for app in xml_root.getiterator(\"app\"):\n if app.get(\"id\") == \"dev\":\n result = True\n break\n return result",
"def is_application(self):\n\n elements = self.get(CPE.KEY_APP)\n return len(elements) > 0",
"def is_app_installed(self, bundle_id):\n return self._selenium_web_driver().is_app_installed(bundle_id)",
"def is_installed(self) -> bool:\n return True",
"def is_installed(self):\n return False",
"def test_product_installed(self):\n self.assertTrue(\n self.installer.is_product_installed(\"collective.behavior.banner\")\n )",
"def AreAppsConfigured():\n return False",
"def GetAppAvailable(self, app):\n return bool(self.wifi.AppAvailable(app) or self.wired.AppAvailable(app))",
"def appNeedsSetup(self, app):\n return app.getLink('setup') and app['configured'] == '0'",
"def check(self):\n chk = begoneads.check.callback()\n if chk:\n messagebox.showinfo(title='Check', message=\"Begoneads IS installed\")\n else:\n messagebox.showinfo(title='Check', message=\"Begoneads NOT installed\")",
"def verify_app_exists(self,app):\n locator=eda_lex_locators[\"eda_settings\"][\"app_tile\"].format(app)\n self.selenium.wait_until_page_contains_element(locator,timeout=60,error=f'{app} did not open in 1 min')",
"def test_product_installed(self):\n self.assertTrue(self.installer.isProductInstalled('rapido.plone'))",
"def is_app_installed(bundle_id: str) -> bool:\n return Seldom.driver.is_app_installed(bundle_id=bundle_id)",
"def _is_system_installed( self ):\n return self._system.test_library(self._library, self._headers)",
"def IsApplicationAvailable(application_name):\n return application_name in GetListOfAvailableApplications()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Will start a new activity on the mobile device
|
def start_new_activity_android(self, appPackage, activityName):
self.mob_conn.start_activity(appPackage, activityName)
return self
|
[
"def open_menu_mobile(self):\n\n if config.desiredCapabilities_mobile['platformName'] == 'android':\n self.mob_conn.press_keycode(82)\n # elif config.desiredCapabilities_mobile['platformName'] == 'ios':\n # pressBack = {\"name\": \"back\"}\n # self.mob_conn.execute_script(\"mobile: pressButton\", pressBack)\n return self",
"def start_app(self, app):\n\t\tif isinstance(app, str):\n\t\t\tpackage_name = app\n\t\telif isinstance(app, App):\n\t\t\tpackage_name = app.get_package_name()\n\t\t\tif app.get_main_activity():\n\t\t\t\tpackage_name = \"/%s\" % app.get_main_activity()\n\t\telse:\n\t\t\tself.logger.warning(\"Unsupported param \" + app + \" with type: \", type(app))\n\t\t\treturn\n\t\tintent = Intent(suffix = package_name)\n\t\tself.send_intent(intent)",
"def open_recent_apps(self):\n\n if config.desiredCapabilities_mobile['platformName'] == 'android':\n self.mob_conn.press_keycode(187)\n # elif config.desiredCapabilities_mobile['platformName'] == 'ios':\n # params = {\"element\": element, \"name\": \"back\"}\n # self.mob_conn.execute_script(\"mobile: swipe\", params)",
"def start_activity_via_monkey(self, package):\n\t\tcmd = \"monkey\"\n\t\tif package:\n\t\t\tcmd += \" -p %s\" % package\n\t\tout = adb.shell(cmd)\n\t\tif re.search(r\"(Error)|(Cannot find 'App')\", out, re.IGNORECASE | re.MULTILINE):\n\t\t\traise RuntimeError(out)",
"def navigate_home_mobile(self):\n\n if config.desiredCapabilities_mobile['platformName'] == 'android':\n self.mob_conn.press_keycode(187)\n elif config.desiredCapabilities_mobile['platformName'] == 'ios':\n pressHome = {\"name\": \"home\"}\n # self.mob_conn.execute_script(\"mobile: pressButton\", pressHome)\n self.mob_conn.execute_script(\"seetest:client.deviceAction(\\\"Home\\\")\")\n return self",
"def launch(self):\n\t\tif not self.isrunning() and self.AS_appdata.constructor == 'path' \\\n\t\t\t\tand self.AS_appdata.relaunchmode != 'never':\n\t\t\taem.Application.launch(self.AS_appdata.identifier)\n\t\t\tself.AS_appdata.target().reconnect() # make sure aem.Application object's AEAddressDesc is up to date\n\t\telse: # send launch event to app (will error if not already running)\n\t\t\tCommand(self, 'launch', 'ascrnoop', {})()",
"def _launch_app(self, url):\n raise NotImplementedError()",
"def activate():\n ActivityListener()",
"def start_launch(self, context, **kwargs):\n self._launch_id = self._rp.launch_id or self._rp.start_launch(\n name=self._cfg.launch_name,\n start_time=timestamp(),\n attributes=self._get_launch_attributes(),\n description=self._cfg.launch_description,\n rerun=self._cfg.rerun,\n rerunOf=self._cfg.rerun_of,\n **kwargs\n )\n if not self._skip_analytics:\n send_event(self.agent_name, self.agent_version)",
"def launch_sync_mobile(*args, **kwargs):\n if not os.path.exists(settings.SYNC_MOBILE_ROOT):\n os.mkdir(settings.SYNC_MOBILE_ROOT)\n\n print('Sync mobile started')\n\n try:\n current_task.update_state(\n state='PROGRESS',\n meta={\n 'name': current_task.name,\n 'current': 5,\n 'total': 100,\n 'infos': _(\"Init sync ...\"),\n }\n )\n sync_mobile_options = {\n 'url': kwargs.get('url'),\n }\n sync_mobile_options.update(settings.SYNC_MOBILE_OPTIONS)\n call_command(\n 'sync_mobile',\n settings.SYNC_MOBILE_ROOT,\n verbosity=2,\n task=current_task,\n **sync_mobile_options\n )\n\n except Exception:\n raise\n\n print('Sync mobile ended')\n\n return {\n 'name': current_task.name,\n }",
"def start_anontunnel_android(self):\n from android import AndroidService\n service = AndroidService('Anonymous downloading Service', 'Anonymous tunnels are running...')\n service.start('Anonymous tunnels service started')\n self.service = service",
"def _KickLauncher(self):\n logging.info('kicking launcher...')\n self.ExecOnDevice([\n 'am',\n 'start',\n '-a',\n 'android.intent.action.MAIN',\n '-c',\n 'android.intent.category.HOME'])\n\n self._kicked_launcher = True",
"def run(self):\n\n runActivity = ''\n runPackage = ''\n for activity in activities:\n if activityaction.has_key(activity) and activityaction[activity] == 'android.intent.action.MAIN':\n if activity[0] == '.':\n runActivity = activity\n runPackage = packageNames[0]\n else:\n for package in packageNames:\n splitAct = activity.split(package)\n if len(splitAct) > 1:\n runActivity = splitAct[1]\n runPackage = package\n break\n else:\n runActivity = splitAct[0]\n runPackage = package\n break\n \n call(['monkeyrunner', 'scripts/monkeyrunner.py', apkName, runPackage, runActivity], stderr=PIPE)\n \n break",
"def __load_app_first_screen_welcome(self):\n self.fc.reset_app()\n self.driver.wdvr.start_activity(self.pkg_name, LAUNCH_ACTIVITY.SMART)\n self.driver.wait_for_context(WEBVIEW_CONTEXT.SMART, timeout=20)\n self.web_welcome.verify_welcome_screen()",
"async def nextlaunch(self, ctx, *args):\n if not can_answer(ctx):\n return\n launches = launchlibrary.Launch.next(api, 1)\n if launches:\n launch = launches[0]\n launchname = launch.name\n launchtime_tz = launch.net\n utc = datetime.now(timezone.utc)\n tz = launchtime_tz.tzname()\n T = chop_microseconds(launchtime_tz - utc)\n launchtime = launchtime_tz.replace(tzinfo=None)\n probability = launch.probability\n if probability == -1:\n probabilitystr = \"not available\"\n else:\n probabilitystr = '{0}%'.format(probability)\n msg = ''\n if '-n' in args:\n if can_notify:\n msg = notify(msg, ctx)\n else:\n msg = \"Notifying disabled. \"\n msg += '**__{0}__**\\nNET {1} {2}\\nWeather probability: {3}\\nT- {4}\\n'\n msg = msg.format(launchname, launchtime, tz, probabilitystr, T)\n for arg, formatter in (('-id', id), ('-d', description), ('-v', videourl)):\n if arg in args:\n msg = formatter(msg, launch)\n await send(ctx, msg, args)",
"def launch_app(self):\n self._selenium_web_driver().launch_app()",
"def get_start_intent(self):\n\t\tpackage_name = self.package_name\n\t\tif self.main_activity:\n\t\t\tpackage_name += \"/%s\" % self.main_activity\n\t\treturn Intent(suffix = package_name)",
"def startDevice(self, item, **args):\n dev = self.startDevices(item, **args)\n if len(dev) < 1:\n print \"Error loading device: '%s'\" % item\n else:\n return dev[0]",
"def start_activity(self, app_package, app_activity, app_wait_package=None, app_wait_activity=None,\n intent_action=None, intent_category=None, intent_flags=None,\n optional_intent_arguments=None, stop_app_on_reset=None):\n options = {}\n if app_wait_package is not None:\n options[\"app_wait_package\"] = app_wait_package\n if app_wait_activity is not None:\n options[\"app_wait_activity\"] = app_wait_activity\n if intent_action is not None:\n options[\"intent_action\"] = intent_action\n if intent_category is not None:\n options[\"intent_category\"] = intent_category\n if intent_flags is not None:\n options[\"intent_flags\"] = intent_flags\n if optional_intent_arguments is not None:\n options[\"optional_intent_arguments\"] = optional_intent_arguments\n if stop_app_on_reset is not None:\n options[\"stop_app_on_reset\"] = stop_app_on_reset\n\n self._selenium_web_driver().start_activity(app_package, app_activity, **options)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the appActivity and appPackage of the currently running application
|
def current_app_info(self):
app_info = {}
app_activity = self.mob_conn.current_activity
app_package = self.mob_conn.current_package
app_info['current_activity'] = app_activity
app_info['current_package'] = app_package
return app_info
|
[
"def main_activity(self):\n MAIN_ACTIVITY_ACTION = \"android.intent.action.MAIN\"\n\n package = self.package()\n\n for activity in self.activities():\n for intent_filter in activity[\"intent_filters\"]:\n if MAIN_ACTIVITY_ACTION in intent_filter[\"actions\"]:\n return \"{}{}\".format(package, activity[\"name\"].replace(\n package, \"\"))\n\n return None",
"def app_activity(ctx, app_name):\n gigalixir_app_activity.get(ctx.obj['host'], app_name)",
"def current_app(self):\n return self.app",
"def getApplication(self):\r\n return self.app",
"def getApplication():",
"def app_id(self):\n return self._app_id or self._modules['default'].data['application']",
"def getApp() -> SDApplication:\n\tcontext = sd.getContext()\n\treturn context.getSDApplication()",
"def _get_launchable_activity(self) -> str:\n\n activity = ''\n aapt = self._get_appt_output().split('\\n')\n\n for line in aapt:\n if 'launchable-activity' in line:\n # ['launchable-activity: name=', 'com.app.activity', ' label=', 'bob']\n activity = line.split('\\'')[1]\n\n # If we got the activity using aapt, great, return that.\n if activity != '':\n return activity\n\n # if we dont have the activity yet, check out activity aliases\n\n click.secho(('Unable to determine the launchable activity using aapt, trying '\n 'to manually parse the AndroidManifest for activity aliases...'), dim=True, fg='yellow')\n\n # Try and parse the manifest manually\n manifest = self._get_android_manifest()\n root = manifest.getroot()\n\n # grab all of the activity-alias tags\n for alias in root.findall('./application/activity-alias'):\n\n # Take not of the current activity\n current_activity = alias.get('{http://schemas.android.com/apk/res/android}targetActivity')\n categories = alias.findall('./intent-filter/category')\n\n # make sure we have categories for this alias\n if categories is None:\n continue\n\n for category in categories:\n\n # check if the name of this category is that of LAUNCHER\n # its possible to have multiples, but once we determine one\n # that fits we can just return and move on\n category_name = category.get('{http://schemas.android.com/apk/res/android}name')\n\n if category_name == 'android.intent.category.LAUNCHER':\n return current_activity\n\n # getting here means we were unable to determine what the launchable\n # activity is\n click.secho('Unable to determine the launchable activity for this app.', fg='red')\n raise Exception('Unable to determine launchable activity')",
"def _get_app_data(self):\n return self._get_base_app_data()",
"def _get_app(self):\n try:\n app = App.objects.filter(product=self.product, tags=self.tag).latest(\"creation_date\")\n except:\n app = None\n\n return app",
"def app_apk(self) -> Optional[pulumi.Input['FileReferenceArgs']]:\n return pulumi.get(self, \"app_apk\")",
"def app_info(self):\r\n if \"Registered App\" not in self.typeKeywords:\r\n return {}\r\n url = \"{base}content/users/{user}/items/{itemid}/registeredAppInfo\".format(base=self._portal.resturl,\r\n user=self._user_id,\r\n itemid=self.id)\r\n params = {'f': 'json'}\r\n try:\r\n return self._portal.con.get(url, params)\r\n except:\r\n return {}",
"def get_app_info_string():\n app_info_list = _get_formatted_thread_stack_traces()\n return '\\n'.join(app_info_list)",
"def get_app_id():\n return yaml.safe_load(open(APP_DIR + '/app.yaml'))['application']",
"def get_app_name(self):\n return self._APP_NAME",
"def _app_info(self):\n redirect_url = parse.urlparse(self._get_redirect_url())\n if re.search(\"onelogin\", redirect_url.hostname):\n subdomain = re.match(\n r\"^([a-z0-9\\-]+).onelogin.com\",\n redirect_url.hostname)\n app_id = re.match(\n r\"^\\/trust\\/saml2\\/http-redirect\\/sso/(\\d+)$\",\n redirect_url.path)\n\n return (subdomain.group(1), app_id.group(1))",
"def get_app_json(self):\n return {\n 'app_id': self.proj.app_id,\n 'app_package': self.package_name,\n 'app_version': str(self.version),\n 'app_project': self.proj.proj_name,\n 'app_language': 'Java'\n }",
"def getapp():\n return PypeApp._instance",
"def getAppVersion():\n return os.environ.get('CURRENT_VERSION_ID')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Send the app to the background for a specified amount of time
|
def send_app_to_background(self, background_time=100):
self.mob_conn.background_app(background_time)
return self
|
[
"def background_app(self, duration):\n self._selenium_web_driver().background_app(duration / 1000.0)",
"def do_something_every_hour():\n sleep(5)",
"def catch_alarm():\n comm_time_to_call_heart_beat = True",
"def worker_function(time_left):\r\n timer = TimerApp(time_left)",
"async def join_timer(self, ctx):\r\n time = 30\r\n while time != 0:\r\n if time == 5:\r\n await ctx.send(\"Entries close in 5 seconds.\")\r\n time -= 1\r\n await asyncio.sleep(1)",
"def launch(self):\n Worker.time += 1",
"def auto_wallpaper(loop):\n # while True:\n _change_wallpaper()\n loop.call_later(1800, auto_wallpaper, loop)",
"async def sleep(self, ctx):\r\n\r\n await self.client.change_presence(status=discord.Status.invisible)\r\n\r\n Database.Bot[\"sleeping\"] = True\r\n\r\n await ctx.send(\r\n f'Bot going to sleep.. will not respond again until `{Database.Main[ctx.guild.id].get(\"prefix\", \".\")}wake` is sent'\r\n )",
"def sleep(self, *args):\n if (len(args) == 0):\n interval = self.default_interval\n else:\n interval=args[0]\n \n time.sleep(interval)",
"def wait_time_interval(interval):\n\ttime.sleep(interval*60)",
"def _sleep(self):\n self.kill()",
"def run_in_background(self):\r\n self.t = Thread(target=self.run_forever)\r\n self.t.daemon = True\r\n self.t.start()",
"def start_send_photos(vk, target_id, frequency_min):\n while True:\n take_photo()\n send_photo(vk, target_id)\n time.sleep(60*int(frequency_min))",
"def _thread_sleep(self) -> None:\n local_jm_interval = 2\n if isinstance(self._launcher, (LocalLauncher)):\n time.sleep(local_jm_interval)\n else:\n time.sleep(CONFIG.jm_interval)",
"def sleepDelay(ms):\r\n time.sleep(ms/1000.0)",
"def _apply_time_limit(self, args, thisTask, cmd_args, payload, setup):\n if (not (thisTask.time is None)) and thisTask.time > 0:\n cmd_args.append(\"-l\")\n cmd_args.append(\"walltime=\" + str(int(thisTask.time) * 60))\n return True",
"def alarm(t):\n \n alarm_time = time.time() + t",
"async def wake(self, ctx):\r\n await self.client.change_presence(status=discord.Status.online)\r\n\r\n Database.Bot[\"sleeping\"] = False\r\n\r\n await ctx.send(\"Huh? What? Oh... I'm awake.\")",
"def sleep(seconds=0):\n loop = evergreen.current.loop\n current = Fiber.current()\n assert loop.task is not current\n timer = loop.call_later(seconds, current.switch)\n try:\n loop.switch()\n finally:\n timer.cancel()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the given app status on the device
|
def get_app_status(self, appPackage):
state = self.mob_conn.query_app_state(appPackage)
if state == 0:
return "App not installed"
elif state == 1:
return "App not running"
elif state == 2:
return " App running in background or suspended"
elif state == 3:
return "App running in background"
elif state == 4:
return "App running in foreground"
|
[
"def status_get(self, *, is_app=False):\n return self._run('status-get', '--include-data', f'--application={is_app}')",
"def status_get(self, *, is_app=False):\n return self._run('status-get', '--include-data', '--application={}'.format(is_app))",
"def getStatus(self):\n exitcode, output = q.system.process.execute(self._status_cmd, dieOnNonZeroExitCode=False, outputToStdout=False)\n if exitcode == os.EX_OK:\n return AppStatusType.RUNNING\n else:\n return AppStatusType.HALTED\n\n return AppStatusType.UNKNOWN",
"def _display_app_status(self):\n apps = self._get_apps(refresh=True)\n if len(apps) > 0:\n sysout(\"{} {} {} {} {} {}\".format(\n 'Name'.ljust(CFApplication.max_name_length),\n 'State'.ljust(7), 'Inst'.ljust(5), 'Mem'.ljust(4),\n 'Disk'.ljust(4), 'URLs',\n ))\n for app in apps:\n app.print_status()",
"def get_application_status(application=None, unit=None, model_name=None):\n status = get_full_juju_status(model_name=model_name)\n\n if unit and not application:\n application = unit.split(\"/\")[0]\n\n if application:\n status = status.applications.get(application)\n if unit:\n status = status.get(\"units\").get(unit)\n return status",
"def get_status():\n \n return db.get_db().getRoot().getS(ns.l2tpDeviceStatus, rdf.Type(ns.L2tpDeviceStatus))",
"def get_status(self):\r\n\r\n try:\r\n req = self.config.session.get(\r\n self.status_url, verify=self.config.verify, timeout=self.config.timeout)\r\n res = json.loads(req.text)['state']\r\n return res\r\n except requests.exceptions.RequestException as e:\r\n raise VraSdkRequestException(\r\n f'Error requesting status url {self.status_url}: {e}')\r\n except Exception as e:\r\n raise VraSdkMainRequestException(\r\n f'Unmanaged error requesting status url {self.status_url}: {e}')",
"def get_status(self):\n if self.running_arping:\n return \"Arping\"\n else:\n if self._online:\n return \"Online\"\n else:\n return \"Offline\"",
"def status(ctx: click.Context) -> None:\n info = get(\"status\", lambda: status_call(ctx.obj[\"session\"]))\n click.echo(json_pretty(info))",
"def device_status(device: str) -> Status:\n res = check_output(['amixer', 'get', _validate_device(device)])\n return _parse_status(res.decode())",
"def get_status(self):\n self.doGet(STATUS_API, DEFAULT_HEADERS)\n self.parse_response_as_json()",
"def getStatusString(self):\n status = self.wlbt.GetStatus()[0]\n if status == 0:\n return \"STATUS_DISCONNECTED\"\n elif status == 1:\n return \"STATUS_CONNECTED\"\n elif status == 2:\n return \"STATUS_IDLE\"\n elif status == 3:\n return \"STATUS_SCANNING\"\n elif status == 4:\n return \"STATUS_CALIBRATING\"",
"def sdwanapps_status(self, sdwanapp_id, tenant_id=None, api_version=\"v2.0\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/sdwanapps/{}/status\".format(api_version,\n tenant_id,\n sdwanapp_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"get\")",
"def _device_info(self) -> dict:\n response = self._send(\"getStatus\")\n return self._json_decode(response)",
"def status(self, result, config=None):\r\n return result['status']",
"def status(self):\n \n return self._make_request(\"server/status\").json()",
"def get_status(self):\n return StatusAPI.from_client(self)",
"def get_status(self):\n return self.client.get_asg_ready(self.env, self.name)",
"def get_yarn_application_state(environment, app_id, silent=False):\n hadoop_base = \"hadoop \"\n if environment.hadoop_conf_dir:\n hadoop_base += \"--config %s \" % environment.hadoop_conf_dir\n\n hadoop_jar = environment._get_hadoop_jar()\n\n hadoop_cmd = \"%s jar %s -jar %s -checkAppId %s -json\" % \\\n (hadoop_base, hadoop_jar, hadoop_jar, app_id)\n\n if not silent:\n __LOGGER__.info((\"Retrieving current job status from Hadoop cluster\"\n \" using command= \\n %s\") % hadoop_cmd)\n\n proc = _subprocess.Popen(hadoop_cmd, shell=True,\n stderr=_subprocess.STDOUT,\n stdout=_subprocess.PIPE)\n app_report = None\n lines = []\n for line in proc.stdout:\n lines.append(line)\n if HadoopExecutionEnvironment._json_flag in line:\n clean = line.split(HadoopExecutionEnvironment._json_flag)\n app_report = _json.loads(clean[1])\n break\n\n # print the output in case something wrong talking to the yarn\n if app_report == None:\n for l in lines:\n __LOGGER__.error(l)\n\n return app_report"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Flick starting at on_element, and moving by the x and y with specified speed.
|
def flick_mobile_element(self, element, x_coordinate, y_coordinate, speed):
self.mob_conn.flick_element(element, x_coordinate, y_coordinate, speed)
return self
|
[
"def update_position(self):\n \t\t\n self.x += math.sin(self.angle) * self.speed\n self.y -= math.cos(self.angle) * self.speed",
"def accelerate(self):\r\n self.__x_speed += math.cos(math.radians(self.__direction))\r\n self.__y_speed += math.sin(math.radians(self.__direction))",
"def accelerate(self):\n self.speed += 2",
"def set_speed(self):\n\n sender = self.sender()\n speed = 1\n if sender is self.speed_x05:\n speed = 0.5\n if sender is self.speed_x025:\n speed = 0.25\n if sender is self.speed_x0125:\n speed = 0.125\n self.highlight_selected_speed(sender)\n refresh_rate = round(1000/(self.fps * speed))\n self.timer.start(refresh_rate)",
"def setSpeed(self, speed):\n # Changes a class wide variable that affects the move commands in self.refresh()\n self.__speed = speed",
"def rightJoystickEvent(self, x_speed, y_speed):\r\n pass",
"def forward(speedleft, speedright, duration):\n robot.wheels(speedleft,speedright)\n robot.sleep(duration)\n pass",
"def flick(self, start_x, start_y, end_x, end_y):\n self._selenium_web_driver().flick(start_x, start_y, end_x, end_y)",
"def forward(self, speed, runTime=None):\n assert -100.0 <= speed <= 100.0\n assert self.leftMotor is not None\n assert self.rightMotor is not None\n if runTime is None:\n self.tankMovement.on(speed, speed)\n else:\n self.tankMovement.on_for_seconds(speed, speed, runTime)",
"def GoToPosition(x,y):\n Movement.GoStraightToPoint(x,y)",
"def Fly_movement(self):\n num = len(self.points)\n if self.points[self.i][0] == self.points[(self.i+1) % num][0] and self.points[self.i][1] < self.points[(self.i+1) % num][1]: # down\n if self.hit_box.y > self.points[(self.i+1) % num][1]:\n self.i = (self.i +1) % num\n self.hit_box = self.hit_box.move(0,self.speed)\n elif self.points[self.i][0] == self.points[(self.i+1) % num][0] and self.points[self.i][1] > self.points[(self.i+1) % num][1]: # up\n if self.hit_box.y < self.points[(self.i+1) % num][1]:\n self.i = (self.i +1) % num\n self.hit_box = self.hit_box.move(0,-self.speed)\n\n elif self.points[self.i][1] == self.points[(self.i+1) % num][1] and self.points[self.i][0] < self.points[(self.i+1) % num][0]:\n if self.hit_box.x > self.points[(self.i+1) % num][0]:\n self.i = (self.i +1) % num\n self.hit_box = self.hit_box.move(self.speed,0)\n elif self.points[self.i][1] == self.points[(self.i+1) % num][1] and self.points[self.i][0] > self.points[(self.i+1) % num][0]:\n if self.hit_box.x < self.points[(self.i+1) % num][0]:\n self.i = (self.i +1) % num\n self.hit_box = self.hit_box.move(-self.speed,0)",
"def set_speed(self, speed):\r\n self._speed = speed",
"def move(self, step):\n self.position += step * self.speed",
"def move(self):\n #The goal here is to have a bouncing movement.\n #So the first part of the code checks if the entity has\n #reached any of the screen's edges. If so, it changes to\n #the opposite direction.\n width, height = self.get_size()\n if self.x - width/2 <=0 and self.horizontal_dir == -1:\n self.horizontal_dir = 1\n elif self.x + width/2 >= SCREEN_WIDTH and self.horizontal_dir == 1:\n self.horizontal_dir = -1\n if self.y - height/2 <=0 and self.vertical_dir == -1:\n self.vertical_dir = 1\n elif self.y + height/2 >= SCREEN_HEIGHT and self.vertical_dir == 1:\n self.vertical_dir = -1\n\n #This is the movement part.\n self.x+=self.horizontal_dir*self.speed\n self.y+=self.vertical_dir*self.speed",
"def forward(self, speed, seconds=None):\n\n if (self.moving_state==self.MOVING_FORWARD):\n return\n\n self.moving_state = self.MOVING_FORWARD\n\n # Set motor speed and move both forward.\n self.move(speed)\n # If an amount of time is specified, move for that time and then stop.\n if seconds is not None:\n time.sleep(seconds)\n self.stop()",
"def moveFast(self):\n\n if(self.gear!=0 and self.speed<=190):\n\n self.speed+=10\n\n else:\n\n self.speed=0\n if self.speed==200:\n print (\"Maximum speed reached\")\n\n\n \n #since rpm has to change with speed hence if gear>0 with every increment rpm will inc. by 200\n\n if(self.gear!=0):\n\n self.rpm+=200\n\n else:\n\n self.rpm=0\n\n\n \n #since engine temp has to change with speed hence if gear>0 with every increment temp will inc. by 10\n\n if(self.gear!=0):\n\n self.engine_temp+=15\n\n else:\n\n self.engine_temp=35",
"def move_object(self,object):\n x = object.get_x_cor()\n y = object.get_y_cor()\n x_s = object.get_speed_x()\n y_s = object.get_speed_y()\n min_x = self.game.get_screen_min_x()\n max_x = self.game.get_screen_max_x()\n min_y = self.game.get_screen_min_y()\n max_y = self.game.get_screen_max_y()\n delta_x = max_x - min_x\n delta_y = max_y - min_y\n x = (x_s + x - min_x) % delta_x + min_x\n y = (y_s + y - min_y) % delta_y + min_y\n object.move(x,y)",
"def pixelMove():\n pass",
"def set(self, direction, speed):\n\n logger.info(\"direction_cmd: \" + str(direction)+\"; speed_cmd: \" + str(speed))\n \n # Update current speed\n # --------------------\n if speed == self.SpeedCommands.SPEED_UP:\n self.speed += self.SPEED_STEPS\n if self.speed>self.HIGHEST_SPEED:\n self.speed = self.HIGHEST_SPEED\n\n elif speed == self.SpeedCommands.SPEED_DOWN:\n self.speed -= self.SPEED_STEPS\n if self.speed<self.LOWEST_SPEED:\n self.speed = self.LOWEST_SPEED\n\n # Tell the two electric motors what to do \n # --------------------\n if direction == self.DirectionCommands.FORWARD:\n Tb6612fn.moveForward(Tb6612fn.Channel.LEFT, 100*self.speed)\n Tb6612fn.moveForward(Tb6612fn.Channel.RIGHT,93*self.speed)\n\n elif direction == self.DirectionCommands.FORWARD_LEFT:\n Tb6612fn.moveForward(Tb6612fn.Channel.LEFT, 35*self.speed)\n Tb6612fn.moveForward(Tb6612fn.Channel.RIGHT,100*self.speed)\n\n elif direction == self.DirectionCommands.FORWARD_RIGHT:\n Tb6612fn.moveForward(Tb6612fn.Channel.LEFT, 100*self.speed)\n Tb6612fn.moveForward(Tb6612fn.Channel.RIGHT,35*self.speed)\n\n elif direction == self.DirectionCommands.LEFT_WHILE_STOPPED:\n Tb6612fn.moveForward(Tb6612fn.Channel.RIGHT, 100*self.speed)\n Tb6612fn.moveBackwards(Tb6612fn.Channel.LEFT,100*self.speed)\n\n elif direction == self.DirectionCommands.RIGHT_WHILE_STOPPED:\n Tb6612fn.moveBackwards(Tb6612fn.Channel.RIGHT, 100*self.speed)\n Tb6612fn.moveForward(Tb6612fn.Channel.LEFT,100*self.speed)\n\n elif direction == self.DirectionCommands.BACKWARDS:\n Tb6612fn.moveBackwards(Tb6612fn.Channel.LEFT, 100*self.speed)\n Tb6612fn.moveBackwards(Tb6612fn.Channel.RIGHT,100*self.speed)\n\n elif direction == self.DirectionCommands.BACKWARDS_LEFT:\n Tb6612fn.moveBackwards(Tb6612fn.Channel.LEFT, 35*self.speed)\n Tb6612fn.moveBackwards(Tb6612fn.Channel.RIGHT,100*self.speed)\n\n elif direction == self.DirectionCommands.BACKWARDS_RIGHT:\n Tb6612fn.moveBackwards(Tb6612fn.Channel.LEFT, 100*self.speed)\n Tb6612fn.moveBackwards(Tb6612fn.Channel.RIGHT,35*self.speed)\n\n elif direction == self.DirectionCommands.SOFT_STOP:\n Tb6612fn.softStop(Tb6612fn.Channel.LEFT)\n Tb6612fn.softStop(Tb6612fn.Channel.RIGHT)\n self.speed = self.LOWEST_SPEED\n\n elif direction == self.DirectionCommands.HARD_STOP:\n Tb6612fn.hardStop(Tb6612fn.Channel.LEFT)\n Tb6612fn.hardStop(Tb6612fn.Channel.RIGHT)\n self.speed = self.LOWEST_SPEED"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the text from the system clipboard
|
def get_text_from_clipboard(self):
    text_from_clipboard = self.mob_conn.get_clipboard_text()
return text_from_clipboard
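
A small round-trip sketch; the function name and sample text are hypothetical, while set_clipboard_text() and get_clipboard_text() are the Appium Python client's clipboard helpers (clipboard access may be restricted on some platforms).

# Hypothetical round trip over the system clipboard via the Appium driver.
def clipboard_round_trip(driver, text="hello from the test"):
    driver.set_clipboard_text(text)
    return driver.get_clipboard_text() == text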
|
[
"def tkinter_clipboard_get():\n try:\n from tkinter import Tk, TclError\n except ImportError:\n raise TryNext(\n \"Getting text from the _clipboard on this platform requires tkinter.\"\n )\n\n root = Tk()\n root.withdraw()\n try:\n text = root.clipboard_get()\n except TclError:\n raise ClipboardEmpty\n finally:\n root.destroy()\n return text",
"def get_text(self) -> str:\n return self.clipboard.wait_for_text()",
"def copyToClipboard(self):\n return self.clipboard.text()",
"def clipboard_get( self ):\n return self.root.clipboard_get()",
"def _getClipboardText( self ):\n\n # We attempt to get two pieces of information from the clipboard:\n # the formatted text and the plain text.\n \n # Try to get plaintext from unicode text in clipboard; this\n # is likely to be a better version of the unformatted text than\n # what we could produce by stripping out format tags, and it's\n # also easier to use.\n if win32clipboard.IsClipboardFormatAvailable( CF_UNICODETEXT ):\n try:\n plainText = win32clipboard.GetClipboardData( CF_UNICODETEXT )\n except win32clipboard.error as e:\n # This is a fix for ticket #415.\n if e.args[0] == 0:\n logging.info( \"GetClipboardData() error suppressed.\" )\n return {}\n else:\n raise\n assert isinstance( plainText, str ), \\\n \"GetClipboardData returned not-a-unicode object!!\"\n else:\n # If UNICODETEXT is not available, then all other\n # plain-text formats are unavailable; however,\n # we can fall back on getting the plaintext by stripping\n # formatting info out of the formatted text.\n plainText = None\n\n # Try to get HTML from clipboard:\n if win32clipboard.IsClipboardFormatAvailable( CF_HTML ):\n logging.debug( \"HTML is available, getting it.\" )\n formatText = win32clipboard.GetClipboardData( CF_HTML )\n else:\n formatText = None\n\n\n # TODO if plainText is none and formatText is not none, then\n # try to create plainText from formatText by stripping the HTML --\n # see how this is done in EnsoTextObject.\n\n newTextDict = {}\n if plainText != None:\n newTextDict[ \"text\" ] = plainText\n if formatText != None:\n newTextDict[ \"html\" ] = formatText \n\n return newTextDict",
"def GetClipboardData(self):\n win32clipboard.OpenClipboard()\n clipboard_data = win32clipboard.GetClipboardData()\n win32clipboard.CloseClipboard()\n return(clipboard_data)",
"def update_clipboard():\n text = request.data.decode('utf-8')\n\n temp_file = \"/home/rodri/.clip.txt\"\n\n with open(temp_file, \"w\") as text_file:\n text_file.write(text.encode(\"ascii\", errors='ignore'))\n\n cmd = \"clip.exe < {}\".format(temp_file)\n run_cmd(cmd)\n\n\n return text",
"def user32_GetClipboardData(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"uFormat\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def clipboard_copy(text: str) -> None:\n if pyperclip:\n pyperclip.copy(text)\n elif shutil.which(\"xclip\"):\n subprocess.run(\n [\"xclip\", \"-in\", \"-selection\", \"clipboard\"],\n input=text,\n text=True,\n check=True,\n )\n else:\n raise RuntimeError(\"No way to copy\")",
"def get_selected_clipboard(self, textview):\n textbuffer = textview.get_buffer()\n start,end=textbuffer.get_selection_bounds()\n text = textbuffer.get_text(start, end)\n return (text,(start, end))",
"def _copy_to_clipboard(value):\n bus = dbus.SessionBus()\n clipboard = bus.get_object('org.kde.klipper', '/klipper')\n clipboard.setClipboardContents(value)",
"def copyToClipboard(data, type=CF_TEXT):\n#-------------------------------------------------------------------------------\n OpenClipboard()\n EmptyClipboard()\n SetClipboardData(type, data)\n CloseClipboard()",
"def __init__(self, _clipboard=None, *args, **kwargs):\n if win32clipboard is None:\n print(\n \"Getting text from the _clipboard requires the pywin32 \"\n \"extensions: http://sourceforge.net/projects/pywin32/\"\n )\n win32clipboard.OpenClipboard()\n self.clipboard = _clipboard\n super(WindowsClipboard, self).__init__(*args, **kwargs)",
"def get_translation(self, sleep_before_click_to_clipboard=2):\n button = self.get_translation_copy_button()\n self.scroll_to_element(button, sleep_before_click_to_clipboard)\n button = self.get_translation_copy_button()\n button.click() # self.sleep(1)\n content = clipboard.paste()\n return content",
"def copy_and_paste_text(self):\n text = self.get_text()\n pyperclip.copy(text)\n keyboard.press(\"ctrl+v\")\n keyboard.release(\"ctrl+v\")\n self.typeout_ending_keystroke()",
"def clipboard_translator(self):\n self.text = pyperclip.paste()\n PyElant.translate_text(self)",
"def talon_add_context_clipboard():\n friendly_name = actions.app.name()\n executable = actions.app.executable().split(os.path.sep)[-1]\n if app.platform != \"windows\":\n result = \"os: {}\\napp: {}\\ntitle: {}\".format(app.platform, friendly_name, actions.win.title())\n\n #on windows, it's best to include both the friendly name and executable name in case the muicache breaks....\n else:\n result = \"os: {}\\napp: {}\\napp: {}\\ntitle: {}\".format(app.platform, friendly_name, executable, actions.win.title())\n\n clip.set(result)",
"def win_get_text(title, text=u'', bufsize=64 * 1024):\r\n buf = create_unicode_buffer(bufsize)\r\n _audll.AU3_WinGetText(title, text, buf, bufsize)\r\n return buf.value",
"def get_text(self):\n data = self.txtbox.get(1.0, END)\n test = self.txtbox.selection_get()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve the capabilities of the specified session
|
def get_session_capabilities(self):
return self.mob_conn.session
|
[
"def get_capabilities(connection):\n for capability in connection.server_capabilities:\n six.print_(capability)",
"def capabilities(self):\n return []",
"def getCapabilities(self):\n \n # initialise the request\n resp = CapabilityConfigList()\n \n # make the call\n apiproxy_stub_map.MakeSyncCall('capability_service', 'getCapabilities', resp, resp)\n \n # iterate over the configs found\n for config in resp.config_list():\n logging.debug(\"has capability: %s\", config)",
"def get_capabilities(capability=None):\n capabilities = {\n # We rely on consumption because DSMR readings might be flushed in the future.\n 'electricity': ElectricityConsumption.objects.exists(),\n 'electricity_returned': ElectricityConsumption.objects.filter(\n # We can not rely on meter positions, as the manufacturer sometimes initializes meters\n # with testing data. So we just have to wait for the first power returned.\n currently_returned__gt=0\n ).exists(),\n 'gas': GasConsumption.objects.exists(),\n 'weather': WeatherSettings.get_solo().track and TemperatureReading.objects.exists()\n }\n capabilities['any'] = any(capabilities.values())\n\n # Single selection.\n if capability is not None:\n return capabilities[capability]\n\n return capabilities",
"def get_capability(connection, model_name):\n for server_capability in connection.server_capabilities:\n model = re.search(model_name, server_capability)\n if model is not None:\n six.print_(server_capability)",
"def get_desired_capabilities(self):\n return self._selenium_web_driver().desired_capabilities",
"def getCapabilities4User(self, user=None, sessionKey=None):\n \n roles = []\n capabilities = []\n \n # Get user info\n if user is not None:\n logger.debug(\"Retrieving role(s) for current user: %s\", user)\n userEntities = entity.getEntities('authentication/users/%s' % user, count=-1, sessionKey=sessionKey)\n\n for stanza, settings in userEntities.items():\n if stanza == user:\n for key, val in settings.items():\n if key == 'roles':\n logger.debug(\"Successfully retrieved role(s) for user: %s\", user)\n roles = val\n \n # Get capabilities\n for role in roles:\n logger.debug(\"Retrieving capabilities for current user: %s\", user)\n roleEntities = entity.getEntities('authorization/roles/%s' % role, count=-1, sessionKey=sessionKey)\n \n for stanza, settings in roleEntities.items():\n if stanza == role:\n for key, val in settings.items():\n if key == 'capabilities' or key == \"imported_capabilities\":\n logger.debug('Successfully retrieved %s for user: %s' % (key, user))\n capabilities.extend(val)\n\n return capabilities",
"def get_capabilities(options=None):\n if not options:\n options = {}\n return {\n \"app\": environ.get(\"APPIUM_APPFILE\") or options.get(\"app\"),\n \"automationName\": environ.get(\"APPIUM_AUTOMATION\") or options.get(\"automationName\"),\n \"deviceName\": environ.get(\"APPIUM_DEVICE\") or options.get(\"deviceName\", \"Local Device\"),\n \"platformName\": environ.get(\"APPIUM_PLATFORM\") or options.get(\"platformName\"),\n \"platformVersion\": environ.get(\"APPIUM_PLATFORM_VERSION\") or options.get(\"platformVersion\"),\n \"bundleId\": environ.get(\"APPIUM_BUNDLE_ID\") or options.get(\"bundleId\"),\n \"newCommandTimeout\": environ.get(\"NEW_COMMAND_TIMEOUT\") or options.get(\"newCommandTimeout\", 60),\n \"defaultCommandTimeout\": environ.get(\"DEFAULT_COMMAND_TIMEOUT\") or options.get(\"defaultCommandCommandTimeout\", 500),\n \"testdroid_testTimeout\": environ.get(\"TESTDROID_TEST_TIMEOUT\") or options.get(\"testdroid_testTimeout\", 600),\n \"screenshotWaitTimeout\": environ.get(\"SCREENSHOT_WAIT_TIMEOUT\") or options.get(\"screenshotWaitTimeout\", 3)\n }",
"def capability(self):\n return self.data['capability']",
"def list(self, detailed=False):\n return self._list(\"/os-host-capability\", \"hostcapabilities\")",
"def get_device_capabilities(self):\n\n capability_query = self.state_db.keys(self.state_db.STATE_DB, '{}|*'.format(DEBUG_COUNTER_CAPABILITY_TABLE))\n\n if not capability_query:\n return None\n\n counter_caps = {}\n for counter_type in capability_query:\n # Because keys returns the whole key, we trim off the DEBUG_COUNTER_CAPABILITY prefix here\n counter_caps[counter_type[len(DEBUG_COUNTER_CAPABILITY_TABLE) + 1:]] = self.state_db.get_all(self.state_db.STATE_DB, counter_type)\n return counter_caps",
"def capabilities(self):\n ret = self._get_attr(\"capabilities\")\n return [MediumFormatCapabilities(a) for a in ret]",
"def get_user_capabilities(user, instance, **kwargs):\n access_class = access_registry[instance.__class__]\n return access_class(user).get_user_capabilities(instance, **kwargs)",
"def get_serving_capabilities(self):\n caps = self.get_datascience_capabilities()\n return caps[\"serving\"][\"frameworks\"]",
"def account_capabilities(self):\n return self._account_capabilities",
"def default_capabilities(self):",
"def capabilities(cls, request):\n caps = {}\n endpointCheck = request.endpoint.type_uris\n caps['signon_icon'] = \"http://specs.openid.net/extensions/ui/1.0/icon\" in endpointCheck\n caps['ax'] = ax.AXMessage.ns_uri in endpointCheck\n caps['auth2'] = \"http://specs.openid.net/auth/2.0/server\" in endpointCheck\n caps['popup'] = \"http://specs.openid.net/extensions/ui/1.0/mode/popup\" in endpointCheck\n caps['pape'] = \"http://specs.openid.net/extensions/pape/1.0\" in endpointCheck\n caps['sreg'] = sreg.ns_uri in endpointCheck\n return caps",
"def get_capabilities(self):\n params = {\n \"request\": OGCOperationEnum.GET_CAPABILITIES.value,\n \"version\": self.service_version.value if self.service_version is not None else \"\",\n \"service\": (self.service_type.value if self.service_type is not None else \"\").upper(),\n }\n concat = \"&\" if self.service_connect_url[-1] != \"&\" else \"\"\n self.service_connect_url = \"{}{}{}\".format(self.service_connect_url, concat, urlencode(params))\n ows_connector = CommonConnector(\n url=self.service_connect_url,\n external_auth=self.external_authentification,\n connection_type=ConnectionEnum.REQUESTS\n )\n ows_connector.http_method = 'GET'\n try:\n ows_connector.load()\n if ows_connector.status_code != 200:\n raise ConnectionError(ows_connector.status_code)\n except ReadTimeout:\n raise ConnectionError(CONNECTION_TIMEOUT.format(self.service_connect_url))\n\n tmp = ows_connector.content.decode(\"UTF-8\")\n # check if tmp really contains an xml file\n xml = xml_helper.parse_xml(tmp)\n\n if xml is None:\n raise Exception(tmp)\n\n self.service_capabilities_xml = tmp\n self.connect_duration = ows_connector.run_time\n self.descriptive_document_encoding = ows_connector.encoding",
"def capability(self):\n return self._capability"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Acquire a resource on the semaphore, or else quit after cancellation.
|
def acquire(self):
# print('{}: Getting ready...'.format(self.name))
with self.ready:
while not self.cancelled:
# print('{}: Trying to acquire...'.format(self.name))
if self.semaphore.acquire(blocking=False):
# print('{}: Acquired!'.format(self.name))
return True
self.ready.wait()
# print('{}: Cancelled!'.format(self.name))
return False # returns False after cancellation
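
A minimal sketch of the enclosing class this method appears to assume; the class name is hypothetical and the original context is not shown. ready is a threading.Condition guarding the cancelled flag: cancel() wakes every blocked acquirer so it can return False, and release() notifies one waiter that a semaphore slot has opened.

import threading

class CancellableAcquirer:
    # Hypothetical container for the acquire() method above.
    def __init__(self, semaphore, name="worker"):
        self.semaphore = semaphore          # threading.Semaphore shared by workers
        self.name = name
        self.cancelled = False
        self.ready = threading.Condition()  # guards `cancelled`, wakes blocked acquirers

    def cancel(self):
        with self.ready:
            self.cancelled = True
            self.ready.notify_all()         # every blocked acquire() sees the cancellation

    def release(self):
        self.semaphore.release()
        with self.ready:
            self.ready.notify()             # let one waiter retry the non-blocking acquire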
|
[
"def _cancelAcquire(self: _DeferredLockT, d: Deferred[_DeferredLockT]) -> None:\n self.waiting.remove(d)",
"def resource_acquiring_iteration(acquired, released, barrier):\n acquired.set()\n try:\n yield 1\n barrier.wait(timeout=TIMEOUT)\n yield 2\n finally:\n released.set()",
"def wait_acquire():\n\n token = None\n\n while token is None:\n # make sure we're observing load and memory maximums\n if not JobServer._check_conditions():\n time.sleep(0.01)\n continue\n\n # try to get a job token\n token = JobServer._acquire()\n\n return token",
"def acquire(self):\n self._change_state(\"acquire\")",
"def release(self):\n with self._cache.transact(retry=True):\n value = self._cache.get(self._key, default=self._value)\n assert self._value > value, 'cannot release un-acquired semaphore'\n value += 1\n self._cache.set(\n self._key,\n value,\n expire=self._expire,\n tag=self._tag,\n )",
"def consumer(cond):\n\tlogging.debug(\"Starting consumer thread\")\n\twith cond:\n\t\tcond.wait()\n\t\tlogging.debug(\"Resource is available to consumer\")",
"def access_limited_resource(process_id: int) -> None:\n\n with Semaphore(value=2):\n pprint(f\"Process {process_id} Entering critical section\", process_id)\n use_protected_resource()\n pprint(f\"Process {process_id} Leaving critical section\", process_id)",
"def try_acquire():\n # make sure we're observing load and memory maximums\n if JobServer._check_conditions() and running_jobs() < max_jobs():\n # try to get a job token\n token = JobServer._acquire()\n return token\n\n return None",
"def acquire(self, obj_id=None, i=None):\r\n if not isinstance(obj_id,Process):\r\n raise Exception(\"semaphore requires items added to be of type 'Process'\")\r\n self.sem_dict[int(i)].add(obj_id)\r\n self.val[i]-=1",
"def test_error_on_leasing_unknown_semaphore():\n with throttle(b\"[semaphores]\") as url:\n with pytest.raises(Exception, match=r\"Unknown semaphore\"):\n with lock(url, \"Unknown\"):\n pass",
"def acquire(self, timeout=None):\n if timeout is None:\n # Wait forever (INFINITE)\n timeout = 0xFFFFFFFF\n else:\n timeout = int(round(timeout * 1000))\n ret = _WaitForSingleObject(self.handle, timeout)\n if ret in (0, 0x80):\n # Note that this doesn't distinguish between normally acquired (0) and\n # acquired due to another owning process terminating without releasing (0x80)\n self.acquired = True\n return True\n elif ret == 0x102:\n # Timeout\n self.acquired = False\n return False\n else:\n # Waiting failed\n raise ctypes.WinError()",
"def _ensure_initialized_acquire_ready_semaphore(\n wrapped: Callable, instance: GoPro, args: Any, kwargs: Any\n) -> Callable:\n if instance._maintain_ble:\n logger.debug(f\"{wrapped.__name__} acquiring semaphore\")\n with instance._ready:\n logger.debug(f\"{wrapped.__name__} has the semaphore\")\n ret = wrapped(*args, **kwargs)\n else:\n ret = wrapped(*args, **kwargs)\n if instance._maintain_ble:\n logger.debug(f\"{wrapped.__name__} released the semaphore\")\n return ret",
"def release(self,o_in):\n self.plock.acquire()\n for o,lock in self.pool:\n if o_in==o:\n lock.release()\n break\n else:\n print \"!!! lock to release not found o=%s pool=%s\"%(o_in,pool)\n self.plock.release()",
"def test_try_lock():\n with throttle(b\"[semaphores]\\nA=1\") as url:\n # We hold the lease, all following calls are going to block\n first = Peer.from_server_url(url)\n first.acquire(\"A\")\n with pytest.raises(Timeout):\n with lock(BASE_URL, \"A\", timeout=timedelta(seconds=1)):\n pass",
"def test_context_manager_failure_to_acquire(self):\n lock2 = self.locker.lock('test_it', blocking=False)\n assert lock2.acquire() is True\n\n with pytest.raises(pals.AcquireFailure):\n with self.locker.lock('test_it'):\n pass # we should never hit this line",
"def acquire(self: _DeferredLockT) -> Deferred[_DeferredLockT]:\n d: Deferred[_DeferredLockT] = Deferred(canceller=self._cancelAcquire)\n if self.locked:\n self.waiting.append(d)\n else:\n self.locked = True\n d.callback(self)\n return d",
"def acquire_concurrency_lock(self):\n\n if self.concurrency_type is None:\n return None\n\n result = None\n start_time = local_time()\n if self.concurrency_type == CONCURRENCY_TYPE_NETWORK_SEMAPHORE:\n logging.debug(f\"acquiring network concurrency semaphore {self.concurrency_semaphore} \"\n f\"for hunt type {self.hunt_type}\")\n result = NetworkSemaphoreClient(cancel_request_callback=self.manager_control_event.is_set)\n # make sure we cancel outstanding request \n # when shutting down\n result.acquire(self.concurrency_semaphore)\n else:\n logging.debug(f\"acquiring local concurrency semaphore for hunt type {self.hunt_type}\")\n while not self.manager_control_event.is_set():\n if self.concurrency_semaphore.acquire(blocking=True, timeout=0.1):\n result = self.concurrency_semaphore\n break\n\n if result is not None:\n total_seconds = (local_time() - start_time).total_seconds()\n logging.debug(f\"acquired concurrency semaphore for hunt type {self.hunt_type} in {total_seconds} seconds\")\n\n return result",
"def release(self):\n #print \"RELEASING LOCK\"\n self.locked = False\n if self.timer:\n self.timer.cancel()",
"def acquire(self):\n if self.value is not None:\n self.value -= 1\n if self.value < 0:\n raise ValueError(\"Too many acquires\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute Unweighted UniFrac using fp64 math and write to file
|
def unweighted_fp64_to_file(table: str,
phylogeny: str,
out_filename: str,
pcoa_dims: int = 10,
threads: int = 1,
variance_adjusted: bool = False,
bypass_tips: bool = False,
format: str = "hdf5",
buf_dirname: str = "",
n_substeps: int = 1,
n_subsamples: int = 1,
subsample_depth: int = 0,
subsample_with_replacement: bool = True,
permanova_perms: int = 0,
grouping_filename: str = "",
grouping_columns: str = "") -> str:
return _call_ssu_to_file(table, phylogeny, out_filename,
'unweighted_fp64',
variance_adjusted, 1.0, bypass_tips,
n_substeps, format,
n_subsamples,
subsample_depth, subsample_with_replacement,
pcoa_dims,
permanova_perms,
grouping_filename, grouping_columns,
buf_dirname)
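
A minimal usage sketch, assuming this is the ssu-backed wrapper from the unifrac package; the input and output paths are placeholders and must point to an existing BIOM table and Newick tree.

# Hypothetical call: writes the fp64 unweighted UniFrac distance matrix
# (plus a 10-dimensional PCoA) to an HDF5 file. Paths are illustrative only.
out_path = unweighted_fp64_to_file(
    table="table.biom",
    phylogeny="tree.nwk",
    out_filename="unweighted_fp64.h5",
    pcoa_dims=10,
)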
|
[
"def weighted_unnormalized_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_unnormalized_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def weighted_normalized_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_normalized_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def weighted_unnormalized_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_unnormalized_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def unweighted_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'unweighted_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def solve_wave_FD16(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(8,ny-8):\n for j in range(8,nx-8):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-8]* \\\n ((-735*U[i-8,j]+15360*U[i-7,j]-156800*U[i-6,j]+1053696*U[i-5,j]-5350800*U[i-4,j]+22830080*U[i-3,j]-94174080*U[i-2,j]+538137600*U[i-1,j]-924708642*U[i+0,j]+538137600*U[i+1,j]-94174080*U[i+2,j]+22830080*U[i+3,j]-5350800*U[i+4,j]+1053696*U[i+5,j]-156800*U[i+6,j]+15360*U[i+7,j]-735*U[i+8,j])+ \\\n (-735*U[i,j-8]+15360*U[i,j-7]-156800*U[i,j-6]+1053696*U[i,j-5]-5350800*U[i,j-4]+22830080*U[i,j-3]-94174080*U[i,j-2]+538137600*U[i,j-1]-924708642*U[i,j+0]+538137600*U[i,j+1]-94174080*U[i,j+2]+22830080*U[i,j+3]-5350800*U[i,j+4]+1053696*U[i,j+5]-156800*U[i,j+6]+15360*U[i,j+7]-735*U[i,j+8]))/ \\\n (302702400*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(8+ncpml,ny-ncpml-8):\n for j in range(8,ncpml+1):\n phi[i,j]=b[j-8]*phi[i,j]+(b[j-8]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-8]*phi[i,-j-1]+(b[j-8]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(8,ncpml):\n psi[i,j]=b[j-8]*psi[i,j]+(b[j-8]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-8]*psi[i,-j-1]+(b[j-8]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-8,j-8]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-8,-j-9]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(8,ncpml+1):\n for j in range(8,nx-8):\n phi[i,j]=b[i-8]*phi[i,j]+(b[i-8]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-8]*phi[-i-1,j]+(b[i-8]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(8,ncpml):\n for j in range(8,nx-8):\n psi[i,j]=b[i-8]*psi[i,j]+(b[i-8]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-8]*psi[-i-1,j]+(b[i-8]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n \n Up[i,j] += c[i-8,j-8]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-9,j-8]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def extract_unw_phase(infilename, outfilename):\n _, _, unw = isce_read_write.read_scalar_data(infilename, band=2); # reading second band\n ny, nx = np.shape(unw);\n isce_read_write.write_isce_data(unw, nx, ny, \"FLOAT\", outfilename);\n return;",
"def weighted_normalized_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_normalized_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def test_write_resolution_float():\n data = random_data('uint8', (2, 219, 301))\n resolution = (92.0, 92.0)\n with TempFileName('resolution_float') as fname:\n imwrite(fname, data, resolution=resolution)\n assert_valid(fname)\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 2\n assert tif.pages[0].tags['XResolution'].value == (92, 1)\n assert tif.pages[0].tags['YResolution'].value == (92, 1)\n assert tif.pages[1].tags['XResolution'].value == (92, 1)\n assert tif.pages[1].tags['YResolution'].value == (92, 1)\n assert__str__(tif)",
"def ITU_R_468_weighting_analog():\n\n z = [0]\n p = [-25903.70104781628,\n +36379.90893732929j-23615.53521363528,\n -36379.90893732929j-23615.53521363528,\n +62460.15645250649j-18743.74669072136,\n -62460.15645250649j-18743.74669072136,\n -62675.1700584679]\n\n # Normalize to +12.2 dB at 6.3 kHz, numerically\n # TODO: Derive exact value with sympy\n b, a = zpk2tf(z, p, 1)\n w, h = freqs(b, a, 2*pi*6300)\n k = 10**(+12.2/20) / abs(h[0])\n\n return z, p, k",
"def save_mulaw(fn, y, sr=22050, quantization_channel=256):\n mu = quantization_channel - 1\n safe_audio_abs = np.minimum(np.abs(y), 1.)\n magnitude = np.log1p(mu * safe_audio_abs) / np.log1p(mu)\n signal = np.sign(y) * magnitude\n y_ = ((signal + 1) / 2 * mu + 0.5).astype(np.uint8)\n np.save(fn, y_)",
"def solve_wave_FD12(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(6,ny-6):\n for j in range(6,nx-6):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-6]* \\\n ((-50*U[i-6,j]+864*U[i-5,j]-7425*U[i-4,j]+44000*U[i-3,j]-222750*U[i-2,j]+1425600*U[i-1,j]-2480478*U[i+0,j]+1425600*U[i+1,j]-222750*U[i+2,j]+44000*U[i+3,j]-7425*U[i+4,j]+864*U[i+5,j]-50*U[i+6,j])+ \\\n (-50*U[i,j-6]+864*U[i,j-5]-7425*U[i,j-4]+44000*U[i,j-3]-222750*U[i,j-2]+1425600*U[i,j-1]-2480478*U[i,j+0]+1425600*U[i,j+1]-222750*U[i,j+2]+44000*U[i,j+3]-7425*U[i,j+4]+864*U[i,j+5]-50*U[i,j+6]))/ \\\n (831600*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(6+ncpml,ny-ncpml-6):\n for j in range(6,ncpml+1):\n phi[i,j]=b[j-1]*phi[i,j]+(b[j-1]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-1]*phi[i,-j-1]+(b[j-1]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(6,ncpml):\n psi[i,j]=b[j-1]*psi[i,j]+(b[j-1]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-1]*psi[i,-j-1]+(b[j-1]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-6,j-6]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-6,-j-7]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(6,ncpml+1):\n for j in range(6,nx-6):\n phi[i,j]=b[i-1]*phi[i,j]+(b[i-1]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-1]*phi[-i-1,j]+(b[i-1]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(6,ncpml):\n for j in range(6,nx-6):\n psi[i,j]=b[i-1]*psi[i,j]+(b[i-1]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-1]*psi[-i-1,j]+(b[i-1]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n Up[i,j] += c[i-6,j-6]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-7,j-6]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def solve_wave_FD8(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(4,ny-4):\n for j in range(4,nx-4):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-4,j-4]* \\\n ((-9*U[i,j-4]+128*U[i,j-3]-1008*U[i,j-2]+8064*U[i,j-1]-14350*U[i,j+0]+8064*U[i,j+1]-1008*U[i,j+2]+128*U[i,j+3]-9*U[i,j+4])+ \\\n (-9*U[i-4,j]+128*U[i-3,j]-1008*U[i-2,j]+8064*U[i-1,j]-14350*U[i+0,j]+8064*U[i+1,j]-1008*U[i+2,j]+128*U[i+3,j]-9*U[i+4,j]))/ \\\n (5040*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(4+ncpml,ny-ncpml-4):\n for j in range(4,ncpml+1):\n phi[i,j]=b[j-1]*phi[i,j]+(b[j-1]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-1]*phi[i,-j-1]+(b[j-1]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(4,ncpml):\n psi[i,j]=b[j-1]*psi[i,j]+(b[j-1]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-1]*psi[i,-j-1]+(b[j-1]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-4,j-4]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-4,-j-5]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(4,ncpml+1):\n for j in range(4,nx-4):\n phi[i,j]=b[i-1]*phi[i,j]+(b[i-1]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-1]*phi[-i-1,j]+(b[i-1]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(4,ncpml):\n for j in range(4,nx-4):\n psi[i,j]=b[i-1]*psi[i,j]+(b[i-1]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-1]*psi[-i-1,j]+(b[i-1]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n Up[i,j] += c[i-4,j-4]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-5,j-4]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def readTempF(self):\r\n temp = self.readTempC() \r\n return temp if not temp else temp * 9.0/5.0 + 32",
"def _get_wn(cutoff, fs):\n return cutoff / (0.5 * fs)",
"def foursigfloat(num: int, units: List[str]):\n # Presumably three ifs are faster than one logarithm\n if (num >= (1 << 20)):\n if (num >= (1 << 30)):\n if (num >= (1 << 40)):\n if (num > (1 << 50)):\n # PB: TB with no decimal, more than three whole numbers.\n return ('{:.0f}'.format(num / (1 << 40)) + \" \" + units[4])\n else:\n # TB with at least one decimal.\n numstring = numstring[0:max(numstring.find('.'), + 2)]\n return (('{:.0f}'.format(num / (1 << 40)))[0:5] + \" \" + units[4])\n else: # < 1TB\n return (('{:1.3f}'.format(num / (1 << 30)))[0:5] + \" \" + units[3])\n else: # < 1GB\n return (('{:1.3f}'.format(num / (1 << 20)))[0:5] + \" \" + units[2])\n else: # < 1MB\n if (num >= (1 << 10)):\n return (('{:1.3f}'.format(num / (1 << 10)))[0:5] + \" \" + units[1])\n else:\n return ((str(num))[0:5] + \" \" + units[0])",
"def rewrite_trk_file_with_ED_vs_FL_scalars(trk_file_orig,trk_file_new, scalar_type):\t\n\timport nibabel as nib\n\timport numpy as np\n\tfrom nipype.interfaces.cmtk.cmtk import length as fib_length\n\tfibres_orig, hdr_orig = nib.trackvis.read(trk_file_orig, False)\n\thdr_new = hdr_orig.copy()\n\toutstreams = []\n\tfor f in fibres_orig:\n\t\t# Calculate fiber lengths\t\n\t\tFL = fib_length(f[0]) \n\t\t# Calculate Euclidean distance between fibre start and endpoints\n\t\tED = np.sqrt(np.square(f[0][0][0]-f[0][-1][0])+np.square(f[0][0][1]-f[0][-1][1])+np.square(f[0][0][2]-f[0][-1][2]))\n\t\t# Fiber length minus Euclidean distance:\n\t\tFLsubED = np.subtract(FL, ED)\n\t\tED_as_percent_of_FL = np.divide(100,FL)*ED\n\t\tif scalar_type == 'FL':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*FL\n\t\t\tproperty_array = np.array([FL], dtype='float32')\n\t\tif scalar_type == 'ED':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*ED\n\t\t\tproperty_array = np.array([ED], dtype='float32')\n\t\tif scalar_type == 'FLsubED':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*FLsubED\n\t\t\tproperty_array = np.array([FLsubED], dtype='float32')\n\t\tif scalar_type == 'ED_as_percent_of_FL':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*ED_as_percent_of_FL\n\t\t\tproperty_array = np.array([ED_as_percent_of_FL], dtype='float32')\n\t\tnew_tuple=tuple([f[0], scalar_array,property_array])\t\t\t\t\n\t\toutstreams.append(new_tuple)\n\tn_fib_out = len(outstreams)\n\thdr_new['n_count'] = n_fib_out\t\n\thdr_new['n_scalars'] = np.array(1, dtype='int16')\t\t\t\t#hdr_new['scalar_name'] = np.array(['JG_COLOURS', '', '', '', '', '', '', '', '', ''],dtype='|S20')\t\t\n\thdr_new['scalar_name'] = np.array([scalar_type, '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\thdr_new['n_properties'] = np.array(1, dtype='int16')\n#\thdr_new['property_name'] = np.array(['JG_PROPERTY', '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\thdr_new['property_name'] = np.array([scalar_type, '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\tnib.trackvis.write(trk_file_new, outstreams, hdr_new)",
"def write_waq_spatial(self,fp,quantity,data_fn,xyn):\n np.savetxt(os.path.join(self.model.run_dir,data_fn),\n xyn,fmt=\"%.6g\")\n fp.write(\"\\n\".join([\"QUANTITY=%s\"%quantity,\n \"FILENAME=%s\"%data_fn,\n \"FILETYPE=7\",\n \"METHOD=4\",\n \"OPERAND=O\\n\"]))",
"def calculate_precision(num_tp, num_fp, num_fn):\n if num_tp == num_fp:\n return 1\n return num_tp/float(num_tp+num_fp)",
"def output(self, file: 'FILE *') -> \"void\":\n return _coin.SoFloatElement_output(self, file)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute Unweighted UniFrac using fp32 math and write to file
|
def unweighted_fp32_to_file(table: str,
phylogeny: str,
out_filename: str,
pcoa_dims: int = 10,
threads: int = 1,
variance_adjusted: bool = False,
bypass_tips: bool = False,
format: str = "hdf5",
buf_dirname: str = "",
n_substeps: int = 1,
n_subsamples: int = 1,
subsample_depth: int = 0,
subsample_with_replacement: bool = True,
permanova_perms: int = 0,
grouping_filename: str = "",
grouping_columns: str = "") -> str:
return _call_ssu_to_file(table, phylogeny, out_filename,
'unweighted_fp32',
variance_adjusted, 1.0, bypass_tips,
n_substeps, format,
n_subsamples,
subsample_depth, subsample_with_replacement,
pcoa_dims,
permanova_perms,
grouping_filename, grouping_columns,
buf_dirname)
|
[
"def weighted_unnormalized_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_unnormalized_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def unweighted_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'unweighted_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def weighted_normalized_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_normalized_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def weighted_unnormalized_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_unnormalized_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def solve_wave_FD16(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(8,ny-8):\n for j in range(8,nx-8):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-8]* \\\n ((-735*U[i-8,j]+15360*U[i-7,j]-156800*U[i-6,j]+1053696*U[i-5,j]-5350800*U[i-4,j]+22830080*U[i-3,j]-94174080*U[i-2,j]+538137600*U[i-1,j]-924708642*U[i+0,j]+538137600*U[i+1,j]-94174080*U[i+2,j]+22830080*U[i+3,j]-5350800*U[i+4,j]+1053696*U[i+5,j]-156800*U[i+6,j]+15360*U[i+7,j]-735*U[i+8,j])+ \\\n (-735*U[i,j-8]+15360*U[i,j-7]-156800*U[i,j-6]+1053696*U[i,j-5]-5350800*U[i,j-4]+22830080*U[i,j-3]-94174080*U[i,j-2]+538137600*U[i,j-1]-924708642*U[i,j+0]+538137600*U[i,j+1]-94174080*U[i,j+2]+22830080*U[i,j+3]-5350800*U[i,j+4]+1053696*U[i,j+5]-156800*U[i,j+6]+15360*U[i,j+7]-735*U[i,j+8]))/ \\\n (302702400*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(8+ncpml,ny-ncpml-8):\n for j in range(8,ncpml+1):\n phi[i,j]=b[j-8]*phi[i,j]+(b[j-8]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-8]*phi[i,-j-1]+(b[j-8]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(8,ncpml):\n psi[i,j]=b[j-8]*psi[i,j]+(b[j-8]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-8]*psi[i,-j-1]+(b[j-8]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-8,j-8]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-8,-j-9]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(8,ncpml+1):\n for j in range(8,nx-8):\n phi[i,j]=b[i-8]*phi[i,j]+(b[i-8]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-8]*phi[-i-1,j]+(b[i-8]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(8,ncpml):\n for j in range(8,nx-8):\n psi[i,j]=b[i-8]*psi[i,j]+(b[i-8]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-8]*psi[-i-1,j]+(b[i-8]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n \n Up[i,j] += c[i-8,j-8]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-9,j-8]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def solve_wave_FD8(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(4,ny-4):\n for j in range(4,nx-4):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-4,j-4]* \\\n ((-9*U[i,j-4]+128*U[i,j-3]-1008*U[i,j-2]+8064*U[i,j-1]-14350*U[i,j+0]+8064*U[i,j+1]-1008*U[i,j+2]+128*U[i,j+3]-9*U[i,j+4])+ \\\n (-9*U[i-4,j]+128*U[i-3,j]-1008*U[i-2,j]+8064*U[i-1,j]-14350*U[i+0,j]+8064*U[i+1,j]-1008*U[i+2,j]+128*U[i+3,j]-9*U[i+4,j]))/ \\\n (5040*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(4+ncpml,ny-ncpml-4):\n for j in range(4,ncpml+1):\n phi[i,j]=b[j-1]*phi[i,j]+(b[j-1]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-1]*phi[i,-j-1]+(b[j-1]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(4,ncpml):\n psi[i,j]=b[j-1]*psi[i,j]+(b[j-1]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-1]*psi[i,-j-1]+(b[j-1]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-4,j-4]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-4,-j-5]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(4,ncpml+1):\n for j in range(4,nx-4):\n phi[i,j]=b[i-1]*phi[i,j]+(b[i-1]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-1]*phi[-i-1,j]+(b[i-1]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(4,ncpml):\n for j in range(4,nx-4):\n psi[i,j]=b[i-1]*psi[i,j]+(b[i-1]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-1]*psi[-i-1,j]+(b[i-1]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n Up[i,j] += c[i-4,j-4]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-5,j-4]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def readTempF(self):\r\n temp = self.readTempC() \r\n return temp if not temp else temp * 9.0/5.0 + 32",
"def solve_wave_FD12(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(6,ny-6):\n for j in range(6,nx-6):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-6]* \\\n ((-50*U[i-6,j]+864*U[i-5,j]-7425*U[i-4,j]+44000*U[i-3,j]-222750*U[i-2,j]+1425600*U[i-1,j]-2480478*U[i+0,j]+1425600*U[i+1,j]-222750*U[i+2,j]+44000*U[i+3,j]-7425*U[i+4,j]+864*U[i+5,j]-50*U[i+6,j])+ \\\n (-50*U[i,j-6]+864*U[i,j-5]-7425*U[i,j-4]+44000*U[i,j-3]-222750*U[i,j-2]+1425600*U[i,j-1]-2480478*U[i,j+0]+1425600*U[i,j+1]-222750*U[i,j+2]+44000*U[i,j+3]-7425*U[i,j+4]+864*U[i,j+5]-50*U[i,j+6]))/ \\\n (831600*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(6+ncpml,ny-ncpml-6):\n for j in range(6,ncpml+1):\n phi[i,j]=b[j-1]*phi[i,j]+(b[j-1]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-1]*phi[i,-j-1]+(b[j-1]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(6,ncpml):\n psi[i,j]=b[j-1]*psi[i,j]+(b[j-1]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-1]*psi[i,-j-1]+(b[j-1]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-6,j-6]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-6,-j-7]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(6,ncpml+1):\n for j in range(6,nx-6):\n phi[i,j]=b[i-1]*phi[i,j]+(b[i-1]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-1]*phi[-i-1,j]+(b[i-1]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(6,ncpml):\n for j in range(6,nx-6):\n psi[i,j]=b[i-1]*psi[i,j]+(b[i-1]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-1]*psi[-i-1,j]+(b[i-1]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n Up[i,j] += c[i-6,j-6]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-7,j-6]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def weighted_normalized_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_normalized_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def extract_unw_phase(infilename, outfilename):\n _, _, unw = isce_read_write.read_scalar_data(infilename, band=2); # reading second band\n ny, nx = np.shape(unw);\n isce_read_write.write_isce_data(unw, nx, ny, \"FLOAT\", outfilename);\n return;",
"def foursigfloat(num: int, units: List[str]):\n # Presumably three ifs are faster than one logarithm\n if (num >= (1 << 20)):\n if (num >= (1 << 30)):\n if (num >= (1 << 40)):\n if (num > (1 << 50)):\n # PB: TB with no decimal, more than three whole numbers.\n return ('{:.0f}'.format(num / (1 << 40)) + \" \" + units[4])\n else:\n # TB with at least one decimal.\n numstring = numstring[0:max(numstring.find('.'), + 2)]\n return (('{:.0f}'.format(num / (1 << 40)))[0:5] + \" \" + units[4])\n else: # < 1TB\n return (('{:1.3f}'.format(num / (1 << 30)))[0:5] + \" \" + units[3])\n else: # < 1GB\n return (('{:1.3f}'.format(num / (1 << 20)))[0:5] + \" \" + units[2])\n else: # < 1MB\n if (num >= (1 << 10)):\n return (('{:1.3f}'.format(num / (1 << 10)))[0:5] + \" \" + units[1])\n else:\n return ((str(num))[0:5] + \" \" + units[0])",
"def output(self, file: 'FILE *') -> \"void\":\n return _coin.SoFloatElement_output(self, file)",
"def write_3d_lut(self, process_function, file_path, preset):\n pass",
"def solve_wave_FD4(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(2,ny-2):\n for j in range(2,nx-2):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-2,j-2]* \\\n ((-1*U[i-2,j]+16*U[i-1,j]-30*U[i,j]+16*U[i+1,j]-1*U[i+2,j]) + \\\n (-1*U[i,j-2]+16*U[i,j-1]-30*U[i,j]+16*U[i,j+1]-1*U[i,j+2]))/ \\\n (12*1.0*h**2)\n #CPML boundary in X-domain\n for i in range(2+ncpml,ny-ncpml-2):\n for j in range(2,ncpml+1):\n phi[i,j]=b[j-1]*phi[i,j]+(b[j-1]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-1]*phi[i,-j-1]+(b[j-1]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(2,ncpml):\n psi[i,j]=b[j-1]*psi[i,j]+(b[j-1]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-1]*psi[i,-j-1]+(b[j-1]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-2,j-2]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-2,-j-3]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(2,ncpml+1):\n for j in range(2,nx-2):\n phi[i,j]=b[i-1]*phi[i,j]+(b[i-1]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-1]*phi[-i-1,j]+(b[i-1]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(2,ncpml):\n for j in range(2,nx-2):\n psi[i,j]=b[i-1]*psi[i,j]+(b[i-1]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-1]*psi[-i-1,j]+(b[i-1]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n Up[i,j] += c[i-2,j-2]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-3,j-2]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def rewrite_trk_file_with_ED_vs_FL_scalars(trk_file_orig,trk_file_new, scalar_type):\t\n\timport nibabel as nib\n\timport numpy as np\n\tfrom nipype.interfaces.cmtk.cmtk import length as fib_length\n\tfibres_orig, hdr_orig = nib.trackvis.read(trk_file_orig, False)\n\thdr_new = hdr_orig.copy()\n\toutstreams = []\n\tfor f in fibres_orig:\n\t\t# Calculate fiber lengths\t\n\t\tFL = fib_length(f[0]) \n\t\t# Calculate Euclidean distance between fibre start and endpoints\n\t\tED = np.sqrt(np.square(f[0][0][0]-f[0][-1][0])+np.square(f[0][0][1]-f[0][-1][1])+np.square(f[0][0][2]-f[0][-1][2]))\n\t\t# Fiber length minus Euclidean distance:\n\t\tFLsubED = np.subtract(FL, ED)\n\t\tED_as_percent_of_FL = np.divide(100,FL)*ED\n\t\tif scalar_type == 'FL':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*FL\n\t\t\tproperty_array = np.array([FL], dtype='float32')\n\t\tif scalar_type == 'ED':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*ED\n\t\t\tproperty_array = np.array([ED], dtype='float32')\n\t\tif scalar_type == 'FLsubED':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*FLsubED\n\t\t\tproperty_array = np.array([FLsubED], dtype='float32')\n\t\tif scalar_type == 'ED_as_percent_of_FL':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*ED_as_percent_of_FL\n\t\t\tproperty_array = np.array([ED_as_percent_of_FL], dtype='float32')\n\t\tnew_tuple=tuple([f[0], scalar_array,property_array])\t\t\t\t\n\t\toutstreams.append(new_tuple)\n\tn_fib_out = len(outstreams)\n\thdr_new['n_count'] = n_fib_out\t\n\thdr_new['n_scalars'] = np.array(1, dtype='int16')\t\t\t\t#hdr_new['scalar_name'] = np.array(['JG_COLOURS', '', '', '', '', '', '', '', '', ''],dtype='|S20')\t\t\n\thdr_new['scalar_name'] = np.array([scalar_type, '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\thdr_new['n_properties'] = np.array(1, dtype='int16')\n#\thdr_new['property_name'] = np.array(['JG_PROPERTY', '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\thdr_new['property_name'] = np.array([scalar_type, '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\tnib.trackvis.write(trk_file_new, outstreams, hdr_new)",
"def output(self, fp: 'FILE *') -> \"void\":\n return _coin.SbVec4f_output(self, fp)",
"def test_write_resolution_float():\n data = random_data('uint8', (2, 219, 301))\n resolution = (92.0, 92.0)\n with TempFileName('resolution_float') as fname:\n imwrite(fname, data, resolution=resolution)\n assert_valid(fname)\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 2\n assert tif.pages[0].tags['XResolution'].value == (92, 1)\n assert tif.pages[0].tags['YResolution'].value == (92, 1)\n assert tif.pages[1].tags['XResolution'].value == (92, 1)\n assert tif.pages[1].tags['YResolution'].value == (92, 1)\n assert__str__(tif)",
"def ITU_R_468_weighting_analog():\n\n z = [0]\n p = [-25903.70104781628,\n +36379.90893732929j-23615.53521363528,\n -36379.90893732929j-23615.53521363528,\n +62460.15645250649j-18743.74669072136,\n -62460.15645250649j-18743.74669072136,\n -62675.1700584679]\n\n # Normalize to +12.2 dB at 6.3 kHz, numerically\n # TODO: Derive exact value with sympy\n b, a = zpk2tf(z, p, 1)\n w, h = freqs(b, a, 2*pi*6300)\n k = 10**(+12.2/20) / abs(h[0])\n\n return z, p, k",
"def uncompress_float(packedNum):\n return packedNum * 0.5"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute weighted normalized UniFrac using fp64 math and write to file
|
def weighted_normalized_fp64_to_file(table: str,
phylogeny: str,
out_filename: str,
pcoa_dims: int = 10,
threads: int = 1,
variance_adjusted: bool = False,
bypass_tips: bool = False,
format: str = "hdf5",
buf_dirname: str = "",
n_substeps: int = 1,
n_subsamples: int = 1,
subsample_depth: int = 0,
subsample_with_replacement: bool = True,
permanova_perms: int = 0,
grouping_filename: str = "",
grouping_columns: str = "") -> str:
return _call_ssu_to_file(table, phylogeny, out_filename,
'weighted_normalized_fp64',
variance_adjusted, 1.0, bypass_tips,
n_substeps, format,
n_subsamples,
subsample_depth, subsample_with_replacement,
pcoa_dims,
permanova_perms,
grouping_filename, grouping_columns,
buf_dirname)
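# --- minimal usage sketch (illustration only; not part of the source above) ---
# Assumes weighted_normalized_fp64_to_file, as defined above, is in scope and
# that the two input files named below exist; the file names are placeholders.
result_path = weighted_normalized_fp64_to_file(
    table="feature-table.biom",   # placeholder BIOM feature table
    phylogeny="tree.nwk",         # placeholder Newick phylogeny
    out_filename="wn_fp64.h5",    # distance matrix written with the default "hdf5" format
    pcoa_dims=10)                 # PCoA dimensions embedded in the output
print(result_path)                # the function returns the output file path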
|
[
"def weighted_unnormalized_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_unnormalized_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def unweighted_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'unweighted_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def weighted_unnormalized_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_unnormalized_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def weighted_normalized_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_normalized_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def ITU_R_468_weighting_analog():\n\n z = [0]\n p = [-25903.70104781628,\n +36379.90893732929j-23615.53521363528,\n -36379.90893732929j-23615.53521363528,\n +62460.15645250649j-18743.74669072136,\n -62460.15645250649j-18743.74669072136,\n -62675.1700584679]\n\n # Normalize to +12.2 dB at 6.3 kHz, numerically\n # TODO: Derive exact value with sympy\n b, a = zpk2tf(z, p, 1)\n w, h = freqs(b, a, 2*pi*6300)\n k = 10**(+12.2/20) / abs(h[0])\n\n return z, p, k",
"def save_mulaw(fn, y, sr=22050, quantization_channel=256):\n mu = quantization_channel - 1\n safe_audio_abs = np.minimum(np.abs(y), 1.)\n magnitude = np.log1p(mu * safe_audio_abs) / np.log1p(mu)\n signal = np.sign(y) * magnitude\n y_ = ((signal + 1) / 2 * mu + 0.5).astype(np.uint8)\n np.save(fn, y_)",
"def _get_wn(cutoff, fs):\n return cutoff / (0.5 * fs)",
"def write_waq_spatial(self,fp,quantity,data_fn,xyn):\n np.savetxt(os.path.join(self.model.run_dir,data_fn),\n xyn,fmt=\"%.6g\")\n fp.write(\"\\n\".join([\"QUANTITY=%s\"%quantity,\n \"FILENAME=%s\"%data_fn,\n \"FILETYPE=7\",\n \"METHOD=4\",\n \"OPERAND=O\\n\"]))",
"def unweighted_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'unweighted_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def solve_wave_FD16(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(8,ny-8):\n for j in range(8,nx-8):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-8]* \\\n ((-735*U[i-8,j]+15360*U[i-7,j]-156800*U[i-6,j]+1053696*U[i-5,j]-5350800*U[i-4,j]+22830080*U[i-3,j]-94174080*U[i-2,j]+538137600*U[i-1,j]-924708642*U[i+0,j]+538137600*U[i+1,j]-94174080*U[i+2,j]+22830080*U[i+3,j]-5350800*U[i+4,j]+1053696*U[i+5,j]-156800*U[i+6,j]+15360*U[i+7,j]-735*U[i+8,j])+ \\\n (-735*U[i,j-8]+15360*U[i,j-7]-156800*U[i,j-6]+1053696*U[i,j-5]-5350800*U[i,j-4]+22830080*U[i,j-3]-94174080*U[i,j-2]+538137600*U[i,j-1]-924708642*U[i,j+0]+538137600*U[i,j+1]-94174080*U[i,j+2]+22830080*U[i,j+3]-5350800*U[i,j+4]+1053696*U[i,j+5]-156800*U[i,j+6]+15360*U[i,j+7]-735*U[i,j+8]))/ \\\n (302702400*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(8+ncpml,ny-ncpml-8):\n for j in range(8,ncpml+1):\n phi[i,j]=b[j-8]*phi[i,j]+(b[j-8]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-8]*phi[i,-j-1]+(b[j-8]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(8,ncpml):\n psi[i,j]=b[j-8]*psi[i,j]+(b[j-8]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-8]*psi[i,-j-1]+(b[j-8]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-8,j-8]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-8,-j-9]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(8,ncpml+1):\n for j in range(8,nx-8):\n phi[i,j]=b[i-8]*phi[i,j]+(b[i-8]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-8]*phi[-i-1,j]+(b[i-8]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(8,ncpml):\n for j in range(8,nx-8):\n psi[i,j]=b[i-8]*psi[i,j]+(b[i-8]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-8]*psi[-i-1,j]+(b[i-8]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n \n Up[i,j] += c[i-8,j-8]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-9,j-8]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def _mag2fluxdensity(mag,band,unit='Jy'):\n from astropy import units\n _mag = -mag/2.5\n f0 = _zeropoint(band)\n _w = wavelength(band,'angstrom')\n f = (f0 * 10**_mag) * (_w/_w.to('Hz',units.spectral()))\n return f.to(unit)",
"def wgfaWrite(info, outfile = None, minBranch = 1.e-5, rightDigits = 4, maxLvl1 = None):\n #\n# gname = info['ionS']\n if outfile:\n wgfaname = outfile\n else:\n print(' output filename not specified, no file will be created')\n return\n# wgfaname = gname + '.wgfa'\n print((' wgfa file name = ', wgfaname))\n if minBranch > 0.:\n info['ref'].append(' minimum branching ratio = %10.2e'%(minBranch))\n out = open(wgfaname, 'w')\n #ntrans = len(info['lvl1'])\n nlvl = max(info['lvl2'])\n totalAvalue = np.zeros(nlvl, 'float64')\n if 'pretty1' in info:\n pformat = '%5i%5i%15.' + str(rightDigits) + 'f%15.3e%15.3e%30s - %30s'\n else:\n pformat = '%5i%5i%15.' + str(rightDigits) + 'f%15.3e%15.3e'\n for itrans, avalue in enumerate(info['avalue']):\n # for autoionization transitions, lvl1 can be less than zero\n if abs(info['lvl1'][itrans]) > 0 and info['lvl2'][itrans] > 0:\n totalAvalue[info['lvl2'][itrans] -1] += avalue\n\n for itrans, avalue in enumerate(info['avalue']):\n if info['wvl'][itrans] == 0.:\n branch = 1.\n elif avalue > 0.:\n branch = avalue/totalAvalue[info['lvl2'][itrans] -1]\n else:\n branch = 0.\n test1 = branch > minBranch\n test2 = abs(info['lvl1'][itrans]) > 0\n test3 = info['lvl2'][itrans] > 0\n if maxLvl1:\n test4 = info['lvl1'][itrans] <= maxLvl1\n else:\n test4 = True\n if test1 and test2 and test3 and test4:\n if 'pretty1' in info:\n # generally only useful with NIST data\n if 'transType' in info:\n if info['transType'][itrans] != '':\n lbl2 = info['pretty2']+' ' + info['transType'][itrans]\n else:\n lbl2 = info['pretty2'][itrans]\n pstring = pformat%(info['lvl1'][itrans], info['lvl2'][itrans], info['wvl'][itrans], info['gf'][itrans], avalue, info['pretty1'][itrans].rjust(30), lbl2.ljust(30))\n out.write(pstring+'\\n')\n else:\n pstring = pformat%(info['lvl1'][itrans], info['lvl2'][itrans], info['wvl'][itrans], info['gf'][itrans], avalue)\n out.write(pstring+'\\n')\n out.write(' -1\\n')\n out.write('%filename: ' + wgfaname + '\\n')\n for one in info['ref']:\n out.write(one+'\\n')\n out.write(today.strftime('%Y %B %d') +'\\n')\n out.write(' -1 \\n')\n out.close()",
"def test_write_resolution_float():\n data = random_data('uint8', (2, 219, 301))\n resolution = (92.0, 92.0)\n with TempFileName('resolution_float') as fname:\n imwrite(fname, data, resolution=resolution)\n assert_valid(fname)\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 2\n assert tif.pages[0].tags['XResolution'].value == (92, 1)\n assert tif.pages[0].tags['YResolution'].value == (92, 1)\n assert tif.pages[1].tags['XResolution'].value == (92, 1)\n assert tif.pages[1].tags['YResolution'].value == (92, 1)\n assert__str__(tif)",
"def solve_wave_FD12(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(6,ny-6):\n for j in range(6,nx-6):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-6]* \\\n ((-50*U[i-6,j]+864*U[i-5,j]-7425*U[i-4,j]+44000*U[i-3,j]-222750*U[i-2,j]+1425600*U[i-1,j]-2480478*U[i+0,j]+1425600*U[i+1,j]-222750*U[i+2,j]+44000*U[i+3,j]-7425*U[i+4,j]+864*U[i+5,j]-50*U[i+6,j])+ \\\n (-50*U[i,j-6]+864*U[i,j-5]-7425*U[i,j-4]+44000*U[i,j-3]-222750*U[i,j-2]+1425600*U[i,j-1]-2480478*U[i,j+0]+1425600*U[i,j+1]-222750*U[i,j+2]+44000*U[i,j+3]-7425*U[i,j+4]+864*U[i,j+5]-50*U[i,j+6]))/ \\\n (831600*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(6+ncpml,ny-ncpml-6):\n for j in range(6,ncpml+1):\n phi[i,j]=b[j-1]*phi[i,j]+(b[j-1]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-1]*phi[i,-j-1]+(b[j-1]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(6,ncpml):\n psi[i,j]=b[j-1]*psi[i,j]+(b[j-1]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-1]*psi[i,-j-1]+(b[j-1]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-6,j-6]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-6,-j-7]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(6,ncpml+1):\n for j in range(6,nx-6):\n phi[i,j]=b[i-1]*phi[i,j]+(b[i-1]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-1]*phi[-i-1,j]+(b[i-1]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(6,ncpml):\n for j in range(6,nx-6):\n psi[i,j]=b[i-1]*psi[i,j]+(b[i-1]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-1]*psi[-i-1,j]+(b[i-1]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n Up[i,j] += c[i-6,j-6]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-7,j-6]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def weight_conversion(user_weight_pounds, conversion_constant):\n\n user_weight_kilograms = user_weight_pounds * conversion_constant\n # user_weight_newtons_earth = user_weight_kilograms * gravitational_force\n return format(user_weight_kilograms, '.2f')",
"def rule_power_factor(f11, f10, f01, f00):\n N = f11 + f10 + f01 + f00\n zero = 1e-10\n supp_ab = f11 / N\n supp_a = f10 / N\n return (supp_ab * supp_ab) / (supp_a + zero)",
"def db_to_amplitude(db):\n return db_to_power(db / 2.0)",
"def roundRelativeBinary(df, nBits, eps=0., downgradeType = True):\n type=df.dtype\n # If dtype is not floating point number or int, skip this step\n if type.kind not in ['f', 'c', 'i', 'u']:\n return df\n shiftN = 2 ** nBits\n mantissa, exp2 = np.frexp(df)\n mantissa = np.rint(mantissa * shiftN)/shiftN\n # If result can be represented by single precision float, use that, otherwise cast to double\n if downgradeType and type.kind == 'f' and nBits <= 23 and np.min(exp2) > -256 and np.max(exp2) < 255:\n return np.ldexp(mantissa.astype(np.float32), exp2).astype(np.float32)\n return np.ldexp(mantissa, exp2).astype(type)",
"def extract_unw_phase(infilename, outfilename):\n _, _, unw = isce_read_write.read_scalar_data(infilename, band=2); # reading second band\n ny, nx = np.shape(unw);\n isce_read_write.write_isce_data(unw, nx, ny, \"FLOAT\", outfilename);\n return;"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute weighted normalized UniFrac using fp32 math and write to file
|
def weighted_normalized_fp32_to_file(table: str,
phylogeny: str,
out_filename: str,
pcoa_dims: int = 10,
threads: int = 1,
variance_adjusted: bool = False,
bypass_tips: bool = False,
format: str = "hdf5",
buf_dirname: str = "",
n_substeps: int = 1,
n_subsamples: int = 1,
subsample_depth: int = 0,
subsample_with_replacement: bool = True,
permanova_perms: int = 0,
grouping_filename: str = "",
grouping_columns: str = "") -> str:
return _call_ssu_to_file(table, phylogeny, out_filename,
'weighted_normalized_fp32',
variance_adjusted, 1.0, bypass_tips,
n_substeps, format,
n_subsamples,
subsample_depth, subsample_with_replacement,
pcoa_dims,
permanova_perms,
grouping_filename, grouping_columns,
buf_dirname)
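# --- minimal usage sketch (illustration only; not part of the source above) ---
# Same calling convention as the fp64 variant; file names are placeholders.
# bypass_tips=True is passed purely to exercise a non-default flag from the
# signature above (by its name, it presumably trades some accuracy for speed).
result_path = weighted_normalized_fp32_to_file(
    table="feature-table.biom",
    phylogeny="tree.nwk",
    out_filename="wn_fp32.h5",
    bypass_tips=True)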
|
[
"def weighted_unnormalized_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_unnormalized_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def unweighted_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'unweighted_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def weighted_unnormalized_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_unnormalized_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def weighted_normalized_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_normalized_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def ITU_R_468_weighting_analog():\n\n z = [0]\n p = [-25903.70104781628,\n +36379.90893732929j-23615.53521363528,\n -36379.90893732929j-23615.53521363528,\n +62460.15645250649j-18743.74669072136,\n -62460.15645250649j-18743.74669072136,\n -62675.1700584679]\n\n # Normalize to +12.2 dB at 6.3 kHz, numerically\n # TODO: Derive exact value with sympy\n b, a = zpk2tf(z, p, 1)\n w, h = freqs(b, a, 2*pi*6300)\n k = 10**(+12.2/20) / abs(h[0])\n\n return z, p, k",
"def unweighted_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'unweighted_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def solve_wave_FD16(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(8,ny-8):\n for j in range(8,nx-8):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-8]* \\\n ((-735*U[i-8,j]+15360*U[i-7,j]-156800*U[i-6,j]+1053696*U[i-5,j]-5350800*U[i-4,j]+22830080*U[i-3,j]-94174080*U[i-2,j]+538137600*U[i-1,j]-924708642*U[i+0,j]+538137600*U[i+1,j]-94174080*U[i+2,j]+22830080*U[i+3,j]-5350800*U[i+4,j]+1053696*U[i+5,j]-156800*U[i+6,j]+15360*U[i+7,j]-735*U[i+8,j])+ \\\n (-735*U[i,j-8]+15360*U[i,j-7]-156800*U[i,j-6]+1053696*U[i,j-5]-5350800*U[i,j-4]+22830080*U[i,j-3]-94174080*U[i,j-2]+538137600*U[i,j-1]-924708642*U[i,j+0]+538137600*U[i,j+1]-94174080*U[i,j+2]+22830080*U[i,j+3]-5350800*U[i,j+4]+1053696*U[i,j+5]-156800*U[i,j+6]+15360*U[i,j+7]-735*U[i,j+8]))/ \\\n (302702400*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(8+ncpml,ny-ncpml-8):\n for j in range(8,ncpml+1):\n phi[i,j]=b[j-8]*phi[i,j]+(b[j-8]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-8]*phi[i,-j-1]+(b[j-8]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(8,ncpml):\n psi[i,j]=b[j-8]*psi[i,j]+(b[j-8]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-8]*psi[i,-j-1]+(b[j-8]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-8,j-8]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-8,-j-9]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(8,ncpml+1):\n for j in range(8,nx-8):\n phi[i,j]=b[i-8]*phi[i,j]+(b[i-8]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-8]*phi[-i-1,j]+(b[i-8]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(8,ncpml):\n for j in range(8,nx-8):\n psi[i,j]=b[i-8]*psi[i,j]+(b[i-8]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-8]*psi[-i-1,j]+(b[i-8]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n \n Up[i,j] += c[i-8,j-8]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-9,j-8]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def solve_wave_FD12(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(6,ny-6):\n for j in range(6,nx-6):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-6]* \\\n ((-50*U[i-6,j]+864*U[i-5,j]-7425*U[i-4,j]+44000*U[i-3,j]-222750*U[i-2,j]+1425600*U[i-1,j]-2480478*U[i+0,j]+1425600*U[i+1,j]-222750*U[i+2,j]+44000*U[i+3,j]-7425*U[i+4,j]+864*U[i+5,j]-50*U[i+6,j])+ \\\n (-50*U[i,j-6]+864*U[i,j-5]-7425*U[i,j-4]+44000*U[i,j-3]-222750*U[i,j-2]+1425600*U[i,j-1]-2480478*U[i,j+0]+1425600*U[i,j+1]-222750*U[i,j+2]+44000*U[i,j+3]-7425*U[i,j+4]+864*U[i,j+5]-50*U[i,j+6]))/ \\\n (831600*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(6+ncpml,ny-ncpml-6):\n for j in range(6,ncpml+1):\n phi[i,j]=b[j-1]*phi[i,j]+(b[j-1]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-1]*phi[i,-j-1]+(b[j-1]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(6,ncpml):\n psi[i,j]=b[j-1]*psi[i,j]+(b[j-1]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-1]*psi[i,-j-1]+(b[j-1]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-6,j-6]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-6,-j-7]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(6,ncpml+1):\n for j in range(6,nx-6):\n phi[i,j]=b[i-1]*phi[i,j]+(b[i-1]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-1]*phi[-i-1,j]+(b[i-1]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(6,ncpml):\n for j in range(6,nx-6):\n psi[i,j]=b[i-1]*psi[i,j]+(b[i-1]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-1]*psi[-i-1,j]+(b[i-1]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n Up[i,j] += c[i-6,j-6]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-7,j-6]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def rule_power_factor(f11, f10, f01, f00):\n N = f11 + f10 + f01 + f00\n zero = 1e-10\n supp_ab = f11 / N\n supp_a = f10 / N\n return (supp_ab * supp_ab) / (supp_a + zero)",
"def foursigfloat(num: int, units: List[str]):\n # Presumably three ifs are faster than one logarithm\n if (num >= (1 << 20)):\n if (num >= (1 << 30)):\n if (num >= (1 << 40)):\n if (num > (1 << 50)):\n # PB: TB with no decimal, more than three whole numbers.\n return ('{:.0f}'.format(num / (1 << 40)) + \" \" + units[4])\n else:\n # TB with at least one decimal.\n numstring = numstring[0:max(numstring.find('.'), + 2)]\n return (('{:.0f}'.format(num / (1 << 40)))[0:5] + \" \" + units[4])\n else: # < 1TB\n return (('{:1.3f}'.format(num / (1 << 30)))[0:5] + \" \" + units[3])\n else: # < 1GB\n return (('{:1.3f}'.format(num / (1 << 20)))[0:5] + \" \" + units[2])\n else: # < 1MB\n if (num >= (1 << 10)):\n return (('{:1.3f}'.format(num / (1 << 10)))[0:5] + \" \" + units[1])\n else:\n return ((str(num))[0:5] + \" \" + units[0])",
"def solve_wave_FD8(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(4,ny-4):\n for j in range(4,nx-4):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-4,j-4]* \\\n ((-9*U[i,j-4]+128*U[i,j-3]-1008*U[i,j-2]+8064*U[i,j-1]-14350*U[i,j+0]+8064*U[i,j+1]-1008*U[i,j+2]+128*U[i,j+3]-9*U[i,j+4])+ \\\n (-9*U[i-4,j]+128*U[i-3,j]-1008*U[i-2,j]+8064*U[i-1,j]-14350*U[i+0,j]+8064*U[i+1,j]-1008*U[i+2,j]+128*U[i+3,j]-9*U[i+4,j]))/ \\\n (5040*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(4+ncpml,ny-ncpml-4):\n for j in range(4,ncpml+1):\n phi[i,j]=b[j-1]*phi[i,j]+(b[j-1]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-1]*phi[i,-j-1]+(b[j-1]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(4,ncpml):\n psi[i,j]=b[j-1]*psi[i,j]+(b[j-1]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-1]*psi[i,-j-1]+(b[j-1]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-4,j-4]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-4,-j-5]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(4,ncpml+1):\n for j in range(4,nx-4):\n phi[i,j]=b[i-1]*phi[i,j]+(b[i-1]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-1]*phi[-i-1,j]+(b[i-1]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(4,ncpml):\n for j in range(4,nx-4):\n psi[i,j]=b[i-1]*psi[i,j]+(b[i-1]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-1]*psi[-i-1,j]+(b[i-1]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n Up[i,j] += c[i-4,j-4]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-5,j-4]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def solve_wave_FD4(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(2,ny-2):\n for j in range(2,nx-2):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-2,j-2]* \\\n ((-1*U[i-2,j]+16*U[i-1,j]-30*U[i,j]+16*U[i+1,j]-1*U[i+2,j]) + \\\n (-1*U[i,j-2]+16*U[i,j-1]-30*U[i,j]+16*U[i,j+1]-1*U[i,j+2]))/ \\\n (12*1.0*h**2)\n #CPML boundary in X-domain\n for i in range(2+ncpml,ny-ncpml-2):\n for j in range(2,ncpml+1):\n phi[i,j]=b[j-1]*phi[i,j]+(b[j-1]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-1]*phi[i,-j-1]+(b[j-1]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(2,ncpml):\n psi[i,j]=b[j-1]*psi[i,j]+(b[j-1]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-1]*psi[i,-j-1]+(b[j-1]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-2,j-2]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-2,-j-3]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(2,ncpml+1):\n for j in range(2,nx-2):\n phi[i,j]=b[i-1]*phi[i,j]+(b[i-1]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-1]*phi[-i-1,j]+(b[i-1]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(2,ncpml):\n for j in range(2,nx-2):\n psi[i,j]=b[i-1]*psi[i,j]+(b[i-1]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-1]*psi[-i-1,j]+(b[i-1]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n Up[i,j] += c[i-2,j-2]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-3,j-2]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def save_mulaw(fn, y, sr=22050, quantization_channel=256):\n mu = quantization_channel - 1\n safe_audio_abs = np.minimum(np.abs(y), 1.)\n magnitude = np.log1p(mu * safe_audio_abs) / np.log1p(mu)\n signal = np.sign(y) * magnitude\n y_ = ((signal + 1) / 2 * mu + 0.5).astype(np.uint8)\n np.save(fn, y_)",
"def readTempF(self):\r\n temp = self.readTempC() \r\n return temp if not temp else temp * 9.0/5.0 + 32",
"def do_keep_fp32(network, cell_types):\n for name, cell in network.cells_and_names():\n if isinstance(cell, cell_types):\n cell.to_float(mstype.float32)\n print(f'cast {name} to fp32')",
"def write_waq_spatial(self,fp,quantity,data_fn,xyn):\n np.savetxt(os.path.join(self.model.run_dir,data_fn),\n xyn,fmt=\"%.6g\")\n fp.write(\"\\n\".join([\"QUANTITY=%s\"%quantity,\n \"FILENAME=%s\"%data_fn,\n \"FILETYPE=7\",\n \"METHOD=4\",\n \"OPERAND=O\\n\"]))",
"def rewrite_trk_file_with_ED_vs_FL_scalars(trk_file_orig,trk_file_new, scalar_type):\t\n\timport nibabel as nib\n\timport numpy as np\n\tfrom nipype.interfaces.cmtk.cmtk import length as fib_length\n\tfibres_orig, hdr_orig = nib.trackvis.read(trk_file_orig, False)\n\thdr_new = hdr_orig.copy()\n\toutstreams = []\n\tfor f in fibres_orig:\n\t\t# Calculate fiber lengths\t\n\t\tFL = fib_length(f[0]) \n\t\t# Calculate Euclidean distance between fibre start and endpoints\n\t\tED = np.sqrt(np.square(f[0][0][0]-f[0][-1][0])+np.square(f[0][0][1]-f[0][-1][1])+np.square(f[0][0][2]-f[0][-1][2]))\n\t\t# Fiber length minus Euclidean distance:\n\t\tFLsubED = np.subtract(FL, ED)\n\t\tED_as_percent_of_FL = np.divide(100,FL)*ED\n\t\tif scalar_type == 'FL':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*FL\n\t\t\tproperty_array = np.array([FL], dtype='float32')\n\t\tif scalar_type == 'ED':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*ED\n\t\t\tproperty_array = np.array([ED], dtype='float32')\n\t\tif scalar_type == 'FLsubED':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*FLsubED\n\t\t\tproperty_array = np.array([FLsubED], dtype='float32')\n\t\tif scalar_type == 'ED_as_percent_of_FL':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*ED_as_percent_of_FL\n\t\t\tproperty_array = np.array([ED_as_percent_of_FL], dtype='float32')\n\t\tnew_tuple=tuple([f[0], scalar_array,property_array])\t\t\t\t\n\t\toutstreams.append(new_tuple)\n\tn_fib_out = len(outstreams)\n\thdr_new['n_count'] = n_fib_out\t\n\thdr_new['n_scalars'] = np.array(1, dtype='int16')\t\t\t\t#hdr_new['scalar_name'] = np.array(['JG_COLOURS', '', '', '', '', '', '', '', '', ''],dtype='|S20')\t\t\n\thdr_new['scalar_name'] = np.array([scalar_type, '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\thdr_new['n_properties'] = np.array(1, dtype='int16')\n#\thdr_new['property_name'] = np.array(['JG_PROPERTY', '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\thdr_new['property_name'] = np.array([scalar_type, '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\tnib.trackvis.write(trk_file_new, outstreams, hdr_new)",
"def wgfaWrite(info, outfile = None, minBranch = 1.e-5, rightDigits = 4, maxLvl1 = None):\n #\n# gname = info['ionS']\n if outfile:\n wgfaname = outfile\n else:\n print(' output filename not specified, no file will be created')\n return\n# wgfaname = gname + '.wgfa'\n print((' wgfa file name = ', wgfaname))\n if minBranch > 0.:\n info['ref'].append(' minimum branching ratio = %10.2e'%(minBranch))\n out = open(wgfaname, 'w')\n #ntrans = len(info['lvl1'])\n nlvl = max(info['lvl2'])\n totalAvalue = np.zeros(nlvl, 'float64')\n if 'pretty1' in info:\n pformat = '%5i%5i%15.' + str(rightDigits) + 'f%15.3e%15.3e%30s - %30s'\n else:\n pformat = '%5i%5i%15.' + str(rightDigits) + 'f%15.3e%15.3e'\n for itrans, avalue in enumerate(info['avalue']):\n # for autoionization transitions, lvl1 can be less than zero\n if abs(info['lvl1'][itrans]) > 0 and info['lvl2'][itrans] > 0:\n totalAvalue[info['lvl2'][itrans] -1] += avalue\n\n for itrans, avalue in enumerate(info['avalue']):\n if info['wvl'][itrans] == 0.:\n branch = 1.\n elif avalue > 0.:\n branch = avalue/totalAvalue[info['lvl2'][itrans] -1]\n else:\n branch = 0.\n test1 = branch > minBranch\n test2 = abs(info['lvl1'][itrans]) > 0\n test3 = info['lvl2'][itrans] > 0\n if maxLvl1:\n test4 = info['lvl1'][itrans] <= maxLvl1\n else:\n test4 = True\n if test1 and test2 and test3 and test4:\n if 'pretty1' in info:\n # generally only useful with NIST data\n if 'transType' in info:\n if info['transType'][itrans] != '':\n lbl2 = info['pretty2']+' ' + info['transType'][itrans]\n else:\n lbl2 = info['pretty2'][itrans]\n pstring = pformat%(info['lvl1'][itrans], info['lvl2'][itrans], info['wvl'][itrans], info['gf'][itrans], avalue, info['pretty1'][itrans].rjust(30), lbl2.ljust(30))\n out.write(pstring+'\\n')\n else:\n pstring = pformat%(info['lvl1'][itrans], info['lvl2'][itrans], info['wvl'][itrans], info['gf'][itrans], avalue)\n out.write(pstring+'\\n')\n out.write(' -1\\n')\n out.write('%filename: ' + wgfaname + '\\n')\n for one in info['ref']:\n out.write(one+'\\n')\n out.write(today.strftime('%Y %B %d') +'\\n')\n out.write(' -1 \\n')\n out.close()",
"def output(self, file: 'FILE *') -> \"void\":\n return _coin.SoFloatElement_output(self, file)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute weighted unnormalized UniFrac using fp64 math and write to file
|
def weighted_unnormalized_fp64_to_file(table: str,
phylogeny: str,
out_filename: str,
pcoa_dims: int = 10,
threads: int = 1,
variance_adjusted: bool = False,
bypass_tips: bool = False,
format: str = "hdf5",
buf_dirname: str = "",
n_substeps: int = 1,
n_subsamples: int = 1,
subsample_depth: int = 0,
subsample_with_replacement: bool = True,
permanova_perms: int = 0,
grouping_filename: str = "",
grouping_columns: str = "") -> str:
return _call_ssu_to_file(table, phylogeny, out_filename,
'weighted_unnormalized_fp64',
variance_adjusted, 1.0, bypass_tips,
n_substeps, format,
n_subsamples,
subsample_depth, subsample_with_replacement,
pcoa_dims,
permanova_perms,
grouping_filename, grouping_columns,
buf_dirname)
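# --- minimal usage sketch (illustration only; not part of the source above) ---
# Placeholder file names; the subsampling arguments simply exercise parameters
# exposed in the signature above, with their exact semantics left to the
# underlying _call_ssu_to_file implementation.
result_path = weighted_unnormalized_fp64_to_file(
    table="feature-table.biom",
    phylogeny="tree.nwk",
    out_filename="wu_fp64.h5",
    n_subsamples=10,                   # pass 10 subsamples (per the signature above)
    subsample_depth=1000,              # assumed rarefaction depth per subsample
    subsample_with_replacement=False)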
|
[
"def weighted_normalized_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_normalized_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def unweighted_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'unweighted_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def weighted_unnormalized_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_unnormalized_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def ITU_R_468_weighting_analog():\n\n z = [0]\n p = [-25903.70104781628,\n +36379.90893732929j-23615.53521363528,\n -36379.90893732929j-23615.53521363528,\n +62460.15645250649j-18743.74669072136,\n -62460.15645250649j-18743.74669072136,\n -62675.1700584679]\n\n # Normalize to +12.2 dB at 6.3 kHz, numerically\n # TODO: Derive exact value with sympy\n b, a = zpk2tf(z, p, 1)\n w, h = freqs(b, a, 2*pi*6300)\n k = 10**(+12.2/20) / abs(h[0])\n\n return z, p, k",
"def weighted_normalized_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_normalized_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def write_waq_spatial(self,fp,quantity,data_fn,xyn):\n np.savetxt(os.path.join(self.model.run_dir,data_fn),\n xyn,fmt=\"%.6g\")\n fp.write(\"\\n\".join([\"QUANTITY=%s\"%quantity,\n \"FILENAME=%s\"%data_fn,\n \"FILETYPE=7\",\n \"METHOD=4\",\n \"OPERAND=O\\n\"]))",
"def save_mulaw(fn, y, sr=22050, quantization_channel=256):\n mu = quantization_channel - 1\n safe_audio_abs = np.minimum(np.abs(y), 1.)\n magnitude = np.log1p(mu * safe_audio_abs) / np.log1p(mu)\n signal = np.sign(y) * magnitude\n y_ = ((signal + 1) / 2 * mu + 0.5).astype(np.uint8)\n np.save(fn, y_)",
"def unweighted_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'unweighted_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def _get_wn(cutoff, fs):\n return cutoff / (0.5 * fs)",
"def solve_wave_FD16(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(8,ny-8):\n for j in range(8,nx-8):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-8]* \\\n ((-735*U[i-8,j]+15360*U[i-7,j]-156800*U[i-6,j]+1053696*U[i-5,j]-5350800*U[i-4,j]+22830080*U[i-3,j]-94174080*U[i-2,j]+538137600*U[i-1,j]-924708642*U[i+0,j]+538137600*U[i+1,j]-94174080*U[i+2,j]+22830080*U[i+3,j]-5350800*U[i+4,j]+1053696*U[i+5,j]-156800*U[i+6,j]+15360*U[i+7,j]-735*U[i+8,j])+ \\\n (-735*U[i,j-8]+15360*U[i,j-7]-156800*U[i,j-6]+1053696*U[i,j-5]-5350800*U[i,j-4]+22830080*U[i,j-3]-94174080*U[i,j-2]+538137600*U[i,j-1]-924708642*U[i,j+0]+538137600*U[i,j+1]-94174080*U[i,j+2]+22830080*U[i,j+3]-5350800*U[i,j+4]+1053696*U[i,j+5]-156800*U[i,j+6]+15360*U[i,j+7]-735*U[i,j+8]))/ \\\n (302702400*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(8+ncpml,ny-ncpml-8):\n for j in range(8,ncpml+1):\n phi[i,j]=b[j-8]*phi[i,j]+(b[j-8]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-8]*phi[i,-j-1]+(b[j-8]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(8,ncpml):\n psi[i,j]=b[j-8]*psi[i,j]+(b[j-8]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-8]*psi[i,-j-1]+(b[j-8]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-8,j-8]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-8,-j-9]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(8,ncpml+1):\n for j in range(8,nx-8):\n phi[i,j]=b[i-8]*phi[i,j]+(b[i-8]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-8]*phi[-i-1,j]+(b[i-8]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(8,ncpml):\n for j in range(8,nx-8):\n psi[i,j]=b[i-8]*psi[i,j]+(b[i-8]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-8]*psi[-i-1,j]+(b[i-8]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n \n Up[i,j] += c[i-8,j-8]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-9,j-8]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def writeShortComplex(fileName, data):\n out_file = open(fileName, 'wb')\n data.copy().view(np.float).astype('>i2').tofile(out_file)\n out_file.close()",
"def test_write_resolution_float():\n data = random_data('uint8', (2, 219, 301))\n resolution = (92.0, 92.0)\n with TempFileName('resolution_float') as fname:\n imwrite(fname, data, resolution=resolution)\n assert_valid(fname)\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 2\n assert tif.pages[0].tags['XResolution'].value == (92, 1)\n assert tif.pages[0].tags['YResolution'].value == (92, 1)\n assert tif.pages[1].tags['XResolution'].value == (92, 1)\n assert tif.pages[1].tags['YResolution'].value == (92, 1)\n assert__str__(tif)",
"def extract_unw_phase(infilename, outfilename):\n _, _, unw = isce_read_write.read_scalar_data(infilename, band=2); # reading second band\n ny, nx = np.shape(unw);\n isce_read_write.write_isce_data(unw, nx, ny, \"FLOAT\", outfilename);\n return;",
"def _mag2fluxdensity(mag,band,unit='Jy'):\n from astropy import units\n _mag = -mag/2.5\n f0 = _zeropoint(band)\n _w = wavelength(band,'angstrom')\n f = (f0 * 10**_mag) * (_w/_w.to('Hz',units.spectral()))\n return f.to(unit)",
"def solve_wave_FD12(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(6,ny-6):\n for j in range(6,nx-6):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-6]* \\\n ((-50*U[i-6,j]+864*U[i-5,j]-7425*U[i-4,j]+44000*U[i-3,j]-222750*U[i-2,j]+1425600*U[i-1,j]-2480478*U[i+0,j]+1425600*U[i+1,j]-222750*U[i+2,j]+44000*U[i+3,j]-7425*U[i+4,j]+864*U[i+5,j]-50*U[i+6,j])+ \\\n (-50*U[i,j-6]+864*U[i,j-5]-7425*U[i,j-4]+44000*U[i,j-3]-222750*U[i,j-2]+1425600*U[i,j-1]-2480478*U[i,j+0]+1425600*U[i,j+1]-222750*U[i,j+2]+44000*U[i,j+3]-7425*U[i,j+4]+864*U[i,j+5]-50*U[i,j+6]))/ \\\n (831600*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(6+ncpml,ny-ncpml-6):\n for j in range(6,ncpml+1):\n phi[i,j]=b[j-1]*phi[i,j]+(b[j-1]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-1]*phi[i,-j-1]+(b[j-1]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(6,ncpml):\n psi[i,j]=b[j-1]*psi[i,j]+(b[j-1]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-1]*psi[i,-j-1]+(b[j-1]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-6,j-6]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-6,-j-7]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(6,ncpml+1):\n for j in range(6,nx-6):\n phi[i,j]=b[i-1]*phi[i,j]+(b[i-1]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-1]*phi[-i-1,j]+(b[i-1]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(6,ncpml):\n for j in range(6,nx-6):\n psi[i,j]=b[i-1]*psi[i,j]+(b[i-1]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-1]*psi[-i-1,j]+(b[i-1]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n Up[i,j] += c[i-6,j-6]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-7,j-6]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def wgfaWrite(info, outfile = None, minBranch = 1.e-5, rightDigits = 4, maxLvl1 = None):\n #\n# gname = info['ionS']\n if outfile:\n wgfaname = outfile\n else:\n print(' output filename not specified, no file will be created')\n return\n# wgfaname = gname + '.wgfa'\n print((' wgfa file name = ', wgfaname))\n if minBranch > 0.:\n info['ref'].append(' minimum branching ratio = %10.2e'%(minBranch))\n out = open(wgfaname, 'w')\n #ntrans = len(info['lvl1'])\n nlvl = max(info['lvl2'])\n totalAvalue = np.zeros(nlvl, 'float64')\n if 'pretty1' in info:\n pformat = '%5i%5i%15.' + str(rightDigits) + 'f%15.3e%15.3e%30s - %30s'\n else:\n pformat = '%5i%5i%15.' + str(rightDigits) + 'f%15.3e%15.3e'\n for itrans, avalue in enumerate(info['avalue']):\n # for autoionization transitions, lvl1 can be less than zero\n if abs(info['lvl1'][itrans]) > 0 and info['lvl2'][itrans] > 0:\n totalAvalue[info['lvl2'][itrans] -1] += avalue\n\n for itrans, avalue in enumerate(info['avalue']):\n if info['wvl'][itrans] == 0.:\n branch = 1.\n elif avalue > 0.:\n branch = avalue/totalAvalue[info['lvl2'][itrans] -1]\n else:\n branch = 0.\n test1 = branch > minBranch\n test2 = abs(info['lvl1'][itrans]) > 0\n test3 = info['lvl2'][itrans] > 0\n if maxLvl1:\n test4 = info['lvl1'][itrans] <= maxLvl1\n else:\n test4 = True\n if test1 and test2 and test3 and test4:\n if 'pretty1' in info:\n # generally only useful with NIST data\n if 'transType' in info:\n if info['transType'][itrans] != '':\n lbl2 = info['pretty2']+' ' + info['transType'][itrans]\n else:\n lbl2 = info['pretty2'][itrans]\n pstring = pformat%(info['lvl1'][itrans], info['lvl2'][itrans], info['wvl'][itrans], info['gf'][itrans], avalue, info['pretty1'][itrans].rjust(30), lbl2.ljust(30))\n out.write(pstring+'\\n')\n else:\n pstring = pformat%(info['lvl1'][itrans], info['lvl2'][itrans], info['wvl'][itrans], info['gf'][itrans], avalue)\n out.write(pstring+'\\n')\n out.write(' -1\\n')\n out.write('%filename: ' + wgfaname + '\\n')\n for one in info['ref']:\n out.write(one+'\\n')\n out.write(today.strftime('%Y %B %d') +'\\n')\n out.write(' -1 \\n')\n out.close()",
"def db_to_amplitude(db):\n return db_to_power(db / 2.0)",
"def _ftw(frequency_offset):\n ftw = int(np.round(frequency_offset * _ftw_scale))\n return b\",\".join(map(_ascii_numerals, _4_bytes.pack(ftw)))",
"def writeNormTransFunc(survey='sdss'):\n localpath = getlocalpath()\n dir_out = localpath+survey+'/normtrans/'\n\n if not os.path.isdir(dir_out):\n os.mkdir(dir_out)\n\n for band in surveybands[survey]:\n fp = dir_out + band+'.csv'\n\n trans, ws = calcNormTransFunc(band=band, survey=survey)\n\n tabout = at.Table([ws, trans], names=('ws', 'trans'), dtype=('float', 'float'))\n\n tabout.write(fp, format='ascii.csv', overwrite=True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute weighted unnormalized UniFrac using fp32 math and write to file
|
def weighted_unnormalized_fp32_to_file(table: str,
phylogeny: str,
out_filename: str,
pcoa_dims: int = 10,
threads: int = 1,
variance_adjusted: bool = False,
bypass_tips: bool = False,
format: str = "hdf5",
buf_dirname: str = "",
n_substeps: int = 1,
n_subsamples: int = 1,
subsample_depth: int = 0,
subsample_with_replacement: bool = True,
permanova_perms: int = 0,
grouping_filename: str = "",
grouping_columns: str = "") -> str:
return _call_ssu_to_file(table, phylogeny, out_filename,
'weighted_unnormalized_fp32',
variance_adjusted, 1.0, bypass_tips,
n_substeps, format,
n_subsamples,
subsample_depth, subsample_with_replacement,
pcoa_dims,
permanova_perms,
grouping_filename, grouping_columns,
buf_dirname)
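# --- minimal usage sketch (illustration only; not part of the source above) ---
# Placeholder file and column names; permanova_perms and the grouping_*
# arguments follow the signature above, and their exact behavior is delegated
# to the underlying _call_ssu_to_file implementation.
result_path = weighted_unnormalized_fp32_to_file(
    table="feature-table.biom",
    phylogeny="tree.nwk",
    out_filename="wu_fp32.h5",
    permanova_perms=999,               # request 999 permutations (assumed PERMANOVA meaning)
    grouping_filename="metadata.tsv",  # placeholder sample-metadata file
    grouping_columns="body_site")      # placeholder grouping column name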
|
[
"def weighted_normalized_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_normalized_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def unweighted_fp32_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'unweighted_fp32',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def weighted_unnormalized_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_unnormalized_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def weighted_normalized_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'weighted_normalized_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def unweighted_fp64_to_file(table: str,\n phylogeny: str,\n out_filename: str,\n pcoa_dims: int = 10,\n threads: int = 1,\n variance_adjusted: bool = False,\n bypass_tips: bool = False,\n format: str = \"hdf5\",\n buf_dirname: str = \"\",\n n_substeps: int = 1,\n n_subsamples: int = 1,\n subsample_depth: int = 0,\n subsample_with_replacement: bool = True,\n permanova_perms: int = 0,\n grouping_filename: str = \"\",\n grouping_columns: str = \"\") -> str:\n return _call_ssu_to_file(table, phylogeny, out_filename,\n 'unweighted_fp64',\n variance_adjusted, 1.0, bypass_tips,\n n_substeps, format,\n n_subsamples,\n subsample_depth, subsample_with_replacement,\n pcoa_dims,\n permanova_perms,\n grouping_filename, grouping_columns,\n buf_dirname)",
"def ITU_R_468_weighting_analog():\n\n z = [0]\n p = [-25903.70104781628,\n +36379.90893732929j-23615.53521363528,\n -36379.90893732929j-23615.53521363528,\n +62460.15645250649j-18743.74669072136,\n -62460.15645250649j-18743.74669072136,\n -62675.1700584679]\n\n # Normalize to +12.2 dB at 6.3 kHz, numerically\n # TODO: Derive exact value with sympy\n b, a = zpk2tf(z, p, 1)\n w, h = freqs(b, a, 2*pi*6300)\n k = 10**(+12.2/20) / abs(h[0])\n\n return z, p, k",
"def solve_wave_FD16(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(8,ny-8):\n for j in range(8,nx-8):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-8]* \\\n ((-735*U[i-8,j]+15360*U[i-7,j]-156800*U[i-6,j]+1053696*U[i-5,j]-5350800*U[i-4,j]+22830080*U[i-3,j]-94174080*U[i-2,j]+538137600*U[i-1,j]-924708642*U[i+0,j]+538137600*U[i+1,j]-94174080*U[i+2,j]+22830080*U[i+3,j]-5350800*U[i+4,j]+1053696*U[i+5,j]-156800*U[i+6,j]+15360*U[i+7,j]-735*U[i+8,j])+ \\\n (-735*U[i,j-8]+15360*U[i,j-7]-156800*U[i,j-6]+1053696*U[i,j-5]-5350800*U[i,j-4]+22830080*U[i,j-3]-94174080*U[i,j-2]+538137600*U[i,j-1]-924708642*U[i,j+0]+538137600*U[i,j+1]-94174080*U[i,j+2]+22830080*U[i,j+3]-5350800*U[i,j+4]+1053696*U[i,j+5]-156800*U[i,j+6]+15360*U[i,j+7]-735*U[i,j+8]))/ \\\n (302702400*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(8+ncpml,ny-ncpml-8):\n for j in range(8,ncpml+1):\n phi[i,j]=b[j-8]*phi[i,j]+(b[j-8]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-8]*phi[i,-j-1]+(b[j-8]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(8,ncpml):\n psi[i,j]=b[j-8]*psi[i,j]+(b[j-8]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-8]*psi[i,-j-1]+(b[j-8]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-8,j-8]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-8,-j-9]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(8,ncpml+1):\n for j in range(8,nx-8):\n phi[i,j]=b[i-8]*phi[i,j]+(b[i-8]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-8]*phi[-i-1,j]+(b[i-8]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(8,ncpml):\n for j in range(8,nx-8):\n psi[i,j]=b[i-8]*psi[i,j]+(b[i-8]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-8]*psi[-i-1,j]+(b[i-8]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n \n Up[i,j] += c[i-8,j-8]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-9,j-8]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def solve_wave_FD12(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(6,ny-6):\n for j in range(6,nx-6):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-6]* \\\n ((-50*U[i-6,j]+864*U[i-5,j]-7425*U[i-4,j]+44000*U[i-3,j]-222750*U[i-2,j]+1425600*U[i-1,j]-2480478*U[i+0,j]+1425600*U[i+1,j]-222750*U[i+2,j]+44000*U[i+3,j]-7425*U[i+4,j]+864*U[i+5,j]-50*U[i+6,j])+ \\\n (-50*U[i,j-6]+864*U[i,j-5]-7425*U[i,j-4]+44000*U[i,j-3]-222750*U[i,j-2]+1425600*U[i,j-1]-2480478*U[i,j+0]+1425600*U[i,j+1]-222750*U[i,j+2]+44000*U[i,j+3]-7425*U[i,j+4]+864*U[i,j+5]-50*U[i,j+6]))/ \\\n (831600*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(6+ncpml,ny-ncpml-6):\n for j in range(6,ncpml+1):\n phi[i,j]=b[j-1]*phi[i,j]+(b[j-1]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-1]*phi[i,-j-1]+(b[j-1]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(6,ncpml):\n psi[i,j]=b[j-1]*psi[i,j]+(b[j-1]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-1]*psi[i,-j-1]+(b[j-1]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-6,j-6]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-6,-j-7]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(6,ncpml+1):\n for j in range(6,nx-6):\n phi[i,j]=b[i-1]*phi[i,j]+(b[i-1]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-1]*phi[-i-1,j]+(b[i-1]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(6,ncpml):\n for j in range(6,nx-6):\n psi[i,j]=b[i-1]*psi[i,j]+(b[i-1]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-1]*psi[-i-1,j]+(b[i-1]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n Up[i,j] += c[i-6,j-6]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-7,j-6]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def write_waq_spatial(self,fp,quantity,data_fn,xyn):\n np.savetxt(os.path.join(self.model.run_dir,data_fn),\n xyn,fmt=\"%.6g\")\n fp.write(\"\\n\".join([\"QUANTITY=%s\"%quantity,\n \"FILENAME=%s\"%data_fn,\n \"FILETYPE=7\",\n \"METHOD=4\",\n \"OPERAND=O\\n\"]))",
"def save_mulaw(fn, y, sr=22050, quantization_channel=256):\n mu = quantization_channel - 1\n safe_audio_abs = np.minimum(np.abs(y), 1.)\n magnitude = np.log1p(mu * safe_audio_abs) / np.log1p(mu)\n signal = np.sign(y) * magnitude\n y_ = ((signal + 1) / 2 * mu + 0.5).astype(np.uint8)\n np.save(fn, y_)",
"def solve_wave_FD8(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(4,ny-4):\n for j in range(4,nx-4):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-4,j-4]* \\\n ((-9*U[i,j-4]+128*U[i,j-3]-1008*U[i,j-2]+8064*U[i,j-1]-14350*U[i,j+0]+8064*U[i,j+1]-1008*U[i,j+2]+128*U[i,j+3]-9*U[i,j+4])+ \\\n (-9*U[i-4,j]+128*U[i-3,j]-1008*U[i-2,j]+8064*U[i-1,j]-14350*U[i+0,j]+8064*U[i+1,j]-1008*U[i+2,j]+128*U[i+3,j]-9*U[i+4,j]))/ \\\n (5040*1.0*h**2)\n \n #CPML boundary in X-domain\n for i in range(4+ncpml,ny-ncpml-4):\n for j in range(4,ncpml+1):\n phi[i,j]=b[j-1]*phi[i,j]+(b[j-1]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-1]*phi[i,-j-1]+(b[j-1]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(4,ncpml):\n psi[i,j]=b[j-1]*psi[i,j]+(b[j-1]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-1]*psi[i,-j-1]+(b[j-1]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-4,j-4]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-4,-j-5]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(4,ncpml+1):\n for j in range(4,nx-4):\n phi[i,j]=b[i-1]*phi[i,j]+(b[i-1]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-1]*phi[-i-1,j]+(b[i-1]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(4,ncpml):\n for j in range(4,nx-4):\n psi[i,j]=b[i-1]*psi[i,j]+(b[i-1]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-1]*psi[-i-1,j]+(b[i-1]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n Up[i,j] += c[i-4,j-4]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-5,j-4]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def foursigfloat(num: int, units: List[str]):\n # Presumably three ifs are faster than one logarithm\n if (num >= (1 << 20)):\n if (num >= (1 << 30)):\n if (num >= (1 << 40)):\n if (num > (1 << 50)):\n # PB: TB with no decimal, more than three whole numbers.\n return ('{:.0f}'.format(num / (1 << 40)) + \" \" + units[4])\n else:\n # TB with at least one decimal.\n numstring = numstring[0:max(numstring.find('.'), + 2)]\n return (('{:.0f}'.format(num / (1 << 40)))[0:5] + \" \" + units[4])\n else: # < 1TB\n return (('{:1.3f}'.format(num / (1 << 30)))[0:5] + \" \" + units[3])\n else: # < 1GB\n return (('{:1.3f}'.format(num / (1 << 20)))[0:5] + \" \" + units[2])\n else: # < 1MB\n if (num >= (1 << 10)):\n return (('{:1.3f}'.format(num / (1 << 10)))[0:5] + \" \" + units[1])\n else:\n return ((str(num))[0:5] + \" \" + units[0])",
"def write_3d_lut(self, process_function, file_path, preset):\n pass",
"def solve_wave_FD4(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(2,ny-2):\n for j in range(2,nx-2):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-2,j-2]* \\\n ((-1*U[i-2,j]+16*U[i-1,j]-30*U[i,j]+16*U[i+1,j]-1*U[i+2,j]) + \\\n (-1*U[i,j-2]+16*U[i,j-1]-30*U[i,j]+16*U[i,j+1]-1*U[i,j+2]))/ \\\n (12*1.0*h**2)\n #CPML boundary in X-domain\n for i in range(2+ncpml,ny-ncpml-2):\n for j in range(2,ncpml+1):\n phi[i,j]=b[j-1]*phi[i,j]+(b[j-1]-1.0)*(U[i,j+1]-U[i,j])/h\n phi[i,-j-1]=b[j-1]*phi[i,-j-1]+(b[j-1]-1.0)*(U[i,-j-1]-U[i,-j-2])/h\n for j in range(2,ncpml):\n psi[i,j]=b[j-1]*psi[i,j]+(b[j-1]-1.0)*\\\n ((U[i,j-1]-2*U[i,j]+U[i,j+1])/h/h \\\n +(phi[i,j+1]-phi[i,j])/h)\n psi[i,-j-1]=b[j-1]*psi[i,-j-1]+(b[j-1]-1.0)*\\\n ((U[i,-j-2]-2*U[i,-j-1]+U[i,-j])/h/h \\\n +(phi[i,-j-1]-phi[i,-j-2])/h)\n Up[i,j] += c[i-2,j-2]*((phi[i,j+1]-phi[i,j])/h+psi[i,j])\n Up[i,-j-1] += c[i-2,-j-3]*((phi[i,-j-1]-phi[i,-j-2])/h+psi[i,-j-1])\n \n #CPML boundary in Y-domain\n for i in range(2,ncpml+1):\n for j in range(2,nx-2):\n phi[i,j]=b[i-1]*phi[i,j]+(b[i-1]-1.0)*(U[i+1,j]-U[i,j])/h\n phi[-i-1,j]=b[i-1]*phi[-i-1,j]+(b[i-1]-1.0)*(U[-i-1,j]-U[-i-2,j])/h\n for i in range(2,ncpml):\n for j in range(2,nx-2):\n psi[i,j]=b[i-1]*psi[i,j]+(b[i-1]-1.0)*\\\n ((U[i-1,j]-2*U[i,j]+U[i+1,j])/h/h \\\n +(phi[i+1,j]-phi[i,j])/h)\n psi[-i-1,j]=b[i-1]*psi[-i-1,j]+(b[i-1]-1.0)*\\\n ((U[-i-2,j]-2*U[-i-1,j]+U[-i,j])/h/h \\\n +(phi[-i-1,j]-phi[-i-2,j])/h)\n Up[i,j] += c[i-2,j-2]*((phi[i+1,j]-phi[i,j])/h+psi[i,j])\n Up[-i-1,j] += c[-i-3,j-2]*((phi[-i-1,j]-phi[-i-2,j])/h+psi[-i-1,j])",
"def output(self, file: 'FILE *') -> \"void\":\n return _coin.SoFloatElement_output(self, file)",
"def extract_unw_phase(infilename, outfilename):\n _, _, unw = isce_read_write.read_scalar_data(infilename, band=2); # reading second band\n ny, nx = np.shape(unw);\n isce_read_write.write_isce_data(unw, nx, ny, \"FLOAT\", outfilename);\n return;",
"def rewrite_trk_file_with_ED_vs_FL_scalars(trk_file_orig,trk_file_new, scalar_type):\t\n\timport nibabel as nib\n\timport numpy as np\n\tfrom nipype.interfaces.cmtk.cmtk import length as fib_length\n\tfibres_orig, hdr_orig = nib.trackvis.read(trk_file_orig, False)\n\thdr_new = hdr_orig.copy()\n\toutstreams = []\n\tfor f in fibres_orig:\n\t\t# Calculate fiber lengths\t\n\t\tFL = fib_length(f[0]) \n\t\t# Calculate Euclidean distance between fibre start and endpoints\n\t\tED = np.sqrt(np.square(f[0][0][0]-f[0][-1][0])+np.square(f[0][0][1]-f[0][-1][1])+np.square(f[0][0][2]-f[0][-1][2]))\n\t\t# Fiber length minus Euclidean distance:\n\t\tFLsubED = np.subtract(FL, ED)\n\t\tED_as_percent_of_FL = np.divide(100,FL)*ED\n\t\tif scalar_type == 'FL':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*FL\n\t\t\tproperty_array = np.array([FL], dtype='float32')\n\t\tif scalar_type == 'ED':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*ED\n\t\t\tproperty_array = np.array([ED], dtype='float32')\n\t\tif scalar_type == 'FLsubED':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*FLsubED\n\t\t\tproperty_array = np.array([FLsubED], dtype='float32')\n\t\tif scalar_type == 'ED_as_percent_of_FL':\n\t\t\tscalar_array = np.ones((len(f[0]),1),dtype='float')*ED_as_percent_of_FL\n\t\t\tproperty_array = np.array([ED_as_percent_of_FL], dtype='float32')\n\t\tnew_tuple=tuple([f[0], scalar_array,property_array])\t\t\t\t\n\t\toutstreams.append(new_tuple)\n\tn_fib_out = len(outstreams)\n\thdr_new['n_count'] = n_fib_out\t\n\thdr_new['n_scalars'] = np.array(1, dtype='int16')\t\t\t\t#hdr_new['scalar_name'] = np.array(['JG_COLOURS', '', '', '', '', '', '', '', '', ''],dtype='|S20')\t\t\n\thdr_new['scalar_name'] = np.array([scalar_type, '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\thdr_new['n_properties'] = np.array(1, dtype='int16')\n#\thdr_new['property_name'] = np.array(['JG_PROPERTY', '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\thdr_new['property_name'] = np.array([scalar_type, '', '', '', '', '', '', '', '', ''],dtype='|S20')\n\tnib.trackvis.write(trk_file_new, outstreams, hdr_new)",
"def rule_power_factor(f11, f10, f01, f00):\n N = f11 + f10 + f01 + f00\n zero = 1e-10\n supp_ab = f11 / N\n supp_a = f10 / N\n return (supp_ab * supp_ab) / (supp_a + zero)",
"def readTempF(self):\r\n temp = self.readTempC() \r\n return temp if not temp else temp * 9.0/5.0 + 32"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read a UniFrac distance matrix from an HDF5 file
|
def h5unifrac(h5file: str) -> skbio.DistanceMatrix:
with h5py.File(h5file, "r") as f_u:
if 'matrix:0' in f_u.keys():
# multi format
dm = skbio.DistanceMatrix(
f_u['matrix:0'][:, :],
[c.decode('ascii') for c in f_u['order'][:]])
else:
# single format
dm = skbio.DistanceMatrix(
f_u['matrix'][:, :],
[c.decode('ascii') for c in f_u['order'][:]])
return dm
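A minimal read-back sketch, assuming "wu_fp32.h5" is a hypothetical output produced by one of the *_to_file wrappers above and that h5py and skbio (the dependencies used inside h5unifrac) are importable:

import h5py    # used inside h5unifrac
import skbio   # provides DistanceMatrix

dm = h5unifrac("wu_fp32.h5")    # hypothetical output file from a *_to_file call
print(dm.shape)                 # (n_samples, n_samples)
print(dm.ids[:3])               # sample IDs decoded from the 'order' dataset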
|
[
"def read_hdf5_file(self, file_name):\n # if file_name.endswith('.hdf5'):\n stat_file = h5py.File(config.stat_dir+'stats.hdf5', mode='r')\n\n max_feat = np.array(stat_file[\"feats_maximus\"])\n min_feat = np.array(stat_file[\"feats_minimus\"])\n stat_file.close()\n\n with h5py.File(config.voice_dir + file_name) as feat_file:\n\n feats = np.array(feat_file['feats'])[()]\n\n pho_target = np.array(feat_file[\"phonemes\"])[()]\n\n f0 = feats[:,-2]\n\n med = np.median(f0[f0 > 0])\n\n f0[f0==0] = med\n\n f0_nor = (f0 - min_feat[-2])/(max_feat[-2]-min_feat[-2])\n\n\n return feats, f0_nor, pho_target",
"def read_h(h_file):\n h_matrix = np.zeros((3, 3), dtype=\"float\")\n for i, line in enumerate(open(h_file, \"r\").readlines()):\n h_matrix[i, :] = [float(x) for x in line.strip().split(\",\")]\n\n return h_matrix",
"def read_data(distort=False):\n with h5.File('CIFAR10.hdf5', 'r') as hf:\n print('List of arrays in this file: \\n', hf.keys())\n X_test = hf.get('X_test')\n X_test = np.array(X_test)\n Y_test = hf.get('Y_test')\n Y_test = np.array(Y_test)\n Y_test = dense_to_one_hot(Y_test)\n X_train = hf.get('X_train')\n X_train = np.array(X_train)\n Y_train = hf.get('Y_train')\n Y_train = np.array(Y_train)\n Y_train = dense_to_one_hot(Y_train)\n print('Data download successful!')\n X_train, X_test = rearrange_axes(X_train), rearrange_axes(X_test)\n if distort:\n X_train, Y_train = distort_images(X_train, Y_train)\n print(\"Data distortion successful!\")\n return X_train, Y_train, X_test, Y_test",
"def load_hdf5(self, path):\n f = tables.open_file(os.path.join(path, 'vectors.h5p'), 'r')\n self.matrix = f.root.vectors.read()\n self.vocabulary = Vocabulary()\n self.vocabulary.load(path)\n # self.name += os.path.basename(os.path.normpath(path))\n f.close()",
"def load(self, h5):\n # open file for reading\n if isinstance(h5, str):\n h5 = h5py.File(h5, 'r')\n \n # load features\n self._positives = torch.from_numpy(h5[\"Features\"][:])\n \n # load extreme vectors\n e = h5['ExtremeVectors']\n obj = dict(Scale=torch.from_numpy(e['scale'][()]),Shape = torch.from_numpy(e['shape'][()]),signTensor = torch.tensor(e['sign'][()]),translateAmountTensor = torch.LongTensor(e['translateAmount'][()]),smallScoreTensor = torch.from_numpy(e['smallScore'][()]))\n self._extreme_vectors = weibull.weibull(obj)\n self._extreme_vectors_indexes = torch.tensor(e['indexes'][()])\n\n cv = []\n # load covered indices\n for i in range(len(self._extreme_vectors_indexes)):\n cv.append(torch.from_numpy(numpy.array(e['CoveredVectors/'+str(i)][()])))\n self._covered_vectors = cv\n \n # load other parameteres\n self.distance_function = e.attrs[\"Distance\"]\n self.tailsize = e.attrs[\"Tailsize\"]\n self._label = e.attrs[\"Label\"]\n if self._label == -1: self._label = None\n self.cover_threshold = e.attrs[\"CoverThreshold\"]\n if self.cover_threshold == -1.: self.cover_threshold = None",
"def load_h5(f, path):\n # Sparse array.\n if f.has_attr(path, 'sparse_type'):\n if f.read_attr(path, 'sparse_type') == 'csr':\n return SparseCSR.load_h5(f, path)\n else:\n raise NotImplementedError(\"Only SparseCSR arrays are implemented \"\n \"currently.\")\n # Regular dense dataset.\n else:\n return f.read(path)[...]",
"def load(hdf5_filename):\n # Expand filename to be absolute\n hdf5_filename = os.path.expanduser(hdf5_filename)\n\n try:\n f = h5py.File(hdf5_filename, \"r\")\n # neurodata stores data inside the 'cutout' h5 dataset\n data_layers = f.get('image').get('CUTOUT')\n except Exception as e:\n raise ValueError(\"Could not load file {0} for conversion. {}\".format(\n hdf5_filename, e))\n raise\n\n return numpy.array(data_layers)",
"def _read_from_hdf5(self, array, idx_start, idx_end):\n # Cache the data in memory\n with h5py.File(self.filename, \"r\",) as df:\n for ch in self.ch_range:\n chname_h5 = f\"/ECEI/ECEI_{self.attrs['dev']}{ch.ch_v:02d}{ch.ch_h:02d}/Voltage\"\n array[ch.get_idx(), :] = df[chname_h5][idx_start:idx_end].astype(self.dtype)\n array[:] = array[:] * 1e-4",
"def load_elevation_matrix():\r\n with open(\"mpp.txt\") as f:\r\n elevation_matrix = [elevation_matrix.split() for elevation_matrix in f]\r\n for i in range(5):\r\n for row in elevation_matrix:\r\n del row[-1]\r\n return elevation_matrix",
"def read_dm_test_file():\n # Load data from file\n dat = np.genfromtxt(join(path,\"benchmark/dm_model1-5.txt\")).T\n assert(dat.shape == (6,6))\n\n # Split into redshift column and chi(z) columns\n z = dat[0]\n dm = dat[1:]\n return z, dm",
"def read_hdf5_voltages(file: file) -> (np.ndarray):\n # verify extension matches .hdf, .h4, .hdf4, .he2, .h5, .hdf5, .he5\n if re.search(r'\\.h[de]?f?[f245]$', file) is None:\n raise ValueError(\"Must supply HDF5 file (.h5)\")\n\n recording = h5py.File(file, 'r')\n return np.array(recording[\n \"Data/Recording_0/AnalogStream/Stream_0/ChannelData\"], dtype='int32')",
"def read_hdf5_atomicblock(outputs,datapath,name,time,n):\n\n\t#open HDF5 file\n\t#Read HemoCell v2.0 and v1.0 output. \n\ttry:\n\t\t#HemoCell V2.0 output format\n\t\thdf5datafile = h5py.File(datapath+\"/\"+str(time).zfill(12)+\"/\"+name+\".\" + str(time).zfill(12) +\".p.\" +str(n) +\".h5\",\"r\")\t\n\texcept:\n\t\ttry:\n\t\t\t#HemoCell V1.0 output format\n\t\t\thdf5datafile = h5py.File(datapath+\"/\"+str(time)+\"/\"+name+\".\" + str(time) +\".p.\" +str(n) +\".h5\",\"r\")\n\t\texcept (OSError, IOError):\n\t\t\t#If file does not exist raise the error\n\t\t\traise\n\t\n\t#Append data per output string to a dictionary\n\tdata = {}\n\tfor output in outputs:\n\t\t#If data is LBM Fluid each output needs to be reshaped to be analyzed over the entire domain\n\t\tif \"Fluid\" in name:\n\t\t\tattribute = []\n\t\t\ttempattribute = np.array(hdf5datafile[output])\n\n\t\t\t#Reshape each output attribute so it can be indexed using numpy indexing\n\t\t\tif \"Position\" in data.keys():\n\t\t\t\t#X and Y indicies are reversed for better visualization with Paraview\n\t\t\t\txblocks = np.shape(tempattribute)[2]\n\t\t\t\tyblocks = np.shape(tempattribute)[1]\n\t\t\t\tzblocks = np.shape(tempattribute)[0]\n\t\t\t\tfor xpos in range(xblocks):\n\t\t\t\t\tfor ypos in range(yblocks):\n\t\t\t\t\t\tfor zpos in range(zblocks):\n\t\t\t\t\t\t\tattribute.append(tempattribute[zpos][ypos][xpos])\n\t\t\t\tdata[output] = np.array(attribute)\n\n\t\t\t#Create a Position output to identfy the location of each LBM in the entire domain \n\t\t\telse:\n\t\t\t\tposition =[]\n\t\t\t\t#Get reletive postion of each atomic block\n\t\t\t\trelpos = hdf5datafile.attrs.get('relativePosition')\n\t\t\t\t#X and Y indicies are reversed for better visualization with Paraview\n\t\t\t\txblocks = np.shape(tempattribute)[2]\n\t\t\t\tyblocks = np.shape(tempattribute)[1]\n\t\t\t\tzblocks = np.shape(tempattribute)[0]\n\t\t\t\tfor xpos in range(xblocks):\n\t\t\t\t\tfor ypos in range(yblocks):\n\t\t\t\t\t\tfor zpos in range(zblocks):\n\t\t\t\t\t\t\tattribute.append(tempattribute[zpos][ypos][xpos])\n\t\t\t\t\t\t\tposition.append(np.array([xpos+relpos[2],ypos+relpos[1],zpos+relpos[0]]))\n\n\t\t\t\tdata[output] = np.array(attribute)\n\t\t\t\tdata[\"Position\"] = np.array(position)\n\n\t\t#If data is Cell type simply append it to a dictionary \n\t\telse:\n\t\t\tdata[output] = np.array(hdf5datafile[output])\n\n\thdf5datafile.close()\n\n\t#Return desired data as a dictionary over atomic block domain\n\treturn(data)",
"def _read_niftyreg_matrix(trsf_path):\n matrix = np.loadtxt(trsf_path)\n matrix = np.linalg.inv(matrix)\n return torch.as_tensor(matrix)",
"def read_mhd(filename):\n # Reads the image using SimpleITK\n itkimage = sitk.ReadImage(filename)\n\n # Convert the image to a numpy array first and then\n # shuffle the dimensions to get axis in the order z,y,x\n ct_scan = sitk.GetArrayFromImage(itkimage)\n\n # Read the origin of the ct_scan, will be used to convert\n # the coordinates from world to voxel and vice versa.\n origin = np.array(list(reversed(itkimage.GetOrigin())))\n\n # Read the spacing along each dimension\n spacing = np.array(list(reversed(itkimage.GetSpacing())))\n\n return DataReader.normalization_array(ct_scan), origin, spacing",
"def loadFile(self, fileName,verbose=False):\n if (os.path.isabs(fileName)):\n self.fileName = os.path.basename(fileName)\n self.fullFileName = fileName\n else:\n self.fileName = fileName\n # make the full file name by joining the input name \n # to the MKID_RAW_PATH (or . if the environment variable \n # is not defined)\n dataDir = os.getenv('MKID_RAW_PATH', '/')\n self.fullFileName = os.path.join(dataDir, self.fileName)\n\n if (not os.path.exists(self.fullFileName)):\n msg='file does not exist: %s'%self.fullFileName\n if verbose:\n print msg\n raise Exception(msg)\n \n #open the hdf5 file\n self.file = tables.open_file(self.fullFileName, mode='r')\n\n ##### TO DO/DELETE #####\n # dark obs files have no header currently (SRM 2017-05-05)\n # can update later by foldingn log files into obs file generation somehow\n # header is currently not used anywhere else in the code anyways. Maybe can just trash this.\n '''\n self.header = self.file.root.header.header\n self.titles = self.header.colnames\n try:\n self.info = self.header[0] #header is a table with one row\n except IndexError as inst:\n if verbose:\n print 'Can\\'t read header for ',self.fullFileName\n raise inst\n '''\n\n # Useful information about data format set here.\n # For now, set all of these as constants.\n # If we get data taken with different parameters, straighten\n # that all out here.\n\n ##### TO DELETE? #####\n ## These parameters are for DARKNESS data\n # May be cleared out later if deprecated (SRM 2017-05-05)\n self.tickDuration = 1e-6 #s\n self.ticksPerSec = int(1.0 / self.tickDuration)\n self.intervalAll = interval[0.0, (1.0 / self.tickDuration) - 1]\n\n\n ##### TO DELETE #####\n # Did not do this in DARKNESS. nonAllocPixels were just flagged in beammap\n # but still assigned a unique location. Correct method will be with beam map flags \n #self.nonAllocPixelName = '/r0/p250/'\n\n\n #get the beam image.\n try:\n self.beamImage = self.file.get_node('/BeamMap/Map').read()\n self.beamMapFlags = self.file.get_node('/BeamMap/Flag').read()\n except Exception as inst:\n if verbose:\n print 'Can\\'t access beamimage for ',self.fullFileName\n raise inst\n\n ##### TO DELETE #####\n # dark obs files have pixels ID'd by resID now, not roach/pixel address\n # Do we need these beamImageRoaches or beamImagePixelNums later?\n '''\n #format for a pixelName in beamImage is /r#/p#/t# where r# is the roach number, p# is the pixel number\n # and t# is the starting timestamp\n self.beamImageRoaches = np.array([[int(s.split('r')[1].split('/')[0]) for s in row] for row in self.beamImage])\n self.beamImagePixelNums = np.array([[int(s.split('p')[1].split('/')[0]) for s in row] for row in self.beamImage])\n '''\n #instead of beamImagePixelNums, we alternatively use beamImagePixelIDs\n #simply the beamImage cast to integer data types from strings\n self.beamImagePixelIDs = np.array(self.beamImage, dtype=int)\n\n #get shape of array from beamImage\n beamShape = self.beamImage.shape\n self.nRow = beamShape[0]\n self.nCol = beamShape[1]\n\n #make pointer to data table\n self.data = self.file.root.Photons.data\n\n #easy way to check exactly how many seconds of data are supposedly recorded\n self.totalIntegrationTime = self.file.root.Images._g_getnchildren()",
"def load_hdf5(self, idxfile):\n table = tables.open_file(idxfile)\n data, index = (table.get_node('/sentences'), table.get_node('/indices'))\n data_len = index.shape[0]\n offset = 0\n print(\"{} entries\".format(data_len))\n questions = []\n answers = []\n while offset < data_len:\n pos, q_len, a_len = index[offset]['pos'], index[offset]['q_len'], index[offset]['a_len']\n offset += 1\n questions.append(data[pos:pos + q_len].astype('int64'))\n answers.append(data[pos + q_len:pos + q_len + a_len].astype('int64'))\n table.close()\n return questions, answers",
"def h5ToDf(filename):\n with h5py.File(filename, \"r\") as hf :\n d = {}\n for name in list(hf.keys()):\n d[name] = np.array(hf[name][:])\n df = pd.DataFrame(data=d)\n return(df)",
"def read_hcore(filename):\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split()\n if len(line) <= 1:\n size = int(line[0])\n hcore = np.zeros((size, size), dtype=np.float64)\n elif len(line) == 3:\n i, j, val = int(line[0])-1, int(line[1])-1, np.float64(line[2])\n hcore[i,j] = hcore[j,i] = val\n return hcore",
"def prepare_data(path, output):\n\n with h5.File(NORM, 'r') as fh:\n M = fh['M'][()]\n\n with h5.File(path, 'r') as fh:\n # load extracted snvs\n snv = fh[\"SNVR\"][()].T.reshape(3, 3, 16, 4, 2, 2, 96, -1)\n\n # compute the normalization constant\n N0 = (snv.sum(axis=(4, 5, 6, 7)) / snv.sum()).reshape(3, 3, 16, 4, 1)\n N1 = np.concatenate(\n [N0, N0[[1, 0, 2], :, :][:, [1, 0, 2], :, :]], axis=4)\n N2 = N1.reshape(3, 3, 16, 4, 1, 2, 1, 1)\n N = (N2 * M) / 2\n\n # collapse data\n N = collapse_data(np.concatenate([N] * 2, axis=-4))\n snv = collapse_data(snv)\n\n # to be changed soon\n sv = np.zeros([81, snv.shape[-1]])\n sv[:] = np.nan\n other = np.concatenate(\n [fh['MNV'][()].T, fh['INDELS'][()].T, sv], axis=0)\n\n with h5.File(output, 'w') as fh:\n fh.create_dataset('SNV', data=snv)\n fh.create_dataset('OTHER', data=other)\n fh.create_dataset('N', data=N)\n\n return 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read a PCoA from an HDF5 file
|
def h5pcoa(h5file: str) -> skbio.OrdinationResults:
with h5py.File(h5file, "r") as f_u:
pcoa_method = f_u['pcoa_method'][0].decode('ascii')
if 'FSVD' == pcoa_method:
long_method_name = "Approximate Principal Coordinate Analysis" + \
" using FSVD"
else:
long_method_name = "Possibly Approximate Principal " + \
"Coordinate Analysis " + \
"using " + pcoa_method
order_index = [c.decode('ascii')
for c in f_u['order'][:]]
if 'pcoa_eigvals:0' in f_u.keys():
# multi interface
pc = _build_pcoa(f_u, long_method_name, order_index,
'pcoa_eigvals:0', 'pcoa_samples:0',
'pcoa_proportion_explained:0')
else:
# single interface
pc = _build_pcoa(f_u, long_method_name, order_index,
'pcoa_eigvals', 'pcoa_samples',
'pcoa_proportion_explained')
return pc
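A short usage sketch, assuming the same hypothetical output file and that it was written with a nonzero pcoa_dims so the PCoA datasets exist:

pc = h5pcoa("wu_fp32.h5")               # skbio.OrdinationResults
print(pc.proportion_explained[:3])      # variance explained by the first axes
print(pc.samples.head())                # per-sample coordinates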
|
[
"def h5pcoa_all(h5file: str) -> tuple:\n\n with h5py.File(h5file, \"r\") as f_u:\n pcoa_method = f_u['pcoa_method'][0].decode('ascii')\n if 'FSVD' == pcoa_method:\n long_method_name = \"Approximate Principal Coordinate Analysis\" + \\\n \" using FSVD\"\n else:\n long_method_name = \"Possibly Approximate Principal \" + \\\n \"Coordinate Analysis \" + \\\n \"using \" + pcoa_method\n order_index = [c.decode('ascii')\n for c in f_u['order'][:]]\n\n if 'pcoa_eigvals' in f_u.keys():\n # single matrix single PCoA version\n pcs = [_build_pcoa(f_u, long_method_name, order_index,\n 'pcoa_eigvals', 'pcoa_samples',\n 'pcoa_proportion_explained')]\n else:\n # multi-matrix version\n pcs = []\n i = 0\n while 'pcoa_eigvals:%i' % i in f_u.keys():\n pcs.append(_build_pcoa(f_u, long_method_name, order_index,\n 'pcoa_eigvals:%i' % i,\n 'pcoa_samples:%i' % i,\n 'pcoa_proportion_explained:%i' % i))\n i = i + 1\n\n return pcs",
"def read_cags(self):\n df = pd.read_hdf(self.hdf5_fp,\"/abund/CAGs\")\n self.cags_df = df",
"def cellInfoCaimanHdf5(hdf5File, dims=None):\n cellProfs = []\n signalTraces = []\n\n with h5py.File(hdf5File, mode='r') as cFile:\n est = cFile['estimates']\n Ainfo = est['A']\n signalTraces = np.transpose( np.array(est['C']) )\n Adata = np.array(Ainfo['data'])\n Aindices = np.array(Ainfo['indices'])\n Aindptr = np.array(Ainfo['indptr'])\n Ashape = np.array(Ainfo['shape'])\n A = csc_matrix((Adata,Aindices,Aindptr) , shape=Ashape ).transpose()\n A = np.array(A.todense())\n if dims is None:\n dims = np.array(est['dims'])\n cellProfs = A.reshape((A.shape[0],dims[0] ,-1))\n f = np.array(est['f']).transpose()\n b = np.array(est['b']).transpose()\n b = b.reshape(b.shape[0],cellProfs.shape[1],cellProfs.shape[2])\n return (cellProfs, signalTraces, b, f)",
"def read_hdf5_file(self, file_name):\n # if file_name.endswith('.hdf5'):\n stat_file = h5py.File(config.stat_dir+'stats.hdf5', mode='r')\n\n max_feat = np.array(stat_file[\"feats_maximus\"])\n min_feat = np.array(stat_file[\"feats_minimus\"])\n stat_file.close()\n\n with h5py.File(config.voice_dir + file_name) as feat_file:\n\n feats = np.array(feat_file['feats'])[()]\n\n pho_target = np.array(feat_file[\"phonemes\"])[()]\n\n f0 = feats[:,-2]\n\n med = np.median(f0[f0 > 0])\n\n f0[f0==0] = med\n\n f0_nor = (f0 - min_feat[-2])/(max_feat[-2]-min_feat[-2])\n\n\n return feats, f0_nor, pho_target",
"def read_h5_file(self, h5file_path):\n f = h5py.File(h5file_path, 'r')\n return f['wav']",
"def load_peaks(fname, verbose=False):\n\n if os.path.splitext(fname)[1].lower() != '.pam5':\n raise IOError('This function supports only PAM5 (HDF5) files')\n\n f = h5py.File(fname, 'r')\n\n pam = PeaksAndMetrics()\n\n pamh = f['pam']\n\n version = f.attrs['version']\n\n if version != '0.0.1':\n raise IOError('Incorrect PAM5 file version {0}'.format(version,))\n\n try:\n affine = pamh['affine'][:]\n except KeyError:\n affine = None\n\n peak_dirs = pamh['peak_dirs'][:]\n peak_values = pamh['peak_values'][:]\n peak_indices = pamh['peak_indices'][:]\n\n try:\n shm_coeff = pamh['shm_coeff'][:]\n except KeyError:\n shm_coeff = None\n\n sphere_vertices = pamh['sphere_vertices'][:]\n\n try:\n odf = pamh['odf'][:]\n except KeyError:\n odf = None\n\n pam.affine = affine\n pam.peak_dirs = peak_dirs\n pam.peak_values = peak_values\n pam.peak_indices = peak_indices\n pam.shm_coeff = shm_coeff\n pam.sphere = Sphere(xyz=sphere_vertices)\n pam.B = pamh['B'][:]\n pam.total_weight = pamh['total_weight'][:][0]\n pam.ang_thr = pamh['ang_thr'][:][0]\n pam.gfa = pamh['gfa'][:]\n pam.qa = pamh['qa'][:]\n pam.odf = odf\n\n f.close()\n\n if verbose:\n print('PAM5 version')\n print(version)\n print('Affine')\n print(pam.affine)\n print('Dirs shape')\n print(pam.peak_dirs.shape)\n print('SH shape')\n if pam.shm_coeff is not None:\n print(pam.shm_coeff.shape)\n else:\n print('None')\n print('ODF shape')\n if pam.odf is not None:\n print(pam.odf.shape)\n else:\n print('None')\n print('Total weight')\n print(pam.total_weight)\n print('Angular threshold')\n print(pam.ang_thr)\n print('Sphere vertices shape')\n print(pam.sphere.vertices.shape)\n\n return pam",
"def load_h5(f, path):\n # Sparse array.\n if f.has_attr(path, 'sparse_type'):\n if f.read_attr(path, 'sparse_type') == 'csr':\n return SparseCSR.load_h5(f, path)\n else:\n raise NotImplementedError(\"Only SparseCSR arrays are implemented \"\n \"currently.\")\n # Regular dense dataset.\n else:\n return f.read(path)[...]",
"def parse_PRNU_file():\n hdf_name = r'C:\\Users\\nmishra\\Workspace\\TEMPO\\PRNU_map\\batch_2017Jun20_TEMPO_PRNU_-20Tccd__46Tfpe_3pixSpectral_3pixSpatial.h5'\n file = h5py.File(hdf_name, 'r')\n prnu = file.get('prnu')\n prnu = np.array(prnu).transpose()\n quad_D = prnu[2:1030, 10:1034]\n quad_C = prnu[2:1030, 1078:2102]\n quad_A = prnu[1062:2090, 10:1034]\n quad_B = prnu[1062:2090, 1078:2102]\n prnu_map_lower = np.concatenate((quad_D, quad_C), axis=1)\n prnu_map_upper = np.concatenate((quad_A, quad_B), axis=1)\n prnu_map = np.concatenate((prnu_map_lower, prnu_map_upper), axis=0)\n return prnu_map",
"def getAssociationLandscapeDataFromHDF5File(inputFname=None, associationTableName='association', \\\n\t\t\t\t\t\t\t\t\t\tlandscapeTableName='landscape', min_MAF=0.1):\n\tpdata = PassingData(min_MAF=min_MAF)\n\tgenome_wide_result = getGenomeWideResultFromHDF5MatrixFile(inputFname=inputFname, \\\n\t\t\t\t\t\tmin_value_cutoff=None, do_log10_transformation=False, pdata=pdata,\\\n\t\t\t\t\t\tconstruct_chr_pos2index=False, construct_data_obj_id2index=False, \\\n\t\t\t\t\t\tconstruct_locus_db_id2index=True,\\\n\t\t\t\t\t\treport=True, tableName=associationTableName)\n\t\n\treturnData = PassingData(genome_wide_result=genome_wide_result)\n\t\n\tsys.stderr.write(\"Reading landscape from %s ...\"%(inputFname))\n\tcurrent_obj = None\n\tbridge_ls = []\n\tlocusLandscapeNeighborGraph = nx.Graph()\n\treader = HDF5MatrixFile(inputFname, openMode='r')\n\tlandscapeTableObject = reader.getTableObject(tableName=landscapeTableName)\n\treturnData.HDF5AttributeNameLs = []\n\tfor attributeName, value in landscapeTableObject.getAttributes().iteritems():\n\t\treturnData.HDF5AttributeNameLs.append(attributeName)\n\t\tsetattr(returnData, attributeName, value)\n\t\n\tfor row in landscapeTableObject:\n\t\tif row.start_locus_id==0:\t#empty data. happens when inputFname contains no valid landscape, but one default null data point.\n\t\t\tcontinue\n\t\tstart_locus_id = row.start_locus_id\n\t\tstop_locus_id = row.stop_locus_id\n\t\tno_of_loci = row.no_of_loci\n\t\tdeltaX = row.deltaX\n\t\t\n\t\tstart_obj = genome_wide_result.get_data_obj_by_locus_db_id(start_locus_id)\n\t\tstop_obj = genome_wide_result.get_data_obj_by_locus_db_id(stop_locus_id)\n\t\t\n\t\tbridge_ls.append([start_obj, stop_obj, no_of_loci, deltaX])\n\t\t\n\t\tsource_index = start_obj.index\n\t\t#genome_wide_result.get_data_obj_index_by_locus_db_id(start_locus_id)\n\t\ttarget_index = stop_obj.index\n\t\t\n\t\tlocusLandscapeNeighborGraph.add_edge(source_index, target_index, \\\n\t\t\t\t\t\t\t\t\tweight=None)\n\t\tlocusLandscapeNeighborGraph[source_index][target_index]['no_of_loci'] = no_of_loci\n\t\tlocusLandscapeNeighborGraph[source_index][target_index]['deltaX'] = deltaX\n\t\t\n\tdel reader\n\tsys.stderr.write(\"%s bridges.\\n\"%(len(bridge_ls)))\n\treturnData.bridge_ls = bridge_ls\n\treturnData.locusLandscapeNeighborGraph = locusLandscapeNeighborGraph\n\treturn returnData",
"def read_raw_hdf5_case_and_write_pandas_hdf5(\n hdf5_file,\n root = '' ,\n output_file = '' ,\n serration_angle = 0 ,\n angle_correction = 0 ,\n height_correction = 0 ,\n streamwise_correction = 0 ,\n overwrite = False ,\n time_step_limit = 0 ,\n plot = False ,\n airfoil_normal = False,\n):\n\n #######################################################\n #######################################################\n # IMPORTANT\n #\n # The coordinates coming from the HDF5 file are the\n # vertical freestream coordinates of DaVis.\n #\n # The coordinates used for the local variables are\n # already put to the left-to-right freestream \n # coordinates\n #\n #######################################################\n #######################################################\n\n from progressbar import ProgressBar,Percentage,Bar,ETA,SimpleProgress\n import h5py\n import numpy as np\n import pandas as pd\n from os.path import isfile,join\n\n write_frequency = 150\n\n case = hdf5_file.replace('.hdf5','')\n\n # File related things ######################################################\n if not output_file:\n output_file = case+\".hdf5\"\n\n if airfoil_normal:\n output_file = output_file+\"_AirfoilNormal\"\n\n if not output_file.endswith('.hdf5'):\n output_file = output_file.replace(\".hdf5\",\"\")+\".hdf5\"\n\n if isfile(output_file) and not overwrite:\n print \" Exiting; file exists:\\n{0}\".format(output_file)\n return 0\n # ##########################################################################\n\n h5 = h5py.File(join(root,hdf5_file),'r')\n\n # Read the available times #################################################\n available_times = sorted([int(f[0]) for f in \\\n h5['{0}'.format(case)].iteritems()\\\n if not 'mask' in f and not 'x' in f and not 'y'\\\n in f])\n # ##########################################################################\n\n if time_step_limit:\n available_times = available_times[:time_step_limit]\n\n progress = ProgressBar(\n widgets=[\n Bar(),' ',\n Percentage(),' ',\n ETA(), ' (time step ',\n SimpleProgress(),')'], \n maxval=len(available_times)\n ).start()\n\n t_x_cnt = 0\n cnt = 0\n\n hdf = pd.HDFStore(output_file)\n\n df_dump = pd.DataFrame( columns = ['x','y','u','v','w','time_step'] )\n\n rotation_angle = serration_angle + angle_correction\n if airfoil_normal:\n rotation_angle = rotation_angle - 11.4\n\n for ti in available_times:\n df = pd.DataFrame( data = {\n 'x' : np.array(h5[\"{0}/y\".format(case)].value),\n 'y' : -np.array(h5[\"{0}/x\".format(case)].value),\n 'u' : np.array(h5[\"{0}/{1}/{2}\".format(case,ti,'Vy')].value),\n 'v' : -np.array(h5[\"{0}/{1}/{2}\".format(case,ti,'Vx')].value),\n 'w' : np.array(h5[\"{0}/{1}/{2}\".format(case,ti,'Vz')].value),\n })\n\n df[ 'time_step' ] = ti\n\n df = correct_flow_plane_df(\n df,\n rotation_angle = rotation_angle,\n height_correction = height_correction,\n streamwise_correction = streamwise_correction,\n )\n\n if plot and ti == 0:\n show_surface_from_df(\n df[df.time_step == ti], \n 'u'\n )\n\n progress.update(ti)\n\n df_dump = df_dump.append(df,ignore_index=True)\n\n if cnt == write_frequency:\n\n if t_x_cnt == cnt:\n hdf.put(\n case, \n df_dump.convert_objects(), \n format='table', \n data_columns=True\n )\n\n else:\n hdf.append(\n case , \n df_dump.convert_objects(), \n format='table', \n data_columns=True\n )\n\n df_dump = pd.DataFrame( \n columns = ['x','y','u','v','w','time_step'] \n )\n cnt = 0\n\n if ti == available_times[-1]:\n hdf.append(\n case , \n df_dump.convert_objects(), \n format='table', \n 
data_columns=True\n )\n\n t_x_cnt += 1\n cnt += 1\n\n hdf.close()\n h5.close()\n\n progress.finish()",
"def load_h5(self):\n path = os.path.join(self.directory, self.filename)\n self.h5file = tb.open_file(path, mode=self.mode)\n self.root = self.h5file.get_node(self.root_name)",
"def read_image_from_h5(h5_path, *args, **kwargs):\n # TODO: Implement the method\n\n return image",
"def read_hdf5(filename):\n import h5py as hp\n hfile = hp.File(filename, 'r')\n lenk = len(hfile.keys())\n if lenk == 1:\n data = hfile[hfile.keys()[0]].value\n else:\n data = {}\n for k in hfile.iterkeys():\n # The straight code gives ustrings, which I don't like.\n# data[k] = hfile[k].value\n exec(\"data['\" + k + \"'] = hfile['\" + k + \"'].value\")\n hfile.close()\n return data",
"def load(hdf5_filename):\n # Expand filename to be absolute\n hdf5_filename = os.path.expanduser(hdf5_filename)\n\n try:\n f = h5py.File(hdf5_filename, \"r\")\n # neurodata stores data inside the 'cutout' h5 dataset\n data_layers = f.get('image').get('CUTOUT')\n except Exception as e:\n raise ValueError(\"Could not load file {0} for conversion. {}\".format(\n hdf5_filename, e))\n raise\n\n return numpy.array(data_layers)",
"def _Read_atmo(self, atmo_fln):\n f = open(atmo_fln,'r')\n lines = f.readlines()\n self.atmo_grid = []\n self.atmo_doppler = []\n for line in lines:\n if (line[0] != '#') and (line[0] != '\\n'):\n tmp = line.split()\n self.atmo_grid.append(Atmosphere.AtmoGridPhot.ReadHDF5(tmp[1]))\n self.atmo_doppler.append(Atmosphere.AtmoGridDoppler.ReadHDF5(tmp[2]))\n return",
"def read_data(distort=False):\n with h5.File('CIFAR10.hdf5', 'r') as hf:\n print('List of arrays in this file: \\n', hf.keys())\n X_test = hf.get('X_test')\n X_test = np.array(X_test)\n Y_test = hf.get('Y_test')\n Y_test = np.array(Y_test)\n Y_test = dense_to_one_hot(Y_test)\n X_train = hf.get('X_train')\n X_train = np.array(X_train)\n Y_train = hf.get('Y_train')\n Y_train = np.array(Y_train)\n Y_train = dense_to_one_hot(Y_train)\n print('Data download successful!')\n X_train, X_test = rearrange_axes(X_train), rearrange_axes(X_test)\n if distort:\n X_train, Y_train = distort_images(X_train, Y_train)\n print(\"Data distortion successful!\")\n return X_train, Y_train, X_test, Y_test",
"def test_load_top_associations_by_top_hits(self):\n top_hit_num = 15\n top_hits = [('1', 6369772, 5.559458119903501, 0.1386861313868613, 19, 0.360335870170728, 0.0761941875889666),\n ('2', 18351161, 5.221548337450959, 0.08029197080291971, 11, 0.328720498341187, 0.0747141063333232),\n ('3', 18057816, 4.795206143400829, 0.2116788321167883, 29, -0.336795159960789, 0.0737295910747224),\n ('4', 429928, 6.555416448260276, 0.4233576642335766, 58, 0.368255762771892, 0.0711756042811744 ),\n ('5', 18577788, 6.219812361173065, 0.15328467153284672, 21, -0.327934944673749 ,0.0833854459419328 )]\n\n top_associations, thresholds = hdf5.get_top_associations(self.hdf5_file, top_hit_num, maf=0, top_or_threshold='top')\n assert thresholds['bonferroni_threshold01'] == 7.3140147710960965\n assert thresholds['bonferroni_threshold05'] == 6.615044766760077\n assert thresholds['bh_threshold'] == 6.6150447667600778\n assert thresholds['total_associations'] == 206070\n assert len(top_associations) == top_hit_num*5\n assert np.count_nonzero(top_associations['maf'] < 0.05) > 0\n self._check_return_array(top_associations)\n for i in range(0 ,5):\n assert top_associations[i*top_hit_num].tolist() == top_hits[i]",
"def open_collectobot_data(self):\n self.read_data(hdf5_name=HDF_NAME)",
"def read_metaphlan(self):\n df = pd.read_hdf(self.hdf5_fp, \"/abund/metaphlan/table\")\n self.metaphlan_df = df"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read all PCoAs from an HDF5 file
|
def h5pcoa_all(h5file: str) -> tuple:
with h5py.File(h5file, "r") as f_u:
pcoa_method = f_u['pcoa_method'][0].decode('ascii')
if 'FSVD' == pcoa_method:
long_method_name = "Approximate Principal Coordinate Analysis" + \
" using FSVD"
else:
long_method_name = "Possibly Approximate Principal " + \
"Coordinate Analysis " + \
"using " + pcoa_method
order_index = [c.decode('ascii')
for c in f_u['order'][:]]
if 'pcoa_eigvals' in f_u.keys():
# single matrix single PCoA version
pcs = [_build_pcoa(f_u, long_method_name, order_index,
'pcoa_eigvals', 'pcoa_samples',
'pcoa_proportion_explained')]
else:
# multi-matrix version
pcs = []
i = 0
while 'pcoa_eigvals:%i' % i in f_u.keys():
pcs.append(_build_pcoa(f_u, long_method_name, order_index,
'pcoa_eigvals:%i' % i,
'pcoa_samples:%i' % i,
'pcoa_proportion_explained:%i' % i))
i = i + 1
return pcs
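A usage sketch for the multi-PCoA reader, under the same assumptions as above; note that despite the tuple annotation it returns a list with one OrdinationResults per embedded matrix:

pcs = h5pcoa_all("wu_fp32.h5")          # one OrdinationResults per matrix
for i, pc in enumerate(pcs):
    print(i, pc.eigvals[:3])            # leading eigenvalues of each PCoA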
|
[
"def h5pcoa(h5file: str) -> skbio.OrdinationResults:\n\n with h5py.File(h5file, \"r\") as f_u:\n pcoa_method = f_u['pcoa_method'][0].decode('ascii')\n if 'FSVD' == pcoa_method:\n long_method_name = \"Approximate Principal Coordinate Analysis\" + \\\n \" using FSVD\"\n else:\n long_method_name = \"Possibly Approximate Principal \" + \\\n \"Coordinate Analysis \" + \\\n \"using \" + pcoa_method\n order_index = [c.decode('ascii')\n for c in f_u['order'][:]]\n\n if 'pcoa_eigvals:0' in f_u.keys():\n # multi interface\n pc = _build_pcoa(f_u, long_method_name, order_index,\n 'pcoa_eigvals:0', 'pcoa_samples:0',\n 'pcoa_proportion_explained:0')\n else:\n # single interface\n pc = _build_pcoa(f_u, long_method_name, order_index,\n 'pcoa_eigvals', 'pcoa_samples',\n 'pcoa_proportion_explained')\n\n return pc",
"def read_cags(self):\n df = pd.read_hdf(self.hdf5_fp,\"/abund/CAGs\")\n self.cags_df = df",
"def cellInfoCaimanHdf5(hdf5File, dims=None):\n cellProfs = []\n signalTraces = []\n\n with h5py.File(hdf5File, mode='r') as cFile:\n est = cFile['estimates']\n Ainfo = est['A']\n signalTraces = np.transpose( np.array(est['C']) )\n Adata = np.array(Ainfo['data'])\n Aindices = np.array(Ainfo['indices'])\n Aindptr = np.array(Ainfo['indptr'])\n Ashape = np.array(Ainfo['shape'])\n A = csc_matrix((Adata,Aindices,Aindptr) , shape=Ashape ).transpose()\n A = np.array(A.todense())\n if dims is None:\n dims = np.array(est['dims'])\n cellProfs = A.reshape((A.shape[0],dims[0] ,-1))\n f = np.array(est['f']).transpose()\n b = np.array(est['b']).transpose()\n b = b.reshape(b.shape[0],cellProfs.shape[1],cellProfs.shape[2])\n return (cellProfs, signalTraces, b, f)",
"def loadH5Parts(*args, **kwds):\n \n return io.loadH5Parts(*args, **kwds)",
"def read_data(distort=False):\n with h5.File('CIFAR10.hdf5', 'r') as hf:\n print('List of arrays in this file: \\n', hf.keys())\n X_test = hf.get('X_test')\n X_test = np.array(X_test)\n Y_test = hf.get('Y_test')\n Y_test = np.array(Y_test)\n Y_test = dense_to_one_hot(Y_test)\n X_train = hf.get('X_train')\n X_train = np.array(X_train)\n Y_train = hf.get('Y_train')\n Y_train = np.array(Y_train)\n Y_train = dense_to_one_hot(Y_train)\n print('Data download successful!')\n X_train, X_test = rearrange_axes(X_train), rearrange_axes(X_test)\n if distort:\n X_train, Y_train = distort_images(X_train, Y_train)\n print(\"Data distortion successful!\")\n return X_train, Y_train, X_test, Y_test",
"def test_load_top_associations_by_top_hits(self):\n top_hit_num = 15\n top_hits = [('1', 6369772, 5.559458119903501, 0.1386861313868613, 19, 0.360335870170728, 0.0761941875889666),\n ('2', 18351161, 5.221548337450959, 0.08029197080291971, 11, 0.328720498341187, 0.0747141063333232),\n ('3', 18057816, 4.795206143400829, 0.2116788321167883, 29, -0.336795159960789, 0.0737295910747224),\n ('4', 429928, 6.555416448260276, 0.4233576642335766, 58, 0.368255762771892, 0.0711756042811744 ),\n ('5', 18577788, 6.219812361173065, 0.15328467153284672, 21, -0.327934944673749 ,0.0833854459419328 )]\n\n top_associations, thresholds = hdf5.get_top_associations(self.hdf5_file, top_hit_num, maf=0, top_or_threshold='top')\n assert thresholds['bonferroni_threshold01'] == 7.3140147710960965\n assert thresholds['bonferroni_threshold05'] == 6.615044766760077\n assert thresholds['bh_threshold'] == 6.6150447667600778\n assert thresholds['total_associations'] == 206070\n assert len(top_associations) == top_hit_num*5\n assert np.count_nonzero(top_associations['maf'] < 0.05) > 0\n self._check_return_array(top_associations)\n for i in range(0 ,5):\n assert top_associations[i*top_hit_num].tolist() == top_hits[i]",
"def read_hdf5_file(self, file_name):\n # if file_name.endswith('.hdf5'):\n stat_file = h5py.File(config.stat_dir+'stats.hdf5', mode='r')\n\n max_feat = np.array(stat_file[\"feats_maximus\"])\n min_feat = np.array(stat_file[\"feats_minimus\"])\n stat_file.close()\n\n with h5py.File(config.voice_dir + file_name) as feat_file:\n\n feats = np.array(feat_file['feats'])[()]\n\n pho_target = np.array(feat_file[\"phonemes\"])[()]\n\n f0 = feats[:,-2]\n\n med = np.median(f0[f0 > 0])\n\n f0[f0==0] = med\n\n f0_nor = (f0 - min_feat[-2])/(max_feat[-2]-min_feat[-2])\n\n\n return feats, f0_nor, pho_target",
"def load_h5(f, path):\n # Sparse array.\n if f.has_attr(path, 'sparse_type'):\n if f.read_attr(path, 'sparse_type') == 'csr':\n return SparseCSR.load_h5(f, path)\n else:\n raise NotImplementedError(\"Only SparseCSR arrays are implemented \"\n \"currently.\")\n # Regular dense dataset.\n else:\n return f.read(path)[...]",
"def open_collectobot_data(self):\n self.read_data(hdf5_name=HDF_NAME)",
"def extract_cells_from_h5(in_fn: str, out_fn: str,\n coi: List[str]) -> None:\n coi = {x: None for x in coi}\n ih = h5py.File(in_fn, 'r')\n oh = h5py.File(out_fn, 'w')\n in_cells = {x.decode('UTF-8'): n for n, x in enumerate(ih['names/cells'])}\n out_cells = [x for x in in_cells if x in coi]\n\n if len(out_cells) == 0:\n raise ValueError(\n \"ERROR: None of the input cells were found in the H5 file\")\n if len(out_cells) != len(coi):\n print(\"WARNING: only %d/%d cells found in the H5 file\" % (\n len(out_cells), len(coi)))\n cell_idx_map = {in_cells[x]: n for n, x in enumerate(out_cells)}\n\n oh.create_group('names')\n oh['names'].create_dataset(\n 'genes', chunks=None, data=list(ih['names/genes']))\n oh['names'].create_dataset(\n 'cells', chunks=None, data=[x.encode(\"ascii\") for x in out_cells])\n\n grp = oh.create_group(\"cell_data\")\n for i in tqdm(out_cells):\n grp.create_dataset(i, data=ih['cell_data'][i], chunks=None)\n\n grp = oh.create_group(\"gene_data\")\n for g in tqdm(ih['gene_data']):\n data = []\n d = ih['gene_data'][g][:]\n for i in d:\n if i[0] in cell_idx_map:\n data.append((cell_idx_map[i[0]], i[1]))\n grp.create_dataset(g, data=np.array(data, dtype=d.dtype), chunks=None)\n\n ih.close(), oh.close()\n return None",
"def read_hdf5(filename):\n import h5py as hp\n hfile = hp.File(filename, 'r')\n lenk = len(hfile.keys())\n if lenk == 1:\n data = hfile[hfile.keys()[0]].value\n else:\n data = {}\n for k in hfile.iterkeys():\n # The straight code gives ustrings, which I don't like.\n# data[k] = hfile[k].value\n exec(\"data['\" + k + \"'] = hfile['\" + k + \"'].value\")\n hfile.close()\n return data",
"def read_many_hdf5(num_images):\n images= []\n\n # Open the HDF5 file\n file = h5py.File(hdf5_dir / f\"{num_images}_vids.h5\", \"r+\")\n\n images = np.array(file[\"/images\"]).astype(\"float32\")\n\n return images",
"def load(hdf5_filename):\n # Expand filename to be absolute\n hdf5_filename = os.path.expanduser(hdf5_filename)\n\n try:\n f = h5py.File(hdf5_filename, \"r\")\n # neurodata stores data inside the 'cutout' h5 dataset\n data_layers = f.get('image').get('CUTOUT')\n except Exception as e:\n raise ValueError(\"Could not load file {0} for conversion. {}\".format(\n hdf5_filename, e))\n raise\n\n return numpy.array(data_layers)",
"def CAN_OPENER(directory):\n\tfilelist = np.array([])\n\tfor file in os.listdir(directory):\n\t\tif fnmatch.fnmatch(file, '*.hdf5'):\n\t\t\tfilelist = np.append(filelist, file)\n\tnfiles = int(len(filelist))\n\t\n\tif nfiles == 0:\n\t\tprint \"no files found, make sure they end with .hdf5 \\\n\t\tand are in\" + directory\n\n\tpfs = np.array([])\n\tall_data = np.array([])\n\tfor i in xrange(nfiles):\n\t\tpf = load(directory+filelist[i])\n\t\tdata = pf.h.all_data()\n\t\tpfs = np.append(pfs,pf)\n\t\tall_data = np.append(all_data,data)\n\treturn pfs, all_data",
"def read_hdf5_atomicblock(outputs,datapath,name,time,n):\n\n\t#open HDF5 file\n\t#Read HemoCell v2.0 and v1.0 output. \n\ttry:\n\t\t#HemoCell V2.0 output format\n\t\thdf5datafile = h5py.File(datapath+\"/\"+str(time).zfill(12)+\"/\"+name+\".\" + str(time).zfill(12) +\".p.\" +str(n) +\".h5\",\"r\")\t\n\texcept:\n\t\ttry:\n\t\t\t#HemoCell V1.0 output format\n\t\t\thdf5datafile = h5py.File(datapath+\"/\"+str(time)+\"/\"+name+\".\" + str(time) +\".p.\" +str(n) +\".h5\",\"r\")\n\t\texcept (OSError, IOError):\n\t\t\t#If file does not exist raise the error\n\t\t\traise\n\t\n\t#Append data per output string to a dictionary\n\tdata = {}\n\tfor output in outputs:\n\t\t#If data is LBM Fluid each output needs to be reshaped to be analyzed over the entire domain\n\t\tif \"Fluid\" in name:\n\t\t\tattribute = []\n\t\t\ttempattribute = np.array(hdf5datafile[output])\n\n\t\t\t#Reshape each output attribute so it can be indexed using numpy indexing\n\t\t\tif \"Position\" in data.keys():\n\t\t\t\t#X and Y indicies are reversed for better visualization with Paraview\n\t\t\t\txblocks = np.shape(tempattribute)[2]\n\t\t\t\tyblocks = np.shape(tempattribute)[1]\n\t\t\t\tzblocks = np.shape(tempattribute)[0]\n\t\t\t\tfor xpos in range(xblocks):\n\t\t\t\t\tfor ypos in range(yblocks):\n\t\t\t\t\t\tfor zpos in range(zblocks):\n\t\t\t\t\t\t\tattribute.append(tempattribute[zpos][ypos][xpos])\n\t\t\t\tdata[output] = np.array(attribute)\n\n\t\t\t#Create a Position output to identfy the location of each LBM in the entire domain \n\t\t\telse:\n\t\t\t\tposition =[]\n\t\t\t\t#Get reletive postion of each atomic block\n\t\t\t\trelpos = hdf5datafile.attrs.get('relativePosition')\n\t\t\t\t#X and Y indicies are reversed for better visualization with Paraview\n\t\t\t\txblocks = np.shape(tempattribute)[2]\n\t\t\t\tyblocks = np.shape(tempattribute)[1]\n\t\t\t\tzblocks = np.shape(tempattribute)[0]\n\t\t\t\tfor xpos in range(xblocks):\n\t\t\t\t\tfor ypos in range(yblocks):\n\t\t\t\t\t\tfor zpos in range(zblocks):\n\t\t\t\t\t\t\tattribute.append(tempattribute[zpos][ypos][xpos])\n\t\t\t\t\t\t\tposition.append(np.array([xpos+relpos[2],ypos+relpos[1],zpos+relpos[0]]))\n\n\t\t\t\tdata[output] = np.array(attribute)\n\t\t\t\tdata[\"Position\"] = np.array(position)\n\n\t\t#If data is Cell type simply append it to a dictionary \n\t\telse:\n\t\t\tdata[output] = np.array(hdf5datafile[output])\n\n\thdf5datafile.close()\n\n\t#Return desired data as a dictionary over atomic block domain\n\treturn(data)",
"def _Read_atmo(self, atmo_fln):\n f = open(atmo_fln,'r')\n lines = f.readlines()\n self.atmo_grid = []\n self.atmo_doppler = []\n for line in lines:\n if (line[0] != '#') and (line[0] != '\\n'):\n tmp = line.split()\n self.atmo_grid.append(Atmosphere.AtmoGridPhot.ReadHDF5(tmp[1]))\n self.atmo_doppler.append(Atmosphere.AtmoGridDoppler.ReadHDF5(tmp[2]))\n return",
"def load_h5(self):\n path = os.path.join(self.directory, self.filename)\n self.h5file = tb.open_file(path, mode=self.mode)\n self.root = self.h5file.get_node(self.root_name)",
"def _loadGeneratorEnsembleFromFileHDF(file, close=False):\n for p in file.iterNodes('/'):\n # create list of cluster arrays and append it to ensemble list\n yield [i[:] for i in file.iterNodes(p)]\n\n if close:\n file.close()",
"def read_h5_file(self, h5file_path):\n f = h5py.File(h5file_path, 'r')\n return f['wav']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read the first PERMANOVA statistical test from an HDF5 file. As described in scikit-bio's skbio.stats.distance.permanova, Permutational Multivariate Analysis of Variance (PERMANOVA) is a nonparametric method that tests whether two or more groups of objects are significantly different based on a categorical factor.
|
def h5permanova(h5file: str) -> pd.Series:
found = False
with h5py.File(h5file, "r") as f_u:
methods = f_u['stat_methods'][:]
test_names = f_u['stat_test_names'][:]
values = f_u['stat_values'][:]
pvalues = f_u['stat_pvalues'][:]
n_permutations = f_u['stat_n_permutations'][:]
num_groups = f_u['stat_n_groups'][:]
sample_size = len(f_u['order'][:])
n_stats = len(methods)
for i in range(n_stats):
if (methods[i] == b'PERMANOVA') and (test_names[i] == b'pseudo-F'):
found = True
pmn = _build_stat('PERMANOVA', 'pseudo-F',
sample_size, num_groups[i],
values[i], pvalues[i], n_permutations[i])
break
if (not found):
raise KeyError("PERMANOVA not found")
return pmn
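A usage sketch, assuming the hypothetical output file was generated with permanova_perms > 0 and a grouping file so the stat_* datasets are present; otherwise the reader raises KeyError:

result = h5permanova("wu_fp32.h5")      # pandas Series (see the pd.Series annotation)
print(result)                           # pseudo-F statistic, p-value, permutation count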
|
[
"def h5permanova_dict(h5file: str) -> dict:\n\n pmns = {}\n with h5py.File(h5file, \"r\") as f_u:\n methods = f_u['stat_methods'][:]\n test_names = f_u['stat_test_names'][:]\n grouping_names = f_u['stat_grouping_names'][:]\n values = f_u['stat_values'][:]\n pvalues = f_u['stat_pvalues'][:]\n n_permutations = f_u['stat_n_permutations'][:]\n num_groups = f_u['stat_n_groups'][:]\n\n sample_size = len(f_u['order'][:])\n\n n_stats = len(methods)\n\n for i in range(n_stats):\n if (methods[i] == b'PERMANOVA') and (test_names[i] == b'pseudo-F'):\n kname = grouping_names[i].decode('ascii')\n pmns[kname] = _build_stat('PERMANOVA', 'pseudo-F',\n sample_size, num_groups[i],\n values[i], pvalues[i],\n n_permutations[i])\n\n return pmns",
"def load_peaks(fname, verbose=False):\n\n if os.path.splitext(fname)[1].lower() != '.pam5':\n raise IOError('This function supports only PAM5 (HDF5) files')\n\n f = h5py.File(fname, 'r')\n\n pam = PeaksAndMetrics()\n\n pamh = f['pam']\n\n version = f.attrs['version']\n\n if version != '0.0.1':\n raise IOError('Incorrect PAM5 file version {0}'.format(version,))\n\n try:\n affine = pamh['affine'][:]\n except KeyError:\n affine = None\n\n peak_dirs = pamh['peak_dirs'][:]\n peak_values = pamh['peak_values'][:]\n peak_indices = pamh['peak_indices'][:]\n\n try:\n shm_coeff = pamh['shm_coeff'][:]\n except KeyError:\n shm_coeff = None\n\n sphere_vertices = pamh['sphere_vertices'][:]\n\n try:\n odf = pamh['odf'][:]\n except KeyError:\n odf = None\n\n pam.affine = affine\n pam.peak_dirs = peak_dirs\n pam.peak_values = peak_values\n pam.peak_indices = peak_indices\n pam.shm_coeff = shm_coeff\n pam.sphere = Sphere(xyz=sphere_vertices)\n pam.B = pamh['B'][:]\n pam.total_weight = pamh['total_weight'][:][0]\n pam.ang_thr = pamh['ang_thr'][:][0]\n pam.gfa = pamh['gfa'][:]\n pam.qa = pamh['qa'][:]\n pam.odf = odf\n\n f.close()\n\n if verbose:\n print('PAM5 version')\n print(version)\n print('Affine')\n print(pam.affine)\n print('Dirs shape')\n print(pam.peak_dirs.shape)\n print('SH shape')\n if pam.shm_coeff is not None:\n print(pam.shm_coeff.shape)\n else:\n print('None')\n print('ODF shape')\n if pam.odf is not None:\n print(pam.odf.shape)\n else:\n print('None')\n print('Total weight')\n print(pam.total_weight)\n print('Angular threshold')\n print(pam.ang_thr)\n print('Sphere vertices shape')\n print(pam.sphere.vertices.shape)\n\n return pam",
"def parse_single_12tped_to_hdf5(in_file_prefix='/home/bv25/data/Ls154/Ls154_12',\n out_file_prefix='/home/bv25/data/Ls154/Ls154_12',\n impute_type='mode', filter_monomorphic_snps=True,\n missing_val_thr=0.1):\n \n print 'Starting to parse genotypes'\n genotype_data = {}\n h5py_file = h5py.File(out_file_prefix + '.hdf5')\n genotype_data['hdf5p_file'] = h5py_file\n genot_group = h5py_file.create_group('genot_data')\n indiv_group = h5py_file.create_group('indiv_data')\n \n \n tot_num_snps = 0\n tot_num_missing_val_snps_removed = 0\n tot_num_ambiguous_loc_removed = 0\n curr_chrom = 1\n print 'Working on chromosome %d' % curr_chrom\n \n g_filename = '%s.tped' % (in_file_prefix) \n s_filename = '%s.bim' % (in_file_prefix)\n i_filename = '%s.tfam' % (in_file_prefix) \n\n \n \n indiv_ids = []\n phenotypes = [] \n sex = []\n print 'Parsing individuals file: %s' % i_filename\n with open(i_filename) as f:\n for line in f:\n l = line.split()\n iid = l[0]\n indiv_ids.append(iid)\n sex.append(int(l[4]))\n phenotypes.append(float(l[5]))\n tot_num_indiv = len(indiv_ids) \n \n print 'Storing individual data in individ. group'\n indiv_group.create_dataset('indiv_ids', data=indiv_ids)\n indiv_group.create_dataset('sex', data=sex)\n indiv_group.create_dataset('phenotypes', data=phenotypes)\n \n \n \n num_indiv = len(indiv_ids)\n print 'Found %d Individuals' % (num_indiv)\n\n print 'Parsing nucleotide map'\n nt_map = {}\n chromsomoes = []\n curr_chrom = 0\n with open(s_filename) as f:\n for line in f:\n l = line.split()\n chrom = l[0]\n if chrom != curr_chrom:\n chromsomoes.append(chrom)\n curr_chrom = chrom\n nt_map[l[1]] = (l[4], l[5]) \n assert len(chromsomoes) == len(set(chromsomoes)), 'Chromosomes need to be in order.'\n curr_chrom = chromsomoes[0]\n \n position = -1\n # Initializing containers.\n snps_mat = [] \n positions = []\n sids = []\n nts_list = []\n nt_counts_list = []\n missing_counts = []\n freqs = []\n num_missing_removed = 0\n num_monomorphic_removed = 0\n num_ambiguous_loc_removed = 0\n t0 = time.time()\n\n print 'Starting to parse SNP files'\n gf = open(g_filename)\n for g_line in gf:\n# if random.random() > 0.01:\n# continue\n gl = g_line.split()\n chrom = gl[0]\n if chrom != curr_chrom:\n \n # Store everything and reset.\n print 'Number of SNPs removed due to too many missing values: %d' % num_missing_removed\n print 'Number of SNPs removed due to ambiguous location: %d' % num_ambiguous_loc_removed\n print 'Number of monomorphic SNPs removed: %d' % num_monomorphic_removed\n print 'Number of SNPs retained: %d' % len(positions)\n print 'Number of individuals: %d' % num_indiv\n snps = sp.array(snps_mat, dtype='int8')\n h5py_chrom_group = genot_group.create_group('chrom_%s' % curr_chrom)\n h5py_chrom_group.create_dataset('raw_snps', compression='lzf', data=snps)\n h5py_chrom_group.create_dataset('positions', compression='lzf', data=positions)\n h5py_chrom_group.create_dataset('nts', compression='lzf', data=nts_list)\n h5py_chrom_group.create_dataset('nt_counts', compression='lzf', data=nt_counts_list)\n h5py_chrom_group.create_dataset('missing_counts', compression='lzf', data=missing_counts)\n h5py_chrom_group.create_dataset('freqs', compression='lzf', data=freqs)\n h5py_chrom_group.create_dataset('snp_ids', compression='lzf', data=sids) \n tot_num_snps += len(positions)\n tot_num_missing_val_snps_removed += num_missing_removed\n tot_num_ambiguous_loc_removed += num_ambiguous_loc_removed\n h5py_file.flush() \n t1 = time.time()\n t = t1 - t0\n print 'It took %d minutes and %0.2f seconds to 
parse Chromosome %s.' % (t / 60, t % 60, curr_chrom)\n t0 = time.time()\n\n \n\n # Reset containers\n snps_mat = [] \n positions = []\n sids = []\n nts_list = []\n nt_counts_list = []\n missing_counts = []\n freqs = []\n num_missing_removed = 0\n num_ambiguous = 0\n num_monomorphic_removed = 0\n num_ambiguous_loc_removed = 0\n \n curr_chrom = chrom\n\n sid = gl[1]\n prev_position = position\n position = int(gl[3])\n\n # Skipping unmappable locations\n if position == prev_position:\n num_ambiguous_loc_removed += 1\n continue\n if position == 0:\n num_ambiguous_loc_removed += 1\n continue\n\n nt = nt_map[sid]\n \n snp0 = sp.array(map(int, (g_line.strip()).split()[4:]), 'int8')\n a = sp.arange(tot_num_indiv * 2)\n even_map = a % 2 == 0\n odd_map = a % 2 == 1\n snp = snp0[even_map] + snp0[odd_map] - 2\n snp[snp < 0] = 9\n \n bin_counts = sp.bincount(snp)\n \n\n if len(bin_counts) > 3:\n missing_count = bin_counts[-1]\n # Filtering SNPs with too many missing values\n if missing_count > missing_val_thr * 2 * num_indiv:\n num_missing_removed += 1\n continue\n elif impute_type == 'mode':\n nt_counts = bin_counts[:3] \n v = sp.argmax(nt_counts)\n snp[snp == 9] = v\n bin_counts = sp.bincount(snp)\n else:\n raise Exception('Imputation type is unknown')\n else:\n missing_count = 0\n\n assert len(bin_counts) < 4, 'Issues with nucleotides.'\n nt_counts = bin_counts[:3] \n if len(nt_counts) == 2:\n nt_counts = sp.array([nt_counts[0], nt_counts[1], 0])\n elif len(nt_counts) == 1:\n nt_counts = sp.array([nt_counts[0], 0, 0])\n \n\n # Removing monomorphic SNPs\n if filter_monomorphic_snps:\n if max(nt_counts) == sum(nt_counts):\n num_monomorphic_removed += 1\n continue\n \n freq = sp.mean(snp) / 2.0 \n snps_mat.append(snp)\n positions.append(position)\n sids.append(sid)\n nts_list.append(nt)\n nt_counts_list.append(nt_counts)\n missing_counts.append(missing_count)\n freqs.append(freq) \n\n # Store everything and reset.\n print 'Number of SNPs removed due to too many missing values: %d' % num_missing_removed\n print 'Number of SNPs removed due to ambiguous location: %d' % num_ambiguous_loc_removed\n print 'Number of monomorphic SNPs removed: %d' % num_monomorphic_removed\n print 'Number of SNPs retained: %d' % len(positions)\n print 'Number of individuals: %d' % num_indiv\n snps = sp.array(snps_mat, dtype='int8')\n h5py_chrom_group = genot_group.create_group('chrom_%s' % chrom)\n h5py_chrom_group.create_dataset('raw_snps', compression='lzf', data=snps)\n h5py_chrom_group.create_dataset('positions', compression='lzf', data=positions)\n h5py_chrom_group.create_dataset('nts', compression='lzf', data=nts_list)\n h5py_chrom_group.create_dataset('nt_counts', compression='lzf', data=nt_counts_list)\n h5py_chrom_group.create_dataset('missing_counts', compression='lzf', data=missing_counts)\n h5py_chrom_group.create_dataset('freqs', compression='lzf', data=freqs)\n h5py_chrom_group.create_dataset('snp_ids', compression='lzf', data=sids) \n tot_num_snps += len(positions)\n tot_num_missing_val_snps_removed += num_missing_removed\n tot_num_ambiguous_loc_removed += num_ambiguous_loc_removed\n h5py_file.create_dataset('num_snps', data=sp.array(tot_num_snps))\n h5py_file.flush() \n t1 = time.time()\n t = t1 - t0\n print 'It took %d minutes and %0.2f seconds to parse chromosome %s.' 
% (t / 60, t % 60, chrom)\n\n \n gf.close()\n \n print 'Total number of SNPs parsed successfully was: %d' % tot_num_snps\n print 'Total number of SNPs removed due to too many missing values: %d' % tot_num_missing_val_snps_removed\n print 'Total number of SNPs removed due to ambiguous locations: %d' % tot_num_ambiguous_loc_removed\n h5py_file.close()\n \n print 'Done parsing genotypes.'",
"def test_load_top_associations_by_top_hits(self):\n top_hit_num = 15\n top_hits = [('1', 6369772, 5.559458119903501, 0.1386861313868613, 19, 0.360335870170728, 0.0761941875889666),\n ('2', 18351161, 5.221548337450959, 0.08029197080291971, 11, 0.328720498341187, 0.0747141063333232),\n ('3', 18057816, 4.795206143400829, 0.2116788321167883, 29, -0.336795159960789, 0.0737295910747224),\n ('4', 429928, 6.555416448260276, 0.4233576642335766, 58, 0.368255762771892, 0.0711756042811744 ),\n ('5', 18577788, 6.219812361173065, 0.15328467153284672, 21, -0.327934944673749 ,0.0833854459419328 )]\n\n top_associations, thresholds = hdf5.get_top_associations(self.hdf5_file, top_hit_num, maf=0, top_or_threshold='top')\n assert thresholds['bonferroni_threshold01'] == 7.3140147710960965\n assert thresholds['bonferroni_threshold05'] == 6.615044766760077\n assert thresholds['bh_threshold'] == 6.6150447667600778\n assert thresholds['total_associations'] == 206070\n assert len(top_associations) == top_hit_num*5\n assert np.count_nonzero(top_associations['maf'] < 0.05) > 0\n self._check_return_array(top_associations)\n for i in range(0 ,5):\n assert top_associations[i*top_hit_num].tolist() == top_hits[i]",
"def verify_hdf5(dataset_dir, result_hdf5):\n print(f\"verify ({dataset_dir, result_hdf5})\")\n hf = h5py.File(result_hdf5, 'r') # open the file in read mode\n logistics = hf.get('LOGISTICS')\n files = hf.get('FILES')\n \n print('verifying FILES for {}'.format(dataset_dir))\n verify_hdf5_recursive_files(files, dataset_dir)\n print('verifying LOGISTICS for {}'.format(dataset_dir))\n verify_hdf5_recursive_logistics(logistics, dataset_dir)\n hf.close()",
"def read_hdf5_file(self, file_name):\n # if file_name.endswith('.hdf5'):\n stat_file = h5py.File(config.stat_dir+'stats.hdf5', mode='r')\n\n max_feat = np.array(stat_file[\"feats_maximus\"])\n min_feat = np.array(stat_file[\"feats_minimus\"])\n stat_file.close()\n\n with h5py.File(config.voice_dir + file_name) as feat_file:\n\n feats = np.array(feat_file['feats'])[()]\n\n pho_target = np.array(feat_file[\"phonemes\"])[()]\n\n f0 = feats[:,-2]\n\n med = np.median(f0[f0 > 0])\n\n f0[f0==0] = med\n\n f0_nor = (f0 - min_feat[-2])/(max_feat[-2]-min_feat[-2])\n\n\n return feats, f0_nor, pho_target",
"def test_halo_loading() :\n h = subfind.halos()\n # check that data loading for individual fof groups works\n h[0]['pos']\n h[1]['pos']\n\n # check that loading the subhalos works\n h[0].sub[0]['pos']\n for i,halo in enumerate(h[0:10]) :\n halo['mass'].sum()\n for fam in [halo.g, halo.d, halo.s] :\n assert(len(fam['iord']) == subfind._hdf_files[0][subfind._family_to_group_map[fam.families()[0]][0]]['Length'][i])\n for s in halo.sub :\n s['mass'].sum()\n\n\n\n # test halo catalogue slicing\n for halo in h[0:10] : pass\n for halo in h[30:40] : pass\n for sub in h[0].sub[1:5] : pass",
"def perform_statistical_analysis(method,\n methodparams, \n model, \n groupingparams,\n samples, \n rts,\n rt_tolerance,\n output_prefix,\n h5readpath,\n h5writepath,\n exportpath,\n dbfilename): \n \n #Make read and write paths absolute for consistency.\n h5readpath = mh5.abs_hdf5_path(h5readpath); \n h5writepath = mh5.abs_hdf5_path(h5writepath);\n \n #Get absolute path to the hdf5 file.\n dbfilename = os.path.abspath(dbfilename);\n \n with h5py.File(dbfilename, 'a') as h5file:\n #Get datasets from hdf5 file which contain processed data and metadata\n #Also return their respective indeces in the data array in case alphabetic\n #order was not preserved in previous processing steps. \n dataset_names, dataset_indexes = mh5.get_dataset_names_from_hdf5(h5file, \n h5readpath, \n filter_by_names = samples, \n filter_by_attributes = {\n 'is_OK':True,\n 'has_integrals':True,\n 'has_metadata':True,\n 'is_processed':True,\n 'is_continuous':False,\n 'is_raw':False,\n }, \n return_indeces = True);\n if not dataset_names:\n printlog('No datasets matching criteria found in the h5readpath provided: %s !'%h5readpath);\n return\n\n #Get the list of indeces of rt peaks according to rts selections and rt_tolerance \n rt_indeces = mh5.get_processed_rt_indeces(h5file, h5readpath, rts, rt_tolerance);\n if len(rt_indeces) == 0:\n printlog('No retention time indeces matching criteria found in the h5readpath provided: %s !'%h5readpath);\n return\n \n #Update output_prefix to contain hdf5 name if its mask is supplied.\n if '%HDF5_file_name%' in output_prefix:\n fname = os.path.splitext(os.path.basename(dbfilename))[0];\n output_prefix = output_prefix.replace('%HDF5_file_name%', fname);\n \n #prepare export_path to be absolute\n #if not supplied - use the path of hdf5 file.\n export_path = params['exportpath'];\n if export_path != '':\n export_path = os.path.abspath(export_path);\n else:\n export_path = os.path.split(dbfilename)[0];\n \n #Get full output_prefix (include absolute path) \n output_prefix = os.path.join(export_path, output_prefix);\n \n #Make sure the path exists by creating the folder structure if necessary \n fpath = os.path.split(output_prefix)[0];\n if not os.path.exists(fpath):\n os.makedirs(fpath);\n \n #Instantiate and initialize the statistical model\n stat_model = StatisticalModel(model, groupingparams, h5file, h5readpath, h5writepath, dataset_names, dataset_indexes, rt_indeces, fpath, output_prefix);\n \n #Do the analysis using supplied method and methodparams. For now please\n #call it only once per instance of stat_model created to avoid unexpected\n #behaviour. \n stat_model.analyse_by_method(method, methodparams);",
"def test_load_top_associations_by_top_threshold_and_maf(self):\n top_associations, thresholds = hdf5.get_top_associations(self.hdf5_file, 1e-5, maf=0.1, top_or_threshold='threshold')\n assert len(top_associations) == 13\n assert np.count_nonzero(top_associations['maf'] < 0.1) == 0",
"def test_hdf5_file_input():\n catfile = os.path.join(TEST_DATA_DIR, 'point_sources.cat')\n output_hdf5 = os.path.join(TEST_DATA_DIR, 'all_spectra.hdf5')\n sed_file = os.path.join(TEST_DATA_DIR, 'sed_file_with_normalized_dataset.hdf5')\n sed_catalog = spec.make_all_spectra(catfile, input_spectra_file=sed_file,\n normalizing_mag_column='nircam_f444w_magnitude',\n output_filename=output_hdf5)\n\n comparison = hdf5.open(os.path.join(TEST_DATA_DIR, 'output_spec_from_hdf5_input_including_normalized.hdf5'))\n constructed = hdf5.open(sed_catalog)\n for key in comparison:\n assert key in constructed.keys()\n assert all(comparison[key][\"wavelengths\"].value == constructed[key][\"wavelengths\"].value)\n assert all(comparison[key][\"fluxes\"].value == constructed[key][\"fluxes\"].value)\n assert comparison[key][\"wavelengths\"].unit == constructed[key][\"wavelengths\"].unit\n assert comparison[key][\"fluxes\"].unit == constructed[key][\"fluxes\"].unit\n\n cat_base = catfile.split('.')[0]\n outbase = cat_base + '_with_flambda.cat'\n flambda_output_catalog = os.path.join(TEST_DATA_DIR, outbase)\n os.remove(flambda_output_catalog)\n os.remove(sed_catalog)",
"def test_hdf_properties(tmp_path):\n test_file = GeneratedFile()\n test_file.add_segment(*basic_segment())\n tdms_data = test_file.load()\n\n h5_path = tmp_path / 'h5_properties_test.h5'\n h5 = tdms_data.as_hdf(h5_path)\n\n # File level properties\n assert h5.attrs['num'] == 15\n\n # Group properties\n assert h5['Group'].attrs['prop'] == 'value'\n assert h5['Group'].attrs['num'] == 10\n\n # Channel properties\n assert h5['Group']['Channel2'].attrs['wf_start_offset'] == 0.0\n assert h5['Group']['Channel2'].attrs['wf_increment'] == 0.1",
"def read_raw_hdf5_case_and_write_pandas_hdf5(\n hdf5_file,\n root = '' ,\n output_file = '' ,\n serration_angle = 0 ,\n angle_correction = 0 ,\n height_correction = 0 ,\n streamwise_correction = 0 ,\n overwrite = False ,\n time_step_limit = 0 ,\n plot = False ,\n airfoil_normal = False,\n):\n\n #######################################################\n #######################################################\n # IMPORTANT\n #\n # The coordinates coming from the HDF5 file are the\n # vertical freestream coordinates of DaVis.\n #\n # The coordinates used for the local variables are\n # already put to the left-to-right freestream \n # coordinates\n #\n #######################################################\n #######################################################\n\n from progressbar import ProgressBar,Percentage,Bar,ETA,SimpleProgress\n import h5py\n import numpy as np\n import pandas as pd\n from os.path import isfile,join\n\n write_frequency = 150\n\n case = hdf5_file.replace('.hdf5','')\n\n # File related things ######################################################\n if not output_file:\n output_file = case+\".hdf5\"\n\n if airfoil_normal:\n output_file = output_file+\"_AirfoilNormal\"\n\n if not output_file.endswith('.hdf5'):\n output_file = output_file.replace(\".hdf5\",\"\")+\".hdf5\"\n\n if isfile(output_file) and not overwrite:\n print \" Exiting; file exists:\\n{0}\".format(output_file)\n return 0\n # ##########################################################################\n\n h5 = h5py.File(join(root,hdf5_file),'r')\n\n # Read the available times #################################################\n available_times = sorted([int(f[0]) for f in \\\n h5['{0}'.format(case)].iteritems()\\\n if not 'mask' in f and not 'x' in f and not 'y'\\\n in f])\n # ##########################################################################\n\n if time_step_limit:\n available_times = available_times[:time_step_limit]\n\n progress = ProgressBar(\n widgets=[\n Bar(),' ',\n Percentage(),' ',\n ETA(), ' (time step ',\n SimpleProgress(),')'], \n maxval=len(available_times)\n ).start()\n\n t_x_cnt = 0\n cnt = 0\n\n hdf = pd.HDFStore(output_file)\n\n df_dump = pd.DataFrame( columns = ['x','y','u','v','w','time_step'] )\n\n rotation_angle = serration_angle + angle_correction\n if airfoil_normal:\n rotation_angle = rotation_angle - 11.4\n\n for ti in available_times:\n df = pd.DataFrame( data = {\n 'x' : np.array(h5[\"{0}/y\".format(case)].value),\n 'y' : -np.array(h5[\"{0}/x\".format(case)].value),\n 'u' : np.array(h5[\"{0}/{1}/{2}\".format(case,ti,'Vy')].value),\n 'v' : -np.array(h5[\"{0}/{1}/{2}\".format(case,ti,'Vx')].value),\n 'w' : np.array(h5[\"{0}/{1}/{2}\".format(case,ti,'Vz')].value),\n })\n\n df[ 'time_step' ] = ti\n\n df = correct_flow_plane_df(\n df,\n rotation_angle = rotation_angle,\n height_correction = height_correction,\n streamwise_correction = streamwise_correction,\n )\n\n if plot and ti == 0:\n show_surface_from_df(\n df[df.time_step == ti], \n 'u'\n )\n\n progress.update(ti)\n\n df_dump = df_dump.append(df,ignore_index=True)\n\n if cnt == write_frequency:\n\n if t_x_cnt == cnt:\n hdf.put(\n case, \n df_dump.convert_objects(), \n format='table', \n data_columns=True\n )\n\n else:\n hdf.append(\n case , \n df_dump.convert_objects(), \n format='table', \n data_columns=True\n )\n\n df_dump = pd.DataFrame( \n columns = ['x','y','u','v','w','time_step'] \n )\n cnt = 0\n\n if ti == available_times[-1]:\n hdf.append(\n case , \n df_dump.convert_objects(), \n format='table', \n 
data_columns=True\n )\n\n t_x_cnt += 1\n cnt += 1\n\n hdf.close()\n h5.close()\n\n progress.finish()",
"def read_dm_test_file():\n # Load data from file\n dat = np.genfromtxt(join(path,\"benchmark/dm_model1-5.txt\")).T\n assert(dat.shape == (6,6))\n\n # Split into redshift column and chi(z) columns\n z = dat[0]\n dm = dat[1:]\n return z, dm",
"def h5pcoa(h5file: str) -> skbio.OrdinationResults:\n\n with h5py.File(h5file, \"r\") as f_u:\n pcoa_method = f_u['pcoa_method'][0].decode('ascii')\n if 'FSVD' == pcoa_method:\n long_method_name = \"Approximate Principal Coordinate Analysis\" + \\\n \" using FSVD\"\n else:\n long_method_name = \"Possibly Approximate Principal \" + \\\n \"Coordinate Analysis \" + \\\n \"using \" + pcoa_method\n order_index = [c.decode('ascii')\n for c in f_u['order'][:]]\n\n if 'pcoa_eigvals:0' in f_u.keys():\n # multi interface\n pc = _build_pcoa(f_u, long_method_name, order_index,\n 'pcoa_eigvals:0', 'pcoa_samples:0',\n 'pcoa_proportion_explained:0')\n else:\n # single interface\n pc = _build_pcoa(f_u, long_method_name, order_index,\n 'pcoa_eigvals', 'pcoa_samples',\n 'pcoa_proportion_explained')\n\n return pc",
"def main( auc_ordering, abundances, sample_df, centrality_type, name, detailed=False ):\n\n out_dir = f\"{os.path.dirname(os.path.realpath(__file__))}/output\"\n # allows for cleaner execution and use of relative paths\n\n if( detailed ):\n out_file = f\"{out_dir}/{name}_PERMANOVA_result.csv\"\n # Create new files for output\n\n # Call PERMANOVA calculation\n permanova_df = perform_permanova( auc_ordering, abundances, sample_df, out_file, detailed )\n print( f\"Plots generated to {out_dir}.\" )\n\n # Since this is detailed must generate plots\n _generate_figures( permanova_df, centrality_type, out_dir, name )\n\n else:\n # No excess files necessary just generate dataframe to pass on\n permanova_df = perform_permanova( auc_ordering, abundances, sample_df, None )\n\n permanova_df.reset_index( drop=True, inplace=True ) # reset indicis as a precautionary to make sure all df's start at index 0\n\n return permanova_df",
"def test_parse_electrondensity():\n # Parse\n envisionpy.hdf5parser.charge(PATH_TO_HDF5, PATH_TO_VASP_CALC)\n envisionpy.hdf5parser.unitcell(PATH_TO_HDF5, PATH_TO_VASP_CALC)\n\n # Test if the generated HDF5-file contains correct information\n\n if os.path.isfile(PATH_TO_HDF5):\n with h5py.File(PATH_TO_HDF5, 'r') as h5:\n assert '/CHG' in h5\n assert '/UnitCell' in h5\n assert '/basis' in h5\n assert '/scaling_factor' in h5\n # cleanup\n os.remove(PATH_TO_HDF5)",
"def write_H5scanData(self,dir,H5file,H5name,averaged='False'):\n g = H5file.create_group(H5name) #H5 subgroup with the name of the sample\n H5_ela = g.create_group('elastic') #H5 subgroup for elastics\n H5_xrs = g.create_group('XRS') #H5 subgroup for NIXS\n all_scans = self.elastic_scans+self.nixs_scans\n for file in all_scans:\n scan_info = self.scan_info(file)\n if scan_info[2] == 'elastic':\n h5group = H5_ela.create_group(scan_info[1])\n h5group.create_dataset(\"energy\",data=self.scans[scan_info[1]].energy)\n h5group.create_dataset(\"signals\",data=self.scans[scan_info[1]].signals)\n h5group.create_dataset(\"errors\",data=self.scans[scan_info[1]].errors)\n h5group.create_dataset(\"cenoms\",data=self.scans[scan_info[1]].cenom)\n elif scan_info[2]=='nixs':\n h5group = H5_xrs.create_group(scan_info[1])\n h5group.create_dataset(\"energy\",data=self.scans[scan_info[1]].energy)\n h5group.create_dataset(\"signals\",data=self.scans[scan_info[1]].signals)\n h5group.create_dataset(\"eloss\",data=self.scans[scan_info[1]].eloss)\n h5group.create_dataset(\"errors\",data=self.scans[scan_info[1]].errors)\n h5group.create_dataset(\"tth\",data=self.scans[scan_info[1]].tth)\n\n g.create_dataset(\"energy\",data=self.energy)\n g.create_dataset(\"signals\",data=self.signals)\n g.create_dataset(\"eloss\",data=self.eloss)\n g.create_dataset(\"errors\",data=self.errors)\n g.create_dataset(\"tth\",data=self.tth)\n g.create_dataset(\"Mean Resolutions\", data=np.array(self.resolution.items()))\n\n #Never forget to close an open H5 file!!!\n H5file.close()",
"def test_read_from_hdf5(self):\n expected_keys = set(('velocity', 'position', 'pressure', 'tracer',\n 'snapshots', 'properties'))\n self.assertIsNone(self.data.thin_by)\n self.assertIsNotNone(self.data.snapshots)\n for key in expected_keys:\n self.assertIsNotNone(self.data[key])",
"def read_mean(hfile, **kwargs):\n means = hdf5tods(hfile, **kwargs)\n means = means.assign_coords(dim_0=means.gyf.isel(time=0).values).rename(dim_0=\"Y\")\n return means"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read PERMANOVA statistical tests from an HDF5 file. As described in scikit-bio's skbio.stats.distance.permanova.py, Permutational Multivariate Analysis of Variance (PERMANOVA) is a nonparametric method that tests whether two or more groups of objects are significantly different based on a categorical factor.
|
def h5permanova_dict(h5file: str) -> dict:
pmns = {}
with h5py.File(h5file, "r") as f_u:
methods = f_u['stat_methods'][:]
test_names = f_u['stat_test_names'][:]
grouping_names = f_u['stat_grouping_names'][:]
values = f_u['stat_values'][:]
pvalues = f_u['stat_pvalues'][:]
n_permutations = f_u['stat_n_permutations'][:]
num_groups = f_u['stat_n_groups'][:]
sample_size = len(f_u['order'][:])
n_stats = len(methods)
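        # collect every PERMANOVA pseudo-F entry, keyed by its grouping (metadata column) name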
for i in range(n_stats):
if (methods[i] == b'PERMANOVA') and (test_names[i] == b'pseudo-F'):
kname = grouping_names[i].decode('ascii')
pmns[kname] = _build_stat('PERMANOVA', 'pseudo-F',
sample_size, num_groups[i],
values[i], pvalues[i],
n_permutations[i])
return pmns
|
[
"def h5permanova(h5file: str) -> pd.Series:\n\n found = False\n with h5py.File(h5file, \"r\") as f_u:\n methods = f_u['stat_methods'][:]\n test_names = f_u['stat_test_names'][:]\n values = f_u['stat_values'][:]\n pvalues = f_u['stat_pvalues'][:]\n n_permutations = f_u['stat_n_permutations'][:]\n num_groups = f_u['stat_n_groups'][:]\n\n sample_size = len(f_u['order'][:])\n\n n_stats = len(methods)\n\n for i in range(n_stats):\n if (methods[i] == b'PERMANOVA') and (test_names[i] == b'pseudo-F'):\n found = True\n pmn = _build_stat('PERMANOVA', 'pseudo-F',\n sample_size, num_groups[i],\n values[i], pvalues[i], n_permutations[i])\n break\n\n if (not found):\n raise KeyError(\"PERMANOVA not found\")\n\n return pmn",
"def load_peaks(fname, verbose=False):\n\n if os.path.splitext(fname)[1].lower() != '.pam5':\n raise IOError('This function supports only PAM5 (HDF5) files')\n\n f = h5py.File(fname, 'r')\n\n pam = PeaksAndMetrics()\n\n pamh = f['pam']\n\n version = f.attrs['version']\n\n if version != '0.0.1':\n raise IOError('Incorrect PAM5 file version {0}'.format(version,))\n\n try:\n affine = pamh['affine'][:]\n except KeyError:\n affine = None\n\n peak_dirs = pamh['peak_dirs'][:]\n peak_values = pamh['peak_values'][:]\n peak_indices = pamh['peak_indices'][:]\n\n try:\n shm_coeff = pamh['shm_coeff'][:]\n except KeyError:\n shm_coeff = None\n\n sphere_vertices = pamh['sphere_vertices'][:]\n\n try:\n odf = pamh['odf'][:]\n except KeyError:\n odf = None\n\n pam.affine = affine\n pam.peak_dirs = peak_dirs\n pam.peak_values = peak_values\n pam.peak_indices = peak_indices\n pam.shm_coeff = shm_coeff\n pam.sphere = Sphere(xyz=sphere_vertices)\n pam.B = pamh['B'][:]\n pam.total_weight = pamh['total_weight'][:][0]\n pam.ang_thr = pamh['ang_thr'][:][0]\n pam.gfa = pamh['gfa'][:]\n pam.qa = pamh['qa'][:]\n pam.odf = odf\n\n f.close()\n\n if verbose:\n print('PAM5 version')\n print(version)\n print('Affine')\n print(pam.affine)\n print('Dirs shape')\n print(pam.peak_dirs.shape)\n print('SH shape')\n if pam.shm_coeff is not None:\n print(pam.shm_coeff.shape)\n else:\n print('None')\n print('ODF shape')\n if pam.odf is not None:\n print(pam.odf.shape)\n else:\n print('None')\n print('Total weight')\n print(pam.total_weight)\n print('Angular threshold')\n print(pam.ang_thr)\n print('Sphere vertices shape')\n print(pam.sphere.vertices.shape)\n\n return pam",
"def verify_hdf5(dataset_dir, result_hdf5):\n print(f\"verify ({dataset_dir, result_hdf5})\")\n hf = h5py.File(result_hdf5, 'r') # open the file in read mode\n logistics = hf.get('LOGISTICS')\n files = hf.get('FILES')\n \n print('verifying FILES for {}'.format(dataset_dir))\n verify_hdf5_recursive_files(files, dataset_dir)\n print('verifying LOGISTICS for {}'.format(dataset_dir))\n verify_hdf5_recursive_logistics(logistics, dataset_dir)\n hf.close()",
"def parse_single_12tped_to_hdf5(in_file_prefix='/home/bv25/data/Ls154/Ls154_12',\n out_file_prefix='/home/bv25/data/Ls154/Ls154_12',\n impute_type='mode', filter_monomorphic_snps=True,\n missing_val_thr=0.1):\n \n print 'Starting to parse genotypes'\n genotype_data = {}\n h5py_file = h5py.File(out_file_prefix + '.hdf5')\n genotype_data['hdf5p_file'] = h5py_file\n genot_group = h5py_file.create_group('genot_data')\n indiv_group = h5py_file.create_group('indiv_data')\n \n \n tot_num_snps = 0\n tot_num_missing_val_snps_removed = 0\n tot_num_ambiguous_loc_removed = 0\n curr_chrom = 1\n print 'Working on chromosome %d' % curr_chrom\n \n g_filename = '%s.tped' % (in_file_prefix) \n s_filename = '%s.bim' % (in_file_prefix)\n i_filename = '%s.tfam' % (in_file_prefix) \n\n \n \n indiv_ids = []\n phenotypes = [] \n sex = []\n print 'Parsing individuals file: %s' % i_filename\n with open(i_filename) as f:\n for line in f:\n l = line.split()\n iid = l[0]\n indiv_ids.append(iid)\n sex.append(int(l[4]))\n phenotypes.append(float(l[5]))\n tot_num_indiv = len(indiv_ids) \n \n print 'Storing individual data in individ. group'\n indiv_group.create_dataset('indiv_ids', data=indiv_ids)\n indiv_group.create_dataset('sex', data=sex)\n indiv_group.create_dataset('phenotypes', data=phenotypes)\n \n \n \n num_indiv = len(indiv_ids)\n print 'Found %d Individuals' % (num_indiv)\n\n print 'Parsing nucleotide map'\n nt_map = {}\n chromsomoes = []\n curr_chrom = 0\n with open(s_filename) as f:\n for line in f:\n l = line.split()\n chrom = l[0]\n if chrom != curr_chrom:\n chromsomoes.append(chrom)\n curr_chrom = chrom\n nt_map[l[1]] = (l[4], l[5]) \n assert len(chromsomoes) == len(set(chromsomoes)), 'Chromosomes need to be in order.'\n curr_chrom = chromsomoes[0]\n \n position = -1\n # Initializing containers.\n snps_mat = [] \n positions = []\n sids = []\n nts_list = []\n nt_counts_list = []\n missing_counts = []\n freqs = []\n num_missing_removed = 0\n num_monomorphic_removed = 0\n num_ambiguous_loc_removed = 0\n t0 = time.time()\n\n print 'Starting to parse SNP files'\n gf = open(g_filename)\n for g_line in gf:\n# if random.random() > 0.01:\n# continue\n gl = g_line.split()\n chrom = gl[0]\n if chrom != curr_chrom:\n \n # Store everything and reset.\n print 'Number of SNPs removed due to too many missing values: %d' % num_missing_removed\n print 'Number of SNPs removed due to ambiguous location: %d' % num_ambiguous_loc_removed\n print 'Number of monomorphic SNPs removed: %d' % num_monomorphic_removed\n print 'Number of SNPs retained: %d' % len(positions)\n print 'Number of individuals: %d' % num_indiv\n snps = sp.array(snps_mat, dtype='int8')\n h5py_chrom_group = genot_group.create_group('chrom_%s' % curr_chrom)\n h5py_chrom_group.create_dataset('raw_snps', compression='lzf', data=snps)\n h5py_chrom_group.create_dataset('positions', compression='lzf', data=positions)\n h5py_chrom_group.create_dataset('nts', compression='lzf', data=nts_list)\n h5py_chrom_group.create_dataset('nt_counts', compression='lzf', data=nt_counts_list)\n h5py_chrom_group.create_dataset('missing_counts', compression='lzf', data=missing_counts)\n h5py_chrom_group.create_dataset('freqs', compression='lzf', data=freqs)\n h5py_chrom_group.create_dataset('snp_ids', compression='lzf', data=sids) \n tot_num_snps += len(positions)\n tot_num_missing_val_snps_removed += num_missing_removed\n tot_num_ambiguous_loc_removed += num_ambiguous_loc_removed\n h5py_file.flush() \n t1 = time.time()\n t = t1 - t0\n print 'It took %d minutes and %0.2f seconds to 
parse Chromosome %s.' % (t / 60, t % 60, curr_chrom)\n t0 = time.time()\n\n \n\n # Reset containers\n snps_mat = [] \n positions = []\n sids = []\n nts_list = []\n nt_counts_list = []\n missing_counts = []\n freqs = []\n num_missing_removed = 0\n num_ambiguous = 0\n num_monomorphic_removed = 0\n num_ambiguous_loc_removed = 0\n \n curr_chrom = chrom\n\n sid = gl[1]\n prev_position = position\n position = int(gl[3])\n\n # Skipping unmappable locations\n if position == prev_position:\n num_ambiguous_loc_removed += 1\n continue\n if position == 0:\n num_ambiguous_loc_removed += 1\n continue\n\n nt = nt_map[sid]\n \n snp0 = sp.array(map(int, (g_line.strip()).split()[4:]), 'int8')\n a = sp.arange(tot_num_indiv * 2)\n even_map = a % 2 == 0\n odd_map = a % 2 == 1\n snp = snp0[even_map] + snp0[odd_map] - 2\n snp[snp < 0] = 9\n \n bin_counts = sp.bincount(snp)\n \n\n if len(bin_counts) > 3:\n missing_count = bin_counts[-1]\n # Filtering SNPs with too many missing values\n if missing_count > missing_val_thr * 2 * num_indiv:\n num_missing_removed += 1\n continue\n elif impute_type == 'mode':\n nt_counts = bin_counts[:3] \n v = sp.argmax(nt_counts)\n snp[snp == 9] = v\n bin_counts = sp.bincount(snp)\n else:\n raise Exception('Imputation type is unknown')\n else:\n missing_count = 0\n\n assert len(bin_counts) < 4, 'Issues with nucleotides.'\n nt_counts = bin_counts[:3] \n if len(nt_counts) == 2:\n nt_counts = sp.array([nt_counts[0], nt_counts[1], 0])\n elif len(nt_counts) == 1:\n nt_counts = sp.array([nt_counts[0], 0, 0])\n \n\n # Removing monomorphic SNPs\n if filter_monomorphic_snps:\n if max(nt_counts) == sum(nt_counts):\n num_monomorphic_removed += 1\n continue\n \n freq = sp.mean(snp) / 2.0 \n snps_mat.append(snp)\n positions.append(position)\n sids.append(sid)\n nts_list.append(nt)\n nt_counts_list.append(nt_counts)\n missing_counts.append(missing_count)\n freqs.append(freq) \n\n # Store everything and reset.\n print 'Number of SNPs removed due to too many missing values: %d' % num_missing_removed\n print 'Number of SNPs removed due to ambiguous location: %d' % num_ambiguous_loc_removed\n print 'Number of monomorphic SNPs removed: %d' % num_monomorphic_removed\n print 'Number of SNPs retained: %d' % len(positions)\n print 'Number of individuals: %d' % num_indiv\n snps = sp.array(snps_mat, dtype='int8')\n h5py_chrom_group = genot_group.create_group('chrom_%s' % chrom)\n h5py_chrom_group.create_dataset('raw_snps', compression='lzf', data=snps)\n h5py_chrom_group.create_dataset('positions', compression='lzf', data=positions)\n h5py_chrom_group.create_dataset('nts', compression='lzf', data=nts_list)\n h5py_chrom_group.create_dataset('nt_counts', compression='lzf', data=nt_counts_list)\n h5py_chrom_group.create_dataset('missing_counts', compression='lzf', data=missing_counts)\n h5py_chrom_group.create_dataset('freqs', compression='lzf', data=freqs)\n h5py_chrom_group.create_dataset('snp_ids', compression='lzf', data=sids) \n tot_num_snps += len(positions)\n tot_num_missing_val_snps_removed += num_missing_removed\n tot_num_ambiguous_loc_removed += num_ambiguous_loc_removed\n h5py_file.create_dataset('num_snps', data=sp.array(tot_num_snps))\n h5py_file.flush() \n t1 = time.time()\n t = t1 - t0\n print 'It took %d minutes and %0.2f seconds to parse chromosome %s.' 
% (t / 60, t % 60, chrom)\n\n \n gf.close()\n \n print 'Total number of SNPs parsed successfully was: %d' % tot_num_snps\n print 'Total number of SNPs removed due to too many missing values: %d' % tot_num_missing_val_snps_removed\n print 'Total number of SNPs removed due to ambiguous locations: %d' % tot_num_ambiguous_loc_removed\n h5py_file.close()\n \n print 'Done parsing genotypes.'",
"def perform_statistical_analysis(method,\n methodparams, \n model, \n groupingparams,\n samples, \n rts,\n rt_tolerance,\n output_prefix,\n h5readpath,\n h5writepath,\n exportpath,\n dbfilename): \n \n #Make read and write paths absolute for consistency.\n h5readpath = mh5.abs_hdf5_path(h5readpath); \n h5writepath = mh5.abs_hdf5_path(h5writepath);\n \n #Get absolute path to the hdf5 file.\n dbfilename = os.path.abspath(dbfilename);\n \n with h5py.File(dbfilename, 'a') as h5file:\n #Get datasets from hdf5 file which contain processed data and metadata\n #Also return their respective indeces in the data array in case alphabetic\n #order was not preserved in previous processing steps. \n dataset_names, dataset_indexes = mh5.get_dataset_names_from_hdf5(h5file, \n h5readpath, \n filter_by_names = samples, \n filter_by_attributes = {\n 'is_OK':True,\n 'has_integrals':True,\n 'has_metadata':True,\n 'is_processed':True,\n 'is_continuous':False,\n 'is_raw':False,\n }, \n return_indeces = True);\n if not dataset_names:\n printlog('No datasets matching criteria found in the h5readpath provided: %s !'%h5readpath);\n return\n\n #Get the list of indeces of rt peaks according to rts selections and rt_tolerance \n rt_indeces = mh5.get_processed_rt_indeces(h5file, h5readpath, rts, rt_tolerance);\n if len(rt_indeces) == 0:\n printlog('No retention time indeces matching criteria found in the h5readpath provided: %s !'%h5readpath);\n return\n \n #Update output_prefix to contain hdf5 name if its mask is supplied.\n if '%HDF5_file_name%' in output_prefix:\n fname = os.path.splitext(os.path.basename(dbfilename))[0];\n output_prefix = output_prefix.replace('%HDF5_file_name%', fname);\n \n #prepare export_path to be absolute\n #if not supplied - use the path of hdf5 file.\n export_path = params['exportpath'];\n if export_path != '':\n export_path = os.path.abspath(export_path);\n else:\n export_path = os.path.split(dbfilename)[0];\n \n #Get full output_prefix (include absolute path) \n output_prefix = os.path.join(export_path, output_prefix);\n \n #Make sure the path exists by creating the folder structure if necessary \n fpath = os.path.split(output_prefix)[0];\n if not os.path.exists(fpath):\n os.makedirs(fpath);\n \n #Instantiate and initialize the statistical model\n stat_model = StatisticalModel(model, groupingparams, h5file, h5readpath, h5writepath, dataset_names, dataset_indexes, rt_indeces, fpath, output_prefix);\n \n #Do the analysis using supplied method and methodparams. For now please\n #call it only once per instance of stat_model created to avoid unexpected\n #behaviour. \n stat_model.analyse_by_method(method, methodparams);",
"def test_hdf_properties(tmp_path):\n test_file = GeneratedFile()\n test_file.add_segment(*basic_segment())\n tdms_data = test_file.load()\n\n h5_path = tmp_path / 'h5_properties_test.h5'\n h5 = tdms_data.as_hdf(h5_path)\n\n # File level properties\n assert h5.attrs['num'] == 15\n\n # Group properties\n assert h5['Group'].attrs['prop'] == 'value'\n assert h5['Group'].attrs['num'] == 10\n\n # Channel properties\n assert h5['Group']['Channel2'].attrs['wf_start_offset'] == 0.0\n assert h5['Group']['Channel2'].attrs['wf_increment'] == 0.1",
"def test_hdf5_file_input():\n catfile = os.path.join(TEST_DATA_DIR, 'point_sources.cat')\n output_hdf5 = os.path.join(TEST_DATA_DIR, 'all_spectra.hdf5')\n sed_file = os.path.join(TEST_DATA_DIR, 'sed_file_with_normalized_dataset.hdf5')\n sed_catalog = spec.make_all_spectra(catfile, input_spectra_file=sed_file,\n normalizing_mag_column='nircam_f444w_magnitude',\n output_filename=output_hdf5)\n\n comparison = hdf5.open(os.path.join(TEST_DATA_DIR, 'output_spec_from_hdf5_input_including_normalized.hdf5'))\n constructed = hdf5.open(sed_catalog)\n for key in comparison:\n assert key in constructed.keys()\n assert all(comparison[key][\"wavelengths\"].value == constructed[key][\"wavelengths\"].value)\n assert all(comparison[key][\"fluxes\"].value == constructed[key][\"fluxes\"].value)\n assert comparison[key][\"wavelengths\"].unit == constructed[key][\"wavelengths\"].unit\n assert comparison[key][\"fluxes\"].unit == constructed[key][\"fluxes\"].unit\n\n cat_base = catfile.split('.')[0]\n outbase = cat_base + '_with_flambda.cat'\n flambda_output_catalog = os.path.join(TEST_DATA_DIR, outbase)\n os.remove(flambda_output_catalog)\n os.remove(sed_catalog)",
"def test_load_top_associations_by_top_hits(self):\n top_hit_num = 15\n top_hits = [('1', 6369772, 5.559458119903501, 0.1386861313868613, 19, 0.360335870170728, 0.0761941875889666),\n ('2', 18351161, 5.221548337450959, 0.08029197080291971, 11, 0.328720498341187, 0.0747141063333232),\n ('3', 18057816, 4.795206143400829, 0.2116788321167883, 29, -0.336795159960789, 0.0737295910747224),\n ('4', 429928, 6.555416448260276, 0.4233576642335766, 58, 0.368255762771892, 0.0711756042811744 ),\n ('5', 18577788, 6.219812361173065, 0.15328467153284672, 21, -0.327934944673749 ,0.0833854459419328 )]\n\n top_associations, thresholds = hdf5.get_top_associations(self.hdf5_file, top_hit_num, maf=0, top_or_threshold='top')\n assert thresholds['bonferroni_threshold01'] == 7.3140147710960965\n assert thresholds['bonferroni_threshold05'] == 6.615044766760077\n assert thresholds['bh_threshold'] == 6.6150447667600778\n assert thresholds['total_associations'] == 206070\n assert len(top_associations) == top_hit_num*5\n assert np.count_nonzero(top_associations['maf'] < 0.05) > 0\n self._check_return_array(top_associations)\n for i in range(0 ,5):\n assert top_associations[i*top_hit_num].tolist() == top_hits[i]",
"def read_hdf5_file(self, file_name):\n # if file_name.endswith('.hdf5'):\n stat_file = h5py.File(config.stat_dir+'stats.hdf5', mode='r')\n\n max_feat = np.array(stat_file[\"feats_maximus\"])\n min_feat = np.array(stat_file[\"feats_minimus\"])\n stat_file.close()\n\n with h5py.File(config.voice_dir + file_name) as feat_file:\n\n feats = np.array(feat_file['feats'])[()]\n\n pho_target = np.array(feat_file[\"phonemes\"])[()]\n\n f0 = feats[:,-2]\n\n med = np.median(f0[f0 > 0])\n\n f0[f0==0] = med\n\n f0_nor = (f0 - min_feat[-2])/(max_feat[-2]-min_feat[-2])\n\n\n return feats, f0_nor, pho_target",
"def main( auc_ordering, abundances, sample_df, centrality_type, name, detailed=False ):\n\n out_dir = f\"{os.path.dirname(os.path.realpath(__file__))}/output\"\n # allows for cleaner execution and use of relative paths\n\n if( detailed ):\n out_file = f\"{out_dir}/{name}_PERMANOVA_result.csv\"\n # Create new files for output\n\n # Call PERMANOVA calculation\n permanova_df = perform_permanova( auc_ordering, abundances, sample_df, out_file, detailed )\n print( f\"Plots generated to {out_dir}.\" )\n\n # Since this is detailed must generate plots\n _generate_figures( permanova_df, centrality_type, out_dir, name )\n\n else:\n # No excess files necessary just generate dataframe to pass on\n permanova_df = perform_permanova( auc_ordering, abundances, sample_df, None )\n\n permanova_df.reset_index( drop=True, inplace=True ) # reset indicis as a precautionary to make sure all df's start at index 0\n\n return permanova_df",
"def read_raw_hdf5_case_and_write_pandas_hdf5(\n hdf5_file,\n root = '' ,\n output_file = '' ,\n serration_angle = 0 ,\n angle_correction = 0 ,\n height_correction = 0 ,\n streamwise_correction = 0 ,\n overwrite = False ,\n time_step_limit = 0 ,\n plot = False ,\n airfoil_normal = False,\n):\n\n #######################################################\n #######################################################\n # IMPORTANT\n #\n # The coordinates coming from the HDF5 file are the\n # vertical freestream coordinates of DaVis.\n #\n # The coordinates used for the local variables are\n # already put to the left-to-right freestream \n # coordinates\n #\n #######################################################\n #######################################################\n\n from progressbar import ProgressBar,Percentage,Bar,ETA,SimpleProgress\n import h5py\n import numpy as np\n import pandas as pd\n from os.path import isfile,join\n\n write_frequency = 150\n\n case = hdf5_file.replace('.hdf5','')\n\n # File related things ######################################################\n if not output_file:\n output_file = case+\".hdf5\"\n\n if airfoil_normal:\n output_file = output_file+\"_AirfoilNormal\"\n\n if not output_file.endswith('.hdf5'):\n output_file = output_file.replace(\".hdf5\",\"\")+\".hdf5\"\n\n if isfile(output_file) and not overwrite:\n print \" Exiting; file exists:\\n{0}\".format(output_file)\n return 0\n # ##########################################################################\n\n h5 = h5py.File(join(root,hdf5_file),'r')\n\n # Read the available times #################################################\n available_times = sorted([int(f[0]) for f in \\\n h5['{0}'.format(case)].iteritems()\\\n if not 'mask' in f and not 'x' in f and not 'y'\\\n in f])\n # ##########################################################################\n\n if time_step_limit:\n available_times = available_times[:time_step_limit]\n\n progress = ProgressBar(\n widgets=[\n Bar(),' ',\n Percentage(),' ',\n ETA(), ' (time step ',\n SimpleProgress(),')'], \n maxval=len(available_times)\n ).start()\n\n t_x_cnt = 0\n cnt = 0\n\n hdf = pd.HDFStore(output_file)\n\n df_dump = pd.DataFrame( columns = ['x','y','u','v','w','time_step'] )\n\n rotation_angle = serration_angle + angle_correction\n if airfoil_normal:\n rotation_angle = rotation_angle - 11.4\n\n for ti in available_times:\n df = pd.DataFrame( data = {\n 'x' : np.array(h5[\"{0}/y\".format(case)].value),\n 'y' : -np.array(h5[\"{0}/x\".format(case)].value),\n 'u' : np.array(h5[\"{0}/{1}/{2}\".format(case,ti,'Vy')].value),\n 'v' : -np.array(h5[\"{0}/{1}/{2}\".format(case,ti,'Vx')].value),\n 'w' : np.array(h5[\"{0}/{1}/{2}\".format(case,ti,'Vz')].value),\n })\n\n df[ 'time_step' ] = ti\n\n df = correct_flow_plane_df(\n df,\n rotation_angle = rotation_angle,\n height_correction = height_correction,\n streamwise_correction = streamwise_correction,\n )\n\n if plot and ti == 0:\n show_surface_from_df(\n df[df.time_step == ti], \n 'u'\n )\n\n progress.update(ti)\n\n df_dump = df_dump.append(df,ignore_index=True)\n\n if cnt == write_frequency:\n\n if t_x_cnt == cnt:\n hdf.put(\n case, \n df_dump.convert_objects(), \n format='table', \n data_columns=True\n )\n\n else:\n hdf.append(\n case , \n df_dump.convert_objects(), \n format='table', \n data_columns=True\n )\n\n df_dump = pd.DataFrame( \n columns = ['x','y','u','v','w','time_step'] \n )\n cnt = 0\n\n if ti == available_times[-1]:\n hdf.append(\n case , \n df_dump.convert_objects(), \n format='table', \n 
data_columns=True\n )\n\n t_x_cnt += 1\n cnt += 1\n\n hdf.close()\n h5.close()\n\n progress.finish()",
"def test_halo_loading() :\n h = subfind.halos()\n # check that data loading for individual fof groups works\n h[0]['pos']\n h[1]['pos']\n\n # check that loading the subhalos works\n h[0].sub[0]['pos']\n for i,halo in enumerate(h[0:10]) :\n halo['mass'].sum()\n for fam in [halo.g, halo.d, halo.s] :\n assert(len(fam['iord']) == subfind._hdf_files[0][subfind._family_to_group_map[fam.families()[0]][0]]['Length'][i])\n for s in halo.sub :\n s['mass'].sum()\n\n\n\n # test halo catalogue slicing\n for halo in h[0:10] : pass\n for halo in h[30:40] : pass\n for sub in h[0].sub[1:5] : pass",
"def test_load_top_associations_by_top_threshold_and_maf(self):\n top_associations, thresholds = hdf5.get_top_associations(self.hdf5_file, 1e-5, maf=0.1, top_or_threshold='threshold')\n assert len(top_associations) == 13\n assert np.count_nonzero(top_associations['maf'] < 0.1) == 0",
"def test_parse_electrondensity():\n # Parse\n envisionpy.hdf5parser.charge(PATH_TO_HDF5, PATH_TO_VASP_CALC)\n envisionpy.hdf5parser.unitcell(PATH_TO_HDF5, PATH_TO_VASP_CALC)\n\n # Test if the generated HDF5-file contains correct information\n\n if os.path.isfile(PATH_TO_HDF5):\n with h5py.File(PATH_TO_HDF5, 'r') as h5:\n assert '/CHG' in h5\n assert '/UnitCell' in h5\n assert '/basis' in h5\n assert '/scaling_factor' in h5\n # cleanup\n os.remove(PATH_TO_HDF5)",
"def write_H5scanData(self,dir,H5file,H5name,averaged='False'):\n g = H5file.create_group(H5name) #H5 subgroup with the name of the sample\n H5_ela = g.create_group('elastic') #H5 subgroup for elastics\n H5_xrs = g.create_group('XRS') #H5 subgroup for NIXS\n all_scans = self.elastic_scans+self.nixs_scans\n for file in all_scans:\n scan_info = self.scan_info(file)\n if scan_info[2] == 'elastic':\n h5group = H5_ela.create_group(scan_info[1])\n h5group.create_dataset(\"energy\",data=self.scans[scan_info[1]].energy)\n h5group.create_dataset(\"signals\",data=self.scans[scan_info[1]].signals)\n h5group.create_dataset(\"errors\",data=self.scans[scan_info[1]].errors)\n h5group.create_dataset(\"cenoms\",data=self.scans[scan_info[1]].cenom)\n elif scan_info[2]=='nixs':\n h5group = H5_xrs.create_group(scan_info[1])\n h5group.create_dataset(\"energy\",data=self.scans[scan_info[1]].energy)\n h5group.create_dataset(\"signals\",data=self.scans[scan_info[1]].signals)\n h5group.create_dataset(\"eloss\",data=self.scans[scan_info[1]].eloss)\n h5group.create_dataset(\"errors\",data=self.scans[scan_info[1]].errors)\n h5group.create_dataset(\"tth\",data=self.scans[scan_info[1]].tth)\n\n g.create_dataset(\"energy\",data=self.energy)\n g.create_dataset(\"signals\",data=self.signals)\n g.create_dataset(\"eloss\",data=self.eloss)\n g.create_dataset(\"errors\",data=self.errors)\n g.create_dataset(\"tth\",data=self.tth)\n g.create_dataset(\"Mean Resolutions\", data=np.array(self.resolution.items()))\n\n #Never forget to close an open H5 file!!!\n H5file.close()",
"def read_dm_test_file():\n # Load data from file\n dat = np.genfromtxt(join(path,\"benchmark/dm_model1-5.txt\")).T\n assert(dat.shape == (6,6))\n\n # Split into redshift column and chi(z) columns\n z = dat[0]\n dm = dat[1:]\n return z, dm",
"def h5pcoa(h5file: str) -> skbio.OrdinationResults:\n\n with h5py.File(h5file, \"r\") as f_u:\n pcoa_method = f_u['pcoa_method'][0].decode('ascii')\n if 'FSVD' == pcoa_method:\n long_method_name = \"Approximate Principal Coordinate Analysis\" + \\\n \" using FSVD\"\n else:\n long_method_name = \"Possibly Approximate Principal \" + \\\n \"Coordinate Analysis \" + \\\n \"using \" + pcoa_method\n order_index = [c.decode('ascii')\n for c in f_u['order'][:]]\n\n if 'pcoa_eigvals:0' in f_u.keys():\n # multi interface\n pc = _build_pcoa(f_u, long_method_name, order_index,\n 'pcoa_eigvals:0', 'pcoa_samples:0',\n 'pcoa_proportion_explained:0')\n else:\n # single interface\n pc = _build_pcoa(f_u, long_method_name, order_index,\n 'pcoa_eigvals', 'pcoa_samples',\n 'pcoa_proportion_explained')\n\n return pc",
"def read_mean(hfile, **kwargs):\n means = hdf5tods(hfile, **kwargs)\n means = means.assign_coords(dim_0=means.gyf.isel(time=0).values).rename(dim_0=\"Y\")\n return means",
"def test_read_from_hdf5(self):\n expected_keys = set(('velocity', 'position', 'pressure', 'tracer',\n 'snapshots', 'properties'))\n self.assertIsNone(self.data.thin_by)\n self.assertIsNotNone(self.data.snapshots)\n for key in expected_keys:\n self.assertIsNotNone(self.data[key])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
generates a string which denotes an Eulerian cycle within the graph
|
def eulerian_cycle(graph, random_start=True, choice_start=0):
if len(graph) == 0 or len(graph) == 1:
return ""
stack, vertices, visited_edges = [], [], []
# deciding where to start by generating a random start location, if random_start is set to True;
    # at the same time, check if there are any vertices which lead nowhere; if so, then it is impossible to create
    # an Eulerian cycle
for vertex in graph:
vertices.append(vertex)
if not graph[vertex]:
return "Eulerian cycle impossible"
if random_start:
stack.append(vertices[random.randint(0, len(graph)-1)])
else:
stack.append(vertices[choice_start])
path = "" # the string which will eventually have the entire path
while len(stack) != 0:
ptr = stack[-1]
# generate a list of unvisited paths
unvisited_paths = []
for next_vertex in graph[ptr]:
if (ptr, next_vertex) not in visited_edges:
unvisited_paths.append((ptr, next_vertex))
if len(unvisited_paths) == 0: # if no unvisited edges
stack = stack[:-1]
path += str(ptr) + ">-"
else: # if ptr -> current_edge is unvisited, go to one vertex
stack.append(unvisited_paths[0][1]) # appending a vertex
visited_edges.append(unvisited_paths[0]) # appending a path
# return the path by reversing the string and shaving off the first two characters (which were "->")
return path[::-1][2:]
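A small illustrative example (not part of the original record): it assumes eulerian_cycle is defined as above, with random imported in its module, and uses a toy adjacency list in the dict-of-lists form the function expects.

# Directed triangle: every vertex has equal in- and out-degree, so an Eulerian cycle exists.
graph = {0: [1], 1: [2], 2: [0]}
print(eulerian_cycle(graph, random_start=False))  # deterministic start at vertex 0 -> "0->1->2->0"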
|
[
"def euler26(d):\n max_cycle = 0\n max_cycle_denominator = 0\n\n for i in range(1, d):\n length = cycle_length(i)\n if length > max_cycle:\n max_cycle = length\n max_cycle_denominator = i\n\n return \"1/{0} has the max cycle, with a length of {1}\".format(max_cycle_denominator, max_cycle)",
"def find_eulerian_cycle(graph):\n\n\t#Initiate variables\n\t#Creation of a copy of the original grap\n\tcopyGraph = copy.deepcopy(graph)\n\t#Start node\n\tcurrent = list(copyGraph)[0]\n\tsubcycle = [current]\n\n\tcorrectCycle = []\n\t#While length of subcycle is bigger than 0\n\twhile len(subcycle) > 0:\n\t\t#If the vertex still has edges\n\t\tif copyGraph[current] != []:\n\t\t\t#Append the current to the subcycle list\n\t\t\tsubcycle.append(copyGraph[current][0])\n\t\t\t#Get the next vertex\n\t\t\tnextV = copyGraph[current].pop(0)\n\t\t\t#Change current vertex to 'next' for the next 'round'\n\t\t\tcurrent = nextV\n\t\telse:\n\t\t\t#Add the last item of subcycle to the correct Cycle\n\t\t\tcorrectCycle.append(subcycle[-1])\n\t\t\t#Delete it from the subcycle\n\t\t\tdel subcycle[-1]\n\t\t\t#If subcycle is empty, break\n\t\t\tif subcycle == []:\n\t\t\t\tbreak\n\t\t\telse: \n\t\t\t\t#Replace current with the last one from subcycle\n\t\t\t\tcurrent = subcycle[-1]\n\t#Return the correct Cycle reverse\n\treturn correctCycle[::-1]",
"def cycle_graph(n):\n return call_polymake_function(b\"polytope\", b\"cycle_graph\", n)",
"def find_eulerian_path(graph, start):\n\n\t#Initiate variables\n\tcopyGraph = copy.deepcopy(graph)\n\tif start == \"\":\n\t\tcurrent = list(copyGraph)[0]\n\telse:\n\t\tcurrent = list(copyGraph)[start]\n\tsubpath = [current]\n\n\tcorrectPath = []\n\t#Check is length subpath is not equal to 0\n\twhile len(subpath) > 0:\n\t\t#If current not in graph\n\t\tif current not in copyGraph:\n\t\t\t#Append to the correctCycle and delete from subpath\n\t\t\tcorrectPath.append(subpath[-1])\n\t\t\tdel subpath[-1]\n\t\t\t#If subpath is empty, break\n\t\t\tif subpath == []:\n\t\t\t\tbreak\n\t\t\telse: \n\t\t\t\t#Change current to last element of subpath\n\t\t\t\tcurrent = subpath[-1]\n\t\t#If current in copyGraph is not empty\n\t\tif copyGraph[current] != []:\n\t\t\t#Append to subpath\n\t\t\tsubpath.append(copyGraph[current][0])\n\t\t\t#Get the next variable\n\t\t\tnextV = copyGraph[current].pop(0)\n\t\t\tcurrent = nextV\n\t\telse:\n\t\t\t#Append to correctPath\n\t\t\tcorrectPath.append(subpath[-1])\n\t\t\tdel subpath[-1]\n\t\t\tif subpath == []:\n\t\t\t\tbreak\n\t\t\telse: \n\t\t\t\tcurrent = subpath[-1]\n\t\n\treturn correctPath[::-1]",
"def cycle_jewel(jewelType):\n if jewelType == 'd':\n return 'r'\n elif jewelType == 'r':\n return 'e'\n else:\n return 'd'",
"def CycleGraph(n):\n pos_dict = {}\n for i in range(n):\n x = float(cos((pi/2) + ((2*pi)/n)*i))\n y = float(sin((pi/2) + ((2*pi)/n)*i))\n pos_dict[i] = (x,y)\n G = graph.Graph(n,pos=pos_dict, name=\"Cycle graph\")\n G.add_cycle(range(n))\n return G",
"def attractorstring(self):\n attractorstring = \"\"\n _, attractor = RBN.get_cycle(self.nodes)\n for count, state in enumerate(attractor):\n attractorstring += str(count) + \" \" + str(state) + linesep\n return attractorstring",
"def cycle(num_vertices):\n\n # Instantiate a Graph\n pattern = Graph()\n # Populate it\n for u in range(num_vertices):\n pattern.add_edge(u, (u + 1) % num_vertices)\n # Return the cycle\n return pattern",
"def make_simple_cycle(N):\n G = nx.Graph()\n for i in range(N):\n if i == N - 1:\n G.add_edge(i, 0)\n else:\n G.add_edge(i, i + 1)\n labels = {}\n for node in G.nodes():\n labels[node] = str(node)\n return G",
"def as_cycle(self):\n c = [[0]]\n idx = [0]\n\n while len(c) < self._n:\n while self[c[-1][-1]] != c[-1][0] or len(c[-1]) == 1:\n c[-1] += [self[c[-1][-1]]]\n idx += [self[c[-1][-1]]]\n try:\n c += [[min(set(range(self._n))-set(idx))]]\n except ValueError:\n break\n\n c = ''.join(map(lambda x: str(tuple(map(lambda y: y+1, x))), filter(lambda x: x[0]!=x[1] if len(x)>1 else False, c)))\n\n return c",
"def create_cycle(cycle: Sequence[Union[str, int]]) -> QuantumCircuit:\n cycle = list(map(face_id_to_idx, cycle))\n label = f'cycle{str(cycle)}'\n\n qc = QuantumCircuit(cube_state_reg)\n p = np.eye(2**5)\n\n # Here we rearrange the columns of the identity matrix according to\n # the cycle to be implemented.\n first_col = np.copy(p[:, cycle[0]])\n for i in range(len(cycle) - 1):\n p[:, cycle[i]] = p[:, cycle[i + 1]] # Map i -> i+1\n p[:, cycle[-1]] = first_col\n\n # Verify that P is a permutation matrix.\n assert np.all(np.sum(p, axis=0) == 1) and np.all(np.sum(p, axis=1) == 1)\n\n # P is a permutation matrix, then P^{-1}=P^T and has real entries, thus P^H=P^T,\n # also PP^H=p^H P=I, hence P is unitary.\n qc.unitary(p, list(np.arange(5)), label=label)\n # By creating an arbitrary unitary (although these are all permutation matrices) we are\n # relaying on the transpiler to translate this operation into a sequence of basic gates.\n # Of course, this is a temporary solution.\n return qc",
"def make_cycle(n: int) -> Graph:\n assert(n >= 0 and n!=2)\n\n if n == 0: return Graph()\n g = make_path(n)\n g.add_edge(n-1, 0)\n return g",
"def __cycle_from_time(self, time):\n cycle = (time - self.__cycle_zero) / BeliefManager.CYCLE_LENGTH\n if cycle - int(cycle) > 0.0001:\n # If it doesn't go in perfectly, we should add one, because it's\n # technically in the next cycle.\n cycle += 1\n\n return max(int(cycle), 1)",
"def generate_cycle(self, workflow=None):\n if not workflow:\n _, workflow = self.generate_workflow()\n\n workflow = self._session_add(workflow) # this should be nicer\n\n obj_name = \"cycle\"\n\n obj_dict = {\n obj_name: {\n \"workflow\": {\n \"id\": workflow.id,\n \"type\": workflow.__class__.__name__,\n \"href\": \"/api/workflows/%d\" % workflow.id\n },\n \"context\": {\n \"id\": workflow.context.id,\n \"type\": workflow.context.__class__.__name__,\n \"href\": \"/api/workflows/%d\" % workflow.context.id\n },\n \"autogenerate\": \"true\"\n }\n }\n\n return self.generate(Cycle, obj_name, obj_dict)",
"def test_2_floyds_triangle_by_building_string():\n num = 1\n string = \"\"\n print(\"\\n\")\n for i in range(1, 6):\n for j in range(0, i):\n string = string + str(num)\n num = num + 1\n print(string)\n string = \"\"",
"def calc_degree_sequence(g, dest_file):\n func_intro = \"\\n\\nDegree Sequence ... \"\n logging.info(cs_ref, func_intro)\n print func_intro\n with open(dest_file, \"a\") as dat_file:\n dat_file.write(func_intro)\n\n degree_sequence = sorted(nx.degree(g).values(), reverse=True)\n with open(dest_file, \"a\") as dat_file:\n dat_file.write(\"\\n\\tDegree Sequence = \\t\" + str(degree_sequence))\n\n plt.loglog(degree_sequence, 'g-', marker='o')\n plt.title(\"Degree Rank/Sequence\" +src_file)\n plt.ylabel(\"degree\")\n plt.xlabel(\"rank\")\n gcc = sorted(nx.connected_component_subgraphs(g), key=len, reverse=True)[0]\n pos = nx.spring_layout(gcc)\n plt.axes([0.45, 0.45, 0.45, 0.45])\n plt.axis('off')\n nx.draw_networkx_nodes(gcc, pos, node_size=10)\n nx.draw_networkx_nodes(gcc, pos, alpha=0.4)\n plt.figure(1)\n plt.savefig(\"plots/cs1_degree_histogram.png\")\n plt.show()",
"def havel_hakimi_custom_graph(deg_sequence):\n\n if not (nx.is_valid_degree_sequence(deg_sequence) or nx.is_graphical(deg_sequence) or nx.is_valid_degree_sequence_erdos_gallai(deg_sequence)):\n raise nx.NetworkXError('Invalid degree sequence')\n\n p = len(deg_sequence)\n G=nx.empty_graph(p)\n num_degs = []\n for i in range(p):\n num_degs.append([])\n dmin, dmax, dsum, n = 10000000, 0, 0, 0\n for d in deg_sequence:\n # Process only the non-zero integers\n if d>0:\n num_degs[d].append(n)\n dmin, dmax, dsum, n = min(dmin,d), max(dmax,d), dsum+d, n+1\n # Return graph if no edges\n if n==0:\n return G\n\n modstubs = [(0,0)]*(dmax+1)\n # Successively reduce degree sequence by removing the maximum degree\n while n > 0:\n # Retrieve the maximum degree in the sequence\n while len(num_degs[dmax]) == 0:\n dmax -= 1;\n while len(num_degs[dmin]) == 0:\n dmax += 1;\n # If there are not enough stubs to connect to, then the sequence is\n # not graphical\n if dmax > n-1:\n raise nx.NetworkXError('Non-graphical integer sequence')\n \n # Remove most little stub in list\n source = num_degs[dmin].pop()\n n -= 1\n # Reduce the dmin largest stubs\n mslen = 0\n k = dmax\n for i in range(dmin):\n while len(num_degs[k]) == 0:\n k -= 1\n target = num_degs[k].pop()\n G.add_edge(source, target)\n n -= 1\n if k > 1:\n modstubs[mslen] = (k-1,target)\n mslen += 1\n # Add back to the list any nonzero stubs that were removed\n for i in range(mslen):\n (stubval, stubtarget) = modstubs[i]\n num_degs[stubval].append(stubtarget)\n n += 1\n\n G.name=\"havel_hakimi_graph %d nodes %d edges\"%(G.order(),G.size())\n return G",
"def cycle_length(number):\n if number % 2 == 0:\n return cycle_length(number / 2)\n if number % 5 == 0:\n return cycle_length(number / 5)\n\n i = 1\n while True:\n if (pow(10, i) - 1) % number == 0:\n return i\n i += 1",
"def _genCycleTable():\n\n table = {\n ('mov', Register, Register): 2,\n ('mov', Indirect, Register): 13,\n ('mov', Register, Indirect): 12,\n ('mov', Indirect, Literal): 14,\n ('mov', Register, Literal): 4,\n\n ('cmp', Register, Register): 3,\n ('cmp', Indirect, Register): 13,\n ('cmp', Register, Indirect): 12,\n ('cmp', Indirect, Literal): 14,\n ('cmp', Register, Literal): 4,\n\n ('test', Register, Register): 3,\n ('test', Indirect, Register): 13,\n ('test', Register, Indirect): 13,\n ('test', Indirect, Literal): 11,\n ('test', Register, Literal): 5,\n\n ('xchg', Register, Register): 4,\n ('xchg', Indirect, Register): 25,\n ('xchg', Register, Indirect): 25,\n\n ('imul', Register): 89, # Average (8-bit)\n ('imul', Indirect): 95,\n\n ('mul', Register): 73, # Average (8-bit)\n ('mul', Indirect): 79,\n\n ('div', Register): 85, # Average (8-bit)\n ('div', Indirect): 91,\n\n ('not', Register): 3,\n ('not', Indirect): 24,\n\n ('neg', Register): 3,\n ('neg', Indirect): 24,\n\n ('inc', Register): 3,\n ('inc', Indirect): 23,\n\n ('dec', Register): 3,\n ('dec', Indirect): 23,\n\n ('les', Register, Indirect): 24,\n\n ('jmp', Literal): 15,\n ('loop', Literal): 17,\n ('call', Register): 20,\n ('call', Literal): 23,\n ('ret',): 20,\n\n ('out', Literal, Register): 14,\n ('out', Register, Register): 12,\n\n ('in', Register, Literal): 14,\n ('in', Register, Register): 12,\n\n ('push', Register): 15,\n ('push', Indirect): 24,\n\n ('pop', Register): 12,\n ('pop', Indirect): 25,\n\n ('cmc',): 2,\n ('clc',): 2,\n ('stc',): 2,\n ('cbw',): 2,\n\n # Stubs for instructions that take a long and variable\n # amount of time to execute. No sane programmer would\n # use these in a timing-critical loop.. (fingers crossed)\n\n ('int', Literal): 0,\n ('rep_stosb',): 0,\n ('rep_stosw',): 0,\n ('rep_movsb',): 0,\n ('rep_movsw',): 0,\n }\n\n # Conditional jumps (assume jump taken)\n for op in 'jz jnz jc jnc js jns ja jnl jl jng jna jcxz'.split():\n table[(op, Literal)] = 16\n\n # All shifts and rotates are the same\n for op in ('shl', 'shr', 'rcl', 'rcr', 'sar', 'ror'):\n table.update({\n (op, Register, Register): 12,\n (op, Indirect, Register): 32, # This is why you see so many\n (op, Indirect, Literal): 23, # repeated shifts by 1...\n (op, Register, Literal): 2, # <-- Much cheaper.\n })\n\n # 2-operand ALU operations are mostly the same.\n for op in ('xor', 'and', 'or', 'add', 'sub', 'adc', 'sbb'):\n table.update({\n (op, Register, Register): 3,\n (op, Indirect, Register): 24,\n (op, Register, Indirect): 13,\n (op, Register, Literal): 4,\n (op, Indirect, Literal): 23,\n })\n\n return table"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a count of the edges (outputs) leaving the vertex
|
def num_of_outputs(vertex, graph):
if vertex in graph:
# simply count
return len(graph[vertex])
return 0
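# Illustrative usage sketch (not part of the original source): the function
# above expects a plain adjacency-list dict mapping each vertex to a list of
# its outgoing edges.
demo_graph = {'a': ['b', 'c'], 'b': ['c'], 'c': []}
assert num_of_outputs('a', demo_graph) == 2
assert num_of_outputs('c', demo_graph) == 0
assert num_of_outputs('z', demo_graph) == 0  # unknown vertex falls through to 0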
|
[
"def out_edge_count(self):",
"def number_of_edges(self) -> int:\n count = 0\n for vertice in self.__graph:\n count += len(self.__graph[vertice])\n return count // 2",
"def number_of_interface_vertices(self):\n return sum(len(attr['interface_points']) for u, v, attr in self.edges(True))",
"def get_num_edges(graph):\n return sum(cell for row in graph for cell in row) // 2",
"def numConnectedEdges(*args, **kwargs):\n \n pass",
"def getNumVertices(self): \n return self.__V",
"def getNumEdges(self): \n return self.__E",
"def num_vertices(self):\n pass",
"def number_of_loops_graph(self) -> int:\n\n count = 0\n for vertice in self.__graph:\n count += self.number_of_loops_vertice(vertice)\n\n return count",
"def count_neighbors(self, vert, out=True, cond=False, less=True, cutoff=0):\n \n pass",
"def degree_of_connectivity(G, v):\n if isinstance(G, nx.Graph):\n return len(G.edges(v))\n elif isinstance(G, nx.DiGraph):\n return len(G.in_edges(v)) + len(G.out_edges(v))\n else:\n raise TypeError(\"check type of G\")",
"def get_total_edges_count(self) -> int:\n return self.edge_record_count",
"def num_vertices(self):\n return self._num_vertices",
"def test_vertex_edge_count1(self):\n sum_of_the_degrees = sum( [ len( list( self.G[v] ) ) for v in self.G ] )\n number_of_edges = len( self.G.edges() )\n assert sum_of_the_degrees == number_of_edges * 2, \"sum of degrees: %i, num of edges: %i does not satisfy relationship\" % ( sum_of_the_degrees, number_of_edges )",
"def count_nodes(self):\n return len(self._graph)",
"def count_zero_vertices(self):\n return sum(1 for V in self.V if V.degree == 0)",
"def edge_sum(self):\n return len(self.edges)",
"def count_components(self, graph):\n\n component_count = 0\n visited = set()\n\n for node in graph.nodes():\n if node not in visited:\n self._explore(graph, node, visited)\n component_count += 1\n\n return component_count",
"def num_selected_edges(df):\n\n # Count number of edges going from and to defined proteins\n number_of_selected_edges = len(df.index.get_level_values(0))\n\n return number_of_selected_edges"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Decorator for views that catches the ObjectDoesNotExist exception. If redirect is None, raise Http404; otherwise, redirect to the given URL.
|
def object_does_not_exist(view_func=None, redirect=None):
def decorator(view_func):
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
try:
return view_func(request, *args, **kwargs)
except ObjectDoesNotExist:
if redirect:
return HttpResponseRedirect(redirect)
else:
raise Http404()
return _wrapped_view
if not view_func:
def foo(view_func):
return decorator(view_func)
return foo
else:
return decorator(view_func)
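# Illustrative usage sketch (not part of the original source), shown as
# comments because it needs a configured Django project. `Article` is a
# hypothetical model; Model.objects.get() raises a subclass of
# ObjectDoesNotExist when nothing matches, which the decorator turns into
# Http404, or into a redirect when `redirect` is supplied.
#
# @object_does_not_exist
# def article_detail(request, pk):
#     article = Article.objects.get(pk=pk)
#     return render(request, 'articles/detail.html', {'article': article})
#
# @object_does_not_exist(redirect='/articles/')
# def article_detail_or_back(request, pk):
#     article = Article.objects.get(pk=pk)
#     return render(request, 'articles/detail.html', {'article': article})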
|
[
"def redirect_error_handler(redirect_path: str, exception: Exception, **kwargs) -> RedirectResponse:\n return RedirectResponse(urls.with_query_params(redirect_path, error=exception, **kwargs))",
"def handle404(request):\n\n if request.path.endswith('/'):\n fixed_path = request.path[:-1]\n query_string = request.META.get('QUERY_STRING', '')\n if query_string:\n path = '%s?%s' % (fixed_path, query_string)\n else:\n path = fixed_path\n\n return HttpResponsePermanentRedirect(path)\n\n return jingo.render(request, 'handlers/404.html', status=404)",
"def catch_404_JSON(func):\n def wrapper(request, *args, **kwargs):\n try:\n return func(request, *args, **kwargs)\n except Http404 as err:\n return render_JSON_Error('404 raised.', {\n 'message': err.args[0],\n })\n return wrapper",
"def get_object_or_404_customed(queryset, *filter_args, **filter_kwargs):\n try:\n return _get_object_or_404(queryset, *filter_args, **filter_kwargs)\n except (TypeError, ValueError, ValidationError):\n raise Http404\n except Http404 as e:\n # print(e)\n # print(type(e))\n raise CustomAPIException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"{queryset.__name__}'s object is not found!\",\n object=queryset.__name__,\n )",
"def handle_404(request, response, exception):\n logging.exception(exception)\n blog_error = BlogError(request, response)\n return blog_error.get(\"Page not found\")",
"def landing(request):\n try:\n location = Location.objects.all()[0]\n except IndexError:\n raise Http404\n\n return HttpResponseRedirect(location.get_absolute_url())",
"def get_object_or_404(queryset, *filter_args, **filter_kwargs):\n try:\n return _get_object_or_404(queryset, *filter_args, **filter_kwargs)\n except MultipleObjectsReturned:\n raise Http404",
"def base_404(request, response, *kargs, **kwargs):\n response.status = falcon.HTTP_NOT_FOUND",
"def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if session.get(\"user_id\") is None:\n return redirect(url_for(\"index\"))\n\n return view(**kwargs)\n\n return wrapped_view",
"def get_object(self):\n obj = super().get_object()\n if obj.status != URLModel.DONE:\n raise Http404\n return obj",
"def get_object_or_404(interesting, *args, **kwargs):\n queryset = _get_queryset(interesting)\n #queryset = interesting.objects (MongoDB)\n try:\n return queryset.get(*args, **kwargs)\n except queryset.model.DoesNotExist:\n #except ValidationError: (MongoDB)\n raise APIException.not_found()",
"def shortcut(request, content_type_id, object_id):\r\n # Look up the object, making sure it's got a get_absolute_url() function.\r\n try:\r\n content_type = ContentType.objects.get(pk=content_type_id)\r\n except (ContentType.DoesNotExist, ValueError):\r\n raise http.Http404(_(\"Content type %(ct_id)s object %(obj_id)s doesn't exist\") %\r\n {'ct_id': content_type_id, 'obj_id': object_id})\r\n \r\n if not content_type.model_class():\r\n raise http.Http404(_(\"Content type %(ct_id)s object has no associated model\") %\r\n {'ct_id': content_type_id})\r\n try:\r\n obj = content_type.get_object_for_this_type(pk=object_id)\r\n except (content_type.model_class().DoesNotExist, ValueError):\r\n raise http.Http404(_(\"Content type %(ct_id)s object %(obj_id)s doesn't exist\") %\r\n {'ct_id': content_type_id, 'obj_id': object_id})\r\n\r\n try:\r\n get_absolute_url = obj.get_absolute_url\r\n except AttributeError:\r\n raise http.Http404(_(\"%(ct_name)s objects don't have a get_absolute_url() method\") %\r\n {'ct_name': content_type.name})\r\n absurl = get_absolute_url()\r\n\r\n # Try to figure out the object's domain, so we can do a cross-site redirect\r\n # if necessary.\r\n\r\n # If the object actually defines a domain, we're done.\r\n if absurl.startswith('http://') or absurl.startswith('https://'):\r\n return http.HttpResponseRedirect(absurl)\r\n\r\n # Otherwise, we need to introspect the object's relationships for a\r\n # relation to the Site object\r\n object_domain = None\r\n\r\n if Site._meta.installed:\r\n opts = obj._meta\r\n\r\n # First, look for an many-to-many relationship to Site.\r\n for field in opts.many_to_many:\r\n if field.rel.to is Site:\r\n try:\r\n # Caveat: In the case of multiple related Sites, this just\r\n # selects the *first* one, which is arbitrary.\r\n object_domain = getattr(obj, field.name).all()[0].domain\r\n except IndexError:\r\n pass\r\n if object_domain is not None:\r\n break\r\n\r\n # Next, look for a many-to-one relationship to Site.\r\n if object_domain is None:\r\n for field in obj._meta.fields:\r\n if field.rel and field.rel.to is Site:\r\n try:\r\n object_domain = getattr(obj, field.name).domain\r\n except Site.DoesNotExist:\r\n pass\r\n if object_domain is not None:\r\n break\r\n\r\n # Fall back to the current site (if possible).\r\n if object_domain is None:\r\n try:\r\n object_domain = get_current_site(request).domain\r\n except Site.DoesNotExist:\r\n pass\r\n\r\n # If all that malarkey found an object domain, use it. Otherwise, fall back\r\n # to whatever get_absolute_url() returned.\r\n if object_domain is not None:\r\n protocol = 'https' if request.is_secure() else 'http'\r\n return http.HttpResponseRedirect('%s://%s%s'\r\n % (protocol, object_domain, absurl))\r\n else:\r\n return http.HttpResponseRedirect(absurl)",
"def raise404_if_different(o1, o2):\n from django.http import Http404\n\n if o1 != o2:\n raise Http404",
"def initialize_redirect(self, *args, **kwargs):\n def immediately_redirect():\n _RequestHandler.redirect(self, *args, **kwargs)\n self.get = immediately_redirect\n raise RedirectError",
"def template_redirect(to, *args, permanent=False, **kwargs):\n redirect_class = HttpResponsePermanentRedirect if permanent else HttpResponseRedirect\n return redirect_class(resolve_url(to, *args, **kwargs))",
"def login_required(view):\n\n @wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for('auth.login', _external=True))\n return view(**kwargs)\n return wrapped_view",
"def safe_redirect(target, endpoint=\"home.index\"):\r\n if not target or not is_safe_url(target):\r\n target = url_for(endpoint)\r\n return redirect(target)",
"def get_object_or_404(klass, *args, **kwargs):\n queryset = _get_queryset(klass)\n if not hasattr(queryset, \"get\"):\n klass__name = (\n klass.__name__ if isinstance(klass, type) else klass.__class__.__name__\n )\n raise ValueError(\n \"First argument to get_object_or_404() must be a Model, Manager, \"\n \"or QuerySet, not '%s'.\" % klass__name\n )\n try:\n return queryset.get(*args, **kwargs)\n except queryset.model.DoesNotExist:\n raise Http404(\n \"No %s matches the given query.\" % queryset.model._meta.object_name\n )",
"def anonimous_required(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n if request.user.is_authenticated():\n return HttpResponseRedirect(resolve_url(settings.LOGIN_REDIRECT_URL))\n else:\n return view_func(request, *args, **kwargs)\n return _wrapped_view",
"def test_redirect_suppression(self):\n rev = RedirectRevisionFactory()\n redirect = rev.document\n response = self.client.get(redirect.get_absolute_url() + \"?redirect=no\", follow=True)\n self.assertContains(response, \"REDIRECT \")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read a CSV file from PECO into a pandas DataFrame
|
def read_PECO_csv(datafile):
if hasattr(datafile, 'read'):
# Read buffer directly
df = pandas.read_csv(datafile, skiprows=4)
else:
# Read in usage log (csv format, probably specific to PECO)
df = pandas.read_csv(root+datafile+'.csv', skiprows=4)
# Convert costs (drop dollar sign and convert to float)
df['COST'] = df['COST'].str.slice(1).apply(lambda x: float(x))
df = _add_convieant_cols(df)
return df
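# Illustrative sketch (not part of the original source): the same two parsing
# steps applied to an in-memory buffer. The four preamble lines and the
# '$'-prefixed COST column below are made up to mimic the export layout the
# function assumes (skiprows=4, COST values like "$0.05").
import io
import pandas

_sample = io.StringIO(
    "Name,Jane Doe\n"
    "Address,123 Main St\n"
    "Account Number,0000\n"
    "Service,Electric\n"
    "TYPE,DATE,START TIME,END TIME,USAGE,UNITS,COST,NOTES\n"
    "Electric usage,2016-01-01,00:00,00:59,0.43,kWh,$0.05,\n"
)
_demo = pandas.read_csv(_sample, skiprows=4)              # drop the 4 preamble lines
_demo['COST'] = _demo['COST'].str.slice(1).astype(float)  # "$0.05" -> 0.05
print(_demo[['DATE', 'USAGE', 'COST']])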
|
[
"def load_csv():\n\ttry:\n\t\tdf = pd.read_csv(DATASET_CSV_PATH)\n\texcept:\n\t\tprint('Error reading %s. Make sure file exists or try to regenerate it using generate_csv() method.')\n\t\tdf = pd.DataFrame()\n\n\treturn df",
"def load_data(path):\n\n df = pd.read_csv(path)\n return df",
"def load_csv(path, columns):\n if os.path.isfile(path):\n data = pd.read_csv(path)\n else:\n # Create an empty frame.\n data = pd.DataFrame(columns=columns)\n data.set_index(\"GEOID\")\n return data",
"def from_csv(cls, filepath_or_buffer): \n records = pd.read_csv(filepath_or_buffer)\n\n return cls(records)\n\n # ------------------------------------------------------------------\n # Old implementation kept for future use:\n\n # # Read the data from the csv file, assuming the third column of the\n # # file represents timestamp and parsing it as a datetime.\n # records = pd.read_csv(\n # filepath,\n # index_col=[0, 1],\n # header=[0, 1], \n # parse_dates=[2]\n # )\n\n # # Convert the index's 'offset' level to TimedeltaIndex.\n # records.index = records.index.set_levels(\n # pd.TimedeltaIndex(data.index.get_level_values('offset')),\n # level='offset')\n\n # # Fix column level values, an artifact of blank level values in a\n # # .csv file.\n # fields = data.columns.get_level_values('field')\n\n # #srcs = data.columns.get_level_values('source').str.replace('Un.*', 'device')\n # srcs = data.columns.get_level_values('elev_source').str.replace('Un.*', 'device')\n \n # col_tups = [(field, src) for field, src in zip(fields, srcs)]\n # data.columns = pandas.MultiIndex.from_tuples(col_tups,\n # names=['field', 'source'])\n # data['time', 'device'] = \\\n # (data['timestamp', 'device'] \\\n # - data['timestamp', 'device'].iloc[0]).dt.total_seconds()\n\n # ------------------------------------------------------------------",
"def _read_into_dataframe(self):\n if(self._filename.endswith('.csv') or self._filename.endswith('.tsv')):\n separator = define_separator(self._filename)\n self._data = read_csv(self._filename, sep=separator)\n else:\n raise NotImplementedError(\"File formats different from ['csv', 'tsv'] are not implemented yet.\")",
"def import_csv_to_df(filename):\n\n return pd.read_csv(filename, index_col=0)",
"def csv_to_pd(csvfname):\n csvfile = open(csvfname)\n\n line = 'test'\n counter = 0\n while line != 'Data starts here.\\n':\n line = csvfile.readline()\n counter = counter + 1\n\n data = pd.read_csv(csvfname, skiprows=counter)\n data.sort_values(['Track_ID', 'Frame'], ascending=[1, 1])\n\n return data",
"def convert_to_df(path):\n return pd.read_csv(path, sep='\\t')",
"def parse(file_name):\n return pd.read_csv(file_name, na_values='---')",
"def pines_log_reader(path):\n try:\n df = pd.read_csv(path)\n except:\n print('Something is wrong with {}, inspect!'.format(path.name))\n breakpoint()\n\n # Remove trailing/leading spaces from column names\n df.columns = df.columns.str.lstrip()\n df.columns = df.columns.str.rstrip()\n\n # Remove our header comment idicator in the first column if it's there.\n if '#' in df.columns[0]:\n df.rename(\n columns={df.columns[0]: df.columns[0].replace('#', '')}, inplace=True)\n\n # Remove trailing and leading spaces from log entries.\n for key in df.keys():\n try:\n df[key] = df[key].str.strip()\n except:\n continue\n\n return df",
"def csv_import(name, sep, header):\n csv_file = pd.read_csv(name, sep = sep, header = header) ##loading data using read_csv from pandas\n return csv_file #returning the data structure",
"def read_csv(self, file):\n blob_client = self.blobservice.get_blob_client(container=self.container_name, blob=file)\n dl = blob_client.download_blob()\n df = pd.read_csv(StringIO(dl.content_as_text()))\n return df",
"def csv_to_dataframe(csv):\n data = pd.read_csv(csv,thousands='.', decimal=',', index_col=[0])\n return data",
"def load_index(path:str) -> PandasDf:\n return pd.read_csv(path)",
"def open_csv(file, folder, separator):\n if folder != 'inputs':\n path = os.path.join('inputs', folder, file)\n else:\n path = os.path.join('inputs', file)\n return pd.read_csv(path , sep = separator, engine='python')",
"def read_ballistic_hop_csv(bhop_csv):\n return pd.read_csv(bhop_csv, header=0, index_col=0)",
"def make_dataframe(csv):\n try:\n dataframe = pd.read_table(csv, sep=\"\\s+|,\", engine=\"python\")\n except:\n error(\"{} does not exist or cannot be read\".format(csv),\n continue_exec=False)\n return dataframe",
"def cria_df_cadastral(self):\n log.debug(\"Carregando csv cadastral\")\n create_dir(CSV_FILES_DIR)\n self.download_inf_cadastral()\n self.pd_df = pd.read_csv(\n self.filename, sep=\";\", encoding=\"ISO-8859-1\", index_col=\"CNPJ_FUNDO\"\n )",
"def parse_data(filename):\n df = pd.read_csv(filename, names = [\"User ID\", \"Gender\", AGE, \"Occupation\", \"Star Sign\", \"date\", \"text\"])\n return df",
"def read_order_data() -> pd.DataFrame:\n return read_data_with_columns(r'data/raw/orders/order_data.csv', r'data/raw/orders/order_columns.txt')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read an XML file in Green Button (GB) format
|
def read_GB_xml(datafile):
from BeautifulSoup import BeautifulStoneSoup
if hasattr(datafile, 'read'):
# Read buffer directly
soup = BeautifulStoneSoup(datafile.read())
else:
        # Read in usage log (Green Button XML file)
with open(datafile) as f:
soup = BeautifulStoneSoup(f.read())
    # Create data appropriate for the current dataframe format
data = []
getDate = lambda x: datetime.fromtimestamp(x).strftime('%Y-%m-%d')
getStart = lambda x: datetime.fromtimestamp(x).strftime('%H:%M')
getEnd = lambda x, y: datetime.fromtimestamp(x+y).strftime('%H:%M')
for r in soup.findAll('intervalreading'):
dt = int(r.start.string)
dur = int(r.duration.string)
try:
cost = float(r.cost.string)*(10.0**-5)
except AttributeError:
cost = None
row = ['Electric usage', getDate(dt), getStart(dt), getEnd(dt, dur),
#TYPE DATE START TIME END TIME
float(r.value.string)/1000, 'kWh', cost, '']
#USAGE UNIT COST NOTES
data.append(row)
df = pandas.DataFrame(data=data,columns=['TYPE', 'DATE', 'START TIME',
'END TIME','USAGE', 'UNITS', 'COST', 'NOTES'])
df = _add_convieant_cols(df)
return df
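# Illustrative sketch (not part of the original source): pulling the same
# fields with the standard library's ElementTree instead of BeautifulStoneSoup
# (a BeautifulSoup 3 API). The toy input below omits the Atom/ESPI namespaces
# a real Green Button feed carries, so tag names can be matched directly.
import xml.etree.ElementTree as ET
from datetime import datetime as _dt

_sample_xml = """<IntervalBlock>
  <IntervalReading>
    <cost>5500</cost>
    <timePeriod><duration>3600</duration><start>1451624400</start></timePeriod>
    <value>430</value>
  </IntervalReading>
</IntervalBlock>"""
for _r in ET.fromstring(_sample_xml).iter('IntervalReading'):
    _start = int(_r.find('.//start').text)
    _dur = int(_r.find('.//duration').text)
    _usage_kwh = float(_r.find('value').text) / 1000               # Wh -> kWh
    _cost_el = _r.find('cost')
    _cost = float(_cost_el.text) * (10.0 ** -5) if _cost_el is not None else None
    print(_dt.fromtimestamp(_start).strftime('%Y-%m-%d %H:%M'), _usage_kwh, _cost)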
|
[
"def read_xml_file(self, xml_fn):\n pass",
"def read_xml_file(self):\r\n\r\n #Find the root of xml tree.\r\n xml_tree = ET.parse(self.xml_file_path + \"pic{}.xml\".format(self.file_index))\r\n root = xml_tree.getroot()\r\n\r\n return root",
"def read_xml(path_to_xml, verbose=True, n_records=None):\n with open(path_to_xml, 'r') as file:\n records = parse_records(file, verbose, n_records)\n\n return records",
"def ScXMLDocument_readFile(filename: 'char const *') -> \"ScXMLDocument *\":\n return _coin.ScXMLDocument_readFile(filename)",
"def load_xml(self):\n try:\n self.root = XMLReader(self.path).root\n\n #for sign in self.root.findall('./signs/sign'):\n # self.load_sign_xml(sign)\n\n for block in self.root.findall('./blocks/block'):\n self.load_block_xml(block)\n\n # load replacments etc...\n except Exception, e:\n log.exception('error loading buildfile')",
"def open_xml(self, file_name):\r\n tree = ET.parse(file_name)\r\n root = tree.getroot()\r\n return root",
"def read_xml(fname):\n tree = ET.parse(fname)\n root = tree.getroot()\n \n return tree, root",
"def readFile(filename: 'char const *') -> \"ScXMLDocument *\":\n return _coin.ScXMLDocument_readFile(filename)",
"def has_30k_or_fewer_records(medline_xml, parser=None, tree=None):",
"def readXML(filename):\n with open(filename) as fd:\n legal_doc_xml = fd.read()\n return legal_doc_xml",
"def read_eng_pkt (EngPktFile): \n print(EngPktFile)\n with open(EngPktFile) as f: \n xml = f.read()\n x = xmltodict.parse(xml)\n \n return x",
"def read_sgems_variogram_xml(xml_file,return_type=GeoStruct):\n try:\n import xml.etree.ElementTree as ET\n\n except Exception as e:\n print(\"error import elementtree, skipping...\")\n VARTYPE = {1: SphVario, 2: ExpVario, 3: GauVario, 4: None}\n assert os.path.exists(xml_file)\n tree = ET.parse(xml_file)\n gs_model = tree.getroot()\n structures = []\n variograms = []\n nugget = 0.0\n num_struct = 0\n for key,val in gs_model.items():\n #print(key,val)\n if str(key).lower() == \"nugget\":\n if len(val) > 0:\n nugget = float(val)\n if str(key).lower() == \"structures_count\":\n num_struct = int(val)\n if num_struct == 0:\n raise Exception(\"no structures found\")\n if num_struct != 1:\n raise NotImplementedError()\n for structure in gs_model:\n vtype, contribution = None, None\n mx_range,mn_range = None, None\n x_angle,y_angle = None,None\n #struct_name = structure.tag\n for key,val in structure.items():\n key = str(key).lower()\n if key == \"type\":\n vtype = str(val).lower()\n if vtype.startswith(\"sph\"):\n vtype = SphVario\n elif vtype.startswith(\"exp\"):\n vtype = ExpVario\n elif vtype.startswith(\"gau\"):\n vtype = GauVario\n else:\n raise Exception(\"unrecognized variogram type:{0}\".format(vtype))\n\n elif key == \"contribution\":\n contribution = float(val)\n for item in structure:\n if item.tag.lower() == \"ranges\":\n mx_range = float(item.attrib[\"max\"])\n mn_range = float(item.attrib[\"min\"])\n elif item.tag.lower() == \"angles\":\n x_angle = float(item.attrib[\"x\"])\n y_angle = float(item.attrib[\"y\"])\n\n assert contribution is not None\n assert mn_range is not None\n assert mx_range is not None\n assert x_angle is not None\n assert y_angle is not None\n assert vtype is not None\n v = vtype(contribution=contribution,a=mx_range,\n anisotropy=mx_range/mn_range,bearing=(180.0/np.pi)*np.arctan2(x_angle,y_angle),\n name=structure.tag)\n return GeoStruct(nugget=nugget,variograms=[v])",
"def readFile(filename):\n GZIPMAGIC = '\\037\\213'\n\n try:\n fh = open(filename, 'rb')\n except IOError:\n raise SVGError(\"could not open file '%s' for reading\" % filename)\n\n # test for gzip compression\n magic = fh.read(2)\n fh.close()\n\n if magic == GZIPMAGIC:\n svg = etree.parse(GzipFile(filename, 'r'))\n else:\n svg = etree.parse(filename)\n\n root = svg.getroot()\n\n if not root.tag == Renderer.SVG_ROOT:\n raise SVGError(\"Expected SVG fragment as root object\")\n\n return root",
"def readXMLData(xmldoc: 'cc_xml_doc *') -> \"ScXMLDocument *\":\n return _coin.ScXMLDocument_readXMLData(xmldoc)",
"def read_scf_data(xml_file):\n \n data_file_xml = et.parse(xml_file)\n data_file_root = data_file_xml.getroot()\n\n output_node = data_file_root.find('output')\n\n band_node = output_node.find('band_structure')\n fermi_en = float(band_node.find('fermi_energy').text)*27.21138602\n\n return fermi_en",
"def loading_xml(self):\n\n dom = minidom.parse(self.filepath)\n return dom",
"def ScXMLDocument_readXMLData(xmldoc: 'cc_xml_doc *') -> \"ScXMLDocument *\":\n return _coin.ScXMLDocument_readXMLData(xmldoc)",
"def ScXML_readFile(filename: 'char const *') -> \"ScXMLStateMachine *\":\n return _coin.ScXML_readFile(filename)",
"def get_xml_content(self, xml_path):\n if not xml_path.suffix == \".xml\":\n raise DemInputXmlException(\"指定できる形式は.xmlのみです\")\n\n name_space = {\n \"dataset\": \"http://fgd.gsi.go.jp/spec/2008/FGD_GMLSchema\",\n \"gml\": \"http://www.opengis.net/gml/3.2\",\n }\n\n try:\n tree = et.parse(xml_path)\n root = tree.getroot()\n mesh_code = int(\n root.find(\n \"dataset:DEM//dataset:mesh\",\n name_space\n ).text\n )\n except et.ParseError:\n raise DemInputXmlException(\"不正なxmlです\")\n\n raw_metadata = {\n \"mesh_code\": mesh_code,\n \"lower_corner\": root.find(\n \"dataset:DEM//dataset:coverage//gml:boundedBy//gml:Envelope//gml:lowerCorner\",\n name_space,\n ).text,\n \"upper_corner\": root.find(\n \"dataset:DEM//dataset:coverage//gml:boundedBy//gml:Envelope//gml:upperCorner\",\n name_space,\n ).text,\n \"grid_length\": root.find(\n \"dataset:DEM//dataset:coverage//gml:gridDomain//gml:Grid//gml:high\",\n name_space,\n ).text,\n \"start_point\": root.find(\n \"dataset:DEM//dataset:coverage//gml:coverageFunction//gml:GridFunction//gml:startPoint\",\n name_space,\n ).text,\n }\n\n meta_data = self._format_metadata(raw_metadata)\n\n tuple_list = root.find(\n \"dataset:DEM//dataset:coverage//gml:rangeSet//gml:DataBlock//gml:tupleList\",\n name_space,\n ).text\n\n # Create a two-dimensional array list like [[地表面,354.15]...]\n if tuple_list.startswith(\"\\n\"):\n strip_tuple_list = tuple_list.strip()\n items = [item.split(\",\")[1]\n for item in strip_tuple_list.split(\"\\n\")]\n else:\n items = [item.split(\",\")[1] for item in tuple_list.split(\"\\n\")]\n\n elevation = {\"mesh_code\": mesh_code, \"items\": items}\n\n return {\n \"mesh_code\": mesh_code,\n \"meta_data\": meta_data,\n \"elevation\": elevation,\n }"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a density cloud of the data for a given tag or group of tags
|
def density_cloud_by_tags(df, columns, silent=False):
figures = []
if columns == 'hr' or 'hr' in columns:
raise ValueError("Columns cannot contain hr tag")
# Create a profile for day of week
maxY = df['USAGE'].max()
for label, data in df.groupby(columns):
# Find mean
mean = data.groupby('hr')['USAGE'].agg('mean')
# Add in any missing hours
for h in set(range(24)) - set(data['hr']):
mean = mean.set_value(h, None)
# Create a density cloud of the MW
X = np.zeros([24, 100]) # Hours by resolution
Y = np.zeros([24, 100])
C = np.zeros([24, 100])
for hr, data2 in data.groupby('hr'):
freq = []
step = 1
rng = range(0,51,step)[1:]
freq += rng
bins = np.percentile(data2['USAGE'], rng)
rng = range(50,101,step)[1:]
freq += [100 - a for a in rng]
bins = np.hstack([bins, np.percentile(data2['USAGE'], rng)])
freq = np.array(freq)
X[hr,:] = np.ones(len(bins))*hr
Y[hr,:] = bins
C[hr,:] = freq
plt.figure()
#plt.xkcd()
plt.pcolor(X, Y, C, cmap=plt.cm.YlOrRd)
plt.plot(X[:,1], mean, color='k', label='Mean')
plt.colorbar().set_label('Probability Higher/Lower than Median')
plt.legend(loc='upper left')
plt.xlabel('Hour of Day')
plt.ylabel('Usage (kWh)')
plt.ylim([0, maxY])
plt.xlim([0,23])
plt.title('Typical usage on %s' % str(label))
plt.grid(axis='y')
figures.append(plt.gcf())
if not silent:
plt.show()
return figures
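# Illustrative usage sketch (not part of the original source): the column
# names 'USAGE' and 'hr' come from the function above; the 'weekday' tag and
# the gamma-distributed usage values are synthetic. Assumes numpy, pandas and
# matplotlib are importable, as the function itself already requires.
import numpy as np
import pandas

_rs = np.random.RandomState(0)
_n = 24 * 200
_demo = pandas.DataFrame({
    'hr': np.tile(np.arange(24), 200),
    'USAGE': _rs.gamma(2.0, 0.25, _n),
    'weekday': _rs.choice(['weekday', 'weekend'], _n),
})
_figs = density_cloud_by_tags(_demo, 'weekday', silent=True)
print('%d density-cloud figures created' % len(_figs))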
|
[
"def tag_cloud(self, steps=4, distribution=LOGARITHMIC, filters=None, min_count=None):\r\n\t\t\r\n\t\tfrom utils import calculate_cloud\r\n\t\t\r\n\t\treturn calculate_cloud(Tagging, steps, distribution)",
"def density_cluster(Data,iradius, Clusters): #This function classifies data points into clusters and noise points",
"def create_density_list():\r\n food = pd.read_csv(PATH + r\"\\FoodData_legacy\\food.csv\")\r\n food_portion = pd.read_csv(PATH + r\"\\FoodData_legacy\\food_portion.csv\")\r\n\r\n joined_ingredient_list = pd.merge(food_portion, food, on=\"fdc_id\", how=\"left\")\r\n\r\n clean_list = joined_ingredient_list[[\"description\", \"gram_weight\", \"modifier\"]]\r\n\r\n clean_list = clean_list[clean_list[\"modifier\"].isin([\"cup\", \"tbsp\", \"tsp\"])]\r\n\r\n clean_list[\"ingredient\"] = (\r\n clean_list[\"description\"].str.split(\",\").str[0].str.strip().str.lower()\r\n )\r\n clean_list[\"attribute\"] = (\r\n clean_list[\"description\"].str.split(\",\").str[1].str.strip().str.lower()\r\n )\r\n clean_list = clean_list.fillna(\"\")\r\n\r\n grouped_multiple = clean_list.groupby([\"ingredient\", \"attribute\", \"modifier\"])[\r\n \"gram_weight\"\r\n ].mean()\r\n grouped_multiple = grouped_multiple.reset_index()\r\n\r\n grouped_multiple[\"density\"] = np.where(\r\n grouped_multiple[\"modifier\"] == \"cup\",\r\n grouped_multiple[\"gram_weight\"] / 240,\r\n np.where(\r\n grouped_multiple[\"modifier\"] == \"tsp\",\r\n grouped_multiple[\"gram_weight\"] / 4.9,\r\n np.where(\r\n grouped_multiple[\"modifier\"] == \"tbsp\",\r\n grouped_multiple[\"gram_weight\"] / 14.7,\r\n grouped_multiple[\"gram_weight\"],\r\n ),\r\n ),\r\n )\r\n\r\n density_list = grouped_multiple.groupby([\"ingredient\", \"attribute\"])[\r\n \"density\"\r\n ].mean()\r\n density_list = density_list.reset_index()\r\n\r\n density_avg = density_list.groupby(\"ingredient\")[\"density\"].mean()\r\n density_list = density_list.set_index(\"ingredient\")\r\n density_list[\"density_avg\"] = density_avg\r\n density_list = density_list.reset_index()\r\n\r\n density_list.insert(0, \"ingredient_combined\", \"\")\r\n density_list[\"ingredient_combined\"] = (\r\n density_list[[\"attribute\", \"ingredient\"]].agg(\" \".join, axis=1).str.strip()\r\n )\r\n\r\n return density_list",
"def tag_cloud():\n\n cache_key = 'tag_cloud_tags'\n tags = cache.get(cache_key)\n if tags is None:\n MAX_WEIGHT = 7\n tags = Tag.objects.annotate(count=Count('article'))\n\n if len(tags) == 0:\n # go no further\n return {}\n\n min_count = max_count = tags[0].article_set.count()\n for tag in tags:\n if tag.count < min_count:\n min_count = tag.count\n if max_count < tag.count:\n max_count = tag.count\n\n # calculate count range, and avoid dbz\n _range = float(max_count - min_count)\n if _range == 0.0:\n _range = 1.0\n\n # calculate tag weights\n for tag in tags:\n tag.weight = int(MAX_WEIGHT * (tag.count - min_count) / _range)\n\n cache.set(cache_key, tags)\n\n return {'tags': tags}",
"def make_tag_cloud(labels_text, input_fname, output_dir):\n print(\"Let's make tag cloud\")\n for label, text in labels_text.iteritems():\n fig_name = input_fname + \"_label{}.png\".format(label)\n fig_path = output_dir + \"/\" + fig_name\n tags = make_tags(get_tag_counts(text), maxsize=80)\n create_tag_image(tags, fig_path, size=(900, 600))\n print(\"label {} finished\".format(label))",
"def density_at_points(data):\n data = numpy.asarray(data)\n kd = kde.gaussian_kde(data.T)\n return kd(data.T)",
"def voxel_occupancy_features(point_cloud_path,n_X=8,n_Y=8,n_Z=8):\n \n cloud = pyntcloud.PyntCloud.from_file(point_cloud_path)\n voxel_grid_cloud= cloud.add_structure(\"voxelgrid\", n_x=n_X, n_y=n_Y, n_z=n_Z)\n voxelgrid = cloud.structures[voxel_grid_cloud]\n\n density_feature_vector = voxelgrid.get_feature_vector(mode=\"density\").reshape(-1)\n\n return density_feature_vector",
"def CreateClouds( data = None, labels = None, names = None, \n n_clusters = None, save = 1, dirCreate = 1, \n filename = 'Clouds',\n dirName = 'Cdir', TFIDF = 1 ):\n dictions = CalculateCounts(data = data, names = names, TFIDF = TFIDF,\n labels = labels, n_clusters = n_clusters)\n \n for i in np.arange( n_clusters ):\n \n filenamenew = filename + str(i)\n clouds(counts = dictions[i], filename = filenamenew,\n dirName = dirName, dirCreate = dirCreate)\n \n return dictions",
"def xyz_density(box, elements, mole, out_fmt, header, dr, time, container, debug=False):\n for eles in elements:\n # XYZ density stats for each group of specified elements.\n e = ''.join(eles)\n print('XYZ density stats for {e}\\n'.format(e=e))\n # XYZ density.\n xyz_density = box.xyz_density(eles, mole, dr)\n # Write to disk.\n xout = out_fmt.format(time=time, ele=e, xyz='x')\n yout = out_fmt.format(time=time, ele=e, xyz='y')\n zout = out_fmt.format(time=time, ele=e, xyz='z')\n\n write_density(xyz_density['x'], dr, xout, header)\n write_density(xyz_density['y'], dr, yout, header)\n write_density(xyz_density['z'], dr, zout, header)\n\n if debug:\n # For testing.\n with open(container, 'a') as cc:\n out = '\\n'.join([xout, yout, zout, ''])\n cc.write(out)",
"def cloudgen(numpoint, numfeat, numnoise, fracpos, width):\n numpos = int(round(fracpos*numpoint))\n numneg = numpoint - numpos\n\n metadata = 'cloudgen(%d,%d,%d,%d,%3.2f)' % (numpos, numneg, numfeat, numnoise, width)\n print(metadata)\n\n datapos = ones((numfeat, numpos)) + width*randn(numfeat, numpos)\n dataneg = -ones((numfeat, numneg)) + width*randn(numfeat, numneg)\n noise = (2.0+width)*(rand(numnoise, numpos+numneg)\n - 0.5 * ones((numnoise, numpos+numneg)))\n pointcloud = 0.2*concatenate((concatenate((datapos, dataneg), axis=1),\n noise), axis=0)\n labels = concatenate((ones(numpos), -ones(numneg)))\n\n return metadata, pointcloud, labels",
"def generate_data(dataset, num_pairs = 10000):\n im1s, im2s, labels = [], [], []\n for _ in range(num_pairs):\n dp1_idx = np.random.randint(dataset.num_datapoints)\n dp2_idx, label = dp1_idx, 1 # same object\n \n im1_idx = np.random.randint(20)\n im2_idx = np.random.randint(20)\n \n im1s.append(255 * dataset[dp1_idx]['depth_images'][im1_idx])\n\n if np.random.random() < 0.5: # Makes half of the training data to be different objects\n while dp2_idx == dp1_idx:\n dp2_idx = np.random.randint(dataset.num_datapoints)\n label = 0\n\n im2s.append(255 * dataset[dp2_idx]['depth_images'][im2_idx])\n labels.append(label)\n im1s, im2s, labels = np.array(im1s), np.array(im2s), np.array(labels)\n return np.expand_dims(im1s, 1), np.expand_dims(im2s, 1), labels",
"def buildFruitTree(density, distr, sd_factor=3, mean=None, num_clast=4, min_dist=2000):\n fruits = []\n area = np.count_nonzero(dp.Island())\n size = int(area / 1000000 * density) # number of trees to compute\n # Compute Trees\n if distr == Distr.UNIFORM:\n while len(fruits) < size:\n f = [random.randint(0, dp.Island().shape[0] - 1), random.randint(0, dp.Island().shape[1] - 1)]\n if dp.Island()[f[0], f[1]] > 0:\n fruits.append(geo.Coordinates(f[0], f[1], dp.Island()[f[0], f[1]]))\n elif distr == Distr.NORMAL:\n if mean is None or mean[0] < 0 or mean[1] < 0 or mean[0] > dp.Island().shape[0] - 1 or mean[1] > dp.Island().shape[1] - 1:\n mean = [dp.Island().shape[0] / 2, dp.Island().shape[1] / 2]\n cov = [[math.pow(mean[0] / sd_factor, 2), 0], [0, math.pow(mean[1] / sd_factor, 2)]] # covariance matrix. Axis are independent\n fruits = np.random.multivariate_normal(mean, cov, size)\n fruits = fruits.astype(int)\n fruits = [_createCoor(x) for x in fruits]\n fruits = [x for x in fruits if _checkFruitTree(x)]\n elif distr == Distr.CLUSTERED:\n clst = []\n while len(clst) < num_clast:\n c = [random.randint(0, dp.Island().shape[0] - 1), random.randint(0, dp.Island().shape[1] - 1)]\n if dp.Island()[c[0], c[1]] > 0:\n p = geo.Coordinates(c[0], c[1], dp.Island()[c[0], c[1]])\n if not _checkMinDist(p, clst, min_dist):\n continue\n clst.append(p)\n for c in clst:\n c_fruits = []\n mean = [c.x, c.y]\n cov = [[math.pow(dp.Island().shape[0] / (sd_factor * num_clast), 2), 0], [0, math.pow(dp.Island().shape[1] / (sd_factor * num_clast), 2)]] # covariance matrix. Axis are independent\n c_fruits = np.random.multivariate_normal(mean, cov, int(size / num_clast))\n c_fruits = c_fruits.astype(int)\n c_fruits = [_createCoor(x) for x in c_fruits]\n c_fruits = [x for x in c_fruits if _checkFruitTree(x)]\n fruits.extend(c_fruits)\n return fruits",
"def standard_read_and_process(filenames,densnam,countnam=None) :\n if countnam is None :\n countnam = densnam\n densities = Densities(filenames[0],len(filenames),densnam,countnam)\n for filename in filenames[1:] :\n densities.read(filename)\n densities.scale()\n densities.scale_by_uniform()\n densities.average()\n densities.erase_low()\n densities.calc_gibbs(cutoff=0.15)\n densities.crop(70,70)\n return densities",
"def gen_density_cubes(snaps=[], ptype='dm', resolution=512):\n for sn in snaps:\n dens = build_density_cube(sn, ptype=ptype, resolution=resolution)\n fits.writeto(\"%s_density_%03d_%d.fits\"%(pytype,sn,resolution), dens)",
"def make_tag_cloud(self, user):\n annos = json.loads(self.user_annos.get(user))\n taglists = []\n for a in annos:\n if a.has_key('tags') and a['tags'] is not None:\n taglists.append(a['tags'])\n tagdict = defaultdict(int)\n for taglist in taglists:\n if taglist is None: continue\n for tag in taglist:\n tagdict[tag.lower()] += 1\n tag_tuples = sorted(tagdict.items(), key=operator.itemgetter(0,1))\n tag_tuples = [tag_tuple for tag_tuple in tag_tuples if tag_tuple[1] > 1]\n tag_counts = [tag_tuple[1] for tag_tuple in tag_tuples]\n bin_count = 3\n histogram = numpy.histogram(tag_counts, bins=bin_count)\n breaks = histogram[1]\n formatted_tag_tuples = [self.format_tag_cloud(breaks, tag_tuple) for tag_tuple in tag_tuples]\n return ' '.join(formatted_tag_tuples)",
"def cloudfilter(allprf,tarr,z,datestring):\n\tthrs=1750.0\n\tday=datestring\n\tdia=int(day[6:8])\n\tyear=int(day[0:4])\n\tmes=int(day[4:6])\n\tzi=np.where(z==200.)\n\tzi=zi[0]\n\tzmax=np.where(z==4000.)\n\tzmax=zmax[0]\n\t#print zi,zmax\n\ta=allprf[zi:zmax,:]\n\tnbs=[]\n\ttmps=[]\n\tsigma_t=np.sigma(tarr)\n\tcount=len(tarr)*len(z[zi:zmax])\n\tsumi=0\n\tfor i,t in enumerate(tarr):\n\n\t\tfor j,z1 in enumerate(z[zi:zmax]):\n\t\t\ttry:\n\t\t\t\tsumi=sumi+a[j+zi,i]\n\t\t\texcept IndexError:\n\t#\t\t\tprint fl\n\t\t\t\tcontinue\n\tmu=sumi/count\n\tdeviat=0\n\tfor i,t in enumerate(tarr):\n\t\tfor j,z1 in enumerate(z[zi:zmax]):\n\t\t\ttry:\n\n\t\t\t\tdeviat=deviat+((a[j+zi,i]-mu)**2)\n\t\t\texcept IndexError:\n\t\t\t\tcontinue\n\n\tsigma=deviat/(count-1)\n\tec=mu+(3*np.sqrt(sigma))\n#\tprint 'media','sigma','3','s'\n#\tprint mu,sigma,three,two\n\t#if ec > 1400:\n\t#\tprint fl, ec\n\n\tfor i,t in enumerate(tarr):\n\t\tcloud=False\n\t\tfor j,z1 in enumerate(z[zi:zmax]):\n\t\t\ttry:\n\t\t\t\tif a[j+zi,i]>ec or a[j+zi,i]>thrs:\n\t\t\t\t\thoras=int(math.floor(t))\n\t\t\t\t\tminutos=int(round((t-horas)*60,-1))\n\t\t\t\t\ttim=datetime.datetime(year,mes,dia,horas,minutos)\n\t\t\t\t\ttmps.append(tim)\n\t\t\t\t\t#nbs.append(1)\n\t\t\t\t\tcloud=True\n\t\t\t\t\tbreak\n\t\t\texcept IndexError:\n\t\t\t\tcontinue\n\t\tif cloud:\n\t\t\tnbs.append(1)\n\t\telse:\n\t\t\tnbs.append(0)\n#\tprint tmps\n#\tprint nbs\n\treturn tmps,nbs",
"def mkdens_fermi(nel,orbe,orbs,e_temp):\n efermi = get_efermi(nel,orbe,e_temp)\n occs = get_fermi_occs(efermi,orbe,e_temp)\n D = mkdens_occs(orbs,occs)\n entropy = get_entropy(occs,e_temp)\n return D,entropy",
"def dilate(dimTags, x, y, z, a, b, c):\n api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)\n ierr = c_int()\n lib.gmshModelOccDilate(\n api_dimTags_, api_dimTags_n_,\n c_double(x),\n c_double(y),\n c_double(z),\n c_double(a),\n c_double(b),\n c_double(c),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelOccDilate returned non-zero error code: \",\n ierr.value)",
"def genDensityData(Ev,material=\"SS-316L\"):\n starter_res = params[\"starter_res\"]\n if material==\"SS-316L\":\n # for dT = [20 - 100] C\n p_range_1 = np.full(int(starter_res*(80/20)),(7900.0+8000.0+8027.0)/3.0)\n ## calculate density data for dT =[100 - 1000] C using Ev spline and prev range value as starter point\n p_range_2 = []\n for dT in np.linspace(0.0,900.0,int(starter_res*(20/900))):\n p_range_2.append(p_range_1[0]/(1+Ev(dT)*dT))\n # convert to array\n p_range_2 = np.array(p_range_2)\n # combine data ranges together\n p_data = np.concatenate([p_range_1,p_range_2],axis=0)\n # create temperature data\n p_temp_range = np.linspace(CelciusToK(20.0),CelciusToK(1000.0),p_data.shape[0])\n return p_data, p_temp_range\n else:\n print(material,\" : Unsupported material!\")\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a Green Button DataFrame, price that energy at PJM pnodes
|
def price_at_pnodes(df, pnodes):
for pnode in pnodes:
# Bring in PJM prices from DataMiner
pnode_prices = pandas.read_csv(root+'pnode_data/%s.csv' % pnode)
assert len(pnode_prices['PRICINGTYPE'].unique()) == 1
assert pnode_prices['PRICINGTYPE'].unique()[0] == 'TotalLMP'
# Unpivot the data
pnode_prices = pandas.melt(pnode_prices, id_vars=['PUBLISHDATE'],
value_vars=['H%d'%i for i in xrange(1,25)])
pnode_prices = pnode_prices.rename(columns={
'variable':'Hour',
'value':'Price'})
# Convert hour to standard format and to hour beginning standard
cvtHr = lambda x: "%d:00" % (int(x)-1)
pnode_prices['Hour'] = pnode_prices['Hour'].str.slice(1).apply(cvtHr)
pnode_prices['ts'] = \
(pnode_prices['PUBLISHDATE']+' '+
pnode_prices['Hour']) .apply(makeTimestamp)
pnode_prices = pnode_prices.set_index('ts', drop=False)
# Convert prices to $/kWhr (currently $/MWhr)
pnode_prices['Price'] = pnode_prices['Price']/1000
# Figure out what our wholesale price would have been
df['pnode_'+pnode] = df['USAGE'] * pnode_prices['Price']
return df
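# Illustrative sketch (not part of the original source): the unpivot step in
# isolation on a tiny synthetic frame. The function above assumes one row per
# PUBLISHDATE with hour-ending columns H1..H24; melt() produces one row per
# hour, and the "%d:00" % (int(x) - 1) shift relabels hour-ending values to
# the hour-beginning convention used by the usage data.
import pandas

_wide = pandas.DataFrame({
    'PUBLISHDATE': ['1/1/2016'],
    'H1': [22.5],   # $/MWh for the hour ending 01:00, i.e. beginning 00:00
    'H2': [19.8],
})
_long = pandas.melt(_wide, id_vars=['PUBLISHDATE'], value_vars=['H1', 'H2'])
_long = _long.rename(columns={'variable': 'Hour', 'value': 'Price'})
_long['Hour'] = _long['Hour'].str.slice(1).apply(lambda x: "%d:00" % (int(x) - 1))
_long['Price'] = _long['Price'] / 1000    # $/MWh -> $/kWh
print(_long)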
|
[
"def add_prices(self):\n for i in range(self.parameters[\"number_of_products\"]):\n self.product_space.nodes[i][\"price\"] = \\\n self.product_space.nodes[i][\"delta\"] / max(\n self.product_space.nodes[i][\"firms\"], 1)",
"def price(self, p, g, kind='var'):\n\n # figure regulatory assets; applied to unlimited losses\n vd = self.var_dict(p, kind, snap=True)\n a_reg = vd[self.name]\n\n # figure pricing distortion\n if isinstance(g, Distortion):\n # just use it\n pass\n else:\n # Distortion spec as dict\n g = Distortion(**g)\n\n self.apply_distortion(g)\n aug_row = self.density_df.loc[a_reg]\n\n # holder for the answer\n df = pd.DataFrame(columns=['line', 'L', 'P', 'M', 'Q'], dtype=float)\n df.columns.name = 'statistic'\n df = df.set_index('line', drop=True)\n\n el = aug_row['exa']\n P = aug_row['exag']\n M = P - el\n Q = a_reg - P\n\n df.loc[self.name, :] = [el, P, M, Q]\n df['a'] = a_reg\n df['LR'] = df.L / df.P\n df['PQ'] = df.P / df.Q\n df['ROE'] = df.M / df.Q\n # ap = namedtuple('AggregatePricing', ['df', 'distortion'])\n # return ap(df, g) # kinda dumb...\n return df",
"def _product_offer(self): # double is private, single for show.\n for k in self.model.clusters:\n for j in self.model.products:\n exp = pyo.quicksum(self.model.x[cluster, customer, product] for cluster, customer, product in self.model.ccp if (cluster==k and product==j))\n self.model.product_offer.add(exp == self.tactical_model.y[k, j].value)",
"def eotf_PQ_BT2100(E_p):\n\n return eotf_ST2084(E_p, 10000)",
"def lifetime_pressure_velocitydispersion_tradeoff(n, selected_pillar):\n # (1 Gauss / (1 cm^−(1/2) * g^(1/2) * s^−1))\n cgs_to_gauss = (u.Gauss / (u.cm**(-1/2) * u.g**(1/2) * u.s**-1))\n\n\n #### check what B field needed for 1-3 x 10^7 K cm-3\n def reverse_engineer_B_field(p):\n print(f\"For pressure P = {p:.1E}, \", end='')\n b = ((p*8*np.pi*const.k_B)**(1/2) * cgs_to_gauss).to(u.microGauss)\n print(f\"B = {b:.2f}\")\n reverse_engineer_B_field(3e6*u.K/u.cm**3)\n reverse_engineer_B_field(1e7*u.K/u.cm**3)\n reverse_engineer_B_field(2e7*u.K/u.cm**3)\n reverse_engineer_B_field(3e7*u.K/u.cm**3)\n print()\n\n\n def calc_B_field_Pattle(nH2, sigma_v, mmw=1.4):\n \"\"\"\n Implementing the equation for B field using Pattle's numbers but allowing\n mean molecular weight, sigma_v and nH2 to change\n I will use MMW = 1.33 but I want to check equations using theirs, 1.4\n \"\"\"\n Q = 0.5\n sigma_th = (14.4*u.deg).to(u.rad).to_value()\n rho = (2 * nH2 * mmw * Hmass).to(u.g/u.cm**3)\n return (Q * np.sqrt(4 * np.pi * rho) * (sigma_v / sigma_th) * cgs_to_gauss).to(u.microGauss)\n\n def calc_turbulent_pressure(nH2, sigma_v):\n \"\"\"\n Now default to mmw=1.33\n \"\"\"\n return ((2 * nH2 * mean_molecular_weight_neutral * Hmass) * sigma_v**2 / const.k_B).to(u.K * u.cm**-3)\n\n b_170ug = calc_B_field_Pattle(5e4 * u.cm**-3, 0.5 * kms)\n print(f\"This should be ~170uG: {b_170ug:.1f}\")\n\n nH2_lo = 1.3e5\n nH2_hi = 1.3e5\n\n b_molecular_lo = calc_B_field_Pattle(nH2_lo * u.cm**-3, 0.6 * kms, mmw=mean_molecular_weight_neutral)\n b_molecular_hi = calc_B_field_Pattle(nH2_hi * u.cm**-3, 0.6 * kms, mmw=mean_molecular_weight_neutral)\n print(f\"This is my best number for molecular gas: {b_molecular_lo:.1f} -- {b_molecular_hi:.1f}\")\n\n def calc_Bpressure_Pattle(B_field):\n return ((B_field/cgs_to_gauss)**2 / (8*np.pi * const.k_B)).to(u.K * u.cm**-3)\n\n pB_mol_lo = calc_Bpressure_Pattle(b_molecular_lo)\n pB_mol_hi = calc_Bpressure_Pattle(b_molecular_hi)\n print(f\"Molecular B pressures: {pB_mol_lo:.2E} -- {pB_mol_hi:.2E}\")\n p_therm_mol_lo = 25 * nH2_lo\n p_therm_mol_hi = 25 * nH2_hi\n p_turb_mol_lo = calc_turbulent_pressure(nH2_lo*u.cm**-3, 0.6*kms)\n p_turb_mol_hi = calc_turbulent_pressure(nH2_hi*u.cm**-3, 0.6*kms)\n print(f\"Molecular thermal pressure: {p_therm_mol_lo:.1E} -- {p_therm_mol_hi:.1E} \")\n print(f\"Molecular turbulent pressure: {p_turb_mol_lo:.1E} -- {p_turb_mol_hi:.1E}\")\n\n p_tot_mol_lo = (pB_mol_lo.to_value() + p_turb_mol_lo.to_value() + p_therm_mol_lo) / 1e6\n p_tot_mol_hi = (pB_mol_hi.to_value() + p_turb_mol_hi.to_value() + p_therm_mol_hi) / 1e6\n\n print(f\"Total molecular pressures: {p_tot_mol_lo:.1f} -- {p_tot_mol_hi:.1f}\")\n\n p_atom_lo = pB_mol_lo * (n/(2*nH2_lo))\n p_atom_hi = pB_mol_hi * (n/(2*nH2_hi))\n # print(f\"Atomic pressures: {p_atom_lo:.2E} -- {p_atom_hi:.2E}\")\n\n # n/2 because I baked in the 2xmH for molecular H2 into that function\n b_atom = calc_B_field_Pattle(n/2 * u.cm**-3, 0.6*kms, mmw=mean_molecular_weight_neutral)\n pB_atom = calc_Bpressure_Pattle(b_atom)\n print(f\"Atomic B values: {b_atom:.1f}, {pB_atom:.2E}\")\n\n\n\n \"\"\"\n There is a unit issue in the pressure expression; check on Wolfram that my combination of P_B(Bfield) has valid units\n It works it's just the Gaussian units thing\n \"\"\"\n\n\n def sigma_turb(alpha, sigma_total):\n return np.sqrt(alpha) * sigma_total\n\n def sigma_flow(alpha, sigma_total):\n return np.sqrt(1 - alpha) * sigma_total\n\n # rho is mass density\n n = n * u.cm**-3 # or 2e4\n # Neutral mass density\n rho = 
(n*mean_molecular_weight_neutral*Hmass).to(u.g/u.cm**3)\n\n def turb_pressure(alpha, sigma_total):\n # Combining magnetic and turbulent pressure, which have the same dependence on the quantity rho*sigma^2\n return (rho * sigma_turb(alpha, sigma_total)**2 / const.k_B).to(u.K / u.cm**3)\n\n\n p_turb_atomic = (rho * (1.3*kms)**2 / const.k_B).to(u.K / u.cm**3)\n print(f\"Atomic turbulent pressure: {p_turb_atomic:.2E}\")\n\n\n\n pillar_properties = { # area (pc2), mass (solMass from CO)\n 'P1a-head': (0.17886, 64.12), 'P2-head': (0.07557, 11.32), 'P3-head': (0.02191, 4.27)\n }\n def mdot_and_pillar_lifetime(alpha, sigma_total, pillar_label):\n # Return both so we can make 2 plots\n area_pc2, mass_solMass = pillar_properties[pillar_label]\n area = area_pc2 * u.pc**2\n mass = mass_solMass * u.solMass\n mass_loss_rate = (sigma_flow(alpha, sigma_total) * rho * area / 2.).to(u.solMass / u.Myr)\n lifetime = (mass / mass_loss_rate).to(u.Myr)\n return mass_loss_rate, lifetime\n\n alpha_range = np.arange(0, 1, 0.05)\n\n fig = plt.figure(figsize=(10, 9))\n ax1 = plt.subplot(221)\n ax2 = plt.subplot(222)\n ax3 = plt.subplot(223)\n ax4 = plt.subplot(224)\n\n transparency = 0.2\n p_therm_lo = n.to_value()*100/1e6\n p_therm_hi = n.to_value()*250/1e6\n print(f\"Atomic thermal pressure {p_therm_lo} -- {p_therm_hi}\")\n print(f\"Atomic total pressure {(p_turb_atomic+pB_atom).to_value()/1e6 + p_therm_lo:.1f} -- {(p_turb_atomic+pB_atom).to_value()/1e6 + p_therm_hi:.1f}\")\n pB_atom_val = pB_atom.to_value()/1e6\n\n colors = marcs_colors[:3]\n # selected_pillar = \"P2-head\"\n\n for i, sigma_total in enumerate([1.0, 1.1, 1.3][::-1]*kms):\n label = \"$\\\\sigma_{\\\\rm tot} =$ \" + f\"{sigma_total:.2f}\"\n ax1.plot(alpha_range, sigma_turb(alpha_range, sigma_total).to_value(), color=colors[i], label=label)\n ax1.plot(alpha_range, sigma_flow(alpha_range, sigma_total).to_value(), color=colors[i], linestyle='--')\n\n p_turb = turb_pressure(alpha_range, sigma_total).to_value()/1e6\n ax2.fill_between(alpha_range, p_therm_lo+pB_atom_val+p_turb, y2=p_therm_hi+pB_atom_val+p_turb, color=colors[i], alpha=transparency)\n\n mass_loss_rate, lifetime = mdot_and_pillar_lifetime(alpha_range, sigma_total, selected_pillar)\n ax3.plot(alpha_range, mass_loss_rate.to_value(), color=colors[i])\n ax4.plot(alpha_range, lifetime.to_value(), color=colors[i])\n\n ax1.legend()\n\n ax1.set_title(f\"bottom plots using {selected_pillar}\")\n ax2.set_title(f\"Density n={n:.1E}\")\n\n ax2.set_ylim([0, 40])\n ax2.axhspan(p_tot_mol_lo, p_tot_mol_hi, color=marcs_colors[5], alpha=transparency, label='$P_{{\\\\rm H}_2}$') # fill region\n ax2.axhspan(18, 36, color=marcs_colors[6], alpha=transparency, label='$P_{\\\\rm HII}$') # fill region\n ax2.axhline(pB_atom_val, color=marcs_colors[5], alpha=transparency, label='$P_{{\\\\rm HI,B}}$')\n ax2.axhspan(p_therm_lo + pB_atom_val, p_therm_hi + pB_atom_val, color=marcs_colors[7], alpha=transparency, label='$P_{{\\\\rm HI,B}} + P_{{\\\\rm HI,therm}}$')\n ax2.legend(loc='upper left')\n\n ax3.set_xlabel(\"$\\\\alpha$\")\n ax4.set_xlabel(\"$\\\\alpha$\")\n ax1.set_ylabel(\"1D Velocity dispersion $\\\\sigma$ (km s-1)\")\n ax2.set_ylabel(\"Total non-thermal pressure (cm-3)\")\n ax3.set_ylabel(f\"{selected_pillar}\" + \" $M_{\\\\odot}$ (solMass Myr-1)\")\n ax3.set_ylim([0, 100])\n ax4.set_ylabel(f\"{selected_pillar} Pillar lifetime (Myr)\")\n ax4.axhspan(1, 3, color=marcs_colors[5], alpha=transparency)\n ax4.set_ylim([0, 8])\n # 2023-02-06,21, 03-16,25\n 
fig.savefig(f\"/home/ramsey/Pictures/2023-03-25/pressure_mdot_tradeoff_{selected_pillar}_{n.to_value():.1E}.png\",\n metadata=catalog.utils.create_png_metadata(title=f\"B pressure scaled by density only; {selected_pillar}; n={n:.1E}\",\n file=__file__, func=\"lifetime_pressure_velocitydispersion_tradeoff\"))",
"def plot_particle_energy_gain():\n # beta_e = 0.005\n pic_info = pic_information.get_pic_info(\n '../../mime25-beta00025-guide0-200-100-nppc200')\n tenergy1 = pic_info.tenergy\n kene_e1 = pic_info.kene_e\n kene_i1 = pic_info.kene_i\n\n # beta_e = 0.02\n pic_info = pic_information.get_pic_info(\n '../../mime25-beta001-guide0-200-100-nppc400')\n tenergy2 = pic_info.tenergy\n kene_e2 = pic_info.kene_e\n kene_i2 = pic_info.kene_i\n\n # beta_e = 0.06\n pic_info = pic_information.get_pic_info(\n '../../mime25-beta003-guide0-200-100-nppc200')\n tenergy3 = pic_info.tenergy\n kene_e3 = pic_info.kene_e\n kene_i3 = pic_info.kene_i\n\n # beta_e = 0.2\n pic_info = pic_information.get_pic_info(\n '../../mime25-beta01-guide0-200-100-nppc200')\n tenergy4 = pic_info.tenergy\n kene_e4 = pic_info.kene_e\n kene_i4 = pic_info.kene_i\n\n # Estimate the energy gain for beta_e = 0.0072 using beta_e = 0.005\n kene_e12 = kene_e1[0] + (kene_e1 - kene_e1[0]) * 0.005 / 0.0072\n kene_i12 = kene_i1[0] + (kene_i1 - kene_i1[0]) * 0.005 / 0.0072\n\n print('The ratio of electron energy gain to its initial energy: ')\n print(' beta_e = 0.0072, 0.02, 0.06, 0.2: %f %f %f %f',\n ((kene_e12[-1]-kene_e12[0])/kene_e12[0],\n (kene_e2[-1]-kene_e2[0])/kene_e2[0],\n (kene_e3[-1]-kene_e3[0])/kene_e3[0],\n (kene_e4[-1]-kene_e4[0])/kene_e4[0]))\n # Electrons\n fig = plt.figure(figsize=[3.5, 2.5])\n ax = fig.add_axes([0.22, 0.22, 0.75, 0.73])\n ax.plot(tenergy1, (kene_e12 - kene_e12[0]) / kene_e12[0], 'b', linewidth=2)\n ax.plot(tenergy2, (kene_e2 - kene_e2[0]) / kene_e2[0], 'r', linewidth=2)\n ax.plot(\n tenergy3, (kene_e3 - kene_e3[0]) / kene_e3[0], 'orange', linewidth=2)\n ax.plot(tenergy4, (kene_e4 - kene_e4[0]) / kene_e4[0], 'g', linewidth=2)\n ax.set_xlim([0, 1190])\n #ax.set_ylim([0, 1.05])\n\n #plt.title('Energy spectrum', fontdict=font)\n ax.set_xlabel(r'$t\\Omega_{ci}$', fontdict=font, fontsize=20)\n ax.set_ylabel(r'$\\Delta K_e/K_e(0)$', fontdict=font, fontsize=20)\n plt.tick_params(labelsize=16)\n\n ax.text(\n 680, 8.8, r'$\\beta_e=0.007$', color='blue', rotation=5, fontsize=16)\n ax.text(680, 5, r'$\\beta_e=0.02$', color='red', rotation=4, fontsize=16)\n ax.text(\n 680, 2.1, r'$\\beta_e=0.06$', color='orange', rotation=0, fontsize=16)\n ax.text(\n 680, -1.5, r'$\\beta_e=0.2$', color='green', rotation=0, fontsize=16)\n # Ions\n fig = plt.figure(figsize=[3.5, 2.5])\n ax = fig.add_axes([0.22, 0.22, 0.75, 0.73])\n ax.plot(tenergy1, (kene_i12 - kene_i12[0]) / kene_i12[0], 'b', linewidth=2)\n ax.plot(tenergy2, (kene_i2 - kene_i2[0]) / kene_i2[0], 'r', linewidth=2)\n ax.plot(\n tenergy3, (kene_i3 - kene_i3[0]) / kene_i3[0], 'orange', linewidth=2)\n ax.plot(tenergy4, (kene_i4 - kene_i4[0]) / kene_i4[0], 'g', linewidth=2)\n ax.set_xlim([0, 1190])\n ax.set_ylim([-5, 30])\n\n #plt.title('Energy spectrum', fontdict=font)\n ax.set_xlabel(r'$t\\Omega_{ci}$', fontdict=font, fontsize=20)\n ax.set_ylabel(r'$\\Delta K_i/K_i(0)$', fontdict=font, fontsize=20)\n plt.tick_params(labelsize=16)\n\n ax.text(680, 22, r'$\\beta_e=0.007$', color='blue', rotation=0, fontsize=16)\n ax.text(680, 9, r'$\\beta_e=0.02$', color='red', rotation=0, fontsize=16)\n ax.text(680, 3, r'$\\beta_e=0.06$', color='orange', rotation=0, fontsize=16)\n ax.text(680, -4, r'$\\beta_e=0.2$', color='green', rotation=0, fontsize=16)\n plt.show()",
"def compute_set_product_price(self):\n self.ensure_one()\n phantom_boms = self.bom_ids.filtered(lambda b: b.type == \"phantom\")\n\n if not phantom_boms:\n raise UserError(\n _(\n \"No phantom BoM found for product %s. Please create\"\n \" a phantom BoM to compute the price of the set product.\"\n % self.name\n )\n )\n\n products_2compute = self.product_variant_ids\n date_now = fields.Datetime.now()\n dummy_so = self.env[\"sale.order\"].create(\n {\n \"name\": \"Phantom Bom Price Compute: %s, %s\"\n % (self.id, date_now.strftime(\"%d-%m-%Y\")),\n \"partner_id\": 12515, # Ahmet Altınışık test\n \"partner_invoice_id\": 12515,\n \"partner_shipping_id\": 12515,\n \"pricelist_id\": 136, # USD pricelist\n \"warehouse_id\": 1,\n \"company_id\": 1,\n \"currency_id\": 2, # USD\n \"date_order\": fields.Datetime.now(),\n }\n )\n for product in products_2compute:\n bom = self.env[\"mrp.bom\"].sudo()._bom_find(product=product)\n if not bom.type == \"phantom\":\n continue\n # Create a new sale order line\n dummy_sol = self.env[\"sale.order.line\"].create(\n {\n \"order_id\": dummy_so.id,\n \"product_id\": product.id,\n \"product_uom_qty\": 1,\n \"product_uom\": product.uom_id.id,\n \"price_unit\": product.v_fiyat_dolar,\n }\n )\n # Explode the phantom bom\n dummy_sol.explode_set_contents()\n # Compute the price\n dummy_so.recalculate_prices()\n # Update the product price\n _logger.info(\n \"Updating product price for product %s: %s -> %s\"\n % (product.display_name, product.v_fiyat_dolar, dummy_so.amount_untaxed)\n )\n product.v_fiyat_dolar = dummy_so.amount_untaxed\n # Clear sale order lines\n dummy_so.order_line.unlink()\n # Clear the dummy sale order\n dummy_so.unlink()\n self.env.cr.commit()\n return True",
"def cost_components(dframe=dframe):\n \n labels = ['fringe','unemployment','BaltCorps_fee','StrongCity_fee','stipend']\n colors = ['#eff3ff','#bdd7e7','#6baed6','#3182bd','#08519c']\n values = []\n for label in labels:\n values.append(dframe[label].sum())\n\n text = []\n for label,value in zip(labels,values):\n text.append('{}<br>${:,.0f}'.format(label.capitalize(),value))\n\n fig = {\n 'data':[\n {'labels': labels,\n 'values': values,\n 'marker': {'colors': colors},\n 'name': 'cost components',\n 'hole': .4,\n 'type': 'pie',\n 'text': text,\n 'hoverinfo':'text+percent'\n }],\n 'layout': {\n 'title':'Fellowship Cost Components',\n 'hovermode': 'closest',\n 'paper_bgcolor': '#bdbdbd',\n 'plot_bgcolor': '#bdbdbd',\n 'annotations': [\n {\n 'font':{'size':12,\n 'color':'#636363'},\n 'showarrow':False,\n 'text': '5yr Fees: ${:,.0f}'.format(dframe['BaltCorps_fee'].sum() + dframe['StrongCity_fee'].sum()),\n 'x':1.35,\n 'y':.4},\n {\n 'font':{'size':12,\n 'color':'#636363'},\n 'showarrow':False,\n 'text': '5yr OPCs: ${:,.0f}'.format(dframe['fringe'].sum() + dframe['unemployment'].sum()),\n 'x':1.35,\n 'y':.3},\n {\n 'font':{'size':12,\n 'color':'darkgrey'},\n 'showarrow':False,\n 'text': '<b>Source:</b> Data Provided by Baltimore Corps June 2019:<br>https://github.com/brl1906/fellowship-analysis',\n 'x':.5,\n 'y':-.2},\n\n ]\n }\n }\n\n return fig",
"def on_product(self):\n \n product_type = '_'.join(str(self.select_graph['p'].currentText()).lower().split())\n \n p = productGraph(self.nt['g']['nx'], self.nt['h']['nx'], \n product_type, self.slider['p'][0].value())\n \n self.nt['p']['nx'] = p\n self.graph_drawer(p.graph, p.product_type + \" \" + str(p.product_params), 'p')\n\n # enabling basis updater button\n self.result_button['p'].setEnabled(True)\n self.change = True",
"def gas_temperature (p, dindex, eindex, gf, gamma=5.0/3.0):\n\n #import ipdb\n #ipdb.set_trace()\n e = p.data[:,:,:,eindex]\n d = p.data[:,:,:,dindex]\n\n return ((gamma-1.0) * e / d)",
"def EGTS_only_perf(GR):\n #Power available\n P_APU = 62 # [kW] Available apu power\n P_sen = 0 # [kW]\n P_comp = 0 # [kW]\n P_av_e = (P_APU-P_sen-P_comp)*1000/2 # [W] APU power available per engine\n\n # Efficiencies powertrain\n n_circuit = 0.97\n n_gear = 0.9875 # Gear efficiency (torque loss -> power loss)\n amount_gears = 2\n n_emotor = 0.95 # Electricmotor efficiency (electrical loss - power loss)\n\n # Airplane characteristics\n w_rad_air = 1.27/2 # [m] wheel radius aircraft MLG wheels\n m_plane = 97400 # [kg] MRW\n weight_ratio = 0.952 # [-] Landing gear weight distribution ratio\n Roll_fric = 0.02 # [-] Rolling friction coefficient of airplane wheels\n\n # Engine output torque for available power at different RPM calculation\n P_av_e_out = n_circuit*n_emotor*P_av_e # [W] engine output power\n T_egts_w_em = np.array([500]) # [Nm] engine output torque\n\n v_slow = np.arange(0, 8.1, 0.1) # [kts] Velocity range\n v_slow = v_slow*0.514444 # to m/s\n w_slow = v_slow/w_rad_air # [rad/s] corresponding rotational speed wheels\n w_slow_eng = w_slow*GR # [rad/s] corresponding rotational speed engine\n for i in range(1, len(w_slow_eng)):\n # Enough power hence full torque\n if P_av_e_out/w_slow_eng[i] > 500:\n T_egts_w_em = np.append(T_egts_w_em, [500])\n # in sufficient power hence less torque\n elif P_av_e_out/w_slow_eng[i] < 500 and P_av_e_out/w_slow_eng[i] > 0:\n T_egts_w_em = np.append(T_egts_w_em, [P_av_e_out/w_slow_eng[i]])\n # not enough power\n else:\n T_egts_w_em = np.add(T_egts_w_em, [0])\n\n # Torque en power @ wheels = engine * gear efficiency\n T_egts_w_r = n_gear**amount_gears*GR*T_egts_w_em # [W] wheel power\n F_egts_w = T_egts_w_r/w_rad_air # [Nm] engine output torque\n\n # Resultant acceleration calculation\n # Determining friction for resultant acceleration calculation\n N_mlg = m_plane*weight_ratio*9.81 # [N] Total normal force on the MLG\n N_mlg_w = N_mlg/4 # [N] Normal force per MLG wheel\n N_nlg = m_plane*(1-weight_ratio)*9.81 # [N] Total normal force of car\n F_fric = Roll_fric*N_mlg + Roll_fric*N_nlg # [N] Total force req to move plane at acceleration\n\n # Resultant force\n F_acc = 2*F_egts_w-F_fric # [N]\n\n # Resultant acceleration\n a_acc_slow = F_acc/m_plane # [m/s2]\n # Cut-off insignificant accelerations\n v_slow = v_slow[np.where(a_acc_slow >= 0.005)]\n a_acc_slow = a_acc_slow[np.where(a_acc_slow >= 0.005)]\n\n # Determine time intervals for velocity intervals w corresponding acceleration profile\n time = np.array([0])\n for i in range(1, len(v_slow)):\n time = np.append(time, [v_slow[i]/a_acc_slow[i]])\n\n # Plot\n# gs = gridspec.GridSpec(2, 2) # Define figure layout\n# fig = plt.figure(\"EGTS Only Performance\")\n# fig.suptitle(\" EGTS Only Performance \\n Pushback\")\n#\n# # Pushback velocity\n# ax1 = fig.add_subplot(gs[0, 0])\n# ax1.set_title(\"Velocity\")\n# ax1.set_xlabel(\"Time [s]\")\n# ax1.set_ylabel(\"Velocity [m/s]\")\n# ax1.plot(time[0:31], v_slow[0:31], color='g')\n# ax1.set_yticks([0, 0.5, 1, 1.5])\n# ax = ax1.twinx()\n# ax.plot(time[0:31], v_slow[0:31], color='g')\n# ax.set_ylabel(\"Velocity [kts]\")\n# ax.set_yticks(np.array([0, 0.5144, 2*0.5144, 3*0.5144]))\n# ax.set_yticklabels(['0', '1', '2', '3'])\n# # Pushback Acceleration graphs\n# ax2 = fig.add_subplot(gs[0, 1])\n# ax2.set_title(\"Acceleration\")\n# ax2.set_xlabel(\"Time [s]\")\n# ax2.set_ylabel(\"Acceleration [$m/s^2$]\")\n# ax2.set_ylim(0, max(a_acc_slow)+0.2)\n# ax2.plot(time[0:31], a_acc_slow[0:31], color='r')\n#\n# # Slow taxi title\n# ax0 = fig.add_subplot(gs[1, :])\n# 
ax0.axis('off')\n# ax0.set_title(\"Slow Taxi\", pad=20)\n# # Slow taxi\n# ax3 = fig.add_subplot(gs[1, 0])\n# ax3.set_title(\"Velocity\")\n# ax3.set_xlabel(\"Time [s]\")\n# ax3.set_ylabel(\"Velocity [m/s]\")\n# ax3.plot(time, v_slow, color='g')\n# ax3.plot(time, [2.88 for i in time], color='gray', linestyle='--')\n# ax3.set_yticks([0, 0.5, 1, 1.5, 2, 2.5, 3])\n# ax = ax3.twinx()\n# ax.set_ylabel(\"Velocity [kts]\")\n# ax.set_yticks(np.array([0, 0.5144, 2*0.5144, 3*0.5144, 4*0.5144, 5*0.5144, 6*0.5144]))\n# ax.set_yticklabels(['0', '1', '2', '3', '4', '5', '6'])\n# # Pushback Acceleration graphs\n# ax4 = fig.add_subplot(gs[1, 1])\n# ax4.set_title(\"Acceleration\")\n# ax4.set_xlabel(\"Time [s]\")\n# ax4.set_ylabel(\"Acceleration [$m/s^2$]\")\n# ax4.set_ylim(0, max(a_acc_slow)+0.2)\n# ax4.plot(time, a_acc_slow, color='r')\n\n # Plot & Save\n# fig.tight_layout()\n# fig.subplots_adjust(top=0.88)\n# fig.savefig('EGTS_Only_Perf', bbox_inches='tight')\n #plt.show()\n return a_acc_slow, F_acc, v_slow, time",
"def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n dataframe.loc[\n (\n (\n dataframe['wtCrossUp'] &\n dataframe['wtOversold'] \n )\n \n ),\n 'buy'] = 1\n\n return dataframe",
"def __calculate_extra_cols(self, df):\n df['total_price'] = df['CPRECIO'] * df['#UNIDADES'] * df['CTIPOCAM01']\n return df",
"def simulate_fundamental_price(self, events):\n price_changes = int_zeros(self.max_t)\n price_changes[events == EVENT_PRICE_CHANGE_DOWN] = -1\n price_changes[events == EVENT_PRICE_CHANGE_UP] = +1\n return self.initial_price + np.cumsum(price_changes)",
"def _showPrice(propName):\r\n amount = int(self.game.getTile(self.game.getTileId(propName))['houseCost']/2)\r\n Label(sellFrame, text=f\"Would receive: {amount}\").grid(row=1, column=0, columnspan=2)\r\n Button(sellFrame, text=\"Accept\", command=lambda: self._executeSell(\r\n propName)).grid(row=2, column=0, columnspan=2)",
"def calc_proposed_HF_cost (self):\n self.proposed_HF_cost = np.zeros(self.project_life)\n fuel_cost = self.diesel_prices + self.cd['heating fuel premium']# $/gal\n wood_price = self.cd['cordwood price']\n # are there ever o&m costs\n # $/gal * gal/yr = $/year\n self.proposed_HF_cost += \\\n self.proposed_fuel_Hoil_consumption * fuel_cost +\\\n self.proposed_fuel_biomass_consumption * wood_price",
"def calculate_price_volume_WhartonData(df,\n new_cols_subset=data_settings.NEW_COLS_SUBSET, # todo: rm\n target_subset=None,\n ):\n\n data = df.copy()\n # data = data[raw_df_cols_subset] # to deprecate\n data['ajexdi'] = data['ajexdi'].apply(lambda x: 1 if x == 0 else x) # todo-?\n # ajexdi is the \"daily adjustment factor\"; Adjusted close price = PRCCD/AJEXDI\n # https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=&ved=2ahUKEwiJxPKSnurvAhUmxIUKHUmHBV8QFjABegQIERAD&url=https%3A%2F%2Fwrds-www.wharton.upenn.edu%2Fdocuments%2F1402%2FCompustat_Global_in_WRDS_the_basics.pptx&usg=AOvVaw1_EFVBLEqobE1mltGZXrQd\n\n # this is the (mandatory) base columns subset\n base_subset = ['datadate', 'tic']\n\n # if we didn't specify a target_subset, the subset of columns to be returned with this function,\n # we automatically by default create a subset based on the base_subset columns and the new columns created below\n # we discard the \"old\" columns we used to create the new ones, however if we wanted, we could specify above\n # in the target_subset that we want to kep these columns as well by passing a lst of all the columns we want to keep\n if target_subset is None:\n target_subset = base_subset + new_cols_subset\n\n # calculate adjusted closing price # todo-?\n if \"adjcp\" in target_subset:\n data['adjcp'] = data['prccd'] / data['ajexdi']\n # calculate opening price # todo-?\n if \"open\" in target_subset:\n data['open'] = data['prcod'] / data['ajexdi']\n # calculate intraday high price # todo-?\n if \"high\" in target_subset:\n data['high'] = data['prchd'] / data['ajexdi']\n # calculate intraday low price # todo-?\n if \"low\" in target_subset:\n data['low'] = data['prcld'] / data['ajexdi']\n # calculate daily trading volume # todo-?\n if \"volume\" in target_subset:\n data['volume'] = data['cshtrd']\n\n #data = data[target_subset]\n data = data.sort_values(['tic', 'datadate'], ignore_index=True)\n return data",
"def getPe(km,p_ref=100000.):\n ae, be = getEdge(km)\n return (ae + p_ref * be)",
"def get_price(self):\n\t\treturn self._price_p_night"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test google email and link expiration are not in id_token for an account with no linked Google account
|
def test_google_id_token_not_linked(oauth_test_client):
data = {"confirm": "yes"}
oauth_test_client.authorize(data=data)
tokens = oauth_test_client.token()
id_token = jwt.decode(
tokens.id_token, options={"verify_signature": False}, algorithms=["RS256"]
)
assert id_token["context"]["user"].get("google") is None
|
[
"def test_google_id_token_linked(db_session, encoded_creds_jwt, oauth_test_client):\n user_id = encoded_creds_jwt[\"user_id\"]\n proxy_group_id = encoded_creds_jwt[\"proxy_group_id\"]\n\n original_expiration = 1000\n google_account = \"some-authed-google-account@gmail.com\"\n\n # add google account and link\n existing_account = UserGoogleAccount(email=google_account, user_id=user_id)\n db_session.add(existing_account)\n db_session.commit()\n g_account_access = UserGoogleAccountToProxyGroup(\n user_google_account_id=existing_account.id,\n proxy_group_id=proxy_group_id,\n expires=original_expiration,\n )\n db_session.add(g_account_access)\n db_session.commit()\n\n # get google account info with utility function\n assert get_linked_google_account_email(user_id) == google_account\n assert get_linked_google_account_exp(user_id) == original_expiration\n\n # get the id token through endpoint\n data = {\"confirm\": \"yes\"}\n oauth_test_client.authorize(data=data)\n tokens = oauth_test_client.token()\n id_token = jwt.decode(\n tokens.id_token, options={\"verify_signature\": False}, algorithms=[\"RS256\"]\n )\n\n assert \"google\" in id_token[\"context\"][\"user\"]\n assert (\n id_token[\"context\"][\"user\"][\"google\"].get(\"linked_google_account\")\n == google_account\n )\n assert (\n id_token[\"context\"][\"user\"][\"google\"].get(\"linked_google_account_exp\")\n == original_expiration\n )",
"def test_get_email_link(self):\n self.get_using_valid_provider(\"email\")",
"def test_get_linkedin_link(self):\n self.get_using_valid_provider(\"linkedin\")",
"def test_confirmation_token(app, users):\n user = users[0][\"obj\"]\n token = generate_confirmation_token(user)\n # Valid\n expired, invalid, token_user = confirm_email_token_status(token)\n assert expired is False and invalid is False and token_user is user\n # Expired\n time.sleep(4)\n expired, invalid, token_user = confirm_email_token_status(token)\n assert expired is True and invalid is False and token_user is user",
"def linkGoogle():\n\tform = LinkGoogleForm()\n\t# Check if the Service-Provider is Facebook\n\tif form.validate_on_submit() and g.loginWith == 'Facebook' and g.currentUser['googleId'] is None:\n\t\tgoogleToken = GoogleModel.getTokenValidation(app.config['GOOGLE_CLIENT_ID'], form.token.data)\n\t\tif googleToken and googleToken['sub'] == form.googleId.data:\n\t\t\t# Continue only if the account doesn't exist yet.\n\t\t\tif not GoogleModel.doesUserExist(form.googleId.data):\n\t\t\t\tif GoogleModel.linkToUserId(g.currentUser['_id'], form.googleId.data):\n\t\t\t\t\treturn json.dumps({'result':'OK'}), 200\n\t\t\telse:\n\t\t\t\treturn abort(403)\n\t\telse:\n\t\t\treturn abort(401)\n\n\treturn abort(400)",
"def test_get_new_access_token(self):\n # get new access token and confirm side effects\n self.authorizer._get_new_access_token()\n self.on_refresh.assert_called_once()\n self.assertNotEqual(self.access_token, self.authorizer.access_token)\n\n # confirm AuthClient is still usable with new token\n get_res = self.tc.get_endpoint(GO_EP1_ID)\n self.assertEqual(get_res[\"id\"], GO_EP1_ID)",
"def test_generate_token_service_account(self):\n pass",
"def is_valid_gcal_access_token(token):\n # Build URL for token info API\n token_info = requests.get(\n '{0}?access_token={1}'.format(GOOGLE_TOKEN_INFO_API, token))\n\n # Return true if google access token still valid\n return token_info.ok and token_info.json()['expires_in'] > 100",
"def test_refresh_token_invalid_scope(self):\n self.do_refresh_token_check(scope=['openid', 'profile'])",
"def notify_expired_link(self, link_id: UUID):\n self._send_message({\n self._EVENT_TYPE: self._EXPIRED_LINK,\n self._LINK_ID: str(_not_falsy(link_id, 'link_id'))\n })",
"def test_google_user_fixture(google_id_token, user):\n u = user_for_google_id_token(google_id_token)\n assert u.id == user.id",
"def test_statusml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)",
"def test_expiration(incoming, expected):\n assert BackingInstance(\n access_token_expiration=incoming).is_expired == expected",
"def test_request_another_access_token(self):\r\n request_token = self._obtain_request_token()\r\n self._request_authorization(request_token)\r\n request_token = self._update_token_from_db(request_token)\r\n self._obtain_access_token(request_token)\r\n\r\n parameters = self._make_access_token_parameters(request_token)\r\n response = self.c.get(\"/oauth/access_token/\", parameters)\r\n self.assertEqual(response.status_code, 400)\r\n self.assertEqual(response.content, 'Invalid request token.')",
"def test_invalid_verification_link(self):\n user = User.objects.get()\n token, uid = RegistrationAPIView.send_account_activation_email(user=user, send_email=False)\n\n # create the uid from a different username\n uid = urlsafe_base64_encode(force_bytes(\"invalid_username\")).decode(\"utf-8\")\n\n response = self.verify_account(token, uid)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n user = User.objects.get()\n # Ensure the user is not verified\n self.assertFalse(user.is_verified)",
"def test_getExternalEmail(self):\n email = self.reset.getExternalEmail(self.loginAccount)\n self.assertEquals(email, 'joe@external.com')",
"def test_full_update_access_token(self):\n pass",
"def test_calendar_ical_sharing_link_get(self):\n pass",
"def test_getclassified_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n 'ml/classified',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test google email and link expiration are in id_token for a linked account
|
def test_google_id_token_linked(db_session, encoded_creds_jwt, oauth_test_client):
user_id = encoded_creds_jwt["user_id"]
proxy_group_id = encoded_creds_jwt["proxy_group_id"]
original_expiration = 1000
google_account = "some-authed-google-account@gmail.com"
# add google account and link
existing_account = UserGoogleAccount(email=google_account, user_id=user_id)
db_session.add(existing_account)
db_session.commit()
g_account_access = UserGoogleAccountToProxyGroup(
user_google_account_id=existing_account.id,
proxy_group_id=proxy_group_id,
expires=original_expiration,
)
db_session.add(g_account_access)
db_session.commit()
# get google account info with utility function
assert get_linked_google_account_email(user_id) == google_account
assert get_linked_google_account_exp(user_id) == original_expiration
# get the id token through endpoint
data = {"confirm": "yes"}
oauth_test_client.authorize(data=data)
tokens = oauth_test_client.token()
id_token = jwt.decode(
tokens.id_token, options={"verify_signature": False}, algorithms=["RS256"]
)
assert "google" in id_token["context"]["user"]
assert (
id_token["context"]["user"]["google"].get("linked_google_account")
== google_account
)
assert (
id_token["context"]["user"]["google"].get("linked_google_account_exp")
== original_expiration
)
|
[
"def test_google_id_token_not_linked(oauth_test_client):\n data = {\"confirm\": \"yes\"}\n oauth_test_client.authorize(data=data)\n tokens = oauth_test_client.token()\n id_token = jwt.decode(\n tokens.id_token, options={\"verify_signature\": False}, algorithms=[\"RS256\"]\n )\n assert id_token[\"context\"][\"user\"].get(\"google\") is None",
"def test_get_email_link(self):\n self.get_using_valid_provider(\"email\")",
"def test_get_linkedin_link(self):\n self.get_using_valid_provider(\"linkedin\")",
"def test_confirmation_token(app, users):\n user = users[0][\"obj\"]\n token = generate_confirmation_token(user)\n # Valid\n expired, invalid, token_user = confirm_email_token_status(token)\n assert expired is False and invalid is False and token_user is user\n # Expired\n time.sleep(4)\n expired, invalid, token_user = confirm_email_token_status(token)\n assert expired is True and invalid is False and token_user is user",
"def linkGoogle():\n\tform = LinkGoogleForm()\n\t# Check if the Service-Provider is Facebook\n\tif form.validate_on_submit() and g.loginWith == 'Facebook' and g.currentUser['googleId'] is None:\n\t\tgoogleToken = GoogleModel.getTokenValidation(app.config['GOOGLE_CLIENT_ID'], form.token.data)\n\t\tif googleToken and googleToken['sub'] == form.googleId.data:\n\t\t\t# Continue only if the account doesn't exist yet.\n\t\t\tif not GoogleModel.doesUserExist(form.googleId.data):\n\t\t\t\tif GoogleModel.linkToUserId(g.currentUser['_id'], form.googleId.data):\n\t\t\t\t\treturn json.dumps({'result':'OK'}), 200\n\t\t\telse:\n\t\t\t\treturn abort(403)\n\t\telse:\n\t\t\treturn abort(401)\n\n\treturn abort(400)",
"def test_get_new_access_token(self):\n # get new access token and confirm side effects\n self.authorizer._get_new_access_token()\n self.on_refresh.assert_called_once()\n self.assertNotEqual(self.access_token, self.authorizer.access_token)\n\n # confirm AuthClient is still usable with new token\n get_res = self.tc.get_endpoint(GO_EP1_ID)\n self.assertEqual(get_res[\"id\"], GO_EP1_ID)",
"def test_generate_token_service_account(self):\n pass",
"def is_valid_gcal_access_token(token):\n # Build URL for token info API\n token_info = requests.get(\n '{0}?access_token={1}'.format(GOOGLE_TOKEN_INFO_API, token))\n\n # Return true if google access token still valid\n return token_info.ok and token_info.json()['expires_in'] > 100",
"def test_refresh_token_invalid_scope(self):\n self.do_refresh_token_check(scope=['openid', 'profile'])",
"def notify_expired_link(self, link_id: UUID):\n self._send_message({\n self._EVENT_TYPE: self._EXPIRED_LINK,\n self._LINK_ID: str(_not_falsy(link_id, 'link_id'))\n })",
"def test_google_user_fixture(google_id_token, user):\n u = user_for_google_id_token(google_id_token)\n assert u.id == user.id",
"def test_statusml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)",
"def test_expiration(incoming, expected):\n assert BackingInstance(\n access_token_expiration=incoming).is_expired == expected",
"def test_request_another_access_token(self):\r\n request_token = self._obtain_request_token()\r\n self._request_authorization(request_token)\r\n request_token = self._update_token_from_db(request_token)\r\n self._obtain_access_token(request_token)\r\n\r\n parameters = self._make_access_token_parameters(request_token)\r\n response = self.c.get(\"/oauth/access_token/\", parameters)\r\n self.assertEqual(response.status_code, 400)\r\n self.assertEqual(response.content, 'Invalid request token.')",
"def test_invalid_verification_link(self):\n user = User.objects.get()\n token, uid = RegistrationAPIView.send_account_activation_email(user=user, send_email=False)\n\n # create the uid from a different username\n uid = urlsafe_base64_encode(force_bytes(\"invalid_username\")).decode(\"utf-8\")\n\n response = self.verify_account(token, uid)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n user = User.objects.get()\n # Ensure the user is not verified\n self.assertFalse(user.is_verified)",
"def test_getExternalEmail(self):\n email = self.reset.getExternalEmail(self.loginAccount)\n self.assertEquals(email, 'joe@external.com')",
"def test_full_update_access_token(self):\n pass",
"def test_calendar_ical_sharing_link_get(self):\n pass",
"def test_getclassified_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n 'ml/classified',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
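Both Google id_token tests in the two records above decode a JWT without verifying its signature. A minimal sketch of that PyJWT pattern, made self-contained by signing a throwaway token locally (the secret, algorithm, and claim values here are illustrative assumptions, not taken from the test fixtures):

import jwt

# Sign a token locally so the example runs on its own (HS256 with a dummy secret).
claims = {"context": {"user": {"google": {"linked_google_account": "someone@gmail.com"}}}}
token = jwt.encode(claims, "dummy-secret", algorithm="HS256")

# Decode without signature verification, exactly as the tests do for the id_token.
decoded = jwt.decode(token, options={"verify_signature": False}, algorithms=["HS256"])
assert decoded["context"]["user"]["google"]["linked_google_account"] == "someone@gmail.com"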
Handles GET requests. Serves the "/restaurants" path to list restaurant names with edit and delete links, and serves the edit, delete, and create restaurant forms.
|
def do_GET(self):
try:
if self.path.endswith("/restaurants"):
self.send_response(200)
self.send_header('Content-type', 'text/html; charset=utf-8')
self.end_headers()
all_restaurants = session.query(Restaurant).all()
output = "<html><body><h3>All Restaurants:</h3>"
for restaurant in all_restaurants:
restaurant_url = str(restaurant.id) + "/" + restaurant.name.replace("'", "").replace(" ", "")
output += "<p>" + restaurant.name
output += "<br><a href=\"/restaurants/edit/" + restaurant_url + "\">Edit</a>"
output += "<br><a href=\"/restaurants/delete/" + restaurant_url + "\">Delete</a>"
output += "</p>"
output += "<h3><a href=\"/restaurants/create\">Create New Restaurant</a></h3>"
output += "</body></html>"
self.wfile.write(output.encode())
print(output)
return
elif "edit" in self.path:
self.send_response(200)
self.send_header('Content-type', 'text/html; charset=utf-8')
self.end_headers()
output = "<html><body>Enter new restaurant name:<br>"
all_restaurants = session.query(Restaurant).all()
edit_restaurant = int(str.split(self.path, "/")[-2])
old_restaurant = ""
for restaurant in all_restaurants:
if restaurant.id == edit_restaurant:
old_restaurant = restaurant.name
output += "<form method=\"POST\">" \
"<input name=\"edit_name\" type=\"text\" action=\"d" + self.path + "\" " \
"value=\"" + old_restaurant + "\"><input type=\"submit\" value=\"Submit\"></form>"
output += "</body></html>"
self.wfile.write(output.encode())
elif "delete" in self.path:
self.send_response(200)
self.send_header('Content-type', 'text/html; charset=utf-8')
self.end_headers()
output = "<html><body>Are you sure?"
output += "<br><form method=\"POST\">" \
"<input name=\"delete\" type=\"radio\" value=\"No\" checked>No<br>" \
"<input name=\"delete\" type=\"radio\" value=\"Yes\">Yes<br>" \
"<input type=\"submit\" value=\"Submit\">" \
"</form></body></html>"
self.wfile.write(output.encode())
print("Delete")
elif "create" in self.path:
self.send_response(200)
self.send_header('Content-type', 'text/html; charset=utf-8')
self.end_headers()
output = "<html><body>Enter the name of your new restaurant:<br>" \
"<form method=\"POST\">" \
"<input name=\"create\" type=\"text\"><input type=\"submit\" value=\"Submit\">" \
"</form></body></html>"
self.wfile.write(output.encode())
except IOError:
self.send_error(404, "File Not Found: {}".format(self.path))
|
[
"def do_GET(self):\n path = self.path\n name = path[1:] # strip the leading slash\n \n # if the path is the name of a known pokemon, get its html string and construct the response:\n if name in self.pokemon_dictionary:\n self.send_response(http.HTTPStatus.OK)\n self.send_header(\"Content-type\", 'text/html')\n self.end_headers()\n html = self.boilerplate_prefix + self.pokemon_dictionary[name][0].html_body() + self.boilerplate_suffix\n self.wfile.write(html.encode('utf-8')) \n \n else:\n self.send_error(http.HTTPStatus.NOT_FOUND, \"Pokemon Not Found\".format(self.path))",
"def do_GET(self) -> None:\n logging.info('%s - %s', self.requestline, self.client_address)\n path = urlparse(self.path).path\n if path == '/probe':\n prometheus_client.MetricsHandler.do_GET(self)\n else:\n server.SimpleHTTPRequestHandler.do_GET(self)",
"def restaurant_detail(restaurant_id):\n try:\n restaurant = db_session.query(Restaurant).filter_by(id=restaurant_id).one()\n address = db_session.query(RestaurantAddress).filter_by(id=restaurant_id).one()\n items = db_session.query(MenuItem).filter_by(restaurant_id=restaurant.id)\n return render_template(\"restaurant/restaurantownpage.html\", restaurant=restaurant, items=items, address=address)\n\n except:\n return render_template(\"restaurant/restaurants.html\", error=\"Restaurant Not Found\")",
"def do_GET(self):\n self._set_response(400, 'text/html')\n self.wfile.write(b\"no GET requests\")",
"def restaurant_finder():\n restaurant_name = request.args.get('restaurantName')\n zip_code = request.args.get('zipCode')\n radius_in_meters = request.args.get('radius')\n\n if zip_code == '':\n geo_locator = Nominatim(user_agent='myapplication')\n location = geo_locator.geocode(zip_code, country_codes=\"US\")\n lat = location.raw['lat']\n lon = location.raw['lon']\n results = restaurant_search.find_many_by_zip_code(restaurant_name, int(radius_in_meters), lat, lon)\n else:\n restaurant_prepared_statement = {'name': {'$regex': restaurant_name, \"$options\": \"i\"}}\n results = restaurant_search.find_many_by_name(restaurant_prepared_statement)\n return jsonify(results)",
"def show_restaurants():\n restaurants = session.query(Restaurant).all()\n return render_template(\"restaurants.html\", restaurants=restaurants)",
"def get_flights_service_handler(self, req):\n # Build array of flight names\n flights = [flight[0] for flight in self._available_flights]\n\n # Return flights\n return GetFlightsResponse(flights)",
"def restaurants():\n return render_template('bestrestaurants.html')",
"def api_menu(name=None, r_id=None):\n if 'restaurant_id' in request.args:\n r_id = request.args.get('restaurant_id')\n if 'restaurant' in request.args:\n name = request.args.get('restaurant')\n if name:\n # Retrieve menu items by the given restaurant name.\n try:\n recs = app.q_MenuItem().join(Restaurant).filter_by(name=name)\n except NoResultFound:\n return jsonify(error='Restaurant not found.'), 400\n except MultipleResultsFound:\n resp = jsonify(error='Multiple restaurants found. Use ID instead.')\n return resp, 400\n else:\n # Retrieve menu items by the restaurant ID.\n recs = app.q_MenuItem().filter_by(restaurant_id=r_id)\n # Convert database objects to serializable dict objects.\n recs_json = [each.sdict for each in recs]\n return jsonify(menu=recs_json)",
"def index():\n if request.method == \"POST\":\n\n ingredient_list = request.values.get('ingredient')\n\n try:\n response = api.get_ingredients(ingredient_list)\n single_recipe = random.choice(response[\"matches\"])\n\n except: # silencing all errors - bad!\n ERRORS.append(\"Something went wrong!\")\n return render_template(\n \"index.html\", recipe=single_recipe, recipes=response, errors=ERRORS\n )\n else:\n return render_template(\"index.html\")",
"def get(self, request, *args, **kwargs):\r\n query = request.GET.get('q')\r\n if not query:\r\n query = \"\"\r\n hotels = Hotel.objects.filter(\r\n city__name__icontains=query\r\n )\r\n return render(request, self.template_name, {'hotels':hotels, 'query':query})",
"def test_request_handler_working_get_path(self):\n\t\t\n\t\tenviron = create_environ('/index', 'GET')\n\t\tresponse = self.app.request_handler(environ, lambda x, y: None)\n\t\tself.assertEqual(response, 'test')",
"def restaurants_api():\n restaurants = session.query(Restaurant).all()\n response = jsonify(\n restaurants=[restaurant.serialize for restaurant in restaurants]\n )\n\n return response",
"def get(self, request, slug, *args, **kwargs):\n\n queryset = Recipe.objects.filter(status=1)\n recipe = get_object_or_404(queryset, slug=slug)\n reviews = recipe.reviews.filter(approved=True).order_by('created_on')\n favourited = False\n if recipe.favourites.filter(id=self.request.user.id).exists():\n favourited = True\n\n return render(\n request,\n 'recipe_detail.html',\n {\n \"recipe\": recipe,\n \"reviews\": reviews,\n \"reviewed\": False,\n \"favourited\": favourited,\n \"review_form\": ReviewForm()\n\n },\n )",
"def describe_restaurant(self):\r\n print(self.restaurant_name.title() + \" serves \" + \r\n self.cuisine_type.title() + \".\")",
"def _HandleGet(self, short_url):\n raise web.HTTPError(405)",
"def index():\n\t# We have received the query string, display the results\n\tif \"last_name\" in request.args or \"first_name\" in request.args:\n\t\tfirst_name = request.args[\"first_name\"] if \"first_name\" in request.args else None\n\t\tlast_name = request.args[\"last_name\"] if \"last_name\" in request.args else None\n\t\t\n\t\t# Get the directory and filter the entries based on the keyword, then sort them\n\t\txml = generate_directory_xml(ews.yield_filtered_contacts(app.client, first_name=first_name, last_name=last_name))\n\t# If we haven't received the query string, display the search menu\n\telse:\n\t\txml = generate_search_xml()\n\treturn app.response_class(xml, mimetype='text/xml')",
"def get(self, url_path):\n # Check that the ShortURL is valid.\n short_url = yield self._CheckShortURL(url_path)\n\n # Invoke the derived class to handle the request.\n self._HandleGet(short_url, **short_url.json)",
"def getReviews(self, res_name, entity_id = 0, entity_type = \"\"):\n self.logger.info(\"Restaurant review for : %s\", res_name)\n res_review = []\n res_id = 0\n if entity_id == 0 and not entity_type:\n zomato_url = \"https://developers.zomato.com/api/v2.1/search?q=\"+res_name\n else:\n zomato_url = \"https://developers.zomato.com/api/v2.1/search?entity_id=\"+str(entity_id)+\"&entity_type=\"+entity_type+\"&q=\"+res_name\n\n resp = requests.get(zomato_url,headers=self.headers)\n resp_dict = json.loads(resp.text)\n restaurants = (resp_dict['restaurants'])\n #print (\"Found restaurants : \",restaurants)\n\n for r in restaurants:\n print (r['restaurant']['name'])\n # Sometimes the queries will contains results where the Restaurant\n # name is part of the address. So check specifically for the name\n if res_name == r['restaurant']['name']:\n zomato_dict = {}\n res_id = r['restaurant']['R']['res_id']\n self.logger.info(\"For %s, Restaurant ID = %d\", res_name, res_id)\n zomato_dict['fbcard_name'] = r['restaurant']['name']\n zomato_dict['fbcard_subtitle'] = \"Votes : \" + str(r['restaurant']['user_rating']['votes']) + \"\\n\" + \"Average Cost for Two : \" + str(r['restaurant']['average_cost_for_two'])\n zomato_dict['fbcard_url'] = r['restaurant']['url']\n zomato_dict['fbcard_photo'] = r['restaurant']['featured_image']\n menu_url = r['restaurant']['menu_url']\n review_url = menu_url.replace(\"menu\", \"reviews\", 1)\n #self.logger.info(\"Review URL = %s\", review_url)\n zomato_dict['button_url'] = review_url\n zomato_dict['button_title'] = \"Rating: \" + r['restaurant']['user_rating']['aggregate_rating'] + \"/5 (\" + r['restaurant']['user_rating']['rating_text'] + \")\"\n res_review.append(zomato_dict)\n\n return res_review"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
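The do_GET handler in the restaurant record above assumes it is defined on a BaseHTTPRequestHandler subclass that an HTTPServer dispatches to. A minimal, self-contained sketch of that wiring; the handler class name, the port, and the static list standing in for the SQLAlchemy session query are all illustrative assumptions:

from http.server import BaseHTTPRequestHandler, HTTPServer

RESTAURANTS = ["Urban Burger", "Panda Garden"]  # stand-in for session.query(Restaurant).all()

class RestaurantHandler(BaseHTTPRequestHandler):  # hypothetical class name
    def do_GET(self):
        if self.path.endswith("/restaurants"):
            self.send_response(200)
            self.send_header("Content-type", "text/html; charset=utf-8")
            self.end_headers()
            body = "<html><body>" + "".join("<p>%s</p>" % name for name in RESTAURANTS) + "</body></html>"
            self.wfile.write(body.encode())
        else:
            self.send_error(404, "File Not Found: {}".format(self.path))

if __name__ == "__main__":
    # Port 8080 is an arbitrary choice for the sketch.
    HTTPServer(("", 8080), RestaurantHandler).serve_forever()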
Attempts to resolve the given absolute or relative ``path``. If it doesn't exist as is, tries to create an absolute path using the ``relative_prefix``. If that fails, tries relative/absolute versions with each of ``possible_extensions``.
|
def resolve_possible_paths(path, relative_prefix, possible_extensions=None, leading_underscore=False):
possible_extensions = [''] + list(possible_extensions) if possible_extensions else ['']
possible_paths = [path + e if os.path.isabs(path + e) else os.path.join(relative_prefix, path + e)
for e in possible_extensions]
if leading_underscore and not os.path.basename(path).startswith('_'):
extra_paths = [os.path.join(os.path.dirname(p), '_' + os.path.basename(p))
for p in possible_paths]
possible_paths = possible_paths + extra_paths
for p in possible_paths:
p = os.path.normpath(p)
if os.path.isfile(p):
return p
return None
|
[
"def resolvePath(path):\n global prefix\n if os.path.isabs(path):\n return path\n return os.path.abspath(os.path.join(prefix, path))",
"def _resolve_relative_path(self, path):\n if not os.path.isabs(path):\n return os.path.join(self._relpath_root, path)\n else:\n return path",
"def resolve(self, *args):\n rtn = os.path.join(self.__base, *args)\n if not self.__exists(rtn):\n raise Exception(\"Bad path: %s (base: %s)\" % (rtn, self.__base))\n return rtn",
"def resolve_path(path, prefix=None, normpath=True):\n sys_prefix = sys.prefix\n site = distutils.sysconfig.get_python_lib()\n path = path.replace('<prefix>', sys_prefix).replace('<site>', site)\n\n def repl_env(mobj):\n name = mobj.group(0)[2:-1]\n default = None\n if '|' in name:\n name, default = name.split('|', 1)\n value = os.environ.get(name, default)\n if value is not None:\n return value\n warnings.warn(\n f'resolve_path: environment variable {name} not defined.',\n stacklevel=4)\n return mobj.group(0)\n\n path = re.sub(r'[$][{]([a-zA-Z_][a-zA-Z_\\d]*([|][^}]*|))[}]',\n repl_env, path)\n if not os.path.isabs(path) and prefix is not None:\n path = os.path.join(prefix, path)\n if normpath:\n path = os.path.normpath(path)\n return path",
"def adjust_rel_path(path, prefix):\n result = path\n if not os.path.isabs(path):\n new_path = os.path.relpath(os.path.join(prefix, path))\n # if the does not exist we do nothing. There are three cases:\n # - path was already fixed and therefore cannot be found\n # - it is a url for a git repo\n # - the original path is broken\n if os.path.exists(new_path):\n # docker wants the relpath to start with a dot\n result = os.path.join('.', new_path)\n return result",
"def _resolve_include(\n path: str, include_dirs: Sequence[pathlib.Path], mount_point: str\n) -> pathlib.Path:\n if path.startswith(\"/\"):\n full_path = pathlib.Path(path)\n if not full_path.exists():\n raise ValueError(f\"Cannot find include path {path}\")\n return full_path\n if path.endswith(EXT):\n path = path[: -len(EXT)]\n possible_includes = []\n mount_point = mount_point.strip(\".\")\n if mount_point:\n include_dirs = list(include_dirs) + [\n p / mount_point.replace(\".\", \"/\") for p in include_dirs\n ]\n for include_path in include_dirs:\n full_path = include_path / (path + EXT)\n if full_path.exists():\n return full_path\n elif full_path.parent.exists():\n possible_includes.extend(\n str(p.resolve())[len(str(include_path.resolve())) : -len(EXT)].lstrip(\"/\")\n for p in full_path.parent.iterdir()\n if str(p).endswith(EXT)\n )\n\n err_msg = f\"Cannot find include {path}\"\n if possible_includes:\n err_msg += \". Possible typo, known includes:\\n%s\" % \"\\n\".join(possible_includes)\n raise ValueError(err_msg)",
"def resolve_path(path):\n urls = [(r'^$', books),\n (r'^book/(id[\\d]+)$', book)]\n matchpath = path.lstrip('/')\n for regexp, func in urls:\n match = re.match(regexp, matchpath)\n if match is None:\n continue\n args = match.groups([])\n return func, args\n # we get here if no url matches\n raise NameError",
"def resolved_path(path_or_uri_like):\n if \"://\" not in path_or_uri_like:\n return path_or_uri_like\n elif path_or_uri_like.startswith(\"file://\"):\n return path_or_uri_like[len(\"file://\"):]\n else:\n return UNRESOLVED_URI",
"def resolve_path(path, parser, context):\n # Find out what type it is:\n # If it contains any / or {{ resolving as django var\n # is going to throw an error. Prevent unneeded exception, just skip\n # rendering as var in that case.\n path_resolved = \"\"\n if not in_list([\"{\", \"}\", \"\\\\\", \"/\"], path):\n compiled_filter = parser.compile_filter(strip_quotes(path))\n path_resolved = compiled_filter.resolve(context)\n # if resolved filename is empty, resolution failed, just treat this\n # param as a filepath\n if path_resolved == \"\":\n filename = strip_quotes(path)\n else:\n filename = path_resolved\n # if there are {{}}'s in there, try to substitute this with url\n # parameter given in the url\n filename = substitute(filename, context[\"request\"].GET.items())\n # If any {{parameters}} are still in filename they were not replaced.\n # This filename is missing information, show this as error text.\n if re.search(r\"{{\\w+}}\", str(filename)):\n missed_parameters = re.findall(r\"{{\\w+}}\", str(filename))\n found_parameters = context[\"request\"].GET.items()\n if not found_parameters:\n found_parameters = \"None\"\n error_msg = (\n \"I am missing required url parameter(s) %s, url parameter(s) found: %s \"\n \"\" % (missed_parameters, found_parameters)\n )\n raise PathResolutionException(error_msg)\n\n return filename",
"def expand_relative(path, basepath):\n path = os.path.expanduser(path)\n if os.path.isabs(path):\n return os.path.realpath(path) # Return canonical path, already absolute.\n if not os.path.isabs(basepath):\n basepath = os.path.realpath(os.path.abspath(basepath))\n joined_path = os.path.realpath(os.path.abspath(os.path.join(basepath, path)))\n return joined_path",
"def resolve_symlink(path):\n if not is_windows():\n # Only does this dance on Windows.\n return path\n parts = os.path.normpath(path).split(os.path.sep)\n for i in range(2, len(parts)):\n partial = os.path.sep.join(parts[:i])\n if os.path.isfile(partial):\n with open(partial) as f:\n link = f.read()\n assert '\\n' not in link and link, link\n parts[i-1] = link\n return os.path.normpath(os.path.sep.join(parts))",
"def _get_absolute_file_path(self, path=None):\n\n def raise_for_absolute_path(absolute_path):\n\n if len(absolute_path.suffixes) != 1:\n raise RuntimeError('Paths with multiple suffixes should be weeded out.')\n\n if absolute_path.suffix != '.py':\n if absolute_path.suffix in EXTENSIONS:\n raise UnsupportedFileType(absolute_path)\n else:\n raise UnknownFileType(absolute_path)\n\n if not absolute_path.parent.exists():\n raise SettingFileDirDoesNotExist(absolute_path.parent)\n\n if not absolute_path.exists():\n raise SettingFileDoesNotExist(absolute_path)\n\n # This can happen if the original path points to a directory with a file\n # like name (i.e. a directory named folder.py).\n if not absolute_path.is_file():\n raise SettingFileIsNotFilePath(absolute_path)\n\n file_path = path or self.file_path()\n base_path = self.absolute_base_path()\n\n if base_path:\n if file_path.is_absolute():\n try:\n file_path.relative_to(self.absolute_base_path())\n except ValueError:\n raise SettingFileLoadError(file_path, detail=(\n \"The path %s is not relative to the absolute base path %s.\"\n % (str(file_path), str(self.absolute_base_path()))\n ))\n else:\n raise_for_absolute_path(file_path)\n return file_path\n else:\n try:\n relative_path = file_path.relative_to(self.base_path())\n except ValueError:\n absolute_file_path = file_path.absolute()\n try:\n relative_path = absolute_file_path.relative_to(self.absolute_base_path())\n except ValueError:\n # If the filepath is not absolute and cannot be found to be relative\n # to the base path or absolute base path, we append it to the\n # base path.\n file_path = self.base_path().joinpath(file_path)\n absolute_file_path = file_path.absolute()\n raise_for_absolute_path(absolute_file_path)\n return absolute_file_path\n else:\n # Example:\n # absolute_base_path = '/root/users/app/settings'\n # file_path = 'app/settings/development/dev.py'\n # absolute_file_path = '/root/users/app/settings/development/dev.py'\n # relative_path = 'development/dev.py'\n absolute_file_path = self.absolute_base_path().joinpath(relative_path)\n raise_for_absolute_path(absolute_file_path)\n return absolute_file_path\n else:\n # Example:\n # base_path = 'app/settings'\n # file_path = 'app/settings/development/dev.py'\n # relative_path = 'development/dev.py'\n file_path = self.base_path().joinpath(relative_path)\n absolute_file_path = file_path.absolute()\n raise_for_absolute_path(absolute_file_path)\n return absolute_file_path\n else:\n absolute_file_path = file_path\n if not file_path.is_absolute():\n absolute_file_path = file_path.absolute()\n raise_for_absolute_path(absolute_file_path)\n return absolute_file_path",
"def _resolvePath(self, path):\n obj = __import__(path[0])\n objs = [obj]\n fullpath = path[0]\n for name in path[1:]:\n fullpath += '.' + name\n if not hasattr(obj, name):\n __import__(fullpath) # not very clean, but seems to work\n obj = getattr(obj, name)\n objs.append(obj)\n return objs",
"def resolve_file_path(file_path):\n if not os.path.isfile(file_path):\n # Allow loading config files relative to rltime/configs directory\n base_path = os.path.dirname(rltime.__file__)\n rel_file_path = os.path.join(base_path, \"configs\", file_path)\n if os.path.isfile(rel_file_path):\n return rel_file_path\n return file_path",
"def resolve_path(self, path, tags):\n aliases = QFramework.TQTaggable()\n aliases.setTagString(\"lepch\", tags.getTagStringDefault(\"input.lepch\", \"?\"))\n aliases.setTagString(\"channel\", tags.getTagStringDefault(\"input.channel\", \"?\"))\n aliases.setTagString(\"eatachannel\", tags.getTagStringDefault(\"input.datachannel\", \"?\"))\n aliases.importTagsWithoutPrefix(tags, \"alias.\")\n aliases.importTagsWithoutPrefix(tags, \"input.\")\n\n return aliases.replaceInText(path)",
"def is_relative_to(path: pathlib.Path, base: pathlib.Path) -> bool:\n try:\n path.relative_to(base)\n return True\n except ValueError:\n return False",
"def _resolve_path_load(self, cdx, is_original, failed_files):\n\n if is_original:\n (filename, offset, length) = (cdx['orig.filename'],\n cdx['orig.offset'],\n cdx['orig.length'])\n else:\n (filename, offset, length) = (cdx['filename'],\n cdx['offset'],\n cdx['length'])\n\n # optimization: if same file already failed this request,\n # don't try again\n if failed_files is not None and filename in failed_files:\n raise ArchiveLoadFailed('Skipping Already Failed', filename)\n\n any_found = False\n last_exc = None\n last_traceback = None\n for resolver in self.path_resolvers:\n possible_paths = resolver(filename)\n\n if possible_paths:\n for path in possible_paths:\n any_found = True\n try:\n return self.record_loader.load(path, offset, length)\n\n except Exception as ue:\n last_exc = ue\n import sys\n last_traceback = sys.exc_info()[2]\n\n # Unsuccessful if reached here\n if failed_files is not None:\n failed_files.append(filename)\n\n if last_exc:\n #msg = str(last_exc.__class__.__name__)\n msg = str(last_exc)\n else:\n msg = 'Archive File Not Found'\n\n raise ArchiveLoadFailed(msg, filename), None, last_traceback",
"def auto_detect_file(parent_paths, relative_path):\n rc = []\n for p in parent_paths:\n candidate = os.path.join(p, relative_path)\n if os.path.isfile(candidate):\n rc.append(p)\n if len(rc) > 1:\n print('SConstruct WARNING: found \"{}\" in multiple ' + \\\n 'root directories:\\n {}\\n' + \\\n ' default to {}'.format(relative_path, rc, rc[0]))\n\n return rc[0], os.path.join(rc[0], relative_path)",
"def resolve(self, s, paths, context, merge):\n if isinstance(s, dict):\n return self.resolve_dict(s, paths, context, merge)\n elif isinstance(s, list):\n return list(self.resolve_list(s, paths, context, merge))\n elif not self.can_resolve(s):\n return s\n\n base = self.resolve_function(s, paths, context, merge)\n\n # If we can string interpolate the result, lets do that\n if isinstance(base, str):\n base = self.resolve_interpolates(base, context)\n\n return base"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
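The resolve_possible_paths record above is easiest to read with a concrete call. A small sketch, assuming the function itself is already in scope (it is not importable from any named module here), that builds a throwaway directory and resolves an extensionless, underscore-less path to an SCSS partial:

import os
import tempfile

# Build a tiny tree: <tmp>/styles/_mixins.scss
root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "styles"))
partial = os.path.join(root, "styles", "_mixins.scss")
open(partial, "w").close()

# The relative path lacks both the extension and the leading underscore, but the
# function tries the prefix, each extension, and the "_" variant in turn.
resolved = resolve_possible_paths(
    "styles/mixins", root, possible_extensions=[".scss"], leading_underscore=True
)
assert resolved == os.path.normpath(partial)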
Insert a PPI_preview into the database and update the object's id with the newly inserted row id
|
def create_ppi_preview(self):
ppi_id = None
sqlObj = _PPIpreview_sql_new()
ppi_id = sqlObj.insert_PPI(self.score_ppi_prev, self.type_ppi_prev, self.fk_couple, self.fk_prot_bact, self.fk_prot_phage)
self.id_ppi_prev = ppi_id
return ppi_id
|
[
"def write_preview(self, previewmeta):\n\n if (isinstance(previewmeta['db_plate_id'], int) and \n (previewmeta['db_plate_id'] > 0)):\n plate_id = previewmeta['db_plate_id']\n else:\n plate_id = self.get_plate_id(previewmeta['plate_num'],\n previewmeta['archive_id'])\n\n if plate_id is None:\n plate_id = self.get_plate_id_wfpdb(previewmeta['wfpdb_id'])\n\n col_list = ['plate_id']\n val_tuple = (plate_id,)\n\n # Add preview_id only if it is given in previewmeta\n if (isinstance(previewmeta['preview_id'], int) and \n (previewmeta['preview_id'] > 0)):\n col_list.append('preview_id')\n val_tuple += (previewmeta['preview_id'],)\n\n # Get preview table columns from database schema\n preview_table = self.get_table_dict('preview')\n del preview_table['plate_id']\n del preview_table['preview_id']\n\n for k in preview_table.keys():\n if k in previewmeta:\n col_list.append(k)\n val_tuple = val_tuple + (previewmeta[k], )\n\n col_str = ','.join(col_list)\n val_str = ','.join(['%s'] * len(col_list))\n\n sql = ('INSERT INTO {} ({}) VALUES ({}) RETURNING preview_id'\n .format(self.table_name('preview'), col_str, val_str))\n preview_id = self.db.execute_query(sql, val_tuple)\n return preview_id",
"def test_preview_save(self):\n\n c = self.c\n example_data = {\n 'content': 'The modified text',\n 'current_revision': '1',\n 'preview': '1',\n # 'save': '1', # probably not too important\n 'summary': 'why edited',\n 'title': 'wiki test'\n }\n\n # test preview\n response = c.post(\n reverse('wiki:preview', kwargs={'path': ''}), # url: '/_preview/'\n example_data\n )\n\n self.assertContains(response, 'The modified text')",
"def preview(record):\n pass",
"def _new_video_inserted(self, video):\n pass",
"def upload_preview(connector, host, key, collectionid, previewfile, previewmetadata):\n\n connector.message_process({\"type\": \"collection\", \"id\": collectionid}, \"Uploading collection preview.\")\n\n logger = logging.getLogger(__name__)\n headers = {'Content-Type': 'application/json'}\n\n # upload preview\n url = '%sapi/previews?key=%s' % (host, key)\n with open(previewfile, 'rb') as filebytes:\n result = requests.post(url, files={\"File\": filebytes},\n verify=connector.ssl_verify if connector else True)\n result.raise_for_status()\n previewid = result.json()['id']\n logger.debug(\"preview id = [%s]\", previewid)\n\n # associate uploaded preview with original collection\n if collectionid and not (previewmetadata and 'section_id' in previewmetadata and previewmetadata['section_id']):\n url = '%sapi/collections/%s/previews/%s?key=%s' % (host, collectionid, previewid, key)\n result = requests.post(url, headers=headers, data=json.dumps({}),\n verify=connector.ssl_verify if connector else True)\n result.raise_for_status()\n\n # associate metadata with preview\n if previewmetadata is not None:\n url = '%sapi/previews/%s/metadata?key=%s' % (host, previewid, key)\n result = requests.post(url, headers=headers, data=json.dumps(previewmetadata),\n verify=connector.ssl_verify if connector else True)\n result.raise_for_status()\n\n return previewid",
"def inserta_imagen(id_pokemon, nombre_pokemon, ruta_imagen):\r\n\r\n\tconn = None\r\n\ttry:\r\n\t\tconn_string = \"host='localhost' dbname='dbpokemon' user='postgres' password='postgres'\"\r\n\t\tconn = psycopg2.connect(conn_string)\r\n\r\n\t\timagen = open(ruta_imagen, 'rb').read()\r\n\t\ttamanio = len(imagen)\r\n\r\n\t\tcursor = conn.cursor()\r\n\t\tcursor.execute(\"INSERT INTO pokemon (id_pokemon, nombre, tamanio_imagen, imagen) VALUES (%s,%s,%s,%s);\",(id_pokemon, nombre_pokemon, tamanio, psycopg2.Binary(imagen)))\r\n\t\tconn.commit()\r\n\t\tcursor.close()\r\n\texcept (Exception, psycopg2.DatabaseError) as error:\r\n\t\tprint(error)\r\n\tfinally:\r\n\t\tif conn is not None:\r\n\t\t\tconn.close()",
"def push_preview(self, **kwargs):\n return _taskpipeoperation(self, 'push_preview', **kwargs)",
"def preview_capture_example():",
"def on_preview_draw(self):\n if self.first_point_input:\n return\n\n input_pnt = self.coord_input.GetCurrentPoint(self.first_point).GetPoint()\n self.draw_preview(input_pnt, False)",
"def preview_resource_page(self):\n\n self.resource_preview.click()",
"def on_preview(self, operation, preview, context, parent):\n operation.run(Gtk.PrintOperationAction.PREVIEW, None)\n return False",
"def sql_insert(self, database):\n\n database.execute(\n '''\n INSERT OR REPLACE INTO franchises (\n franchise,\n name\n ) VALUES (\n ?,\n ?\n )\n ''', (self['franchise'], self['name']))",
"def save_player_db(self):\n players_db = self.player_db\n self.p_id = players_db.insert(self.serialize_player())\n players_db.update({'id': self.p_id}, doc_ids=[self.p_id])",
"def start_preview_stream(self) -> GoProResp:",
"def insert_snippet(self, data):\r\n\r\n template = data['template']\r\n active_view().run_command('insert_snippet', {'contents': template})",
"def set_previewable(previewable):",
"def preview_link(self, preview_link):\n\n self._preview_link = preview_link",
"def publish_pclass(id):\n data = ProjectClass.query.get_or_404(id)\n data.set_published()\n db.session.commit()\n\n return redirect(redirect_url())",
"def save(self):\n if not self._in_db:\n raise ValueError(\n \"Use `sample.save()` to save newly added frames to a sample\"\n )\n\n super().save()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return all PPI scores, grouped in an array, for the given couple id
|
def get_ppi_preview_scores_grouped_by_couple_id(couple_id):
list_scores_PPI = []
sqlObj = _PPIpreview_sql_new()
results = sqlObj.select_all_ppi_preview_grouped_by_couple_id(couple_id)
for element in results:
list_scores_PPI.append(int(element[2]))
return list_scores_PPI
|
[
"def _get_scores(self):\n a = numpy.array([x['scores'] for x in self.results])\n return a",
"def score(self, pairs):\n pass",
"def get_rhos(loan_repaid_probs, scores):\n n_scores = len(scores)\n n_groups = len(loan_repaid_probs)\n rhos = np.zeros((n_groups, n_scores))\n for j, s in enumerate(scores):\n for i in range(n_groups):\n rhos[i, j] = loan_repaid_probs[i](s)\n return rhos",
"def get_gene_scores_from_chrom(chrom_array, chrom, all_genes, genes_by_chrom,\n genes_scores):\n try:\n id_list = genes_by_chrom[chrom]\n except KeyError:\n id_list = []\n for id in id_list:\n gene = all_genes[id]\n score = sum(chrom_array[gene.start:gene.end])\n genes_scores[gene.ensembl_id] = score",
"def get_list(person: str, person_to_friends: Dict[str, List[str]], \\\n person_to_networks: Dict[str, List[str]]) -> List[Tuple[str, int]]:\n\n friends_and_scores = []\n\n for p in potential_friends(person_to_friends, person):\n if scoring(person, p, person_to_friends, person_to_networks) >= 1:\n friends_and_scores.append((p, scoring(person, p, person_to_friends, \\\n person_to_networks)))\n return friends_and_scores",
"def get_all_ppi_preview_couple():\n list_scores_PPI_fk_couple = []\n sqlObj = _PPIpreview_sql_new()\n results = sqlObj.select_all_ppi_preview_fk_couples()\n for element in results:\n list_scores_PPI_fk_couple.append(element[0])\n return list_scores_PPI_fk_couple",
"def sum_properties_scores(scores):\n return [sum(t) for t in [x for x in zip(*scores)]]",
"def get_ppmdeviations(self, theomips = True):\r\n ppm_results_list = []\r\n if theomips == True:\r\n template = self.get_theomips()\r\n else:\r\n leadcamel_key = self.get_anchor_results_keys_sorted()[-1]\r\n template = self.anchor_results_dict[leadcamel_key].get_expmips()\r\n keyssorted = self.get_anchor_results_keys_sorted()\r\n for key in keyssorted:\r\n ppm_list = []\r\n expmips = self.anchor_results_dict[key].get_expmips()\r\n for ele in expmips:\r\n expmz = ele[0]\r\n x15N = ele[-1]\r\n for x in template:\r\n if x[-1] == x15N:\r\n ppm = mfbt.Ppmprecision(x[0], expmz).getppmprecision()\r\n ppm_list.append(ppm)\r\n ppm_results_list.append((key, ppm_list))\r\n return ppm_results_list",
"def get_scores(self, player_id=None):\n if player_id is None:\n return self.score\n else:\n return self.order_for_player(player_id, self.score)",
"def build_pairs(code, all_possible):\n pairs = []\n r = 2\n for i, key in enumerate(all_possible.keys()):\n if i < r:\n score = all_possible[key][1]\n for code2 in all_possible[key][0]:\n pairs += [(code, code2, score, key)]\n return pairs",
"def unzip_scores(score):\n return np.array([i for i, j in score]), np.array([j for i, j in score])",
"def mi_score(res):\r\n res = res.sort_values('ID')\r\n score = skm.adjusted_mutual_info_score(meta['Group'], res['Group'])\r\n a = {'score': score, 'nc': len(res.groupby('Group'))}\r\n return a",
"def regression_scores_to_pred(\n accuracy_group_proportions: List[float], scores: List[float]\n) -> List[int]:\n assert sum(accuracy_group_proportions) == pytest.approx(1.0)\n\n accumulated_percentile = 0\n bounds = []\n for i in range(3):\n accumulated_percentile += accuracy_group_proportions[i]\n bounds.append(np.percentile(scores, accumulated_percentile * 100))\n\n def classify(score):\n if score <= bounds[0]:\n return 0\n elif score <= bounds[1]:\n return 1\n elif score <= bounds[2]:\n return 2\n else:\n return 3\n\n return list(map(classify, scores))",
"def get_result_as_tuples(self) -> List[Tuple[discord.Member, int]]:\n tuples = [(member, self.score_dict[member]) for member in self.all_members]\n tuples.sort(key=lambda x: x[1], reverse=True) # TODO extra handling for case that a score appears twice or more\n return tuples",
"def get_model_scores(pred_boxes):\n model_score = {}\n for img_id, val in pred_boxes.items():\n for score in val['scores']:\n if score not in model_score.keys():\n model_score[score] = [img_id]\n else:\n model_score[score].append(img_id)\n return model_score",
"def _get_best_scores(self):\n a = numpy.array([x['best_scores'] for x in self.results])\n return a",
"def aggregate_points(tournament_results):\n # each of these is a dict of events to dicts of points by school\n # {Mixed Foil: {SLO: 3, Cal: 42}}\n results = [tournament_points(tournament) for tournament in tournament_results]\n\n # get list of all events in case events differ between tournaments\n # shouldn't be the case for ncifl but whatever.\n events = set()\n for result in results:\n for event in result.keys():\n events.add(event)\n\n all_results = dict(zip(events, [{} for i in xrange(len(events))]))\n for result in results:\n for event in result:\n # combine with the results from this tournament\n for school in result[event]:\n try:\n all_results[event][school] += result[event][school]\n except KeyError:\n all_results[event][school] = result[event][school]\n\n print_ordered(all_results)\n\n totals = {}\n for event in all_results:\n for school in all_results[event]:\n try:\n totals[school] += all_results[event][school]\n except KeyError:\n totals[school] = all_results[event][school]\n\n rank = 1\n for score in sort_dict_by_val(totals):\n print rank, score\n rank += 1",
"def get_tfidf_scores(tfidf: TfIdf) -> list[dict[str, float]]:\n return [\n {term: tfidf(document_id, term) for term in tfidf.terms}\n for document_id in range(tfidf.nr_documents)\n ]",
"def scores_by_class(self):\n return self.map_results(lambda x: set(x['bounding_box_classes']) & set(x['ground_truth_classes']),\n f1_score)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return all treated PPI preview couples
|
def get_all_ppi_preview_couple():
list_scores_PPI_fk_couple = []
sqlObj = _PPIpreview_sql_new()
results = sqlObj.select_all_ppi_preview_fk_couples()
for element in results:
list_scores_PPI_fk_couple.append(element[0])
return list_scores_PPI_fk_couple
|
[
"def getPronunciations(self):\n pass",
"def data_for_paragraph_selector(self): #TODO maybe, if you're bored and there is another lockdown, rename this.\n result = []\n for point in self.data:\n # supp_facts = set([fact[0] for fact in point[\"supporting_facts\"]])\n\n supp_facts_detailed = {}\n for fact in point[\"supporting_facts\"]:\n if supp_facts_detailed.get(fact[0]):\n supp_facts_detailed[fact[0]].append(fact[1])\n else:\n supp_facts_detailed[fact[0]] = [fact[1]]\n result.append([\n point[\"_id\"],\n supp_facts_detailed, # we used to use supp_facts here\n point[\"question\"],\n point[\"context\"],\n point[\"answer\"]\n ])\n return result",
"def sample_pareto_fronts(self, num_of_design_samples=5,\n num_of_gp=5,\n num_of_design_points=1000, verbose=False):\n import design\n Y_p = []\n for _ in xrange(num_of_design_samples):\n X_design = design.latin_center(num_of_design_points, self.X.shape[1])\n Y = []\n for m in self.surrogates:\n _m = copy.copy(m)\n _m.Gaussian_noise.variance.unconstrain()\n _m.Gaussian_noise.variance.fix(1e-8)\n y = _m.posterior_samples(X_design, size=num_of_gp, full_cov=True)\n Y.append(y)\n Y = np.array(Y)\n for i in xrange(Y.shape[2]):\n if verbose:\n print 'sampling pareto', _, i\n idx = get_idx_of_observed_pareto_front(Y[:, :, i].T)\n y_p = Y[:, idx, i].T\n Y_p.append(y_p)\n return Y_p",
"def appearance_reconstructions(self):\n if self.appearance_parameters:\n return [self.fitter.appearance_model.instance(w)\n for w in self.appearance_parameters]\n else:\n return [self.fitter.template for _ in self.shapes]",
"def pvs(self):\n pv_types = ('readback', 'readset', 'setpoint')\n pv_names = (self.readback, self.readset, self.setpoint)\n pv_dict = dict(zip(pv_types, pv_names))\n return {k: v for k, v in pv_dict.items() if v is not None}",
"def dialogPrms(self):\n return self.parametersWidget_dialog.results()",
"def pnf_info(self) -> List[PnfInfoIm]:\n return self._pnf_info",
"def find_container_p_tags(document):\n ret = []\n\n for p_tag in document.findAll('p'):\n if has_img_tag(p_tag):\n ret.append(p_tag)\n return ret",
"def get_available_structure_predictions( self ):\n _check_type(self)\n return _get_available(self, \"psipred_\")",
"def get_primals(self):\n pass",
"def create_ppi_preview(self):\n ppi_id = None\n sqlObj = _PPIpreview_sql_new()\n\n ppi_id = sqlObj.insert_PPI(self.score_ppi_prev, self.type_ppi_prev, self.fk_couple, self.fk_prot_bact, self.fk_prot_phage)\n self.id_ppi_prev = ppi_id\n return ppi_id",
"def preview_capture_example():",
"def remove_PPI_preview_by_protein_id(id_protein):\n sqlObj = _PPIpreview_sql_new()\n id_couple = sqlObj.remove_PPI_preview_by_prot_id(id_protein)\n return id_couple",
"def selected_gpencil_frames(self, context):\n ctrl_points = set()\n for o in context.selected_objects:\n if o.type == 'GPENCIL':\n for l in o.data.layers:\n for f in l.frames:\n if f.select:\n ctrl_points.add(f.frame_number)\n return sorted(ctrl_points)",
"def select_proposals_with_visible_keypoints(proposals):\n ret = []\n all_num_fg = []\n selection_masks = []\n for proposals_per_image in proposals:\n # If empty/unannotated image (hard negatives), skip filtering for train\n if len(proposals_per_image) == 0:\n ret.append(proposals_per_image)\n continue\n gt_keypoints = proposals_per_image.gt_keypoints.tensor\n # #fg x K x 3\n vis_mask = gt_keypoints[:, :, 2] >= 1\n xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1]\n proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1) # #fg x 1 x 4\n kp_in_box = (\n (xs >= proposal_boxes[:, :, 0])\n & (xs <= proposal_boxes[:, :, 2])\n & (ys >= proposal_boxes[:, :, 1])\n & (ys <= proposal_boxes[:, :, 3])\n )\n selection = (kp_in_box & vis_mask).any(dim=1)\n selection_idxs = torch.nonzero(selection).squeeze(1)\n all_num_fg.append(selection_idxs.numel())\n ret.append(proposals_per_image[selection_idxs])\n selection_masks.append(selection)\n\n storage = get_event_storage()\n storage.put_scalar(\"keypoint_head/num_fg_samples\", np.mean(all_num_fg))\n return ret, selection_masks",
"def getFaceToPossibilities(self):\n\t\traise NotImplementedError()",
"def rep_pra_infos(self):\n return self._rep_pra_infos",
"def preview_patterns(is_ctrl_held, grid, pattern_name):\n\n pos = pygame.mouse.get_pos()\n\n if is_ctrl_held:\n pattern = grid.preview(pos, pattern_name)\n else:\n pattern = grid.preview(pos)\n\n return pattern, pos",
"def internal_episodes(self):\n comp = self.composition\n return self.properties[comp] if comp != None else []"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the maximum PPI score obtained in the DB
|
def get_max_ppi_score():
list_scores_PPI_fk_couple = []
sqlObj = _PPIpreview_sql_new()
results = sqlObj.select_all_score_PPI()
for element in results:
list_scores_PPI_fk_couple.append(element[2])
max_value = max(list_scores_PPI_fk_couple)
return max_value
|
[
"def max_objective_score(self):\r\n return self.data.maxObjectivePlayerScore",
"def personal_best(self) -> int:\n return max(self._scores)",
"def get_max_score(self):\n return sum(self.maxpoints.values())",
"def max_team_score(self):\r\n return self.data.maxTeamObjective",
"def get_p_max(self, query_id):\n k_q = len(self.data['queries'][query_id][0]['values'])\n k_r = len(self.data['reference'][0]['values'])\n p_max = np.floor(min(k_q, k_r)/abs(k_q - k_r)) if abs(k_q - k_r) > 0 else k_r\n return p_max",
"def max_raw_score(self):\n if self._max_raw_score is None:\n self._max_raw_score = self.matrix.max(axis=0).sum()\n return self._max_raw_score",
"def get_highest_score(self):\n highest_scored_topic = models.Topic.objects.order_by('-score').first()\n if not highest_scored_topic:\n return 0 + self.HIGHEST_SCORE_ADDITION\n else:\n return highest_scored_topic.score + self.HIGHEST_SCORE_ADDITION",
"def getBestScore(self):\n return self.bestScore",
"def _calc_max(self):\n return np.max(self.get_points()) + 1",
"def _get_best_score(self):\n a = numpy.array([x['best_scores'][-1] for x in self.results])\n return a",
"def max_combat_score(self):\r\n return self.data.maxCombatPlayerScore",
"def get_global_p_max(self):\n p_maxs = [self.get_p_max(query_id) for query_id in self.data['queriesID']]\n return int(min(p_maxs))",
"def points_max(self):\n if self._games is None:\n raise TypeError('games has not been set')\n return self._games['points'].max()",
"def max_total_score(self):\n return self._max_total_score",
"def GetMaxPoint(self):\n ...",
"def get_max_sid():\r\n # Now I need to get the max sid so I can continue to increment\r\n q_string = \"\"\"\r\n SELECT max(sid)\r\n FROM summary;\r\n \"\"\"\r\n try:\r\n cursor.execute(q_string)\r\n result = cursor.fetchall()\r\n except:\r\n print(\"ERROR: Could not fetch max sid from summary table\")\r\n sys.exit()\r\n \r\n if result[0][0] is None:\r\n result = 0\r\n else:\r\n result = result[0][0]\r\n\r\n return result",
"def score(hand):\n #score list keeps a record of the possible summed totals\n \n #max_score will find the max out of score list and be returned\n score_list = []\n max_score = 0\n \n for dummy_num in hand:\n x = hand.count(dummy_num)\n score_for_num = x * dummy_num\n score_list.append(score_for_num)\n \n #print score_list\n score_list.sort()\n #print \"Max score is\"\n max_score = score_list[-1]\n #print score_list[-1]\n \n return max_score",
"def max_total_rps(self):\n\n max_tested = self.max_tested_total_rps\n return max([max_tested, self.highest_recorded_rps, self.max_tested_rps])",
"def get_max_score(friend_score_list: List[Tuple[str, int]]) -> int:\n\n max = 0\n for t in friend_score_list:\n if t[1] > max:\n max = t[1]\n return max"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the number of PPI scores for the given bacterium and phage protein ids
|
def get_number_ppi_score_by_bact_phage_prots(fk_prot_bac, fk_prot_phage):
sqlObj = _PPIpreview_sql_new()
results = sqlObj.count_ppi_preview_by_ids_ppi(fk_prot_bac, fk_prot_phage)
return results[0][0]
|
[
"def score(PDBfile):\n from pro_angle import find_residue\n from Bio.PDB.PDBParser import PDBParser\n from pro_length import length\n import os\n import string\n\n score = 0 #initialize \n pars = PDBParser(PERMISSIVE = 1)\n struct = pars.get_structure(PDBfile.rstrip('.pdb'), PDBfile)\n model = struct.child_list[0]\n chain = model.child_list[0]\n\n score = float(0)\n size=length(chain)\n\n for res_index in range(1, size-2): #not first or last res\n res = chain.child_list[res_index]\n cur = res.resname\n pre = chain.child_list[res_index-1].resname\n pos = chain.child_list[res_index+1].resname\n \n filename = pre + '_' + cur + '_' + pos + '.scr'\n \n table_file = '/home/marciovm/proteins/bdtrimers/' + string.lower(cur) + '/' + filename\n\n chain_index = chain.child_list[res_index].id[1]\n\n table = load_scores(table_file)\n if table != 0:\n new = score_help(chain, chain_index, table)\n else:\n new = 0\n score = score + new \n try:\n score = (score/size)*1000 #normalize score\n return score\n except ZeroDivisionError:\n print \"calculated protein length 0 -> returning score 0\"\n score = 0\n return score",
"def test_pvalue(self):\n file_path = os.path.normpath('{}/{}'.format(BASE_DIR,\n 'test_data/entrez_sample.txt'))\n f = open(file_path, 'r')\n sample = pandas.read_csv(f, header='infer', sep='\\t')\n sample = sample.set_index(keys=sample.columns[0])\n f.close()\n # prepare signatures\n sig_path = os.path.normpath('{}/{}'.format(BASE_DIR,\n '/test_sigs/tgfb_upDown.txt'))\n sig = open(sig_path, 'r')\n sigs = pandas.read_csv(sig, header='infer', sep='\\t')\n sig.close()\n # subset the data for up and down\n up = sigs[sigs['upDown'] == 'up']\n down = sigs[sigs['upDown'] == 'down']\n # get a list of ids\n up = list(up['EntrezID'])\n down = list(down['EntrezID'])\n p = permutate(sample=sample, n_up=50, n_down=50,\n reps=10)\n scores = score(up_gene=up, down_gene=down,sample=sample)\n\n\n self.assertIsInstance(empiricalpval(permutations=p, score=scores),\n pandas.DataFrame)",
"def calc_protein_mass(P):\n\n P = P.upper()\n total_mass = 0\n\n for protein in P:\n total_mass += AA_MASS.get(protein)\n\n return total_mass",
"def Peirce_score(contingency):\n \n numer_1 = contingency.where(contingency.reference_category == contingency.comparison_category) \\\n .sum(dim=('reference_category','comparison_category'), skipna=True) / \\\n _sum_contingency(contingency, 'total')\n\n numer_2 = (_sum_contingency(contingency, 'reference') * \\\n _sum_contingency(contingency, 'comparison')).sum(dim='category', skipna=True) / \\\n (_sum_contingency(contingency, 'total')**2)\n\n denom = 1 - (_sum_contingency(contingency, 'reference')**2).sum(dim='category', skipna=True) / \\\n (_sum_contingency(contingency, 'total')**2)\n\n return ((numer_1 - numer_2) / denom).rename('Peirce_score')",
"def calculate_perturbation_propensity(protein, source_residues):",
"def birth_paradox(i, p):\n\tparcial = 0.0\n\tfor t in range(i):\n\t\tbirths = random_births(p)\n\t\tif has_duplicates(births):\n\t\t\tparcial += 1\n\tresult = (parcial / i) * 100.0\n\treturn result",
"def pr(text, profile):\n nuc_loc = {'A':0, 'C':1, 'G':2, 'T':3}\n p = 1\n \n for j, nucleotide in enumerate(text):\n\t p *= profile[j][nuc_loc[nucleotide]]\n\n return p",
"def bp_fitch_score(tree, genotypes):\n ts = tree.tree_sequence\n bp_tree = Bio.Phylo.read(io.StringIO(tree.as_newick()), \"newick\")\n records = [\n Bio.SeqRecord.SeqRecord(Bio.Seq.Seq(str(genotypes[j])), id=f\"n{u}\")\n for j, u in enumerate(ts.samples())\n ]\n alignment = Bio.Align.MultipleSeqAlignment(records)\n ps = Bio.Phylo.TreeConstruction.ParsimonyScorer()\n return ps.get_score(bp_tree, alignment)",
"def pI(pkalist,chargelist):\n\t#Check if Only acidic or basic groups are present.\n\tif len(unique(chargelist))== 0: #No pKa groups present.\n\t\tpI = 21\n\telif len(unique(chargelist))== 1: # Only one type is present\n\t\tif chargelist[0] == 0: #Only acidic groups are present\n\t\t\tpI = -42\n\t\telif chargelist[0] == 1: #Only basic groups are present\n\t\t\tpI = 42 #42 The answer to everything ;-)\n\telse:\n\t\t#Find pI by simulation in the pH range 0 to 0\n\t\tchargecol = []\n\t\tfor i in range(0,1400):\n\t\t\tph= i/100.\n\t\t\tchargecol.append(charge(ph,pkalist,chargelist)) #Calculate charge\n\t\tpI = argmin(np_abs(chargecol))/100. # Simply taking the smallest absolute value, and dividing the index with 100\n\t#print \"pI %.1f\"%pI\n\treturn pI",
"def _calc_matching_prob(self):\n if not self.professional:\n return 1",
"def score(self, pairs):\n pass",
"def score_proportion(ingredients, proportion):\n # Wrap intermediate functions\n return multiply_properties_scores(\n sum_properties_scores(\n score_properties_for_proportion(ingredients, proportion)))",
"def get_pscore(self, score):\n\n cache = tools.Cache()\n if self.halfmove % 2 == 0:\n white = 1\n else:\n white = -1\n if cache(re.search(r\"cp ([-\\d]+)\", score)):\n return white * int(cache.output.groups()[0]) / 100\n if cache(re.search(r\"mate ([-\\d]+)\", score)):\n if \"-\" in cache.output.groups()[0]:\n return white * -999\n else:\n return white * 999\n return 0",
"def probs_test(self):\n\t\talignment = '0-0 1-1 2-2 4-3 3-4'\n\t\tsentence = 'a b c d e'\n\t\tlabels = dict(zip([(i,i+1) for i in xrange(5)] + [(0,5),(1,5),(0,3),(3,5),(2,5),(0,2),(1,3)],['0','1','2','4','3','A','B','C','D','E','F','G']))\n\t\ta = Alignments(alignment,sentence)\n\t\tHAT_dict = a.HAT_dict(labels)\n\t\tprobs = {}\n\t\th = HATGrammar(HAT_dict, 'A')\n\t\th.probmass('A', probs = probs)\n\t\tassert probs == {('B', 'G', 'D'): 1, ('A', 'F', 'E'): 1, ('3', 'e'): 1, ('2', 'c'): 1, ('0',): 1, ('2',): 1, ('A', '0', 'B'): 2, ('4',): 1, ('A',): 5, ('C',): 2, ('1', 'b'): 1, ('E',): 1, ('G',): 1, ('E', '2', 'D'): 1, ('C', 'F', '2'): 1, ('B', '1', 'E'): 1, ('C', '0', 'G'): 1, ('1',): 1, ('G', '1', '2'): 1, ('3',): 1, ('F', '0', '1'): 1, ('D', '4', '3'): 1, ('0', 'a'): 1, ('B',): 2, ('D',): 1, ('4', 'd'): 1, ('A', 'C', 'D'): 2, ('F',): 1}\n\t\treturn True",
"def ft_compute_pmi(word1, word2, word_count, pair_count):\n proba_word1 = word_count[word1] / sum(word_count.values())\n proba_word2 = word_count[word2] / sum(word_count.values())\n # we use setdefault to avoid KeyErrors if the key is not in the dictionary\n combined_proba = (\n pair_count.setdefault((word1, word2), 0) +\n pair_count.setdefault((word2, word1), 0)) /\\\n sum(pair_count.values())\n try:\n return log(float(combined_proba) / float(proba_word1 * proba_word2), 2)\n except BaseException:\n return 0",
"def spearman_pval_search(input_data, input_gene_names, db_data, db_gene_names=None, db_gene_data=None):\n if db_gene_names is not None:\n data_gene_ids, db_gene_ids = gene_overlap_indices(input_gene_names, db_gene_names)\n data_subset = input_data[data_gene_ids]\n results = []\n for cell_type_name, data in db_data.items():\n if db_gene_names is not None:\n db_data_subset = data[db_gene_ids]\n elif db_gene_data is not None:\n db_genes = db_gene_data[cell_type_name].astype(str)\n data_gene_ids, db_gene_ids = gene_overlap_indices(input_gene_names, db_genes)\n data_subset = input_data[data_gene_ids]\n db_data_subset = data[db_gene_ids]\n score, pval = scipy.stats.spearmanr(data_subset, db_data_subset)\n results.append((cell_type_name, score, pval))\n results.sort(key=lambda x: x[1], reverse=True)\n return results",
"def run_pairwise_comp(self, ref_df):\n\n #List of lists\n ref_df_peps = self._get_all_peptides_from_df(ref_df) #Extract high affinity peptides\n score_dict_per_len = self._get_protein_dict_per_len(self.filt_dfs, ref_df_peps) #Create scores dictionary\n\n for prot_name in self.original_proteins:\n\n prot_seq = self.original_proteins_df.ProtSeq[self.original_proteins_df.ID == prot_name].values[0]\n ranges = self.original_proteins_df.Ranges[self.original_proteins_df.ID == prot_name].values[0]\n #Ranges: index data about the location of high affinity peptides in protein being used for comparison\n #Ranges_2: make shallow list from deep list of lists\n ranges_2 = [item for sublist in [i[0] for i in ranges] for item in sublist]\n\n matches_range = []\n\n for list_pep in ref_df_peps:\n for single_pep in list_pep:\n\n high_aa_count = 0\n pep_len = len(single_pep)\n count = prot_seq.count(single_pep) #Number of times a single pep occurs in the entire prot seq\n\n if count > 0: #Find locations where matches occur\n it = re.finditer(single_pep, prot_seq)\n\n for i in it:\n present_range = list(range(i.start(), i.end()))\n if set(present_range).issubset(set(ranges_2)):\n high_aa_count += 1\n matches_range.append(present_range) #Retain match location data\n\n self._update_dict_values_per_len(score_dict_per_len, prot_name, count,\n pep_len, high_aa_count, matches_range)\n\n return score_dict_per_len",
"def _calculate_bigram_proba(self) -> None:\n for w1 in self._bigram_model.keys():\n words2 = self._bigram_model[w1]\n total_freq = 0\n for w2, val in words2.items():\n total_freq += val[\"freq\"]\n for w2, val in words2.items():\n val[\"proba\"] = val[\"freq\"] / total_freq\n return",
"def test_pid_prec2():\n d = bivariates['sum']\n pid = PID_Prec(d, [[0], [1]], [2])\n assert pid[((0,), (1,))] == pytest.approx(0.5, abs=1e-3)\n assert pid[((0,),)] == pytest.approx(0.0, abs=1e-3)\n assert pid[((1,),)] == pytest.approx(0.0, abs=1e-3)\n assert pid[((0, 1),)] == pytest.approx(1.0, abs=1e-3)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Remove a PPI_preview given the protein id
|
def remove_PPI_preview_by_protein_id(id_protein):
sqlObj = _PPIpreview_sql_new()
id_couple = sqlObj.remove_PPI_preview_by_prot_id(id_protein)
return id_couple
|
[
"def del_plate(self):\n removed_plate = Plate(self._args.plate_id, plate=self.plates.pop(self._args.plate_id))\n write_file(self._args.plate_file, self.plates)\n print('Successfully removed the plate!')\n if removed_plate.wells:\n display(removed_plate)",
"def remove(self, (id, item)):\n registry = getUtility(IRegistry)\n ids = registry['collective.cropimage.ids']\n ids = [i for i in ids if i['id'] != id]\n registry['collective.cropimage.ids'] = ids",
"def remove_professor(self, p):\n self.professor = None",
"def remove_person(self, id):\n self.data.drop(self.data[self.data.p==id].index, inplace=True)",
"def removePointMarker(self, id):\n del self.pointmarkers[id]",
"def delete_paragraph(paragraph):\n p = paragraph._element\n p.getparent().remove(p)\n p._p = p._element = None",
"def remove_slide(self, index):\n pass",
"def delete_sketch(request, sketch_id):\n try:\n sketch = Sketch.objects.get(pk=sketch_id)\n if sketch.image:\n sketch.image.delete()\n sketch.delete()\n except Sketch.DoesNotExist:\n pass\n\n messages.error(request, \"sketch deleted\")\n return redirect(\"/browse/\")",
"async def remove_field(self):\n if len(self.preview_embed.fields) == 1:\n index = 0\n else:\n index = await UserInput(self.ctx).ask('index of the field to remove', regex=common.Re.INDEX)\n\n # User has cancelled the input\n if index is None:\n return\n\n self.preview_embed.remove_field(int(index))\n\n await self.stack_to_history()",
"def test_bswap_remove_liquidity_preview():\n\n response = client.bswap_remove_liquidity_preview(**complete_params)\n response.should.equal(mock_item)",
"def remove(item):\n _viewport.remove(item)",
"def remove(self):\n index = self.notebook.page_num(self)\n self.notebook.remove_page(index)",
"def check_delete(self, preview, filename):\n try:\n with storage.open(filename, 'w') as f:\n f.write('sample data\\n')\n assert storage.exists(filename)\n preview.delete()\n assert not storage.exists(filename)\n finally:\n if storage.exists(filename):\n storage.delete(filename)",
"def delete_presentation(self, presentation_id):\r\n\r\n logging.info(\"Deleting Mediasite presentation: \" + presentation_id)\r\n\r\n #request mediasite folder information on the \"Mediasite Users\" folder\r\n result = self.mediasite.api_client.request(\"delete\", \"Presentations('presentation_id')\")\r\n\r\n if self.mediasite.experienced_request_errors(result):\r\n return result\r\n else:\r\n #if there is an error, log it\r\n if \"odata.error\" in result:\r\n logging.error(result[\"odata.error\"][\"code\"] + \": \" + result[\"odata.error\"][\"message\"][\"value\"])\r\n\r\n return result",
"async def remove_profilepic(delpfp):\n await delpfp.edit(\"`Processing...`\")\n group = delpfp.text[8:] \n if delpfp.is_channel and not delpfp.is_group:\n await delpfp.edit(\"`delpfp Commad isn't permitted on channels`\")\n return\n if group == 'all':\n lim = 0\n elif group.isdigit():\n lim = int(group)\n else:\n lim = 1\n\n pfplist = await delpfp.client(\n GetUserPhotosRequest(user_id=delpfp.from_id,\n offset=0,\n max_id=0,\n limit=lim))\n input_photos = []\n for sep in pfplist.photos:\n input_photos.append(\n InputPhoto(id=sep.id,\n access_hash=sep.access_hash,\n file_reference=sep.file_reference))\n await delpfp.client(DeletePhotosRequest(id=input_photos))\n await delpfp.edit(\n f\"`Successfully deleted {len(input_photos)} profile picture(s).`\")",
"def delete(self, id):\r\n return eliminar_color(id)",
"def delete_example(self, id):\n if id in self.examples:\n del self.examples[id]",
"def stop_preview_stream(self) -> GoProResp:",
"def remove(tag):\n ierr = c_int()\n lib.gmshViewRemove(\n c_int(tag),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshViewRemove returned non-zero error code: \",\n ierr.value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Replace an image's pixels in a specific area
|
def replace(self, img, dst_clr):
for i in range(80, 340): #x1 x2
for j in range(500, 800): #y1 y2
img[j][i] = dst_clr
return img
|
[
"def replace_fast(self, img, dst_clr):\n img[535:750, :290, :] = dst_clr #h(y) w(x) c\n img[575:705, 900:, :] = dst_clr\n return img",
"def rescaled_image():",
"def replace(im,colo1,colo2):\n try:\n (x0,y0,x1,y1) = im.getbbox()\n except:\n print \"you need to give me a image handle\"\n for x in range(x0,x1):\n for y in range(y0,y1):\n r,g,b = im.getpixel((x,y))\n if r == colo1[0] and g == colo1[1] and b == colo1[2]:\n im.putpixel((x,y),colo2)\n\n return im",
"def _replace(img, old_color, new_color):\n img_data = img.load()\n for y in range(img.size[1]):\n for x in range(img.size[0]):\n if img_data[x, y] == old_color:\n img_data[x, y] = new_color",
"def set_pixel(x, y, value):\n st7567.set_pixel(x, y, value)",
"def replace2(im,colo1,colo2):\n try:\n (x0,y0,x1,y1) = im.getbbox()\n except:\n print \"you need to give me a image handle\"\n for x in range(x0,x1):\n for y in range(y0,y1):\n r,g,b = im.getpixel((x,y))\n if r != colo1[0] and g != colo1[1] and b != colo1[2]:\n im.putpixel((x,y),colo2)\n\n return im",
"def setPixels(*args, **kwargs):\n \n pass",
"def inpaint(img, missing_value=0):\r\n img = cv2.copyMakeBorder(img, 1, 1, 1, 1, cv2.BORDER_DEFAULT)\r\n mask = (img == missing_value).astype(np.uint8)\r\n\r\n # Scale to keep as float, but has to be in bounds -1:1 to keep opencv happy.\r\n scale = np.abs(img).max()\r\n img = img.astype(np.float32) / scale # Has to be float32, 64 not supported.\r\n img = cv2.inpaint(img, mask, 1, cv2.INPAINT_NS)\r\n\r\n # Back to original size and value range.\r\n img = img[1:-1, 1:-1]\r\n img = img * scale\r\n\r\n return img",
"def fill(self, value):\n self.image.fill(value)",
"def update_tile_pixel(self, idx, x, y, color):\n self.modified = True\n self.tile_data[idx].set(x,y,color)",
"def exchangerbg(four_channel_img, trhee_channel_img, coord=None):\n mask2 = four_channel_img.copy()\n four_channel_img2=four_channel_img.copy()\n four_channel_img2.paste(trhee_channel_img, coord)#into 4 channel\n array_m = np.array(mask2)[:,:,3]\n array_b = np.array(four_channel_img2)\n array_b[:,:,3] = array_m\n final = Image.fromarray(array_b)\n return final",
"def center_image(original, new_width, new_height):\n new = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True,\n original.get_bits_per_sample(), new_width, new_height)\n new.fill(0x000000)\n offset_x = (new_width / 2) - (original.get_width() / 2)\n offset_y = (new_height / 2) - (original.get_height() / 2)\n original.copy_area(0, 0, original.get_width(), original.get_height(),\n new, offset_x, offset_y)\n return new",
"def zoom(image):",
"def _clear_pixel(self):\r\n self.ap.set_pixel(self.x, self.y, [0, 0, 0])",
"def mark_pixels(image, lower, upper):\n\tcopy = cv.CloneMat(image)\n\n\tfor i in range(image.rows):\n\t\tfor j in range(image.cols):\n\t\t\tvalue = cv.Get2D(image, i, j)\n\t\t\t#print i,j,value\n\t\t\tif lower[0] < value[0] and lower[1] < value[1] and lower[2] < value[2] and upper[0] > value[0] and upper[1] > value[1] and upper[2] > value[2]:\n\t\t\t\tcv.Set2D(copy, i, j, cv.RGB(0,0,0))\t\t\t\n\t\t\telse:\n\t\t\t\tcv.Set2D(copy, i, j, cv.RGB(255,255,255))\t\t\t\n\treturn copy",
"def PutPixle(win, x, y):\r\n pt = Point(x, y)\r\n pt.draw(win)",
"def set_pixel(self, x : int, y : int, value : int):\n startByte, endByte, startBit, endBit = self.find_pixel(x, y)\n \n self.data[startByte:endByte] = set_bits(\n n = int.from_bytes(self.data[startByte:endByte], byteorder = 'little'),\n start = startBit,\n end = endBit,\n value = value & ((2 ** self.pixelBitLength) - 1)\n ).to_bytes(length = endByte - startByte, byteorder = 'little')",
"def add_point(self, x, y):\n try:\n self.image[int(y)][int(x)][0] = 0\n self.image[int(y)][int(x)][2] = 255\n except IndexError:\n pass\n # raise Exception('Invalid coordinates x={0} y={1}'.format(x, y))",
"def copy_image(self): \r\n\r\n for i in range(0, self.width):\r\n for j in range(0, self.height): \r\n self.image_old[i, j] = self.image[i, j]\r\n \r\n return self.image_old"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Quickly replace an image's pixels in a specific area
|
def replace_fast(self, img, dst_clr):
img[535:750, :290, :] = dst_clr #h(y) w(x) c
img[575:705, 900:, :] = dst_clr
return img
|
[
"def replace(self, img, dst_clr):\n for i in range(80, 340): #x1 x2\n for j in range(500, 800): #y1 y2\n img[j][i] = dst_clr\n return img",
"def _replace(img, old_color, new_color):\n img_data = img.load()\n for y in range(img.size[1]):\n for x in range(img.size[0]):\n if img_data[x, y] == old_color:\n img_data[x, y] = new_color",
"def rescaled_image():",
"def replace(im,colo1,colo2):\n try:\n (x0,y0,x1,y1) = im.getbbox()\n except:\n print \"you need to give me a image handle\"\n for x in range(x0,x1):\n for y in range(y0,y1):\n r,g,b = im.getpixel((x,y))\n if r == colo1[0] and g == colo1[1] and b == colo1[2]:\n im.putpixel((x,y),colo2)\n\n return im",
"def update_tile_pixel(self, idx, x, y, color):\n self.modified = True\n self.tile_data[idx].set(x,y,color)",
"def replace2(im,colo1,colo2):\n try:\n (x0,y0,x1,y1) = im.getbbox()\n except:\n print \"you need to give me a image handle\"\n for x in range(x0,x1):\n for y in range(y0,y1):\n r,g,b = im.getpixel((x,y))\n if r != colo1[0] and g != colo1[1] and b != colo1[2]:\n im.putpixel((x,y),colo2)\n\n return im",
"def block(img):\n # FIXME: grid searchowac ten fragment?\n img = exposure.equalize_adapthist(img)\n img = exposure.adjust_gamma(img)\n img = unsharp_mask(img, radius=3, amount=2)\n img = ndimage.uniform_filter(img, size=2)\n return (img * 255).astype(np.uint8)",
"def badPixelRemove(image, dq):\n meanImage = (np.roll(image, 1, axis = 0) + np.roll(image, -1, axis = 0) + np.roll(image, 1, axis = 1) + np.roll(image, -1, axis = 1)) #array that the values are the\n #dqbin = ['{0:016b}'.format(i) for i in dq.flat]\n #isBad = np.array([True if dqstr[-5] == '1' or dqstr[-6] == '1' else False for dqstr in dqbin]).reshape(np.shape(dq))\n image[dq == 40] = meanImage[dq == 40]\n return image",
"def set_pixel(x, y, value):\n st7567.set_pixel(x, y, value)",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n #test function is no longer used\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def inpaint(img, missing_value=0):\r\n img = cv2.copyMakeBorder(img, 1, 1, 1, 1, cv2.BORDER_DEFAULT)\r\n mask = (img == missing_value).astype(np.uint8)\r\n\r\n # Scale to keep as float, but has to be in bounds -1:1 to keep opencv happy.\r\n scale = np.abs(img).max()\r\n img = img.astype(np.float32) / scale # Has to be float32, 64 not supported.\r\n img = cv2.inpaint(img, mask, 1, cv2.INPAINT_NS)\r\n\r\n # Back to original size and value range.\r\n img = img[1:-1, 1:-1]\r\n img = img * scale\r\n\r\n return img",
"def PixelateVariant(img, pixelSize = 1):\r\n\r\n w, h = img.size\r\n if pixelSize < 1: pixelSize = 1\r\n\r\n new = Image.new(\"RGB\", (w,h))\r\n output = new.load()\r\n \r\n widthRemainder = w % pixelSize\r\n heightRemainder = h % pixelSize\r\n\r\n pixels = np.array(img)\r\n\r\n for x in range(pixelSize, w + widthRemainder, pixelSize * 2):\r\n for y in range(pixelSize, h + heightRemainder, pixelSize * 2):\r\n r, g, b = img.getpixel((x, y))\r\n neighbors = []\r\n for xx in range(-pixelSize, pixelSize + 1):\r\n for yy in range(-pixelSize, pixelSize + 1):\r\n if x + xx < 0 or x + xx >= w or y + yy < 0 or y + yy >= h: continue\r\n else:\r\n neighbors.append((y + yy, x + xx))\r\n \r\n for i in neighbors:\r\n output[i[1], i[0]] = (int(r), int(g), int(b))\r\n\r\n return new",
"def pixelMove():\n pass",
"def setNoBlue(img):\n def fn(pixel):\n pixel[BLUE] = 0\n return pixel\n return mapPixels(img, fn)",
"def setPixels(*args, **kwargs):\n \n pass",
"def masked_interpolation(self, method='cubic'):\n bad_pixel_mask = self.bad_pixel_map > 0\n x = np.arange(0, self.image.shape[1])\n y = np.arange(0, self.image.shape[0])\n self.image[bad_pixel_mask] = np.nan\n self.image = np.ma.masked_invalid(self.image)\n xx, yy = np.meshgrid(x, y)\n x1 = xx[~self.image.mask]\n y1 = yy[~self.image.mask]\n newarr = self.image[~self.image.mask]\n assert isinstance(x1, np.ndarray)\n assert isinstance(y1, np.ndarray)\n self.image = interpolate.griddata((x1, y1), newarr.ravel(), (xx, yy), method=method)",
"def apply_on_image(image, patch, idx, function):\n patch = patch[0] // 2, patch[1] // 2\n for (v, u) in zip(idx[0], idx[1]):\n if (v + patch[0] > image.shape[0]) or (v - patch[0]) < 0:\n continue\n if (u + patch[1] > image.shape[1]) or (u - patch[1]) < 0:\n continue\n image[v - patch[0]:v + patch[0], u - patch[1]:u + patch[1]] = function(\n image[v - patch[0]:v + patch[0], u - patch[1]:u + patch[1]])\n\n return image",
"def copy_image(self): \r\n\r\n for i in range(0, self.width):\r\n for j in range(0, self.height): \r\n self.image_old[i, j] = self.image[i, j]\r\n \r\n return self.image_old",
"def updatePixels():\n new = createImage(width,height,'RGBA')\n color = _getColor((200))\n glClearColor (*color)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n if npy:\n new.pixels = numpy.array(screen.pixels)\n new.updatePixels()\n else: \n for i in range(width*height): new.pixels[i] = screen.pixels[i]\n image(new,0,0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write images after pixel replacement
|
def read_write_img(self):
    for file in os.listdir(self.path):
        filelist = self.path + file
        img = cv2.imread(filelist)
        dst_img = self.replace_fast(img, (0, 0, 0))
        # cv2.imwrite(out_path + file[:-4] + '.jpg', dst_img)
        plt.subplot(121), plt.imshow(img), plt.title('initial')
        plt.subplot(122), plt.imshow(dst_img), plt.title('result')
        plt.show()
|
[
"def new_image(image):\n os.replace(image,PICTURES_IN + image)\n return",
"def write_images(self):\n while self.cache:\n # pop the first and write it out\n fn, image = self.cache.pop(0)\n tifffile.imwrite(fn, image)",
"def write_images():\n dataset = NTU_RGB_D(DATASETS_PATH, filetype='pt', preprocess=False)\n dataset.save_images(DATASETS_PATH + 'raw/all/')",
"def rename_imgs(path):",
"def write_debug(img, name, sample):\n cv2.imwrite(f\"output/{sample}-{name}.jpg\", img)",
"def treat_image_append(namefile) :\n tempnameLocation = os.getcwd()+u'/'+namefile\n MetaLex.treatImages.append(tempnameLocation)",
"def reload_image_folder():",
"def save_images(figs, save_path):\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n\n for fig in figs:\n filename = fig.layout.title.text.lower().replace(' ','_')\n file = save_path + '/' + filename + '.webp'\n\n fig.write_image(file)\n\n im = Image.open(file)\n im.show()",
"def create_output_image(img, instances):\n pass",
"def saving_only_annotations(path,img ,xmin, xmax, ymin, ymax,name_damage, img_name):\n name = (path + '/'+ name_damage+\"_\"+img_name+ \"adionis_.jpg\")\n annotation = img[ymin:ymax, xmin:xmax]\n cv2.imwrite(name, annotation)\n print(\"saving image\")",
"def fusionImages(pathBackground,pathOverlay,pathFusioned):\n til = Image.new(\"RGB\",(514,257))\n background = Image.open(pathBackground)\n overlay = Image.open(pathOverlay)\n til.paste(background)\n til.paste(overlay, mask=overlay)\n\n \n til.save(pathFusioned,\"PNG\")\n\n return til",
"def write_image(img, output):\n cv2.imwrite(output, img)",
"def jpg_to_png(self) -> None:\n for img in os.listdir(self.img_path):\n if os.path.splitext(img)[1] == '.jpg':\n img_path = self.img_path + '/' + img\n frame = cv2.imread(img_path)\n index = int(img.replace('.jpg', ''))\n cv2.imwrite(f'{self.img_path}/{os.path.dirname(img)}/image_{index:05d}.png', frame)\n os.remove(img_path)",
"def save_transform_and_image(transform, fixed_image, moving_image, outputfile_prefix): \n resample = sitk.ResampleImageFilter()\n resample.SetReferenceImage(fixed_image)\n \n # SimpleITK supports several interpolation options, we go with the simplest that gives reasonable results. \n resample.SetInterpolator(sitk.sitkLinear) \n resample.SetTransform(transform)\n sitk.WriteImage(resample.Execute(moving_image), outputfile_prefix+'.mha')\n sitk.WriteTransform(transform, outputfile_prefix+'.tfm')",
"def saveImage(self):\r\n files = listdir(self.out_dir)\r\n filename = \"slicer-{}-output\".format(self.slice_mode)\r\n\r\n counter = 1\r\n while filename + self.props.extension in files:\r\n filename = \"slicer-\" + self.slice_mode + \"-output\" + str(counter)\r\n counter += 1\r\n\r\n fullname = path.join(self.out_dir, filename + self.props.extension)\r\n self.final_img.save(fullname)",
"def update_image_paths(image_map):\n for file in os.listdir(join(ROOT, '_posts')):\n print(f\"Updating image paths in {file}\")\n post_name = splitext(file)[0]\n path = join(ROOT, '_posts', file)\n with open(path) as in_file:\n content = in_file.read()\n images = [i for i, p in image_map.items() if p == post_name]\n for image in images:\n content = content.replace(f'image: {image}',\n f'image: /assets/{post_name}/{image}')\n with open(path, 'w') as out_file:\n out_file.write(content)",
"def WriteImage(self, filename):\r\n cv2.imwrite(filename,self.img)",
"def save_img_list(self, output):\n with open(output, \"w+\") as out:\n out.write(\"\\n\".join(self.imgs))",
"def change_extension_of_image(images_annotations):\n counter = 0\n for image in images_annotations:\n # we need to split image path because it has also folder namd apped with it.\n image_name = image.image.path_name.split('/')[2]\n # '', 'images', '8d02117d-6c71-4e47-b50a-6cc8d5eb1d55.png']\n img = cv2.imread(images_path + image_name)\n if img is None:\n image_name = image_name.split('.')[0] + \".jpg\"\n img = cv2.imread(images_path + image_name)\n # remove first image and then save other because both .png and .jpg file remains in\n # the folder\n os.remove(images_path + image_name)\n # append .png extension on all dataset.\n image_name = image_name.split('.')[0] + \".png\"\n cv2.imwrite(images_path + image_name, img)\n if counter % 20 == 0:\n print(counter)\n counter += 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a mapping of elements to reference keys. A list is returned, with each element being a dictionary with entries 'refdata', containing data for (possibly) multiple references, and 'elements', which is a list of element Z numbers that those references apply to.
|
def compact_references(basis_dict, reffile_path):
ref_data = io.read_references(reffile_path)
element_ref_map = []
# Create a dictionary of elements -> refdata
for el, eldata in basis_dict['basisSetElements'].items():
ref = sorted(eldata['elementReferences'])
for x in element_ref_map:
if x['refkeys'] == ref:
x['elements'].append(el)
break
else:
element_ref_map.append({'refkeys': ref, 'elements': [el]})
for item in element_ref_map:
item['refdata'] = [ref_data[k] for k in item['refkeys']]
item.pop('refkeys')
return element_ref_map
|
[
"def extract_references(elem):\n wos_id = extract_wos_id(elem)\n references = elem.findall('./static_data/fullrecord_metadata/references/reference')\n ref_list = list()\n for reference in references:\n ref_dict = dict()\n for tag in ['uid', 'citedAuthor', 'year', 'page',\n 'volume', 'citedTitle', 'citedWork', 'doi']:\n ref_tag = reference.find(tag)\n if ref_tag is not None:\n ref_dict[tag] = ref_tag.text\n else:\n ref_dict[tag] = ''\n ref_dict.update({'wos_id': wos_id})\n ref_list.append(ref_dict)\n return ref_list",
"def get_reference(reference:list):\n\treturn {email:name for name,email in reference}",
"def to_elements(element_references, doc=revit.doc):\r\n element_references = to_iterable(element_references)\r\n return [to_element(e_ref) for e_ref in element_references]",
"def to_element_ids(element_references):\r\n element_references = to_iterable(element_references)\r\n return [to_element_id(e_ref) for e_ref in element_references]",
"def get_key_to_references_mapping(keys, references):\n key_to_reference = {key: set() for key in keys}\n\n for ref in references:\n if ref in {'default', 'fallback'}:\n continue\n\n expr = get_regex_for_reference(ref)\n matching_keys = [key for key in key_to_reference if expr.match(key)]\n if not matching_keys:\n raise NetworkValidationError(\n \"{} does not match any keys. Possible keys are: {}\".format(\n ref, sorted(key_to_reference.keys())))\n\n for key in matching_keys:\n key_to_reference[key].add(ref)\n\n if 'default' in references:\n for key, refs in key_to_reference.items():\n if not refs:\n refs.add('default')\n\n return key_to_reference",
"def references(self):\n refs = []\n for refel in self.tree.findall('reference'):\n ref = {'citation': refel.text}\n if 'order' in refel.attrib:\n ref['number'] = refel.attrib['order']\n refs.append(ref)\n return refs",
"def __make_nodes_elements_mapping(data):\n\n nodes = {}\n for e in data[\"elements\"]:\n for node in e[\"nodes\"]:\n if node in nodes:\n nodes[node].append(e[\"id\"])\n else:\n nodes[node] = [e[\"id\"]]\n\n return nodes",
"def _get_data_references_cache(self) -> Dict[str, List[BatchDefinition] | None]:\n if len(self._data_references_cache) == 0:\n # Map data_references to batch_definitions.\n for data_reference in self.get_data_references():\n mapped_batch_definition_list: List[\n BatchDefinition\n ] | None = self._map_data_reference_string_to_batch_definition_list_using_regex(\n data_reference=data_reference\n )\n self._data_references_cache[\n data_reference\n ] = mapped_batch_definition_list\n\n return self._data_references_cache",
"def references(self):\n out = []\n cdef INDEX_T i\n for i in range(self.count):\n out.append(self._references[i])\n return out",
"def cross_references(self):\n out = []\n cdef INDEX_T i\n for i in range(self.max_reference+1):\n out.append( self._crossref[i] )\n return out",
"def __mixing_references_on_n(self, list_refs):\n all = {x: [0, ''] for x in set.union(*map(set, list_refs))}\n for ref in list_refs:\n for word, origin in ref.items():\n all[word][0] += 1\n all[word][1] = origin\n return {word: origin for word, (count, origin) in all.items() if count >= self.mix}",
"def references(self):\n out = []\n fields = 'position id doi title authors sourcetitle publicationyear '\\\n 'volume issue first last text fulltext'\n ref = namedtuple('Reference', fields)\n items = self._tail.get('bibliography', {}).get('reference', [])\n if not isinstance(items, list):\n items = [items]\n for item in items:\n info = item['ref-info']\n volisspag = info.get('ref-volisspag', {})\n try:\n auth = info['ref-authors']['author']\n if not isinstance(auth, list):\n auth = [auth]\n authors = [', '.join([d['ce:surname'], d['ce:initials']])\n for d in auth]\n except KeyError: # No authors given\n authors = None\n ids = info['refd-itemidlist']['itemid']\n if not isinstance(ids, list):\n ids = [ids]\n try:\n doi = [d['$'] for d in ids if d['@idtype'] == 'DOI'][0]\n except IndexError:\n doi = None\n new = ref(position=item.get('@id'),\n id=[d['$'] for d in ids if d['@idtype'] == 'SGR'][0],\n doi=doi, authors=authors,\n title=info.get('ref-title', {}).get('ref-titletext'),\n sourcetitle=info.get('ref-sourcetitle'),\n publicationyear=info.get('ref-publicationyear', {}).get('@first'),\n volume=volisspag.get('voliss', {}).get('@volume'),\n issue=volisspag.get('voliss', {}).get('@issue'),\n first=volisspag.get('pagerange', {}).get('@first'),\n last=volisspag.get('pagerange', {}).get('@last'),\n text=info.get('ref-text'),\n fulltext=item.get('ref-fulltext'))\n out.append(new)\n return out or None",
"def __get_cached_elements(self, elements: List[Element],\n connection: Optional[redis.StrictRedis] = None) -> Dict[str, Element]:\n if connection is None:\n connection = get_redis_connection()\n\n redis_elements = {}\n for element in elements:\n element_dict = connection.hgetall(f'{self.__redis_name}:elements:{element.get_id()}')\n if element_dict:\n redis_element = Element.from_dictionary(self.__platform_access, element_dict)\n redis_elements[redis_element.get_id()] = redis_element\n return redis_elements",
"def getReferencePairs(referenceCollection, collectionOfInterest, objName):\n\n # start_time = timeit.default_timer()\n referencePair = []\n matched_indices = hgcalHelpers.getClosestObjectIndices(referenceCollection[['eta', 'phi']], collectionOfInterest[['eta', 'phi']], deltaR=deltaRMaxRef)\n for idx1, idx2 in matched_indices.iteritems():\n objEnergy = 0\n refEnergy = 0\n try:\n refEnergy = referenceCollection.iloc[idx1].energy\n if (objName == \"pfcluster\"):\n objEnergy = collectionOfInterest.iloc[idx2].correctedEnergy\n else:\n objEnergy = collectionOfInterest.iloc[idx2].energy\n except IndexError:\n print \"IndexError\"\n print referenceCollection\n print collectionOfInterest\n else:\n if objEnergy > refEnergy * relativeFractionRef:\n referencePair.append((referenceCollection.iloc[idx1], collectionOfInterest.iloc[idx2]))\n\n # elapsed = timeit.default_timer() - start_time\n # print \"Time:\", elapsed\n return referencePair",
"def refname2key(idf, refname):\n return [item[0] for item in getallobjlists(idf, refname)]",
"def extract_references(self, references):\n\n reference_info = []\n for label in self.nonauthor_references:\n for reference in references.resource_references(self)[label]:\n reference_info.append(\n dict(\n docname=reference.docname,\n label=reference.props.label\n )\n )\n return reference_info",
"def get_references(self, article):\n art_page = article.page\n rlist = art_page.find('ul', {'class': 'rlist separator'}).find_all('li')\n refs = dict()\n for k, r in enumerate(rlist):\n refs[k] = dict()\n \n urls = [i.get('href') for i in r.find_all('a')]\n year = r.find('span', {'class': 'references__year'})\n title = r.find('span', {'class': 'references__article-title'})\n journal = r.find('span', {'class': 'references__source'})\n volume = r.find('i')\n authors = r.find('span', {'class': 'references__authors'})\n \n refs[k]['url'] = [i for i in urls if 'http' in i]\n refs[k]['year'] = int(year.get_text()[:4]) if year else -1\n refs[k]['title'] = title.get_text() if title else \"\"\n refs[k]['volume'] = -1\n if volume:\n if volume.get_text().isnumeric():\n refs[k]['volume'] = int(volume.get_text()) \n refs[k]['journal'] = journal.get_text() if journal else \"\"\n refs[k]['authors'] = authors.get_text().split(', ') if authors else []\n return refs",
"def create_reference_dict(synonyms):\n reference_dict = {}\n\n for word_set in synonyms:\n first_value, second_value = word_set\n\n if first_value in reference_dict:\n reference_dict[first_value].append(second_value)\n else:\n reference_dict[first_value] = [second_value]\n\n if second_value in reference_dict:\n reference_dict[second_value].append(first_value)\n else:\n reference_dict[second_value] = [first_value]\n\n return reference_dict",
"def align_records(records: Dict[str, List[Reference]]) \\\n -> List[List[Tuple[str, Reference]]]:\n # If only one extraction succeeded, there is nothing to do.\n if len(records) == 1:\n extractor = list(records.keys())[0]\n return [[(extractor, ref)] for ref in list(records.values())[0]]\n\n def _jacard_max(r0: Reference, rlist: List[Reference]) -> float:\n # calculate the maximum jacard score between r0 and the list rlist\n return max([jacard(digest(r0), digest(r1)) for r1 in rlist])\n\n cutoff = similarity_cutoff(records)\n # pairwise integrate the lists together, keeping the output list as the\n # master record as we go. 0+1 -> 01, 01+2 -> 012 ...\n # extractors = list(records.keys())\n\n # Start with the largest extraction.\n extractors = [extractor for extractor, records in sorted(records.items(),\n key=lambda extraction: -len(extraction[1]))]\n output = [[(extractors[0], rec)] for rec in records[extractors[0]]]\n for ikey, extractor in islice(enumerate(extractors), 1, len(records)):\n used: List[int] = []\n\n record = records[extractor]\n for iref, ref in enumerate(record):\n # Create a list of possible indices in the output onto which we\n # will map the current reference. only keep those above the cutoff.\n # keep track of the indices to only use each once\n # FIXME -- maybe we don't want to do greedy descent (instead global\n # optimization of scores for all references at once, but that is\n # combinatorial and needs to have careful algorithms)\n scores = []\n for iout, out in enumerate(output):\n score = _jacard_max(ref, [l[1] for l in out])\n if score <= cutoff:\n continue\n scores.append((score, iout))\n\n scores = [\n (score, index) for score, index in reversed(sorted(scores))\n if index not in used\n ]\n\n entry = [(extractor, ref)]\n if scores:\n score, index = scores[0]\n # used.append(index)\n if extractor not in list(zip(*output[index]))[0]:\n output[index] = output[index] + entry\n else:\n output.append(entry)\n else:\n output.append(entry)\n return output"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate an isocurve from 2D data using the marching squares algorithm.
|
def isocurve(data, level, connected=False, extend_to_edge=False):
# This function is SLOW; plenty of room for optimization here.
if extend_to_edge:
d2 = np.empty((data.shape[0]+2, data.shape[1]+2), dtype=data.dtype)
d2[1:-1, 1:-1] = data
d2[0, 1:-1] = data[0]
d2[-1, 1:-1] = data[-1]
d2[1:-1, 0] = data[:, 0]
d2[1:-1, -1] = data[:, -1]
d2[0, 0] = d2[0, 1]
d2[0, -1] = d2[1, -1]
d2[-1, 0] = d2[-1, 1]
d2[-1, -1] = d2[-1, -2]
data = d2
side_table = [
[],
[0, 1],
[1, 2],
[0, 2],
[0, 3],
[1, 3],
[0, 1, 2, 3],
[2, 3],
[2, 3],
[0, 1, 2, 3],
[1, 3],
[0, 3],
[0, 2],
[1, 2],
[0, 1],
[]
]
edge_key = [
[(0, 1), (0, 0)],
[(0, 0), (1, 0)],
[(1, 0), (1, 1)],
[(1, 1), (0, 1)]
]
level = float(level)
lines = []
# mark everything below the isosurface level
mask = data < level
## make four sub-fields and compute indexes for grid cells
index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)
fields = np.empty((2, 2), dtype=object)
slices = [slice(0, -1), slice(1, None)]
for i in [0, 1]:
for j in [0, 1]:
fields[i, j] = mask[slices[i], slices[j]]
vertIndex = i+2*j
index += (fields[i, j] * 2**vertIndex).astype(np.ubyte)
# add lines
for i in range(index.shape[0]): # data x-axis
for j in range(index.shape[1]): # data y-axis
sides = side_table[index[i, j]]
for l in range(0, len(sides), 2): # faces for this grid cell
edges = sides[l:l+2]
pts = []
for m in [0, 1]: # points in this face
# p1, p2 are points at either side of an edge
p1 = edge_key[edges[m]][0]
p2 = edge_key[edges[m]][1]
# v1 and v2 are the values at p1 and p2
v1 = data[i+p1[0], j+p1[1]]
v2 = data[i+p2[0], j+p2[1]]
f = (level-v1) / (v2-v1)
fi = 1.0 - f
# interpolate between corners
p = (p1[0]*fi + p2[0]*f + i + 0.5,
p1[1]*fi + p2[1]*f + j + 0.5)
if extend_to_edge:
# check bounds
p = (min(data.shape[0]-2, max(0, p[0]-1)),
min(data.shape[1]-2, max(0, p[1]-1)))
if connected:
gridKey = (i + (1 if edges[m] == 2 else 0),
j + (1 if edges[m] == 3 else 0),
edges[m] % 2)
# give the actual position and a key identifying the
# grid location (for connecting segments)
pts.append((p, gridKey))
else:
pts.append(p)
lines.append(pts)
if not connected:
return lines
# turn disjoint list of segments into continuous lines
points = {} # maps each point to its connections
for a, b in lines:
if a[1] not in points:
points[a[1]] = []
points[a[1]].append([a, b])
if b[1] not in points:
points[b[1]] = []
points[b[1]].append([b, a])
# rearrange into chains
for k in list(points.keys()):
try:
chains = points[k]
except KeyError: # already used this point elsewhere
continue
for chain in chains:
x = None
while True:
if x == chain[-1][1]:
break # nothing left to do on this chain
x = chain[-1][1]
if x == k:
# chain has looped; we're done and can ignore the opposite
# chain
break
y = chain[-2][1]
connects = points[x]
for conn in connects[:]:
if conn[1][1] != y:
chain.extend(conn[1:])
del points[x]
if chain[0][1] == chain[-1][1]:
# looped chain; no need to continue the other direction
chains.pop()
break
# extract point locations
lines = []
for chain in points.values():
if len(chain) == 2:
# join together ends of chain
chain = chain[1][1:][::-1] + chain[0]
else:
chain = chain[0]
lines.append([pt[0] for pt in chain])
return lines # a list of pairs of points
|
[
"def isocurve(data, level, connected=False, extendToEdge=False, path=False): \n \n if path is True:\n connected = True\n \n if extendToEdge:\n d2 = np.empty((data.shape[0]+2, data.shape[1]+2), dtype=data.dtype)\n d2[1:-1, 1:-1] = data\n d2[0, 1:-1] = data[0]\n d2[-1, 1:-1] = data[-1]\n d2[1:-1, 0] = data[:, 0]\n d2[1:-1, -1] = data[:, -1]\n d2[0,0] = d2[0,1]\n d2[0,-1] = d2[1,-1]\n d2[-1,0] = d2[-1,1]\n d2[-1,-1] = d2[-1,-2]\n data = d2\n \n sideTable = [\n [],\n [0,1],\n [1,2],\n [0,2],\n [0,3],\n [1,3],\n [0,1,2,3],\n [2,3],\n [2,3],\n [0,1,2,3],\n [1,3],\n [0,3],\n [0,2],\n [1,2],\n [0,1],\n []\n ]\n \n edgeKey=[\n [(0,1), (0,0)],\n [(0,0), (1,0)],\n [(1,0), (1,1)],\n [(1,1), (0,1)]\n ]\n \n \n lines = []\n \n ## mark everything below the isosurface level\n mask = data < level\n \n ### make four sub-fields and compute indexes for grid cells\n index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)\n fields = np.empty((2,2), dtype=object)\n slices = [slice(0,-1), slice(1,None)]\n for i in [0,1]:\n for j in [0,1]:\n fields[i,j] = mask[slices[i], slices[j]]\n #vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourk's vertex numbering scheme\n vertIndex = i+2*j\n #print i,j,k,\" : \", fields[i,j,k], 2**vertIndex\n np.add(index, fields[i,j] * 2**vertIndex, out=index, casting='unsafe')\n #print index\n #print index\n \n ## add lines\n for i in range(index.shape[0]): # data x-axis\n for j in range(index.shape[1]): # data y-axis \n sides = sideTable[index[i,j]]\n for l in range(0, len(sides), 2): ## faces for this grid cell\n edges = sides[l:l+2]\n pts = []\n for m in [0,1]: # points in this face\n p1 = edgeKey[edges[m]][0] # p1, p2 are points at either side of an edge\n p2 = edgeKey[edges[m]][1]\n v1 = data[i+p1[0], j+p1[1]] # v1 and v2 are the values at p1 and p2\n v2 = data[i+p2[0], j+p2[1]]\n f = (level-v1) / (v2-v1)\n fi = 1.0 - f\n p = ( ## interpolate between corners\n p1[0]*fi + p2[0]*f + i + 0.5, \n p1[1]*fi + p2[1]*f + j + 0.5\n )\n if extendToEdge:\n ## check bounds\n p = (\n min(data.shape[0]-2, max(0, p[0]-1)),\n min(data.shape[1]-2, max(0, p[1]-1)), \n )\n if connected:\n gridKey = i + (1 if edges[m]==2 else 0), j + (1 if edges[m]==3 else 0), edges[m]%2\n pts.append((p, gridKey)) ## give the actual position and a key identifying the grid location (for connecting segments)\n else:\n pts.append(p)\n \n lines.append(pts)\n\n if not connected:\n return lines\n \n ## turn disjoint list of segments into continuous lines\n\n #lines = [[2,5], [5,4], [3,4], [1,3], [6,7], [7,8], [8,6], [11,12], [12,15], [11,13], [13,14]]\n #lines = [[(float(a), a), (float(b), b)] for a,b in lines]\n points = {} ## maps each point to its connections\n for a,b in lines:\n if a[1] not in points:\n points[a[1]] = []\n points[a[1]].append([a,b])\n if b[1] not in points:\n points[b[1]] = []\n points[b[1]].append([b,a])\n\n ## rearrange into chains\n for k in list(points.keys()):\n try:\n chains = points[k]\n except KeyError: ## already used this point elsewhere\n continue\n #print \"===========\", k\n for chain in chains:\n #print \" chain:\", chain\n x = None\n while True:\n if x == chain[-1][1]:\n break ## nothing left to do on this chain\n \n x = chain[-1][1]\n if x == k: \n break ## chain has looped; we're done and can ignore the opposite chain\n y = chain[-2][1]\n connects = points[x]\n for conn in connects[:]:\n if conn[1][1] != y:\n #print \" ext:\", conn\n chain.extend(conn[1:])\n #print \" del:\", x\n del points[x]\n if chain[0][1] == chain[-1][1]: # looped chain; no need to continue the 
other direction\n chains.pop()\n break\n \n\n ## extract point locations \n lines = []\n for chain in points.values():\n if len(chain) == 2:\n chain = chain[1][1:][::-1] + chain[0] # join together ends of chain\n else:\n chain = chain[0]\n lines.append([p[0] for p in chain])\n \n if not path:\n return lines ## a list of pairs of points\n \n path = QtGui.QPainterPath()\n for line in lines:\n path.moveTo(*line[0])\n for p in line[1:]:\n path.lineTo(*p)\n \n return path",
"def soli8s(ex, ey, ez, ep, D, ed):\n\n ir = ep[0]\n ngp = ir*ir*ir\n\n ir = ep[0]\n ngp = ir*ir*ir\n\n if ir == 1:\n g1 = 0.0\n w1 = 2.0\n gp = np.array([g1, g1, g1]).reshape(1, 3)\n w = np.array([w1, w1, w1]).reshape(1, 3)\n elif ir == 2:\n g1 = 0.577350269189626\n w1 = 1\n gp = np.zeros((8, 3))\n w = np.zeros((8, 3))\n gp[:, 0] = np.array([-1, 1, 1, -1, -1, 1, 1, -1])*g1\n w[:, 0] = np.array([1, 1, 1, 1, 1, 1, 1, 1])*w1\n gp[:, 1] = np.array([-1, -1, 1, 1, -1, -1, 1, 1])*g1\n w[:, 1] = np.array([1, 1, 1, 1, 1, 1, 1, 1])*w1\n gp[:, 2] = np.array([-1, -1, -1, -1, 1, 1, 1, 1])*g1\n w[:, 2] = np.array([1, 1, 1, 1, 1, 1, 1, 1])*w1\n else:\n g1 = 0.774596669241483,\n g2 = 0.0\n w1 = 0.555555555555555\n w2 = 0.888888888888888\n\n gp = np.zeros((27, 3))\n w = np.zeros((27, 3))\n\n I1 = np.array([-1, 0, 1, -1, 0, 1, -1, 0, 1]).reshape(1, 9)\n I2 = np.array([0, -1, 0, 0, 1, 0, 0, 1, 0]).reshape(1, 9)\n\n gp[:, 0] = np.concatenate((I1, I1, I1), axis=1)*g1\n gp[:, 0] = np.concatenate((I2, I2, I2), axis=1)*g2 + gp[:, 0]\n\n I1 = np.abs(I1)\n I2 = np.abs(I2)\n\n w[:, 0] = np.concatenate((I1, I1, I1), axis=1)*w1\n w[:, 0] = np.concatenate((I2, I2, I2), axis=1)*w2 + w[:, 0]\n\n I1 = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1]).reshape(1, 9)\n I2 = np.array([0, 0, 0, 1, 1, 1, 0, 0, 0]).reshape(1, 9)\n\n gp[:, 1] = np.concatenate((I1, I1, I1), axis=1)*g1\n gp[:, 1] = np.concatenate((I2, I2, I2), axis=1)*g2 + gp[:, 1]\n\n I1 = np.abs(I1)\n I2 = np.abs(I2)\n\n w[:, 1] = np.concatenate((I1, I1, I1), axis=1)*w1\n w[:, 1] = np.concatenate((I2, I2, I2), axis=1)*w2 + w[:, 1]\n\n I1 = np.array([-1, -1, -1, -1, -1, -1, -1, -1, -1]).reshape(1, 9)\n I2 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]).reshape(1, 9)\n I3 = np.abs(I1)\n\n gp[:, 2] = np.concatenate((I1, I2, I3), axis=1)*g1\n gp[:, 2] = np.concatenate((I2, I3, I2), axis=1)*g2 + gp[:, 2]\n\n w[:, 2] = np.concatenate((I3, I2, I3), axis=1)*w1\n w[:, 2] = np.concatenate((I2, I3, I2), axis=1)*w2 + w[:, 2]\n\n wp = w[:, 0]*w[:, 1]*w[:, 2]\n\n xsi = gp[:, 0]\n eta = gp[:, 1]\n zet = gp[:, 2]\n r2 = ngp*3\n\n N = np.zeros((ngp, 8))\n dNr = np.zeros((r2, 8))\n\n N[:, 0] = (1-xsi)*(1-eta)*(1-zet)/8\n N[:, 1] = (1+xsi)*(1-eta)*(1-zet)/8\n N[:, 2] = (1+xsi)*(1+eta)*(1-zet)/8\n N[:, 3] = (1-xsi)*(1+eta)*(1-zet)/8\n N[:, 4] = (1-xsi)*(1-eta)*(1+zet)/8\n N[:, 5] = (1+xsi)*(1-eta)*(1+zet)/8\n N[:, 6] = (1+xsi)*(1+eta)*(1+zet)/8\n N[:, 7] = (1-xsi)*(1+eta)*(1+zet)/8\n\n dNr[0:r2+1:3, 0] = -(1-eta)*(1-zet)\n dNr[0:r2+1:3, 1] = (1-eta)*(1-zet)\n dNr[0:r2+1:3, 2] = (1+eta)*(1-zet)\n dNr[0:r2+1:3, 3] = -(1+eta)*(1-zet)\n dNr[0:r2+1:3, 4] = -(1-eta)*(1+zet)\n dNr[0:r2+1:3, 5] = (1-eta)*(1+zet)\n dNr[0:r2+1:3, 6] = (1+eta)*(1+zet)\n dNr[0:r2+1:3, 7] = -(1+eta)*(1+zet)\n dNr[1:r2+2:3, 0] = -(1-xsi)*(1-zet)\n dNr[1:r2+2:3, 1] = -(1+xsi)*(1-zet)\n dNr[1:r2+2:3, 2] = (1+xsi)*(1-zet)\n dNr[1:r2+2:3, 3] = (1-xsi)*(1-zet)\n dNr[1:r2+2:3, 4] = -(1-xsi)*(1+zet)\n dNr[1:r2+2:3, 5] = -(1+xsi)*(1+zet)\n dNr[1:r2+2:3, 6] = (1+xsi)*(1+zet)\n dNr[1:r2+2:3, 7] = (1-xsi)*(1+zet)\n dNr[2:r2+3:3, 0] = -(1-xsi)*(1-eta)\n dNr[2:r2+3:3, 1] = -(1+xsi)*(1-eta)\n dNr[2:r2+3:3, 2] = -(1+xsi)*(1+eta)\n dNr[2:r2+3:3, 3] = -(1-xsi)*(1+eta)\n dNr[2:r2+3:3, 4] = (1-xsi)*(1-eta)\n dNr[2:r2+3:3, 5] = (1+xsi)*(1-eta)\n dNr[2:r2+3:3, 6] = (1+xsi)*(1+eta)\n dNr[2:r2+3:3, 7] = (1-xsi)*(1+eta)\n\n dNr = dNr/8.0\n\n ex = np.asarray(ex).reshape((8, 1))\n ey = np.asarray(ey).reshape((8, 1))\n ez = np.asarray(ez).reshape((8, 1))\n\n JT = dNr@np.concatenate((ex, ey, ez), axis=1)\n\n eps = np.finfo(float).eps\n \n eci = 
N@np.concatenate((ex, ey, ez), axis=1)\n et = np.zeros((ngp, 6))\n es = np.zeros((ngp, 6))\n\n ed = ed.reshape(1, 24)\n\n for i in range(ngp):\n indx = [i*3, i*3+1, i*3+2]\n detJ = np.linalg.det(JT[indx, :])\n if detJ < 10*eps:\n print('Jacobideterminant equal or less than zero!')\n JTinv = np.linalg.inv(JT[indx, :])\n dNx = JTinv@dNr[indx, :]\n\n B = np.zeros((6, 24))\n N2 = np.zeros((3, 24))\n\n B[0, 0:24:3] = dNx[0, :]\n B[1, 1:25:3] = dNx[1, :]\n B[2, 2:26:3] = dNx[2, :]\n B[3, 0:24:3] = dNx[1, :]\n B[3, 1:25:3] = dNx[0, :]\n B[4, 0:24:3] = dNx[2, :]\n B[4, 2:26:3] = dNx[0, :]\n B[5, 1:25:3] = dNx[2, :]\n B[5, 2:26:3] = dNx[1, :]\n\n N2[0, 0:24:3] = N[i, :]\n N2[1, 1:25:3] = N[i, :]\n N2[2, 2:26:3] = N[i, :]\n\n # [6x24] x [24,1]\n ee = B@np.transpose(ed)\n\n et[i, :] = ee.reshape(6,)\n es[i, :] = (D@ee).reshape(6,)\n\n return et, es, eci",
"def _interpolate_naive_S2(pts, val, sigma, x0, step, size):\n offset = interpolation._normalize_values(val)\n \n grid_val = np.zeros(size, dtype=np.float64)\n \n scale = 2*sigma**2\n for j in range(size[0]):\n # compute y-coordinate of grid point\n yc = x0[1] + j*step\n for i in range(size[1]):\n # compute x-coordinate of grid point\n xc = x0[0] + i*step\n \n # use numpy to directly compute numerator and denominator of equ. (1)\n dist = _dist_S2(xc, yc, pts[:,0], pts[:,1])\n weight = np.exp(-dist*dist/scale)\n weighted_sum = np.dot(weight, val)\n weight_total = np.sum(weight)\n \n if weight_total > 0.0:\n grid_val[j,i] = weighted_sum / weight_total + offset\n else:\n grid_val[j,i] = np.NaN\n \n return grid_val",
"def data_generator_simulation1():\n # Target : 1 nuage de point\n nt = 1000\n mu_t = np.array([50, 50])\n cov_t = np.array([[60, 40], \n [40, 60]])\n xt = ot.datasets.make_2D_samples_gauss(nt, mu_t, cov_t)\n\n # Source : 3 nuages de points\n ns1 = 700\n mu_s = np.array([25, 60])\n cov_s = np.array([[30, 10], \n [10, 30]])\n xs = ot.datasets.make_2D_samples_gauss(ns1, mu_s, cov_s)\n\n ns2 = 400\n mu_s = np.array([55, 80])\n cov_s = np.array([[30, 10], \n [10, 30]])\n xs=np.append(xs,ot.datasets.make_2D_samples_gauss(ns2, mu_s, cov_s),axis=0)\n\n\n # Compute the distribution laws associate with the clouds of dots.\n ns=ns1+ns2\n a, b = ot.unif(ns), ot.unif(nt) # uniform distribution on samples\n return (xs,a),(xt,b)",
"def quintic_spline_image_filter(IA):\n\n # doesn't work if the image is less than 43pixels wide/high\n if np.shape(IA)[0] < 43:\n raise ValueError(\"number of pixels in x and y must be at least 43\")\n if np.shape(IA)[1] < 43:\n raise ValueError(\"number of pixels in x and y must be at least 43\")\n\n # define coefficients\n scale = 120\n z = [-0.430575347099973, -0.0430962882032647] # poles\n K0_tol = np.spacing(1)\n\n # initialise output\n C = IA * scale * scale\n dims = np.shape(C)\n C_rows = int(dims[0])\n C_cols = int(dims[1])\n # print(type(C_rows))\n\n # start = time.time()\n\n for i in range(2):\n K0 = math.ceil(math.log(K0_tol) / math.log(np.absolute(z[i])))\n indices = np.arange(K0, dtype=np.int32)\n # print(type(indices))\n\n # scaling term for current pole\n C0 = -z[i] / (1 - z[i]**2)\n\n # column wise for each pole\n # apply symmetric filter over each column\n for k in range(C_cols):\n C[:, k] = sym_filt.sym_exp_filt(\n C[:, k], C_rows, C0, z[i], K0, indices)\n\n # row-wise for each pole\n # apply symmetric filter over each column\n for k in range(C_rows):\n C[k, :] = sym_filt.sym_exp_filt(\n C[k, :], C_cols, C0, z[i], K0, indices)\n\n # print(\"time: {}\".format(time.time() - start))\n\n return C",
"def __curve_splicing(self):",
"def interpolate(\n self, from_image, from_points, to_image, to_points, dense_flow=...\n ) -> dense_flow:\n ...",
"def Interpolate(x, y, data):\n\n widthToUse = data.shape[1]\n heightToUse = data.shape[0]\n\n ix=numba.int32(x)\n iy=numba.int32(y)\n\n xIndex = np.zeros((4,), dtype=numba.int32)\n yIndex = np.zeros((4,), dtype=numba.int32)\n\n# Set X indexes\n# p is the index of the rightmost influencing spline\n p = (ix + 2) if (0.0 <= x) else (ix + 1)\n for k in range(4):\n xIndex[k] = -1 if (p<0 or p>=widthToUse) else p\n p -= 1\n\n# Set Y indexes\n p = (iy + 2) if (0.0 <= y) else (iy + 1)\n for k in range(4):\n yIndex[k] = -1 if (p<0 or p>=heightToUse) else p\n p -= 1\n\n\n# Compute how much the sample depart from an integer position\n# [ conditional because int rounds down for positive numbers and up for negative numbers ]\n\n ex = x - ((ix) if (0.0 <= x) else (ix - 1))\n ey = y - ((iy) if (0.0 <= y) else (iy - 1))\n\n xWeight = np.zeros((4,), dtype=numba.float64)\n yWeight = np.zeros((4,), dtype=numba.float64)\n\n\n# Set X weights for the image and derivative interpolation\n for (weight, e) in [(xWeight, ex), (yWeight, ey)]:\n s = 1.0 - e\n weight[0] = 0.5 * e * e * e / 3.0 \n weight[1] = 2.0 / 3.0 - (2.0 - s) * 0.5 * s * s\n weight[2] = 2.0 / 3.0 - (2.0 - e) * 0.5 * e * e \n weight[3] = 0.5 * s * s * s / 3.0 \n\n\n\n ival = 0.0\n for j in range(4):\n s = 0.0\n iy=yIndex[j]\n if iy != -1:\n for i in range(4):\n ix=xIndex[i]\n if ix!=-1:\n s += xWeight[i]*data[iy][ix]\n ival+=yWeight[j] * s\n return ival",
"def match_det2cube_miripsf(alpha_resol, beta_resol, wave_resol,\n naxis1, naxis2, naxis3,\n xcenters, ycenters, zcoord,\n spaxel_flux,\n spaxel_weight,\n spaxel_iflux,\n spaxel_alpha, spaxel_beta, spaxel_wave,\n flux,\n coord1, coord2, wave, alpha_det, beta_det,\n rois_pixel, roiw_pixel, weight_pixel, softrad_pixel):\n\n nplane = naxis1 * naxis2\n# now loop over the pixel values for this region and find the spaxels that fall\n# withing the region of interest.\n nn = coord1.size\n# print('looping over n points mapping to cloud',nn)\n#________________________________________________________________________________\n for ipt in range(0, nn - 1):\n lower_limit = softrad_pixel[ipt]\n#________________________________________________________________________________\n # xcenters, ycenters is a flattened 1-D array of the 2 X 2 xy plane\n # cube coordinates.\n # find the spaxels that fall withing ROI of point cloud defined by\n # coord1,coord2,wave\n\n xdistance = (xcenters - coord1[ipt])\n ydistance = (ycenters - coord2[ipt])\n radius = np.sqrt(xdistance * xdistance + ydistance * ydistance)\n\n indexr = np.where(radius <= rois_pixel[ipt])\n indexz = np.where(abs(zcoord - wave[ipt]) <= roiw_pixel[ipt])\n\n#________________________________________________________________________________\n# loop over the points in the ROI\n for iz, zz in enumerate(indexz[0]):\n istart = zz * nplane\n for ir, rr in enumerate(indexr[0]):\n#________________________________________________________________________________\n#________________________________________________________________________________\n# if weight is miripsf -distances determined in alpha-beta coordinate system\n\n weights = FindNormalizationWeights(wave[ipt],\n wave_resol,\n alpha_resol,\n beta_resol)\n\n\n cube_index = istart + rr\n\n alpha_distance = alpha_det[ipt] - spaxel_alpha[cube_index]\n beta_distance = beta_det[ipt] - spaxel_beta[cube_index]\n wave_distance = abs(wave[ipt] - spaxel_wave[cube_index])\n\n xn = alpha_distance / weights[0]\n yn = beta_distance / weights[1]\n wn = wave_distance / weights[2]\n\n # only included the spatial dimensions\n wdistance = (xn * xn + yn * yn + wn * wn)\n weight_distance = np.power(np.sqrt(wdistance), weight_pixel[ipt])\n#________________________________________________________________________________\n# We have found the weight_distance based on instrument type\n\n if weight_distance < lower_limit: weight_distance = lower_limit\n weight_distance = 1.0 / weight_distance\n\n\n spaxel_flux[cube_index] = spaxel_flux[cube_index] + weight_distance * flux[ipt]\n spaxel_weight[cube_index] = spaxel_weight[cube_index] + weight_distance\n spaxel_iflux[cube_index] = spaxel_iflux[cube_index] + 1",
"def generate_calibration(single_pnt_cali_d=508,\n single_pnt_cali_ior=1.6,\n single_pnt_cali_int=1000,\n ior=1.5,\n dr=[110, 3400],\n no_pts=600,\n no_cal_pts=30,\n plot=False,\n raise_error=True,\n test=False\n ):\n dr = np.array(dr)\n\n single_pnt_cali_d *= 1e-3\n rr = dr / 1000.\n cal_d = pd.Series(index=np.logspace(np.log10(rr[0]), np.log10(rr[1]), no_cal_pts + 2)[1:-1])\n # cal_d = pd.Series(index = np.logspace(np.log10(rr[0]), np.log10(rr[1]), no_cal_pts) * 2)\n\n if test:\n return cal_d\n\n d, amp = mie.makeMie_diameter(noOfdiameters=no_pts, diameterRangeInMikroMeter=rr, IOR=ior)\n ds = pd.Series(amp, d)\n if ior == single_pnt_cali_ior:\n ds_spc = ds\n else:\n d, amp = mie.makeMie_diameter(noOfdiameters=no_pts, diameterRangeInMikroMeter=rr, IOR=single_pnt_cali_ior)\n ds_spc = pd.Series(amp, d)\n\n ampm = ds.rolling(int(no_pts / no_cal_pts), center=True).mean()\n\n cali = ampm.append(cal_d).sort_index().interpolate().reindex(cal_d.index)\n\n spc_point = ds_spc.append(pd.Series(index=[single_pnt_cali_d])).sort_index().interpolate().reindex(\n [single_pnt_cali_d]) # .values[0]\n scale = single_pnt_cali_int / spc_point.values[0]\n\n cali *= scale\n cali.index *= 1e3\n\n cali_inst = pd.DataFrame(cali, columns=['amp'])\n cali_inst['d'] = cali_inst.index\n cali_inst = Calibration(cali_inst)\n\n if raise_error:\n ct = cali.values\n if (ct[1:] - ct[:-1]).min() < 0:\n raise ValueError(\n 'Clibration function is not bijective. usually decreasing the number of calibration points will help!')\n\n cal_fkt_test = cali_inst.calibrationFunction(cali_inst.data.amp.values)\n if not np.all(~np.isnan(cal_fkt_test)):\n raise ValueError(\n 'Clibration function is not bijective. usually decreasing the number of calibration points will help!')\n\n if plot:\n f, a = plt.subplots()\n a.plot(ds.index * 1e3, ds.values * scale, label='POPS resp.')\n a.plot(ampm.index * 1e3, ampm.values * scale, label='POPS resp. smooth')\n g, = a.plot(cali.index, cali.values, label='cali')\n g.set_linestyle('')\n g.set_marker('x')\n g.set_markersize(10)\n g.set_markeredgewidth(2)\n g, = a.plot(single_pnt_cali_d * 1e3, single_pnt_cali_int, label='single ptn cal')\n g.set_linestyle('')\n g.set_marker('o')\n g.set_markersize(10)\n g.set_markeredgewidth(2)\n # st.plot(ax = a)\n a.loglog()\n a.legend()\n return cali_inst, a\n\n return cali_inst",
"def Bilinear_Interpolation(x,y,data):\n for i in range(len(data['x'])-1):\n if data['x'][i]<=x and data['x'][i+1]>=x:\n index1=i\n break\n for j in range(len(data['y'])-1):\n if data['y'][j]<=y and data['y'][j+1]>=y:\n index2=j\n break\n Q11=data['f'][index1][index2]\n Q12=data['f'][index1][index2+1]\n Q21=data['f'][index1+1][index2]\n Q22=data['f'][index1+1][index2+1]\n x1=data['x'][index1]\n x2=data['x'][index1+1]\n y1=data['y'][index2]\n y2=data['y'][index2+1]\n return Q11*(x2-x)*(y2-y)/((x2-x1)*(y2-y1))+Q21*(x-x1)*(y2-y)/((x2-x1)*(y2-y1))+Q12*(x2-x)*(y-y1)/((x2-x1)*(y2-y1))+Q22*(x-x1)*(y-y1)/((x2-x1)*(y2-y1))",
"def generate2DGuassian(image, wsize, sigma):",
"def toSquareMap(self):\n l1 = int(numpy.sqrt(2*self.length))+1\n d = numpy.zeros((l1,l1))\n for ijfg in range(1, self.length+1):\n i = self[ijfg]['I']\n j = self[ijfg]['J']\n val = self[ijfg]['FIE']\n if val < 0:\n d[i-1][j-1] = self[ijfg]['FIE']\n else:\n d[j-1][i-1] = self[ijfg]['FIE']\n return d",
"def makeDataset_sweep_2D(data, gates, sweepgates, sweepranges, measure_names='measured', location=None, loc_record=None,\n fig=None):\n\n scantype = loc_record['label']\n if 'vec' not in scantype and not isinstance(sweepgates, dict):\n # simple gate type\n gate_horz = getattr(gates, sweepgates[0])\n gate_vert = getattr(gates, sweepgates[1])\n\n initval_horz = gate_horz.get()\n initval_vert = gate_vert.get()\n\n if type(measure_names) is list:\n data_measured = data[0]\n else:\n data_measured = data\n\n sweep_horz = gate_horz[initval_horz - sweepranges[0] /\n 2:sweepranges[0] / 2 + initval_horz:sweepranges[0] / len(data_measured[0])]\n sweep_vert = gate_vert[initval_vert - sweepranges[1] /\n 2:sweepranges[1] / 2 + initval_vert:sweepranges[1] / len(data_measured)]\n else:\n # vector scan\n gate_horz = 'gate_horz'\n gate_vert = 'gate_vert'\n p1 = qcodes.Parameter('gate_horz', set_cmd=None)\n p2 = qcodes.Parameter('gate_vert', set_cmd=None)\n\n sweepranges[0]\n xvals = np.linspace(-sweepranges[0] / 2, sweepranges[0] / 2, data.shape[1])\n yvals = np.linspace(-sweepranges[1] / 2, sweepranges[1] / 2, data.shape[0])\n\n sweep_horz = p1[xvals]\n sweep_vert = p2[yvals]\n # sweep_horz=p1[0:data.shape[0]:1]\n # sweep_vert=p2[0:data.shape[0]:1]\n assert (data.shape[0] == len(list(sweep_vert)))\n assert (data.shape[1] == len(list(sweep_horz)))\n\n dataset = makeDataSet2D(sweep_vert, sweep_horz, measure_names=measure_names,\n location=location, loc_record=loc_record, preset_data=data)\n\n if fig is None:\n return dataset, None\n else:\n if fig is not None:\n plt.figure(fig).clear()\n plot = MatPlot(dataset.measured, interval=0, num=fig)\n return dataset, plot",
"def smooth_interp(self):\n #### 1) Prepare for ROMS grid\n xroms=np.zeros_like(self.lon0) ## xroms: longitude, yroms: latitude\n yroms=np.zeros_like(self.lat0)\n (y,x)=self.lon0.shape\n for i in range(y):\n for j in range(x):\n (yroms[i][j],xroms[i][j])=utm.from_latlon(self.lat0[i][j],self.lon0[i][j])[0:2]\n \n xy_roms = np.vstack((xroms[self.maskss0==1],yroms[self.maskss0==1])).T\n Fuv = interpXYZ(xy_roms,xy_roms, method='kriging')\n \n uroms, vroms = self.combine()\n for tstep in range(self.time_ss.shape[0]):\n utem=np.zeros_like(xroms)\n utem[self.maskss0==1]=Fuv(uroms[self.ind0+tstep,:,:][self.maskss0==1])\n uroms[self.ind0+tstep,:,:]=utem\n \n vtem=np.zeros_like(xroms)\n vtem[self.maskss0==1]=Fuv(vroms[self.ind0+tstep,:,:][self.maskss0==1])\n vroms[self.ind0+tstep,:,:]=vtem\n \n basemap = Basemap(projection='merc',llcrnrlat=self.lat0.min(),urcrnrlat=self.lat0.max(), \\\n llcrnrlon=self.lon0.min(),urcrnrlon=self.lon0.max(),resolution='i')\n fig1 = plt.figure()\n ax = fig1.add_subplot(111)\n \n basemap.drawcoastlines()\n basemap.fillcontinents()\n basemap.drawcountries()\n basemap.drawstates()\n x_rho, y_rho = basemap(self.lon0, self.lat0)\n\n basemap.pcolormesh(x_rho, y_rho, uroms[-2,:,:], vmin=uroms.min(),vmax=uroms.max()) \n plt.show() \n \n #pdb.set_trace()",
"def get_bezier_curve_edgematrix(p1, i1, i2, p2, step=30):\n def x(t): return Generator.get_bezier_function(\n p1[0], i1[0], i2[0], p2[0])(t)\n def y(t): return Generator.get_bezier_function(\n p1[1], i1[1], i2[1], p2[1])(t)\n def z(t): return 0\n parametric = Parametric(x, y, z)\n edgematrix = EdgeMatrix()\n step_range = Generator.get_step_range(0, 1, step)\n for i in range(len(step_range) - 1):\n edgematrix.add_edge(parametric.get_point(step_range[i]),\n parametric.get_point(step_range[i + 1]))\n return edgematrix",
"def applyHLSCurve(self, hCurve, lCurve, sCurve):\n \n \n #TODO CHECK ROI\n #TODO CHECK CURVE SIZE\n #TODO CHECK COLORSPACE\n #TODO CHECK CURVE SIZE\n temp = cv.CreateImage(self.size(), 8, 3)\n #Move to HLS space\n cv.CvtColor(self._bitmap, temp, cv.CV_RGB2HLS)\n tempMat = cv.GetMat(temp) #convert the bitmap to a matrix\n #now apply the color curve correction\n tempMat = np.array(self.getMatrix()).copy()\n tempMat[:, :, 0] = np.take(hCurve.mCurve, tempMat[:, :, 0])\n tempMat[:, :, 1] = np.take(sCurve.mCurve, tempMat[:, :, 1])\n tempMat[:, :, 2] = np.take(lCurve.mCurve, tempMat[:, :, 2])\n #Now we jimmy the np array into a cvMat\n image = cv.CreateImageHeader((tempMat.shape[1], tempMat.shape[0]), cv.IPL_DEPTH_8U, 3)\n cv.SetData(image, tempMat.tostring(), tempMat.dtype.itemsize * 3 * tempMat.shape[1])\n cv.CvtColor(image, image, cv.CV_HLS2RGB) \n return Image(image, colorSpace=self._colorSpace)",
"def spline2D(\n boundary_points=((0, 5), (1, 2), (-4, -1)),\n decision_points=((1, 1), (2, 2))\n):\n raise NotImplementedError(\"It should be a good idea but I don't have time to implement.\")",
"def create_anisotropic_dataset():\n n0 = 2 * n_samples // 3\n n1 = n_samples // 3\n x0 = np.random.normal(size=(n0, n_features))\n x1 = np.random.normal(size=(n1, n_features))\n transformation = [[0.6, -0.6], [-0.4, 0.8]]\n x0 = np.dot(x0, transformation)\n x1 = np.dot(x1, transformation) + [0, 3]\n x = np.concatenate((x0, x1))\n y = np.concatenate((np.zeros(n0), np.ones(n1)))\n return x, y"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert a protorpc Message into a list suitable for PBLite.
|
def MessageToPBLiteList(msg):
  # map proto field number -> field name
  index_keys = dict([(f.number, f.name) for f in msg.all_fields()])
  if not index_keys:
    return []
  max_index = max(index_keys.keys())
  json_list = [None] * max_index
  for index, key in index_keys.items():
    value = getattr(msg, key, None)
    if isinstance(value, messages.Message):
      value = MessageToPBLiteList(value)
    # PBLite lists are 1-indexed by field number
    json_list[index - 1] = value
  return json_list
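
A minimal usage sketch (the Greeting message class is hypothetical; it assumes the protorpc library this function is written against):

from protorpc import messages

class Greeting(messages.Message):
  text = messages.StringField(1)
  count = messages.IntegerField(2)

# prints the field values in field-number order
print(MessageToPBLiteList(Greeting(text='hi', count=3)))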
|
[
"def split_eap_message(eap_messages: bytes) -> list:\n if len(eap_messages) < 253:\n return [eap_messages]\n _stop = len(eap_messages)\n _step = 253\n return [eap_messages[pos:pos+_step] for pos in range(0, _stop, _step)]",
"def decode_all(cls, buf):\n msg_list = []\n while len(buf) > 0:\n msg_items, buf = cls.decode(buf)\n msg_list.append(msg_items)\n return msg_list",
"def read_messages(self) -> List[DltMessage]:\r\n return [message for message in self.__iter__()]",
"def test_msg_to_list_of_string(self):\n self.json_message[\"msg_to\"] = [\"01b51fcc-ed43-4cdb-ad1c-450f9986859b\"]\n with self.app.app_context():\n g.user = User(self.json_message[\"msg_from\"], \"respondent\")\n try:\n MessageSchema().load(self.json_message)\n except ValidationError:\n self.fail(\"Schema should've been correct and not thrown an error\")",
"def messages_from_bytes(cls, bytestring):\n return sum((message_class.many_from_bytes(bytestring)\n for message_class in cls.handled_message_types), [])",
"def tensor_to_msg_list(tensor):\r\n if tensor.shape == (INPUT_SIZE, ):\r\n msg_type = ACCEPTED_MESSAGES[tensor[:4].argmax()]\r\n msg_time = tensor[4]\r\n if msg_type == \"end_of_track\":\r\n msg = mido.MetaMessage(\"end_of_track\")\r\n else:\r\n msg = mido.Message(msg_type, time=msg_time)\r\n if msg_type == 'program_change':\r\n msg.program = int(tensor[5])\r\n elif msg_type == 'control_change':\r\n msg.control = int(tensor[6])\r\n msg.value = int(tensor[7])\r\n elif msg_type == 'note_on':\r\n msg.note = int(tensor[8])\r\n msg.velocity = int(tensor[9])\r\n return msg\r\n \r\n else:\r\n track = []\r\n for i in tensor:\r\n track.append(tensor_to_msg_list(i))\r\n return track",
"def unpack_aux_message(data: bytes) -> list:\n if len(data) < 16:\n raise MuxError(\"aux data is too small to unpack\")\n\n args = []\n br = BinReader(data)\n br.read(16) # ignore first 16 magic bytes\n while True:\n typedata = br.read(4)\n if not typedata:\n break\n _type = struct.unpack(\"I\", typedata)[0]\n if _type == 2:\n _len = br.read_u32()\n archived_data = br.read(_len)\n val = bplist.objc_decode(archived_data)\n args.append(val)\n elif _type in [3, 5]:\n val = br.read_u32()\n args.append(val)\n elif _type in [4, 6]:\n val = br.read_u64()\n args.append(val)\n elif _type == 10:\n # Ignore\n pass\n else:\n raise MuxError(\"Unknown type\", hex(_type))\n return args",
"def parsing(message):\n\n m = padding(message)\n M = []\n for i in range(0, len(m), 32):\n M.append(m[i:i+32])\n return M",
"def get_messages(self) -> list:\n fetched_data = self.read_database()\n\n users = []\n messages = []\n service = []\n\n for data in fetched_data:\n users.append(data.user_id)\n messages.append(data.text)\n service.append(data.service)\n\n data = list(zip(users, messages, service))\n\n return data",
"def convert_str(message: str) -> [int]:\r\n new_list = []\r\n for character in message:\r\n new_list.append(ord(character))\r\n return new_list",
"def ParseProtoMessage(message, fingerprint_enabled):\n care_map_proto = care_map_pb2.CareMap()\n care_map_proto.MergeFromString(message)\n\n info_list = []\n for info in care_map_proto.partitions:\n assert info.name, \"partition name is required in care_map\"\n assert info.ranges, \"source range is required in care_map\"\n info_list += [info.name, info.ranges]\n if fingerprint_enabled:\n assert info.id, \"property id is required in care_map\"\n assert info.fingerprint, \"fingerprint is required in care_map\"\n info_list += [info.id, info.fingerprint]\n\n return '\\n'.join(info_list)",
"def _parse_message(self, message_buffer):\n if not isinstance(message_buffer, bytes):\n message_buffer = message_buffer.encode(\"utf-8\")\n\n if all((char in string.printable for char in message_buffer)):\n return self._parse_json_message(message_buffer)\n else:\n return self._parse_protobuf_message(message_buffer)",
"def get_parts(message):\n\n parts = list()\n\n simple_body = ''\n if message.body():\n simple_body = message.body()\n parts.append(('body', simple_body))\n\n for part in message.walk():\n part_type = get_part_type(part)\n if part_type == 'body':\n part_content = part.body\n if part_content == simple_body:\n continue#avoid duplication\n elif part_type in ('attachment', 'inline'):\n part_content = format_attachment(part)\n else:\n continue\n parts.append((part_type, part_content))\n return parts",
"def messages_to_json(messages):\n return [message_to_json(m) for m in messages]",
"def message_cache(self) -> List[StarboardMessage]:\n return list(self._cache.values())",
"def __listMessages(self, conn):\n (res, data) = conn.search(None, 'ALL')\n if res != 'OK':\n raise RuntimeError('Unvalid reply: ' + res)\n msgids = data[0].split()\n return msgids",
"def process_messages(card_list, message_list, function_type):\n encrypted_messages = []\n for message in message_list:\n new_message = process_message(card_list, message, function_type)\n encrypted_messages.append(new_message)\n return encrypted_messages",
"def fetch_to_list(self, data):\n liste = list()\n for part in data:\n liste.append(part[0])\n return liste",
"def msgList_getMessage(self,attr = None, longNames = True, cull = True):\n\ttry:\n\t #log.debug(\">>> %s.msgList_getMessage(attr = '%s') >> \"%(self.p_nameShort,attr) + \"=\"*75) \n\t d_attrs = self.get_sequentialAttrDict(attr)\n\t l_return = []\n\t for i,k in enumerate(d_attrs.keys()):\n\t\tstr_msgBuffer = self.getMessage(d_attrs[i],longNames = longNames)\n\t\tif str_msgBuffer:str_msgBuffer = str_msgBuffer[0]\n\t\tl_return.append(str_msgBuffer)\n\t\t#log.debug(\"index: %s | msg: '%s' \"%(i,str_msgBuffer))\n\t if cull:\n\t\tl_return = [o for o in l_return if o]\n\t #log.debug(\"-\"*100) \n\t return l_return\n\texcept StandardError,error:\n\t raise StandardError, \"%s.msgList_getMessage >>[Error]<< : %s\"(self.p_nameShort,error)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Simple threadsafe memoization decorator. Uses a repr() of the params. This will be ok unless there is a custom __repr__ that obscures important information.
|
def _Memoize(func):
l = threading.Lock()
cache = {}
def _Caller(*args, **kwargs):
with l:
params = repr((args, kwargs))
try:
return cache[params]
except KeyError:
result = func(*args, **kwargs)
cache[params] = result
return result
return _Caller
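
A minimal usage sketch of the decorator (slow_add is an illustrative name; _Memoize itself relies on a module-level import of threading):

@_Memoize
def slow_add(a, b):
    print('computing %s + %s' % (a, b))
    return a + b

slow_add(1, 2)  # computes and caches the result
slow_add(1, 2)  # same repr() of the arguments, so the cached value is returned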
|
[
"def memoize_mutable(f):\n memo = {}\n def wrapper(*args, **kwargs):\n key = pickle.dumps(args) + pickle.dumps(kwargs) #To use as hash for mutable objects.\n if not key in memo:\n memo[key] = f(*args, **kwargs)\n #print(f'Calculated \"{f.__name__}\" for args: {str(args)[:100]} and kwargs: {str(kwargs)[:100]}')\n else:\n pass\n #print(f'Looked-up \"{f.__name__}\" for args: {str(args)[:100]} and kwargs: {str(kwargs)[:100]}')\n return memo[key]\n return wrapper",
"def memoize_immutable(f):\n memo = {}\n def wrapper(*args, **kwargs):\n key = (args, frozenset(kwargs.items())) #Must use frozenset because kwargs (= a dictionary) cannot be used as part of dictionary key \n if not key in memo:\n memo[key] = f(*args, **kwargs)\n #print(f'Calculated \"{f.__name__}\" for args: {str(args)[:100]} and kwargs: {str(kwargs)[:100]}')\n else:\n pass\n #print(f'Looked-up \"{f.__name__}\" for args: {str(args)[:100]} and kwargs: {str(kwargs)[:100]}')\n return memo[key]\n return wrapper",
"def memoize(*args, **kwargs):\n if args:\n assert len(args) == 1\n assert not kwargs\n return memoize()(args[0])\n key_func = kwargs.pop('key_func', None)\n if kwargs:\n raise TypeError('memoize() got unexpected keyword arguments: %s', ', '.join(kwargs))\n\n return _memory_decorator({}, key_func)",
"def memoize(func, cache, num_args):\n def wrapper(*args):\n mem_args = args[:num_args]\n if mem_args in cache:\n return cache[mem_args]\n result = func(*args)\n cache[mem_args] = result\n return result\n return wraps(func)(wrapper)",
"def search_param_memoize(func):\r\n def wrapper(*args, **kwargs):\r\n key = (args, frozenset(kwargs.items()))\r\n if key in search_param_cache:\r\n return search_param_cache[key]\r\n else:\r\n rv = func(*args, **kwargs)\r\n search_param_cache[key] = rv\r\n return rv\r\n return wrapper",
"def _memo(fn):\n\n cache = {}\n\n @_functools.wraps(fn)\n def _fn(*args):\n if key: args = key(*args)\n try: ret = cache[args]\n except KeyError: ret = cache[args] = fn(*args)\n return ret\n\n _fn._cache = cache\n return _fn",
"def memoize(function):\n cache = {}\n @functools.wraps(function)\n def _memoize(*args):\n if args in cache:\n return cache[args]\n result = function(*args)\n cache[args] = result\n return result\n return function",
"def __repr__(self) -> str:\n return f\"<CachedComputation key={self.key} task={self.computation} hash={self.hash}>\"",
"def test_cached_func_returns_the_same_as_original():\n\n def foo(a, b):\n return a + b\n\n arguments = 10, 5\n cached_foo = cache(foo)\n\n assert foo(*arguments) == cached_foo(*arguments)",
"def self_memoized(func):\n\n cache_name = '_cache_{}'.format(func.__name__)\n\n def wrapper(self, *args, **kwargs):\n # Install the self-specific cache, if needed\n cache = getattr(self, cache_name, {})\n setattr(self, cache_name, cache)\n\n key = (args, tuple(kwargs.items()))\n try:\n result = cache[key]\n except KeyError:\n result = func(self, *args, **kwargs)\n cache[key] = result\n return result\n\n def reset(self):\n setattr(self, cache_name, {})\n\n wrapper.reset = reset\n\n return wrapper",
"def memoized(*args, **kwargs):\n\n arguments = args + tuple((a, b) for a, b in kwargs.items())\n\n if arguments not in cache:\n cache[arguments] = function(*args, **kwargs)\n\n return cache[arguments]",
"def memoize_stampede(\n cache, expire, name=None, typed=False, tag=None, beta=1, ignore=()\n):\n # Caution: Nearly identical code exists in Cache.memoize\n def decorator(func):\n \"\"\"Decorator created by memoize call for callable.\"\"\"\n base = (full_name(func),) if name is None else (name,)\n\n def timer(*args, **kwargs):\n \"\"\"Time execution of `func` and return result and time delta.\"\"\"\n start = time.time()\n result = func(*args, **kwargs)\n delta = time.time() - start\n return result, delta\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"Wrapper for callable to cache arguments and return values.\"\"\"\n key = wrapper.__cache_key__(*args, **kwargs)\n pair, expire_time = cache.get(\n key,\n default=ENOVAL,\n expire_time=True,\n retry=True,\n )\n\n if pair is not ENOVAL:\n result, delta = pair\n now = time.time()\n ttl = expire_time - now\n\n if (-delta * beta * math.log(random.random())) < ttl:\n return result # Cache hit.\n\n # Check whether a thread has started for early recomputation.\n\n thread_key = key + (ENOVAL,)\n thread_added = cache.add(\n thread_key,\n None,\n expire=delta,\n retry=True,\n )\n\n if thread_added:\n # Start thread for early recomputation.\n def recompute():\n with cache:\n pair = timer(*args, **kwargs)\n cache.set(\n key,\n pair,\n expire=expire,\n tag=tag,\n retry=True,\n )\n\n thread = threading.Thread(target=recompute)\n thread.daemon = True\n thread.start()\n\n return result\n\n pair = timer(*args, **kwargs)\n cache.set(key, pair, expire=expire, tag=tag, retry=True)\n return pair[0]\n\n def __cache_key__(*args, **kwargs):\n \"\"\"Make key for cache given function arguments.\"\"\"\n return args_to_key(base, args, kwargs, typed, ignore)\n\n wrapper.__cache_key__ = __cache_key__\n return wrapper\n\n return decorator",
"def _instancememo(fn):\n\n cache_name = '_cache_' + fn.__name__\n\n def _get_cache(self, fn):\n \"\"\"cache is stored in the self namespace, retrieved at runtime.\"\"\"\n try:\n return getattr(self, cache_name)\n except AttributeError:\n setattr(self, cache_name, {})\n return getattr(self, cache_name)\n\n @_functools.wraps(fn)\n def _fn(self, *args):\n cache = _get_cache(self, fn)\n if key: args = key(*args)\n try: ret = cache[args]\n except: ret = cache[args] = fn(self, *args)\n return ret\n\n return _fn",
"def wrapper(*args, **kwargs):\n key = wrapper.__cache_key__(*args, **kwargs)\n result = g.cache.get(key, ENOVAL)\n\n if result is ENOVAL:\n result = func(*args, **kwargs)\n g.cache[key] = result\n\n return result",
"def memorize(func):\n # Store results in a dict that maps arguments to results\n cache = {}\n # Define the wrapper function to return.\n @wraps(func)\n def wrapper(*args, **kwargs):\n # If these arguments haven't been seen before,\n if (args, kwargs) not in cache:\n # Call func() and store the result.\n cache[(args, kwargs)] = func(*args, **kwargs)\n return cache[(args, kwargs)]\n return wrapper",
"def memoize(k):\n\n def inner(f):\n\n cache = LRUCache(k)\n\n @functools.wraps(f)\n def wrapper(x):\n if x not in cache:\n cache[x] = f(x)\n return cache[x]\n\n return wrapper\n\n return inner",
"def keymemo(key):\n\n def _memo(fn):\n \"\"\"the memoize decorator itself.\"\"\"\n\n cache = {}\n\n @_functools.wraps(fn)\n def _fn(*args):\n if key: args = key(*args)\n try: ret = cache[args]\n except KeyError: ret = cache[args] = fn(*args)\n return ret\n\n _fn._cache = cache\n return _fn\n\n return _memo",
"def memoize(func):\n if _DISABLE_FOR_TEST:\n return func\n\n cache = AsyncLRUCache()\n\n @defer.inlineCallbacks\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n result = yield cache.get(func, *args, **kwargs)\n defer.returnValue(result)\n return wrapper",
"def memoize(func, resolver=None):\n\n def memoized(*args: P.args, **kwargs: P.kwargs):\n if resolver:\n key = resolver(*args, **kwargs)\n else:\n key = f\"{args}{kwargs}\"\n\n if key not in memoized.cache: # type: ignore\n memoized.cache[key] = func(*args, **kwargs) # type:ignore\n\n return memoized.cache[key] # type: ignore\n\n memoized.cache = {}\n\n return memoized"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load devshell credentials from the proxy. Also sets various attributes on the credential object expected by other parties.
|
def LoadDevshellCredentials():
try:
return DevshellCredentials(
user_agent=config.CLOUDSDK_USER_AGENT,)
except Exception: # pylint:disable=broad-except, any problem means None
return None
|
[
"def __LoadAuthCredentials(self):\n return super(DfpClient, self)._LoadAuthCredentials()",
"def Load():\n if Check(): # exists and has valid refresh so load it\n credentials = json.loads(os.environ.get(Varname()))\n return credentials",
"def patch_using_env(self):\n if self.cred_properties:\n credentials_config = self.cred_properties\n\n user = getenv(\"HERE_USER_ID\") or credentials_config[\"user\"]\n client = getenv(\"HERE_CLIENT_ID\") or credentials_config[\"client\"]\n key = (\n getenv(\"HERE_ACCESS_KEY_ID\")\n or getenv(\"HERE_ACCESS_KEY\")\n or credentials_config[\"key\"]\n )\n secret = (\n getenv(\"HERE_ACCESS_KEY_SECRET\")\n or getenv(\"HERE_ACCESS_SECRET\")\n or credentials_config[\"secret\"]\n )\n endpoint = (\n getenv(\"HERE_TOKEN_ENDPOINT_URL\")\n or getenv(\"HERE_TOKEN_ENDPOINT\")\n or credentials_config[\"endpoint\"]\n )\n credentials_config[\"user\"] = user\n credentials_config[\"client\"] = client\n credentials_config[\"key\"] = key\n credentials_config[\"secret\"] = secret\n credentials_config[\"endpoint\"] = endpoint",
"def load(self):\n if not self._credfile_exists():\n return self.handle_no_store()\n\n try:\n json_str = self.crypto.decrypt(self.credfile_loc, self.passphrase)\n except GPGCommunicator.KeyfileDecodeError as e:\n print(\"%s bad password?\" % e.value)\n exit(1)\n\n dict_list = json.loads(json_str)\n\n return [Credential(**c_dict) for c_dict in dict_list]",
"def load_credentials(credentials=None):\n if credentials is None:\n credentials = '~/.unsw_credentials'\n\n if credentials.startswith('~'):\n credentials = os.path.expanduser(credentials)\n\n with open(credentials) as fh:\n username, password = fh.readline().split(':', 1)\n\n password = password.strip()\n\n return username, password",
"def install_creds(arguments):\n\n global credentials\n if arguments.verbose:\n print \"Installing credentials...\"\n credentials = storage.get()",
"def _get_credentials(self):\n cred = {\n 'username': self.username,\n 'password': self.password,\n 'host': self.host,\n 'port': self.port,\n }\n return cred",
"def set_proxy_credentials(self, username, password):\n self._set_proxy_credentials(username.encode(), password.encode())",
"def _read_credential_file(self, cfg):\n self.username = cfg.get(\"rackspace_cloud\", \"username\")\n try:\n self.password = cfg.get(\"rackspace_cloud\", \"api_key\", raw=True)\n except ConfigParser.NoOptionError as e:\n # Allow either the use of either 'api_key' or 'password'.\n self.password = cfg.get(\"rackspace_cloud\", \"password\", raw=True)",
"def read_creds(self, debug):\n # trying sample hemlock_creds as a default, if that\n # fails, then ask for a file or parameters\n try:\n f = open('hemlock/hemlock_creds_sample', 'r')\n for line in f:\n self.log.debug(debug, line)\n if len(line) > 0 and line[0] != \"#\" and \"=\" in line:\n # split each line on the first '='\n line_a = line.split(\"=\",1)\n try:\n os.environ[line_a[0]] = line_a[1].strip()\n except: # pragma: no cover\n print \"Malformed Hemlock Creds file.\"\n self.log.debug(debug, str(sys.exc_info()[0]))\n f.close()\n except: # pragma: no cover\n resp = \"\"\n while resp != 'y' and resp != 'n':\n resp = raw_input(\"Do you have a hemlock_creds file? (y/n)\")\n resp = resp.lower()\n if resp == 'y':\n resp = raw_input(\"Path to hemlock_creds file: \")\n try:\n f = open(resp, 'r')\n for line in f:\n self.log.debug(debug, line)\n if len(line) > 0 and line[0] != \"#\" and \"=\" in line:\n # split each line on the first '='\n line_a = line.split(\"=\",1)\n try:\n os.environ[line_a[0]] = line_a[1].strip()\n except:\n print \"Malformed Hemlock Creds file.\"\n self.log.debug(debug, str(sys.exc_info()[0]))\n f.close()\n except:\n print \"Unable to open \"+resp\n self.log.debug(debug, str(sys.exc_info()[0]))\n return",
"def _read_credential_file(self, cfg):\r\n self.username = cfg.get(\"rackspace_cloud\", \"username\")\r\n try:\r\n self.password = cfg.get(\"rackspace_cloud\", \"api_key\", raw=True)\r\n except ConfigParser.NoOptionError as e:\r\n # Allow either the use of either 'api_key' or 'password'.\r\n self.password = cfg.get(\"rackspace_cloud\", \"password\", raw=True)",
"def _set_proxy_credential_json(self, destination_cloud):\n\n if 'amazon_s3' in destination_cloud:\n self._proxy_credential_json = {\n \"instanceType\": 5,\n \"s3Instance\": {\n \"hostURL\": destination_cloud.get('amazon_s3', {}).get('s3_host_url', 's3.amazonaws.com'),\n \"accessKeyId\": destination_cloud.get('amazon_s3', {}).get('s3_access_key', \"\"),\n \"secretAccessKey\": destination_cloud.get('amazon_s3', {}).get('s3_secret_key', \"\")\n }\n }\n\n elif 'google_cloud' in destination_cloud:\n self._proxy_credential_json = {\n \"instanceType\": 20,\n \"googleCloudInstance\": {\n \"serverName\": destination_cloud.get('google_cloud', {}).get('google_host_url', 'storage.googleapis.com'),\n \"credentials\": {\n \"userName\": destination_cloud.get('google_cloud', {}).get('google_access_key', \"\"),\n \"password\": destination_cloud.get('google_cloud', {}).get('google_secret_key', \"\")\n }\n }\n }\n\n elif 'azure_blob' in destination_cloud:\n self._proxy_credential_json = {\n \"instanceType\": 6,\n \"azureInstance\": {\n \"hostURL\": destination_cloud.get('azure_blob', {}).get('azure_host_url', 'blob.core.windows.net'),\n \"accountName\": destination_cloud.get('azure_blob', {}).get('azure_account_name', \"\"),\n \"accessKey\": destination_cloud.get('azure_blob', {}).get('azure_access_key', \"\")\n }\n }",
"def setup_basic_authed_pycurl_connection(self):\n\t\t_connection, _string_io = self.get_basic_pycurl_connection()\n\t\tif self.username is not None and self.password is not None:\n\t\t\t_connection.setopt(pycurl.USERPWD, \"%s:%s\" % \\\n\t\t\t\t(urllib.quote(self.username), \\\n\t\t\t\turllib.quote(self.password)) )\n\t\treturn _connection, _string_io",
"def load_from_config(self):\n self.http_pass = self.config.ejbca_jks_password\n self.db_pass = self.config.ejbca_db_password\n self.master_p12_pass = self.config.ejbca_p12master_password\n self.do_vpn = self.config.vpn_installed",
"def load(self):\n try:\n with open(self.filename, 'rb') as ciphertext:\n self.__accounts = self.__decoder.decode(self.__gpg.decrypt(\n ciphertext,\n verify=False,\n passphrase=getpass(\"Password: \")\n )[0].decode())\n print(\"Credentials loaded.\", file=sys.stderr)\n except FileNotFoundError:\n pass",
"def _setup_http_proxy(self):\r\n headers = {}\r\n\r\n if self.proxy_username and self.proxy_password:\r\n # Include authentication header\r\n user_pass = '%s:%s' % (self.proxy_username, self.proxy_password)\r\n encoded = base64.encodestring(b(urlunquote(user_pass))).strip()\r\n auth_header = 'Basic %s' % (encoded.decode('utf-8'))\r\n headers['Proxy-Authorization'] = auth_header\r\n\r\n if hasattr(self, 'set_tunnel'):\r\n # Python 2.7 and higher\r\n # pylint: disable=no-member\r\n self.set_tunnel(host=self.host, port=self.port, headers=headers)\r\n elif hasattr(self, '_set_tunnel'):\r\n # Python 2.6\r\n # pylint: disable=no-member\r\n self._set_tunnel(host=self.host, port=self.port, headers=headers)\r\n else:\r\n raise ValueError('Unsupported Python version')\r\n\r\n self._set_hostport(host=self.proxy_host, port=self.proxy_port)",
"def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'credentials.json')\r\n \r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def test_construct_from_properties_with_proxy_authentication(self):\n self.config.set('ConnectSDK', \"connect.api.proxy.uri\", \"http://proxy.example.org:3128\")\n self.config.set('ConnectSDK', \"connect.api.proxy.username\", \"connect-username\")\n self.config.set('ConnectSDK', \"connect.api.proxy.password\", \"connect-password\")\n\n communicator_config = CommunicatorConfiguration(self.config)\n\n self.assertDefaults(communicator_config)\n self.assertIsNone(communicator_config.api_key_id)\n self.assertIsNone(communicator_config.secret_api_key)\n proxy_config = communicator_config.proxy_configuration\n self.assertIsNotNone(proxy_config)\n self.assertEqual(\"http\", proxy_config.scheme)\n self.assertEqual(\"proxy.example.org\", proxy_config.host)\n self.assertEqual(3128, proxy_config.port)\n self.assertEqual(\"connect-username\", proxy_config.username)\n self.assertEqual(\"connect-password\", proxy_config.password)",
"def initialize_credentials():\n\n # import credentials\n # https://kedro.readthedocs.io/en/stable/04_kedro_project_setup/02_configuration.html\n from kedro.config import ConfigLoader\n\n # conf_paths = [\"../conf/base\", \"../conf/local\"]\n conf_paths = [Path(BASE_DIR, \"conf/local\")]\n print(f\"conf_paths are: {conf_paths}\")\n\n print\n conf_loader = ConfigLoader(conf_paths)\n credentials = conf_loader.get(\"credentials*\", \"credentials*/**\")\n\n # Environment setup\n os.environ[\"X_NFER_BASEURL\"] = \"https://preview.nferx.com\"\n os.environ[\"NFERENCE_USER\"] = credentials[\"nfer_access_key\"] # \"yash@nference.net\"\n os.environ[\"NFERENCE_TOKEN\"] = credentials[\"nfer_secret_key\"] # \"<api_token>\"\n\n print(\"Loaded credentials. Nference SDK ready to use.\\n\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a metric_id column to the df if it is not already present
|
def define_metric(df, source):
    valid_sources = ['dalynator', 'codem', 'epi', 'como', 'dismod']
    assert source in valid_sources, "Must pass one of %s" % valid_sources
    if 'metric_id' not in df.columns:
        # look up the metric_id for this source in the helper csv that lives
        # three directories above this file
        helper_dir = os.path.dirname(os.path.dirname(
            os.path.dirname(os.path.abspath(__file__))))
        met_map = pd.read_csv(
            '%s/bin/get_pct_change_helpers/source_metric_map.csv' % helper_dir)
        # .loc replaces the long-deprecated .ix indexer
        metric_id = met_map.set_index('source').loc[source, 'metric_id']
        df['metric_id'] = metric_id
    df = df.sort_values(by='metric_id').reset_index(drop=True)
    return df
|
[
"def add_integrity_metric(self, metric):\n if metric is None:\n return\n\n for m in self._integrity_metrics:\n if metric == m:\n # add to existing metric\n m.merge(metric)\n break\n else:\n self._integrity_metrics.add(metric)",
"def track_metric(self, metric):\n if not hasattr(self, \"_metrics\"):\n raise RuntimeError(\n \"Need to call Evaluator.__init__ before adding metrics\")\n if not isinstance(metric, metrics.Metric):\n raise TypeError(\n \"Evaluator.track_metric() passed type %s, not a tfe.metrics.Metric\" %\n (type(metric),))\n if metric.name in self._metrics:\n if metric is self._metrics[metric.name]:\n return metric\n raise ValueError(\n \"Attempt to add two Metrics with the name '%s' to the same Evaluator \"\n \"'%s'\" % (metric.name, self.name))\n # pylint: disable=protected-access\n if hasattr(metric, \"_added_to_an_evaluator\"):\n raise ValueError(\"Metric %s already added to Evaluator %s\" %\n (metric.name, metric._added_to_an_evaluator))\n metric._added_to_an_evaluator = self.__class__.__name__\n # pylint: enable=protected-access\n self._metrics[metric.name] = metric\n return metric",
"def db_update_metrics():\n db_put_metrics(get_metric_list())",
"def register_global_metrics(metrics):\n state.global_metrics.update({m.name: m for m in metrics})",
"def compute_daily_metrics(df, metric):\r\n df0 = df[df['Metric'] == metric]\r\n new_metric = 'Daily ' + metric\r\n identities = list(\r\n set(\r\n df0['Country - Region - Age - Gender'].values\r\n )\r\n )\r\n\r\n for ide in identities:\r\n print(ide)\r\n df1 = df0[df0['Country - Region - Age - Gender'] == ide]\r\n L = [(index, row) for index, row in df1.iterrows()]\r\n\r\n new_rows_list = []\r\n\r\n for row_number in range(len(L) - 1):\r\n row0 = L[row_number][1]\r\n row1 = L[row_number+1][1]\r\n\r\n for j in range(row0.gap_in_day + 1, row1.gap_in_day + 1):\r\n new_row = row0.copy()\r\n new_row.gap_in_day = j\r\n new_row.Metric = new_metric\r\n\r\n try:\r\n new_row.Value = int(\r\n 100 * (row1.Value - row0.Value) / (row1.gap_in_day - row0.gap_in_day)\r\n ) / 100\r\n\r\n except ZeroDivisionError:\r\n new_row.Value = None\r\n\r\n new_rows_list.append(new_row)\r\n \r\n for i in range(len(new_rows_list)):\r\n new_row = new_rows_list[i]\r\n df.loc[-1] = new_row\r\n df.index = df.index + 1\r\n\r\n print('daily metric computed')\r\n return df",
"def stable_id():\n return 'ExampleMetric'",
"def add_metric_to_dataset(self, metric, dataset):\n if dataset is not None and metric is not None:\n LOGGER.debug(\"Adding metric '%s' to dataset '%s'\" % (metric.name,\n dataset.name))\n dataset.metrices.append(metric)\n self.__SESSION.commit()\n LOGGER.debug(\"Metric has been added successfully!\")\n else:\n raise ValueError(\"The value of 'dataset' and 'metric' parameters\\\n shouldn't be None\")",
"def update_summary_table(self, logger_id):\n cursor = self.connection.cursor()\n select_query = (\"\"\"SELECT COUNT(*), MIN(Time_GMT), MAX(Time_GMT)\n FROM cnx_logger_temperature WHERE logger_id=%s\"\"\")\n cursor.execute(select_query, (logger_id,))\n select_results = cursor.fetchone()\n cursor.close()\n if select_results is not None:\n cursor = self.connection.cursor()\n try:\n update_query = \"\"\"INSERT INTO `cnx_logger_metadata`\n (logger_id, logger_count, logger_min_date, logger_max_date)\n VALUES (%s, %s, %s, %s)\n ON DUPLICATE KEY UPDATE\n logger_count = VALUES(logger_count),\n logger_min_date = VALUES(logger_min_date),\n logger_max_date = VALUES(logger_max_date)\"\"\"\n cursor.execute(update_query, (logger_id, select_results[0], \\\n select_results[1], select_results[2]))\n self.connection.commit()\n cursor.close()\n except MySQLdb.DatabaseError:\n self.connection.rollback()",
"def add_slack_id_to_df(self, data_frame, email_column):\n # Check for users\n self.update_users()\n # Add an empty Column\n data_frame['ID'] = ''\n # Fill the Column with the corresponding Slack IDs\n for row_df in range(0, len(df)):\n df_email = df[email_column][row_df]\n for slack_user in self.users:\n try:\n if str(df_email) == slack_user['profile']['email']:\n data_frame.at[row_df, 'ID'] = slack_user['id']\n except KeyError:\n logging.debug('No Email found for user with id {}'.format(slack_user['id']))\n return data_frame",
"def test_post_metric_target_with_measurements(self, request):\n self.database.measurements.find_one.return_value = dict(_id=\"id\", sources=[])\n\n def set_measurement_id(measurement):\n measurement[\"_id\"] = \"measurement_id\"\n\n self.database.measurements.insert_one.side_effect = set_measurement_id\n request.json = dict(target=\"10\")\n self.assertEqual(\n dict(_id=\"measurement_id\", end=\"2019-01-01\", sources=[], start=\"2019-01-01\", status=None, value=None),\n post_metric_attribute(\"report_uuid\", \"metric_uuid\", \"target\", self.database))",
"def generate_metric(dedup_df: pd.DataFrame) -> pd.DataFrame:\n try:\n dedup_us_df = dedup_df[dedup_df['Country_Region'] == 'US']\n cleaned_df = dedup_us_df.copy()\n cleaned_df['month'] = pd.DatetimeIndex(cleaned_df['Date']).month\n cleaned_df['year'] = pd.DatetimeIndex(cleaned_df['Date']).year\n metric_df = cleaned_df.groupby(['Province_State', 'year', 'month'])[\"ConfirmedCases\"].sum()\n LOG.info(f\"data: generate_metric [{metric_df.shape[0]}] records\")\n except Exception as error:\n LOG.exception(f\"data: generate_metric could not be completed. {error}\")\n return metric_df",
"def add_metric(self, source, col_key, label=None, color=None):\n try:\n col_idx = [i for (i, col) in enumerate(source.source['columns']) if col['label'] == col_key][0]\n except:\n #logger.warning('could not find column named %s in datasoure:\\n%s', col_key, source)\n return\n metric = copy.deepcopy(self.default_metric)\n metric['index'] = self.__index__\n if label is not None:\n metric['options']['label'] = label\n metric['metric']['source_id'] = source.source['id']\n metric['metric']['source_col'] = col_idx\n self.__index__ += 1\n self.graph['root']['children'][Graph.METRIC_CHILD_ID]['children'].append(metric)",
"def add_id(df, id_col=\"id\"):\n\n return df.withColumn(id_col, monotonically_increasing_id())",
"def _add_hashed_uid(df):\r\n logger.info('Start adding hashed uid')\r\n uids = (df.apply(lambda row: row['url'], axis=1)\r\n \t .apply(lambda url: hashlib.md5(bytes(url.encode())))\r\n \t .apply(lambda hash_obj: hash_obj.hexdigest())\r\n \t )\r\n df['uid'] = uids\r\n df = df.set_index('uid')\r\n\r\n return df",
"def _add_newspaper_uid_column(df, newspaper_uid):\r\n df['newspaper_uid'] = newspaper_uid\r\n logger.info('Newspaper uid added to dataframe')\r\n\r\n return df",
"def record_metric(event, metric_name, metric_unit, metric_value):\n metrics.add_dimension(name=\"tenant_id\", value=event['requestContext']['authorizer']['tenantId'])\n metrics.add_metric(name=metric_name, unit=metric_unit, value=metric_value)\n metrics_object = metrics.serialize_metric_set()\n metrics.clear_metrics()\n print(json.dumps(metrics_object))",
"def _add_metrics_group_if_allowed(self, metrics_group, metrics_group_map, metrics_group_name, dimension_was_empty):\n if metrics_group_map['ignore_empty_dimensions']:\n self._metrics.add(metrics_group)\n elif not dimension_was_empty:\n self._metrics.add(metrics_group)\n else:\n self._polling_status.handle_exception(metrics_group_name, PanoptesMetricDimensionNullException())",
"def add_metrics(self):\n \n for metric in self.METRICS:\n #getlogger().info('creating metric %s', metric.label)\n self.perf_patterns[metric.label] = reduce(self.stdout, self.num_tasks, metric.column, metric.function)\n self.reference[metric.label] = (0, None, None, metric.unit) # oddly we don't have to supply the \"*\" scope key??",
"def build_reduced_dataframe(self):\n if self.reduced_dataframe is None:\n self.reduced_dataframe = self.dataframe[[CASE_CONCEPT_NAME, self.activity_key, DEFAULT_TIMESTAMP_KEY]]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize the UDP connection. Afterwards the connection is available via remote_control.udp. REQUIRED BEFORE USING OTHER FUNCTIONS. Returns the UDP connection.
|
def init_udp_connection():
global udp
udp = UDPConnection()
return udp
|
[
"def init_UDP_connection(self):\n import socket\n\n self.sock_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock_recv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock_recv.bind((RIO_IP, UDP_RECV_PORT))",
"async def connect(self) -> None:\n udp_client_factory = UDPClient.UDPClientFactory(\n self.local_addr[0],\n multicast=self.multicast,\n data_received_callback=self.data_received_callback,\n )\n loop = asyncio.get_running_loop()\n if self.multicast:\n sock = UDPClient.create_multicast_sock(self.local_addr[0], self.remote_addr)\n (transport, _) = await loop.create_datagram_endpoint(\n lambda: udp_client_factory, sock=sock\n )\n self.transport = transport\n\n else:\n (transport, _) = await loop.create_datagram_endpoint(\n lambda: udp_client_factory,\n local_addr=self.local_addr,\n remote_addr=self.remote_addr,\n )\n self.transport = transport",
"def udp_connect():\r\n try:\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\r\n except socket.error as emsg:\r\n print('Error creating udp socket')\r\n return False, emsg\r\n\r\n return True, sock",
"def __init__ (self, UDPlist_p, GPIOpin):\n try: \n self.UDPlist = UDPlist_p\n self.sock=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.bind (('', UDP_PORT))\n hasUDP= True\n if GPIOpin == 0:\n self.hasGPIO = False\n else:\n self.hasGPIO = True\n self.GPIOpin=GPIOpin\n GPIO.setmode (GPIO.BCM)\n GPIO.setwarnings (False)\n GPIO.setup (self.GPIOpin, GPIO.OUT)\n except socket.error:\n hasUDP = False\n print ('AHF_UDPTrig failed to create a socket.')",
"def __init__(self,UDP_IP=\"127.0.0.1\",HOST_PORT=5005,CLIENT_PORT=5006,drone_address=\"\"):\n self.host = UDP_IP\n self.port = HOST_PORT\n self.HOST_SERVER_ADDRESS = (UDP_IP,HOST_PORT)\n self.NODE_SERVER_ADDRESS =(UDP_IP,CLIENT_PORT)\n\n\n self.controller = DroneController(connection_string=drone_address)\n try:\n self.controller.connect()\n pass\n # Bad TCP connection\n except socket.error:\n print('No server exists!')\n # Bad TTY connection\n except exceptions.OSError as e:\n print\n 'No serial exists!'\n # API Error\n except dronekit.APIException:\n print\n 'Timeout!'\n # Other error\n except Exception as e:\n print('Some other error!'+e.message)",
"def updateLocalSettings():\n global UDP_IP\n global UDP_PORT\n global UDP_TIMEOUT\n\n UDP_IP = str(getSetting('udpIP'))\n UDP_PORT = int(getSetting('udpPort'))\n UDP_TIMEOUT = float(getSetting('udpTimeout'))",
"def socket_open(self):\n log.info(\"Creating UDP socket %s:%d for communication with the client\",\n self.receiverIP, self.receiverPort)\n\n try:\n self.receiverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.receiverSocket.bind((self.receiverIP, self.receiverPort))\n except Exception as e:\n log.error(\"Could not create UDP socket for communication with the client!\")\n log.debug(e)\n traceback.print_exc()",
"def __init__(self, localAddr, connID):\n self.socket = None\n self.addr = localAddr\n self.connID = connID\n if Convert.is_valid_ipv4_address(localAddr):\n self.socket = socket.socket(\n socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_L2TP)\n self.socket.setblocking(False) # Set the socket to non-block mode\n self.socket.bind((localAddr, connID))\n self.logger.info(\n \"Create L2TP socket and bind to it, local IP address:%s, local Connection ID:%d, socket: %d\" %\n (localAddr, connID, self.socket.fileno()))\n elif Convert.is_valid_ipv6_address(localAddr):\n self.socket = socket.socket(\n socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_L2TP)\n self.socket.setblocking(False) # Set the socket to non-block mode\n self.socket.bind(('', connID))\n self.logger.info(\n \"Create L2TP socket and bind to any adress, local Connection ID:%d, socket: %d\" %\n (connID, self.socket.fileno()))\n else:\n self.logger.info(\n \"Create L2TP socket failed, invalid local IP address:%s, local Connection ID:%d\" %\n (localAddr, connID))\n self.socket = None",
"def _connect(self, target, listen, udp, ipv6, retry):\n ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM\n fam = socket.AF_INET6 if ipv6 else socket.AF_INET\n self.sock = socket.socket(fam, ty)\n if listen:\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind(target)\n if not udp:\n self.sock.listen(1)\n conn, addr = self.sock.accept()\n self.sock.close()\n self.sock = conn\n self.peer = addr\n else:\n self.buf, self.peer = self.sock.recvfrom(4096)\n self.sock.connect(self.peer)\n self.logger.buffering(self.buf)\n else:\n while retry >= 0:\n try:\n self.sock.connect(target)\n except (socket.gaierror, socket.herror) as exc:\n raise errors.NetcatError('Could not connect to %r: %r' \\\n % (target, exc))\n except socket.error as exc:\n if retry:\n time.sleep(0.2)\n retry -= 1\n else:\n raise errors.NetcatError('Could not connect to %r: %r' \\\n % (target, exc))\n else:\n break\n self.peer = target\n self.logger.connected(self.peer)",
"async def test_protocol_factory_udp():\n test_url1: str = \"udp://localhost\"\n config: dict = {\"COT_URL\": test_url1}\n reader, writer = await pytak.protocol_factory(config)\n assert isinstance(reader, pytak.asyncio_dgram.aio.DatagramServer)\n assert isinstance(writer, pytak.asyncio_dgram.aio.DatagramClient)",
"def init_socket(host, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((host, port))\n return sock",
"def initiate_server(self):\n try:\n print(f'Server started, listening on IP address {self.server_ip}')\n self.server_socket_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n self.server_socket_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket_tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.udp_thread = Thread(target=self.activate_server_udp)\n self.tcp_thread = Thread(target=self.activate_server_tcp)\n self.udp_thread.start()\n self.tcp_thread.start()\n self.udp_thread.join()\n self.tcp_thread.join()\n self.initiate_game()\n self.close_connections_with_clients()\n time.sleep(0.5)\n self.reset_server()\n except Exception as e:\n print(e)\n time.sleep(1)\n self.server_socket_tcp.close()",
"def __init__(self, config: Dict[str, str]) -> None:\n super().__init__(config)\n self.ipv4 = config['ipv4']\n self.port = config['port']\n logging.debug(\"Building a SpeakerUDP\")\n self.transport = None\n self.protocol = None\n self._do_clean_up = False\n self._sent_uuids = collections.OrderedDict()\n self._late_uuids = collections.OrderedDict()",
"def initiate(self):\n \n self.should_stop = False\n self.initial_values = False\n self.tasks = []\n self.connection = None\n \n self.ipv4 = [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in\n [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]\n self._print(\"Local address: {}\".format(self.ipv4))\n \n self.connection = self.create_or_verify_connection()\n \n if self.conn:\n self.thread = threading.Thread(target=self._database_thread)\n self.thread.start()\n self._print(\"Started database thread.\")",
"def main():\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((UDP_IP, UDP_PORT))\n ser = serial.Serial('/dev/ttyUSB0', 19200)\n while True:\n data, addr = sock.recvfrom(1024)\n ser.write(data)",
"def rdt_peer(peer_ip, port):\r\n\t######## Your implementation #######\r\n\r\n\tglobal __peeraddr\r\n\t__peeraddr = (peer_ip, port)",
"def udp_options(self, udp_options):\n self._udp_options = udp_options",
"def __init__(self,port=DEFAULT_PORT,keys=None,pubkeys=None,address='',multicast=None, \n ecc_keypair=None, handle=None, allow_guest=False,daemon=False,execute=None):\n\n\n #Not implemented yet\n self.broker=False\n self.ignore = {}\n\n\n self.keys = keys or {}\n self.pubkeys= pubkeys or {}\n\n self.port = port\n self.address = (address, port)\n\n\n self.guest_key = None\n self.allow_guest = allow_guest\n\n\n def cl(*args):\n self.close()\n self.clfun = cl\n #If we have a handle, make it so that if the handle gets collected the server thread stops\n if handle:\n self.handle = weakref.ref(handle,cl)\n \n \n self.waitingForAck = weakref.WeakValueDictionary()\n\n # Create the socket\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) \n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) \n # Bind to the server address\n self.sock.bind(self.address)\n self.sock.settimeout(1)\n\n self.sendsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sendsock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) \n self.sendsock.bind((self.address[0],0))\n self.sendsock.settimeout(1)\n\n self.mcastgroup = multicast\n #Subscribe to any requested mcast group\n self.msock_joined = False\n if multicast:\n try:\n common.addMulticastGroup(self.sock, multicast)\n self.msock_joined = True\n except OSError as e:\n if e.errno==19:\n pass\n else:\n raise\n #A list of all the registers and functions indexed by number\n self.registers = {}\n\n #If we should send system info like battery status,\n #wifi signal, and temperature\n self.enableStatusReporting = False\n\n self.ecc_keypair = ecc_keypair\n self.running = True\n \n\n self.knownclients = collections.OrderedDict()\n\n self.counter = \"don'tusethis\"\n\n self.messageTargets = {}\n\n self.targetslock = threading.Lock()\n self.lock = threading.Lock()\n\n with common.lock:\n if not common.allow_new:\n raise RuntimeError(\"System shutting down\")\n common.cleanup_refs.append(weakref.ref(self))\n\n #Function used to execute RPC callbacks and handlers and such\n\n self.execute = execute or pavillion.execute\n\n #Max number of clients we keep track of, including ignored ones\n self.maxclients = 512\n t = threading.Thread(target=self.loop)\n t.name+=\":PavillionServer\"\n t.daemon = daemon\n t.start()",
"def openRtpPort(self):\r\n\t\t#-------------\r\n\t\t# TO COMPLETE\r\n\t\t#-------------\r\n\t\t# Create a new datagram socket to receive RTP packets from the server\r\n\t\t# self.rtpSocket = ...\r\n\t\t\r\n\t\t# Set the timeout value of the socket to 0.5sec\r\n\t\t# ...\r\n\t\tself.rtpSocket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\r\n\t\tself.rtpSocket.settimeout(0.5)\r\n\t\ttry:\r\n\t\t\tself.rtpSocket.bind(('', self.rtpPort))\r\n\t\t\tprint(self.rtpPort)\r\n\t\t\tprint(\"Connection Success\")\r\n\t\texcept:\r\n\t\t\tprint(\"Connection Error\")",
"def openRtpPort(self):\n\t\t# Create a new datagram socket to receive RTP packets from the server\n\t\tself.rtpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n \n\t\t# Set the timeout value of the socket\n\t\tself.rtpSocket.settimeout(0.5)\n\t\t\n\t\t# Bind the socket to the address using the RTP port given by the client user.\n\t\tself.rtpSocket.bind(('',self.rtpPort))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get data from the recording PC. get_command is one of the get commands, e.g. Command.GET_FZ or Command.GET_VERSION.
|
def get_data(get_command):
    udp.send(get_command)
    d = udp.receive(1)
    try:
        # strip the Command.VALUE prefix and decode the payload
        return loads(d[len(Command.VALUE):])
    except Exception:
        # no reply or an undecodable payload: report failure with None
        return None
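A minimal usage sketch (an illustration, not from the source), assuming udp, Command, and loads are the same module-level objects used by get_data:

# Hypothetical usage: query the recording PC for a force value and the firmware version.
fz = get_data(Command.GET_FZ)            # parsed payload, or None on timeout/decode error
version = get_data(Command.GET_VERSION)
if fz is not None:
    print("Fz:", fz, "firmware:", version)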
|
[
"def onGetCmds(self):\n strJsonResponse = \"\"\n self.dataLock.acquire()\n\n cmds = dict()\n cmds[\"Commands\"] = []\n for cmd in self.commandMap.values():\n desc = cmd.describe(False)\n cmdURL = \"/cmd?name={}\".format(desc[\"Command\"][\"Name\"])\n for arg in desc[\"Command\"][\"Args\"]:\n cmdURL = cmdURL + \"&{}=\".format(arg)\n desc[\"Command\"][\"URL\"] = cmdURL\n cmds[\"Commands\"].append(desc)\n\n strJsonResponse = json.dumps(cmds, ensure_ascii=False)\n\n self.dataLock.release()\n return strJsonResponse",
"def get_cmd(db_redis):\n db_redis.rdb_pipe.get(REDIS_CMD_THROTTLE)\n db_redis.rdb_pipe.get(REDIS_CMD_DROLL)\n db_redis.rdb_pipe.get(REDIS_CMD_DPITCH)\n db_redis.rdb_pipe.get(REDIS_CMD_DYAW)\n db_redis.rdb_pipe.get(REDIS_CMD_AUX1)\n db_redis.rdb_pipe.get(REDIS_CMD_AUX2)\n cmd_data = db_redis.rdb_pipe.execute()\n return list(map(float, cmd_data))",
"def get_command(data):\n return data.get(\"command\")",
"def readcmd(self):\n\n b = self.read(4)\n\n app = b[0]\n verb = b[1]\n n = b[2] + (b[3] << 8)\n\n if n > 0:\n data = self.read(n)\n else:\n data = b''\n\n if len(data) != n:\n raise ValueError('Facedancer expected ' + str(n) \\\n + ' bytes but received only ' + str(len(data)))\n\n cmd = FacedancerCommand(app, verb, data)\n\n if self.verbose > 1:\n print(\"Facedancer Rx command:\", cmd)\n\n return cmd",
"def _get_zdo_command_data(self):\n pass",
"def read(self):\r\n # byte[0]: 0x01 for query cmd\r\n # byte[1]: length of query cmd\r\n # byte[2:]: bytes of command string\r\n \r\n len_cmd = 0\r\n bytes_to_write = bytes([0x01]) + len_cmd.to_bytes(2, 'little') \r\n #print(bytes_to_write, len(bytes_to_write))\r\n try:\r\n self.instr.write_raw(bytes_to_write)\r\n data = self.instr.read_raw()\r\n return data\r\n except ValueError:\r\n print(\"uart failed read\")",
"def getmode(self, cmd, cams):\n\n for n in cams:\n if self.cams[n] != None and not self.cams[n].isReady():\n if cmd:\n cmd.fail('text=\"camera busy, command ignored\"')\n return\n for n in cams:\n if self.cams[n] != None:\n mode = self.cams[n].getMode()\n if cmd:\n cmd.respond('text=\"AGC[%d] readout mode: %d\"' % (n + 1, mode))\n cmd.inform('text=\"Camera getmode command done\"')\n cmd.finish()",
"def get_output(self, command, pause=0):\r\n self.child.send(command + \"\\n\")\r\n time.sleep(pause)\r\n start_failed = self.child.expect([\"bluetooth\", pexpect.EOF])\r\n\r\n if start_failed:\r\n raise BluetoothctlError(\"Bluetoothctl failed after running \" + command)\r\n\r\n return self.child.before.split(\"\\r\\n\")",
"def get_keyboard_command(self):\r\n key_pressed_list = pygame.key.get_pressed()\r\n cmd_1P = []\r\n cmd_2P = []\r\n\r\n if key_pressed_list[pygame.K_LEFT]: cmd_1P.append(BRAKE_cmd)\r\n if key_pressed_list[pygame.K_RIGHT]:cmd_1P.append(SPEED_cmd)\r\n if key_pressed_list[pygame.K_UP]:cmd_1P.append(LEFT_cmd)\r\n if key_pressed_list[pygame.K_DOWN]:cmd_1P.append(RIGHT_cmd)\r\n\r\n if key_pressed_list[pygame.K_a]: cmd_2P.append(BRAKE_cmd)\r\n if key_pressed_list[pygame.K_d]:cmd_2P.append(SPEED_cmd)\r\n if key_pressed_list[pygame.K_w]:cmd_2P.append(LEFT_cmd)\r\n if key_pressed_list[pygame.K_s]:cmd_2P.append(RIGHT_cmd)\r\n\r\n return {\"ml_1P\":cmd_1P,\r\n \"ml_2P\":cmd_2P}",
"def get_command_output():\n commands = session.query(Command)\n result=session.execute(commands)\n json_data=[]\n for r in result:\n json_data.append({\n 'id' : r[0],\n 'command_string' : r[1],\n 'length' : r[2],\n 'duration' : r[3],\n 'output' : r[4].decode()\n })\n if not json_data:\n return \"Commands not found\"\n json_data = json.dumps(json_data)\n return jsonify(json.loads(json_data))",
"def _read(self, command, nbytes, arguments=b\"\"):\n padding = b\"\\x00\" * nbytes\n tx = struct.pack(\"B\", command) + arguments + padding\n self.programmer.flash_mode()\n self.programmer.select()\n rx = self.programmer.write(tx)\n self.programmer.unselect()\n return rx[1:]",
"def sensor_cli():",
"def commandOutput(con, data):",
"def sendStatusKeys(self, cmd): \n \n cmd.inform('text=\"Number of AG cameras = %d\"' % self.numberOfCamera)\n for n in range(nCams):\n if self.cams[n] != None:\n if self.cams[n].isReady():\n tempstr = '%5.1f' % self.cams[n].getTemperature()\n cmd.inform('agc%d_stat=READY' % (n + 1))\n else:\n tempstr = '<%5.1f>' % self.cams[n].temp\n cmd.inform('agc%d_stat=BUSY' % (n + 1))\n cmd.inform('text=\"[%d] %s SN=%s status=%s temp=%s regions=%s bin=(%d,%d) expArea=%s\"'\n % (n + 1, self.cams[n].devname, self.cams[n].devsn,\n self.cams[n].getStatusStr(), tempstr, self.cams[n].regions,\n self.cams[n].hbin, self.cams[n].vbin, self.cams[n].expArea))\n else:\n cmd.inform('agc%d_stat=ABSENT' % (n + 1))",
"def get_step_info(self, command_name):\n r = requests.get(url=ENDPOINT+\"step/\"+command_name)\n data = r.json()\n rospy.logdebug(\"get_step_info request gave : {}\".format(data))\n return data",
"def run_supported_commands(self):\n\n if(self.supported_commands == [] or self.supported_commands is None):\n logger.error('No commands to apply!')\n return -1\n elif(self.connection is None):\n logger.error('Connection to the car not found!')\n return -2\n else:\n\n output = []\n output.append('\\r\\n')\n output.append('-----------------------------------------------\\n')\n output.append(f\"Data retrieved on: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\\n\")\n output.append('-----------------------------------------------\\n')\n\n ## Run commands one by one\n for cmd in self.supported_commands:\n\n try: \n response = self.connection.query(obd.commands[cmd[0]])\n\n if(response.is_null()):\n output.append(f\"[{cmd[0]}] => None\\r\\n\") \n else:\n output.append(f\"[{cmd[0]}] => {response.value}\\r\\n\")\n \n time.sleep(0.1)\n except Exception as inst:\n logger.error(f\"Exception: {str(inst)}\")\n\n output_str = ''.join(output)\n io.write_to_file(self.output_file, \"a+\", output_str)\n return 0",
"def commands(self):\n return self.dataset.commands",
"def GetCommandRequest(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_command(command):\n cursor = database.get_db().cursor()\n return cursor.execute('SELECT command, admin_only, response FROM commands where command = ?', (command, )).fetchone()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Polls for multiple events, e.g. [Command.CHANGED_LEVEL, Command.CHANGED_LEVEL2]. Returns a tuple (event_type, event_type_data), or (None, None) if no matching event was received.
|
def poll_multiple_events(event_type_list):
rcv = udp.poll()
if rcv is not None:
for event_type in event_type_list:
if bytes_startswith(rcv, event_type):
x = loads(rcv[len(event_type):])
return (event_type, x)
return (None, None)
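A short usage sketch (hedged, not from the source), assuming the Command constants named in the query exist in the surrounding module:

# Hypothetical polling loop: wait until either level-change event arrives.
while True:
    event_type, payload = poll_multiple_events([Command.CHANGED_LEVEL, Command.CHANGED_LEVEL2])
    if event_type is not None:
        print("received", event_type, "with payload", payload)
        break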
|
[
"def events(self):\n if self.connection_active:\n if self.buff_out:\n event = select.POLLOUT\n else:\n event = select.POLLIN\n else:\n event = select.POLLOUT\n\n return event",
"def get_result_events(self, event_type=None, response_pos=0):\n event_names = ['INFO', 'WARNING', 'ERROR']\n response = self.responses[response_pos]\n if event_type is None:\n return response['events'] \\\n if 'events' in response and response['events'] else []\n elif event_type in event_names:\n return [event for event in response['events'] if event['type'] == event_type] \\\n if 'events' in response and response['events'] else []\n else:\n msg = 'Argument \"type\" must be one of the following values: \"{}\"'\n raise ValueError(msg.format(', '.join(event_names)))",
"def check_for_event(self):\r\n a=self.read_chat(0)\r\n event=False\r\n finmes=\"\"\r\n next=False\r\n for m in a:\r\n if next==True:\r\n finmes=m\r\n break\r\n\r\n elif \"event\" in m:\r\n event=True\r\n next=True\r\n\r\n\r\n if event==True:\r\n finmes+=\" \"\r\n t1=finmes[finmes.find(\"Type\")+5:-1]\r\n\r\n self.write_to_chat(t1)\r\n\r\n t2=finmes[finmes.find(\"type\")+5:-1]\r\n self.write_to_chat(t2)\r\n\r\n for i in range(5):\r\n self.write_to_chat(t2)\r\n sleep(0.8)\r\n self.write_to_chat(t1)\r\n sleep(0.8)\r\n\r\n return True\r\n\r\n else:\r\n return False",
"def handle_event(event):\n for (_, states) in get_registered().items():\n data = states.get(event, None)\n if data is None:\n continue\n handle_single(data)",
"async def manage_events(self, events: Optional[List[List[str]]] = None) -> Union[list, None]:\n\n # Build the request data\n url: str = self.EVENTS_URL % self.server\n data: dict = {'id': self.client_id}\n\n # If events weren't selected, get them\n if not events:\n events = await self._request(url, data)\n\n # If we got events back\n if isinstance(events, list):\n\n # Handle the events and return their results\n # TODO Allow underscores, case insensitivity for method names in handler\n return [self.loop.create_task(self._handle_event(event)) for event in events]\n\n # If we got a dict back, there was a system error & we didn't get anything back at all\n if isinstance(events, dict):\n await self._handle_event(['systemError', events])",
"def parse_bot_commands(slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n if any(key in event[\"text\"].lower() for key in keywords) and not \"thread_ts\" in event:\n message = event[\"text\"]\n #print(event)\n return message, event[\"channel\"], \"\"\n elif any(key in event[\"text\"].lower() for key in keywords):\n message = event[\"text\"]\n #print(event)\n return message, event[\"channel\"], event[\"thread_ts\"]\n else:\n user_id, message = parse_direct_mention(event[\"text\"])\n if user_id == bot_id:\n return message, event[\"channel\"], event[\"thread_ts\"]\n \n return None, None, None",
"def handle_poll_event(self, event):\n pass",
"def getEvents(self):\n while True:\n response = requests.get(self.longPollBaseUrl, self.longPollPayload)\n jsonResponse = json.loads(response.text)\n logger.debug('Get response from longPoll - {0}'.format(jsonResponse))\n\n if 'ts' not in jsonResponse:\n self._setUpLongPoll()\n continue\n\n self._updateTs(jsonResponse['ts'])\n yield jsonResponse['updates']",
"def test_get_multi_run_events(self):\n pass",
"def fetch_event(self):\n\n event = json.loads(requests.post(server + '/events', data={'id': self.client_id}).text)[0]\n\n if event[0] == 'gotMessage':\n return (Event.GOTMESSAGE, event[1])\n\n if event[0] == 'strangerDisconnected':\n return (Event.STRANGERDISCONNECTED, None)\n\n if event[0] == 'typing':\n return (Event.TYPING, None)\n\n return event",
"def parse_bot_commands(slack_events):\n for event in slack_events:\n if event['type'] == \"message\" and \"subtype\" not in event:\n user_id, message = parse_direct_mention(event['text'])\n if user_id == Hal9000_id:\n return message, event['channel']\n return(None, None)",
"def EventChecker():\n eventList = []\n global nosepoke\n if GPIO.input(nose_poke) and nosepoke == 0:\n eventList.append('nosepoke on')\n nosepoke = 1\n if GPIO.input(nose_poke) == False and nosepoke == 1:\n eventList.append('nosepoke off')\n nosepoke = 0\n if GPIO.input(left_in):\n eventList.append('left screen')\n if GPIO.input(right_in):\n eventList.append('right screen')\n Record(eventList)\n return()",
"async def process_events(self, events: List[EventData]):\n pass",
"def test_on_multiple_handlers():\n\n obs = Observable()\n nose.assert_false(obs.events)\n\n results = []\n\n def some_test(*args, **kw):\n results.append(1)\n\n def some_test_2(*args, **kw):\n results.append(2)\n\n obs.on('some_test', some_test, some_test_2)\n nose.assert_equals(len(obs.events['some_test']), 2)\n\n obs.trigger('some_test')\n nose.assert_equals(results, [1,2])",
"def process_events(self):\n# print \"in FuncEventQueue.process_events()\"\n while self.events:\n event = self.events.pop(0)\n\n print 'processing event, type = ' + str(event['type'])\n\n\n # TODO - remove\n if event['type'] == 'CLIENT_LEFT':\n client_id = event['data']['clientID']\n player = self.blakes7.players.client_id_to_player(client_id)\n\n # TODO - remove player or set status to inactive,\n # if has a user/pass\n if player:\n player['clientID'] = None\n player['clientData'] = None\n player['status'] = 'INACTIVE'\n\n # remove from clients list\n self.blakes7.players.client_left(client_id)\n\n # send broadcast message of new player list\n self.blakes7.players.broadcast_player_list()",
"def testCheckLastTwoEvents(self):\n event_tester = EventTester()\n event1 = Event()\n event2 = Event()\n event3 = Event()\n event_tester.notify(event1)\n event_tester.notify(event2)\n event_tester.notify(event3)\n self.assertEqual([event1, event2], event_tester.last_n_events(2))",
"def do_events(self, arg):\n\n event_types = arg.upper().split()\n\n if 'CLEAR' in event_types:\n del self._received_events[:]\n return format('cleared event backlog', *STANDARD_OUTPUT)\n\n return '\\n'.join([format(str(e), *STANDARD_OUTPUT) for e in self.get_events(*event_types)])",
"def _read_event(self):\n for event in self._fanotify.read_events():\n self._put_event(event)",
"def get_events(data):\n\n return data[\"events\"]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If the Authorization header was generated 61 seconds in the past, then a 401 is returned
|
def test_if_61_seconds_in_past_401_returned(api_client):
past = timezone.now() - datetime.timedelta(seconds=61)
with freeze_time(past):
auth = auth_sender().request_header
response = api_client.get(
reverse('activity-stream'),
content_type='',
HTTP_AUTHORIZATION=auth,
HTTP_X_FORWARDED_FOR='1.2.3.4, 123.123.123.123',
)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
error = {'detail': 'Incorrect authentication credentials.'}
assert response.json() == error
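A hedged companion sketch (an assumption, not from the source): if the accepted clock skew is 60 seconds, as the 61-second case implies but does not state, a header generated 59 seconds in the past should still authenticate. The fixture names mirror the test above.

def test_if_59_seconds_in_past_200_returned(api_client):
    past = timezone.now() - datetime.timedelta(seconds=59)
    with freeze_time(past):
        auth = auth_sender().request_header
    response = api_client.get(
        reverse('activity-stream'),
        content_type='',
        HTTP_AUTHORIZATION=auth,
        HTTP_X_FORWARDED_FOR='1.2.3.4, 123.123.123.123',
    )
    assert response.status_code == status.HTTP_200_OK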
|
[
"def test_if_61_seconds_in_past_401_returned(api_client):\n past = datetime.datetime.now() - datetime.timedelta(seconds=61)\n with freeze_time(past):\n auth = _auth_sender().request_header\n response = api_client.get(\n reverse('activity-stream:activity-stream'),\n content_type='',\n HTTP_AUTHORIZATION=auth,\n HTTP_X_FORWARDED_FOR='1.2.3.4, 123.123.123.123',\n )\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n error = {'detail': 'Incorrect authentication credentials.'}\n assert response.json() == error",
"def test_status_code_code_for_empty_authorization_value(self):\n\n resp = HttpResponse()\n http_response = resp. get_http_reponse()\n expected_status_code = 401\n received_status_code = http_response.status_code\n self.assertEqual(expected_status_code, received_status_code)",
"def auth_oidc_req(self):\n\n current_time = time.time()\n if self.token_json and self.token_time:\n if current_time - self.token_time < 30:\n print(\"Warning: token was requested less than 30 seconds ago. Will not renew this time.\")\n return\n \n self.token_time = current_time\n token_req_data = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'audience': self.audience\n }\n ret = requests.post(cern_api_url, data=token_req_data, verify=self.cert_verify, proxies=self.proxies)\n if ret.status_code!=200:\n raise Exception(\"Unable to acquire OAuth token: \" + ret.content.decode())\n\n self.token_json = json.loads(ret.content)\n self.token_headers = {'Authorization':'Bearer ' + self.token_json[\"access_token\"], 'content-type':'application/json'}",
"def challenge():\n return Response(\n status=401, headers={\n \"WWW-Authenticate\": \"Basic realm=\\\"Dummy API\\\"\"\n })",
"def test_get_with_wrong_auth(self):\n response = self.client.get(\n '/api/v1/restock/',\n HTTP_AUTHORIZATION='Token {}'.format(Faker().pyint())\n )\n\n # Verify access denied\n self.assertEqual(response.status_code, 401)",
"def test_cron_decorator_unauthed_user(self):\n response = wsgi.get_response('/t_cron')\n self.assertEqual(401, response.status_int)",
"def test_expired_token_failing_jwt_auth(self):\n payload = utils.jwt_payload_handler(self.user)\n payload[\"exp\"] = 1\n token = utils.jwt_encode_handler(payload)\n\n auth = \"Bearer {0}\".format(token)\n response = self.client.get(\n self.protected_url, content_type=\"application/json\", HTTP_AUTHORIZATION=auth\n )\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response[\"WWW-Authenticate\"], 'JWT realm=\"api\"')\n expected_error = [\"Signature has expired.\"]\n self.assertEqual(response.json()[\"errors\"], expected_error)",
"def test_get_random_number_invalid_auth(self, *_):\n response = self.client.open(\n '/api/v1/random_number',\n method='GET',\n headers={'Authorization': b'foo'},\n content_type='application/json')\n self.assert403(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def ensure_globus_authorized(func):\n @functools.wraps(func)\n def do_reauth(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except globus_sdk.AuthAPIError as e:\n # older versions of the globus_sdk use raw_text, which is now deprecated\n response_text = getattr(e, \"text\", \"\") or getattr(e, \"raw_text\", \"\")\n if e.http_status == 400 and \"invalid_grant\" in response_text:\n print(\"Globus login has expired.\")\n get_refresh_token_authorizer(force_reauth=True)\n return func(*args, **kwargs)\n\n return do_reauth",
"def unauthorized():\n return make_response(jsonify({'error': 'Unauthorized access'}), 401)",
"def test_call_raises_invalid_oauth_token_error_when_401_and_auth_header(self):\n error_code = 401\n resp_headers = {'WWW-Authenticate': ''}\n canvas_error = self.make_retry_call_with_error_code(\n error_code, max_retries=1, response_headers=resp_headers)\n\n self.assertIs(type(canvas_error), InvalidOAuthTokenError)",
"def token_error():\n \n return (jsonify({'error': 'authentication required'}), 401, {'WWW-Authenticate': 'Bearer realm=\"Authentication Required\"'})",
"def check_authorization(self):\n self.token",
"def _verify_auth(self, resp, *args, **kwargs):\n if resp.status_code == 401:\n raise errors.AuthFailure(\n 'Received response code 401 from {} {}.'\n .format(resp.request.method, resp.request.path_url)\n )",
"def test_task_decorator_unauthed_user(self):\n response = wsgi.get_response('/t_task')\n self.assertEqual(401, response.status_int)",
"def test_statusml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)",
"def challenge(self):\n realm = current_app.config['BASIC_AUTH_REALM']\n return Response(\n status=401,\n headers={'WWW-Authenticate': 'Basic realm=\"%s\"' % realm}\n )",
"def challenge( self, request, response, **kw ):\n # If browser is coming back with auth, yet we are still challenging\n # that means there is insufficient privs.\n if request._auth and request._auth.startswith(self.auth_scheme):\n return False\n response.addHeader('WWW-Authenticate', self.auth_scheme)\n response.addHeader('Connection', 'keep-alive')\n response.setStatus(401)\n m = \"<strong>You are not authorized to access this resource.</strong>\"\n response.setBody(m, is_error=1)\n return True",
"def test_invalid_token_failing_jwt_auth(self):\n auth = \"Bearer abc123\"\n response = self.client.get(\n self.protected_url, content_type=\"application/json\", HTTP_AUTHORIZATION=auth\n )\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response[\"WWW-Authenticate\"], 'JWT realm=\"api\"')\n\n expected_error = [\"Error decoding signature.\"]\n self.assertEqual(response.json()[\"errors\"], expected_error)",
"def unauthorized(error):\n return render_template('401.html'), 401"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates the input matrix by adding column headers and padding the matrix with 0s to keep it a perfect square
|
def _pad_matrix(self):
for row in self.matrix:
row.insert(0, 0)
column_headers = []
for j in range(len(self.matrix[0])):
if j == 0:
# insert header node
column_headers.append('H')
else:
# insert column headers
column_headers.append(f'C{j}')
self.matrix.insert(0, column_headers)
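An illustrative before/after (not from the source), assuming a 2x3 input matrix:

# Before _pad_matrix():        After _pad_matrix():
#   [[1, 0, 1],                  [['H', 'C1', 'C2', 'C3'],
#    [0, 1, 1]]                   [ 0,   1,    0,    1  ],
#                                 [ 0,   0,    1,    1  ]]
# A header row is prepended and a leading 0 cell is inserted into every data row,
# so the header at matrix[0][j] lines up with the data cells of column j.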
|
[
"def pad_matrix(M):\n m, n = len(M), len(M[0])\n b = 1\n while b < max(m, n):\n b <<= 1\n M += [[0] * n for _ in range(b - m)]\n for i in range(b):\n M[i] += [0] * (b - n)\n return M",
"def pad_matrix(self, matrix, pad_value=0):\n max_columns = 0\n total_rows = len(matrix)\n for row in matrix:\n max_columns = max(max_columns, len(row))\n total_rows = max(max_columns, total_rows)\n new_matrix = []\n for row in matrix:\n row_len = len(row)\n new_row = row[:]\n if total_rows > row_len:\n # Row too short. Pad it.\n new_row += [0] * (total_rows - row_len)\n new_matrix += [new_row]\n while len(new_matrix) < total_rows:\n new_matrix += [[0] * total_rows]\n return new_matrix",
"def pad_matrix(mat, max_size):\n size = mat.shape[0]\n assert size <= max_size\n if size == max_size:\n return mat\n pad_size = max_size - size\n zs1 = jnp.zeros([size, pad_size], dtype=mat.dtype)\n zs2 = jnp.zeros([pad_size, size], dtype=mat.dtype)\n eye = jnp.eye(pad_size, dtype=mat.dtype)\n mat = jnp.concatenate([mat, zs1], 1)\n mat = jnp.concatenate([mat, jnp.concatenate([zs2, eye], 1)], 0)\n return mat",
"def update_cols(self):\n self.cols = []\n\n # Iterate through the list of lists and append the element to the appropriate list.\n for x in range(self.row_num):\n i = 0\n for y in self.rows[x]:\n if x == 0:\n self.cols.append([])\n self.cols[i].append(y)\n i += 1\n self.col_num = len(self.cols)",
"def unit_matrix(size):\n #TODO\n result = Matrix()\n for y in range(size):\n result.rows.append([])\n for y in result.rows:\n for x in range(size):\n result.append(0)\n for y in range(size):\n result.rows[y][y] = 1\n return result",
"def pad_array(matrix, value):\n for row in matrix:\n row.append(value)\n return matrix",
"def __create_matrix(self):\n self.matrix = []\n for _ in range(self.size):\n line = []\n for __ in range(self.size):\n line.append(' ')\n self.matrix.append(line)",
"def zeroPadImage(im, marg0, marg1):\n\t# Pad first side (zeros before first column):\n\ttmp = zeros(im[:,0].shape) # Get a column of zeros\n\ttmp = tile(tmp, (marg1,1) ) # Replicate the column the right number of times\n\tim = concatenate( (tmp.T,im), axis=1) # Concatenate tmp before the image\n\n\t# Pad second side (zeros after last column):\n\ttmp = zeros(im[:,im.shape[1]-1].shape) # Get a column of zeros\n\ttmp = tile(tmp, (marg1,1)) # Replicate the column the right number of times\n\tim = concatenate( (im,tmp.T), axis=1) # Concatenate tmp after the image\n\n\t# Pad third side (zeros before first row):\n\ttmp = zeros(im[0,:].shape) # Get a row of zeros\n\ttmp = tile(tmp, (marg0,1)) # Replicate the row the right number of times\n\tim = concatenate( (tmp,im), axis=0) # Concatenate tmp before the image\n\n\t# Pad fourth side (zeros after last row):\n\ttmp = zeros(im[im.shape[0]-1,:].shape) # Get a row of zeros:\n\ttmp = tile(tmp, (marg0,1)) # Create a tmp matrix replicating the row the right number of times\n\tim = concatenate( (im,tmp), axis=0) # Concatenate tmp after the image\n\n\treturn im",
"def pad(self, matrices, pad_value):\n shapes = [m.shape for m in matrices]\n M, N = sum([s[0] for s in shapes]), sum([s[1] for s in shapes])\n zeros = torch.FloatTensor(np.zeros((M, N))).to(self.device)\n pad_matrices = pad_value + zeros\n i, j = 0, 0\n for k, matrix in enumerate(matrices):\n m, n = shapes[k]\n pad_matrices[i:i+m, j:j+n] = matrix\n i += m\n j += n\n return pad_matrices",
"def set_col_zero(mat: List[List[int]], j: int) -> None:\n m = len(mat)\n for i in range(m):\n mat[i][j] = 0",
"def zero_bias(matrix):\n matrix[:,0] = 0\n return(matrix)",
"def mat_padding(orig_k, ret_size, \n x_shift=0, y_shift=0):\n ret_k = np.zeros((ret_size, ret_size))\n temp_k = np.zeros((ret_size, ret_size))\n #_mat_shape_check(orig_k)\n temp_k[0:orig_k.shape[0],0:orig_k.shape[1]] = orig_k\n ret_k[y_shift:ret_size,:] = temp_k[0:ret_size-y_shift,:]\n ret_k[0:y_shift,:] = temp_k[ret_size-y_shift:ret_size,:]\n temp_k[:] = ret_k\n ret_k[:,x_shift:ret_size] = temp_k[:,0:ret_size-x_shift]\n ret_k[:,0:x_shift] = temp_k[:,ret_size-x_shift:ret_size]\n #_ret_fft = np.fft.fft2(ret_k)\n #ret_fft_real = _ret_fft.real\n #ret_fft_imag = _ret_fft.imag\n #printf('ret_k:\\n{}',ret_k)\n #printf('fft_real:\\n{}\\nfft_imag:\\n{}',ret_fft_real,ret_fft_imag)\n return ret_k",
"def update(self):\n self._header.set(\"NAXIS1\", self.data._raw_itemsize, after=\"NAXIS\")\n self._header.set(\"NAXIS2\", self.data.shape[0], after=\"NAXIS1\")\n self._header.set(\"TFIELDS\", len(self.columns), after=\"GCOUNT\")\n\n self._clear_table_keywords()\n self._populate_table_keywords()",
"def add_column(matrix):\n mCopy = copy.deepcopy(matrix)\n for item in mCopy:\n item.append(0)\n print mCopy",
"def EditMatrix(s1, s2):\n n1 = len(s1)\n n2 = len(s2)\n matrix = np.zeros((n1+1, n2), dtype=int)\n matrix = np.concatenate((np.arange(n1+1).T[:,np.newaxis], matrix), axis=1)\n matrix[0,:] = np.arange(n2+1)\n return matrix",
"def _compute_padded(self, refresh: bool = False):\n if not (refresh or self._points_padded is None):\n return\n\n self._normals_padded, self._features_padded = None, None\n if self.isempty():\n self._points_padded = torch.zeros((self._N, 0, 3), device=self.device)\n else:\n self._points_padded = struct_utils.list_to_padded(\n self.points_list(),\n (self._P, 3),\n pad_value=0.0,\n equisized=self.equisized,\n )\n normals_list = self.normals_list()\n if normals_list is not None:\n self._normals_padded = struct_utils.list_to_padded(\n normals_list,\n (self._P, 3),\n pad_value=0.0,\n equisized=self.equisized,\n )\n features_list = self.features_list()\n if features_list is not None:\n self._features_padded = struct_utils.list_to_padded(\n features_list,\n (self._P, self._C),\n pad_value=0.0,\n equisized=self.equisized,\n )",
"def pad_with_zeros(n, num_of_rows_in_data, dft):\n\n half_zero_pad_size = -0.5 * (num_of_rows_in_data - n)\n half_zero_pad_size = int(half_zero_pad_size)\n\n half_zero_pad = np.zeros([half_zero_pad_size, 1])\n return np.concatenate((half_zero_pad, dft, half_zero_pad))",
"def set_row_zero(mat: List[List[int]], i: int) -> None:\n n = len(mat[0])\n mat[i] = [0] * n",
"def fill_zeros(heading):\n\theading_np = heading.detach().cpu().numpy()\n\theading_pd = pd.DataFrame(heading_np)\n\theading_pd = heading_pd.replace(to_replace=0, method=\"ffill\").replace(to_replace=0, method=\"bfill\")\n\treturn torch.from_numpy(heading_pd.values).to(heading) \n\t\n#\tprint(heading_pd)\n#\tinput(\"pause..\")\n\t\"\"\"\n\tneighbors = heading_np.shape[1]\n\tslen = heading_np.shape[0]\n\tfor n in range(neighbors):\n\t\tif not (heading_np[:,n]==0).any():\n\t\t\tcontinue\n\t\tidx = np.arange(slen)\n\t\tidx[heading_np[:,n]==0]=0\n\t\tidx = np.maximum.accumulate(idx,axis=0)\n\t\tprint(idx)\n\t\theading_np[:,n] = heading_np[idx,n]\n\t\tprint(heading_np) \n\t\tif (heading_np[:,n]==0).any():\n\t\t\tidx = np.arange(slen)\n\t\t\tidx[heading_np[:,n]==0]=0\n\t\t\tidx = np.minimum.accumulate(idx[::-1],axis=0)\n\t\t\tprint(idx)\n\t\t\theading_np[:,n] = heading_np[idx[::-1],n]\n\t\"\"\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Converts all column headers and all cells containing a 1 to Nodes
|
def _create_nodes(self):
nodes = []
for i in range(len(self.matrix)):
for j in range(len(self.matrix[i])):
value = self.matrix[i][j]
# Nothing to do
if value == 0:
continue
node = None
# convert all 1's to DancingNode
if value == 1:
node = DancingNode(value)
# convert all column headers to ColumnNode
if value != 1 and value != 0:
node = ColumnNode(value)
node.row_id = i
node.column_id = j
nodes.append(node)
self.matrix[i][j] = node
return nodes
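Continuing the illustrative matrix above (an assumption, not part of the source):

# After _create_nodes(), the grid holds node objects instead of raw values:
#   row 0:     ColumnNode('H'), ColumnNode('C1'), ColumnNode('C2'), ColumnNode('C3')
#   rows 1..m: a DancingNode(1) wherever the padded matrix had a 1; 0 cells stay as int 0
# Each created node records its (row_id, column_id) so the linking pass can locate its
# nearest non-zero neighbours in every direction.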
|
[
"def _initialize(self, matrix: List[List[int]], column_names: Optional[Iterable[AnyStr]] = None) -> None:\n if not matrix:\n return\n\n if column_names is None:\n num_columns = len(matrix[0])\n if num_columns <= 26:\n column_names = (chr(ord('A') + i) for i in range(num_columns))\n else:\n column_names = (str(i + 1) for i in range(num_columns))\n\n # create the column list headers\n prev_column = self.root\n for column_name in column_names:\n column = Column(name=column_name, left=prev_column)\n prev_column.right = column\n prev_column = column\n prev_column.right = self.root\n self.root.left = prev_column\n\n # create the nodes\n prev_row_nodes = {column: column for column in self.traverse_right(self.root)}\n for i, row in enumerate(matrix):\n node = None\n prev_col_node = None\n for column, value in zip(self.traverse_right(self.root), row):\n if value == 1:\n node = Node(column)\n prev_row_node = prev_row_nodes[column]\n node.up = prev_row_node\n prev_row_node.down = node\n prev_row_nodes[column] = node\n if prev_col_node is not None:\n node.left = prev_col_node\n prev_col_node.right = node\n prev_col_node = node\n if node is not None:\n if node.left is None:\n first = node\n else:\n first = node.left\n while first.left is not None:\n first = first.left\n node.right = first\n first.left = node\n\n for column, node in prev_row_nodes.items():\n node.down = column\n column.up = node",
"def _parse_nodes(self):\n self.column_width = int((len(self.raw_maze) - ((self.num_columns + 1) * self.wall_thickness)) / self.num_columns)\n self.row_height = int((len(self.raw_maze[0]) - ((self.num_rows + 1) * self.wall_thickness)) / self.num_rows)\n\n curr_row, curr_column = 0, 0\n previous_left_index = None\n previous_above_indexes = [None] * self.num_columns # Store list of previous vertical nodes\n self._update_location(curr_column, curr_row)\n while curr_row < self.num_rows:\n curr_column = 0\n self._update_location(curr_column, curr_row)\n while curr_column < self.num_columns:\n walls = self._get_walls(self.location[\"x\"], self.location[\"y\"])\n \"\"\"\n If the current cell has no top/bottom walls it is a Node, as it will need to connect to something later. \n \"\"\"\n if (not walls[\"top\"] or not walls[\"bottom\"]) or not (walls[\"left\"] and walls[\"right\"]):\n curr_index = (curr_column, curr_row)\n new_node = node.Node(curr_index, self.location)\n if previous_left_index:\n # Connect to the node to the left\n new_node.left = previous_left_index\n # Inform that node that you're its neighbor (doubly-linked-list)\n self.maze[previous_left_index].right = curr_index\n # Set yourself as the previous horizontal neighbor\n previous_left_index = curr_index\n\n if previous_above_indexes[curr_column]:\n # Connect to the node above\n new_node.above = previous_above_indexes[curr_column]\n # Inform that node that you're its neighbor (doubly-linked-list)\n self.maze[previous_above_indexes[curr_column]].below = curr_index\n # Set yourself as the previous vertical neighbor in this column\n previous_above_indexes[curr_column] = curr_index\n self.maze[(curr_column, curr_row)] = new_node\n\n if walls[\"right\"]:\n # Overwrite the previous horizontal neighbor if there are walls\n previous_left_index = None\n if walls[\"bottom\"]:\n # Overwrite the previous vertical neighbor if there are walls\n previous_above_indexes[curr_column] = None\n\n # Special cases for start and stop \"neighbors\"\n if curr_row == 0 and not walls[\"top\"]:\n self.maze[(curr_column, curr_row)].neighbors[\"start\"] = \"start\"\n if curr_row == self.num_rows - 1 and not walls[\"bottom\"]:\n self.maze[(curr_column, curr_row)].neighbors[\"end\"] = \"end\"\n\n curr_column += 1\n self._update_location(curr_column, curr_row)\n curr_row += 1\n self._update_location(curr_column, curr_row)\n self._remove_deadends()",
"def _create_links_between_nodes(self, nodes):\n for node in nodes:\n node.left = self._get_left(node.row_id, node.column_id)\n node.right = self._get_right(node.row_id, node.column_id)\n\n # header node does not need up or down links\n if node.value != 'H':\n node.up = self._get_up(node.row_id, node.column_id)\n node.down = self._get_down(node.row_id, node.column_id)\n\n # create reference to column header\n if node.value == 1:\n node.column_header = self._get_column_header(node.column_id)\n node.column_header.size += 1",
"def compress(self):\n self.nodes = numpy.zeros([self.bounds[0] / 10 + 10, self.bounds[1] / 10 + 10], dtype='uint8')\n\n for row_index, row in enumerate(self.nodes):\n for node_index, node in enumerate(row):\n begin_x = node_index * 10\n begin_y = row_index * 10\n if numpy.count_nonzero(self.grid[begin_y:begin_y + 10, begin_x:begin_x + 10]): # temp fix by adding 10 nodes of wiggle room\n self.nodes[node_index][row_index] = 1",
"def create_nodes_from_data_frame(tx, df, labels=None):\n create_nodes(tx, df.itertuples(index=False, name=None),\n labels=labels, keys=df.keys())",
"def _prep_node_data(node_data):\n data = node_data \\\n if isinstance(node_data, pd.core.frame.DataFrame) \\\n else pd.read_csv(node_data)\n\n (left, right) = data.columns\n return pd.concat([data[left], data[right]], keys=['left', 'right'])",
"def dt_cell_node_iter(self):\n for c in self.DT.valid_cell_iter():\n yield [self.vh_info[n] for n in self.DT.cells['nodes'][c,:3]]",
"def __skeleton_nodes(self, data3d_skel, kernel=None):\n\n if kernel is None:\n kernel = np.ones([3, 3, 3])\n\n mocnost = scipy.ndimage.filters.convolve(data3d_skel, kernel) * data3d_skel\n\n nodes = (mocnost > 3).astype(np.int8)\n terminals = ((mocnost == 2) | (mocnost == 1)).astype(np.int8)\n\n data3d_skel[nodes == 1] = 2\n data3d_skel[terminals == 1] = 3\n # maybe swap next two lines\n data3d_skel = self.__skeleton_nodes_aggregation(data3d_skel)\n data3d_skel = self.__remove_terminal_nodes_in_neghborhood_of_the_branching_node(data3d_skel)\n\n return data3d_skel",
"def _transform_non_hierarchical(self):\n if self.non_hierarchical_cols is None:\n return\n \n for col in self.non_hierarchical_cols:\n if is_numeric_dtype(self.data[col]):\n self.data[col] = self.data[col].astype(str)\n \n main_values = self.data[col].value_counts()[:self.max_non_hierarchical_classes].index\n self.data.loc[~self.data[col].isin(main_values), col] = \"others\"\n \n self.data[col] = self.data[col].astype(str)\n self.data[col] = self.data[col].str.lower()\n self.data[col] = self.data[col].str.strip()\n\n for value in self.data[col].unique():\n new_name = f\"{col}_{value}\"\n self.data[new_name] = 0\n self.data.loc[self.data[col] == value, new_name] = 1\n \n self.data = self.data.drop(col, axis=1)",
"def createnodes(self):\n i = 0\n for j in range(0, self.width):\n for k in range(0, self.height):\n n = node.Node(j, k, i)\n self.nodes[int(n.guid)] = n\n i += 1\n for nds in self.nodes:\n self.nodes[nds].walkable = True\n self.nodes[nds].neighbors = helpers.get_neighbors(self.nodes[nds], self)",
"def get_blanknodes(nodes):\n\n blanknodes = []\n for node in nodes:\n if is_blanknode(node):\n blanknodes.append(node)\n # print(\"**: %s\" % node)\n # else:\n # print(\"node: %s\" % node)\n return blanknodes",
"def _make_nodes(self, section):\n\t\tnodes = VariableTree.TreeNode(\"Nodes\")\n\t\tfor node in section:\n\t\t\tnodes.childs.append(self._make_node(node))\n\t\tself._var_tree.childs.append(nodes)",
"def node_label_matrix(word_node, classes=list(), label_data_set_dir=\"\", label_file_name=\"\"):\n node_label_matrix_dict = dict()\n length = len(classes)\n print(\"TOTAL NUMBER OF RAW NODE: {}\".format(len(word_node)))\n\n name, boxes, labels_xml = read_content(str(label_data_set_dir / label_file_name))\n # efficiently check whether nodes are in bounding boxes?\n for index, box in enumerate(boxes):\n # each box contains >= 1 words\n result_temp = list()\n if len(word_node) > 0:\n scanned_node = list()\n # [xmin, ymin, xmax, ymax]\n for index_n, node in enumerate(word_node):\n if box[0] < node.center_x < box[2] and box[1] < node.center_y < box[3]:\n\n # print(node.word, labels_xml[index])\n # set the label of the node\n # using a one-hot encoding way\n one_hot_class = [0] * length\n one_hot_class[classes.index(labels_xml[index])] = 1\n node_label_matrix_dict[node.id] = list()\n node_label_matrix_dict[node.id] = one_hot_class\n else:\n result_temp.append(node)\n\n word_node = result_temp\n # handle the unlabelled nodes\n for index_r, node_remaining in enumerate(word_node):\n one_hot_class = [0] * length\n one_hot_class[classes.index('')] = 1\n node_label_matrix_dict[node_remaining.id] = list()\n node_label_matrix_dict[node_remaining.id] = one_hot_class\n\n return node_label_matrix_dict",
"def set_internal_host_nodes(node: tree.Node):\n if node.is_leaf():\n return\n set_internal_host_nodes(node.left_node)\n set_internal_host_nodes(node.right_node)\n node.layout.row = (node.left_node.layout.row + node.right_node.layout.row) / 2\n node.layout.x = node.layout.col # This can be scaled if desired\n node.layout.y = node.layout.row # This can be scaled if desired",
"def nodes(self):\n return self.nodes_spanned()",
"def _pad_matrix(self):\n for row in self.matrix:\n row.insert(0, 0)\n\n column_headers = []\n for j in range(len(self.matrix[0])):\n\n if j == 0:\n # insert header node\n column_headers.append('H')\n else:\n # insert column headers\n column_headers.append(f'C{j}')\n\n self.matrix.insert(0, column_headers)",
"def writeNodes(pos, nodefile, header_comment):\n nodesTotal = pos[0].__len__() * pos[1].__len__() * pos[2].__len__()\n\n NODEFILE = open(nodefile, 'w')\n NODEFILE.write(\"%s\\n\" % (header_comment))\n NODEFILE.write(\"*NODE\\n\")\n\n NodeID = 0\n for z in pos[2]:\n for y in pos[1]:\n for x in pos[0]:\n NodeID += 1\n NODEFILE.write(\"%i,%.6f,%.6f,%.6f\\n\" % (NodeID, x, y, z))\n NODEFILE.write(\"*END\\n\")\n NODEFILE.close()\n print(\"%i/%i nodes written to %s\" % (NodeID, nodesTotal, nodefile))",
"def node_table(nodes, field_names=()):\n\n fields = OrderedDict([\n ('HOSTNAME', lambda s: s.get('host', s.get('hostname'))),\n ('IP', lambda s: s.get('ip') or mesos.parse_pid(s['pid'])[1]),\n ('ID', lambda s: s['id']),\n ('TYPE', lambda s: s['type']),\n ('REGION', lambda s: s['region']),\n ('ZONE', lambda s: s['zone']),\n ])\n\n for field_name in field_names:\n if field_name.upper() in fields:\n continue\n if ':' in field_name:\n heading, field_name = field_name.split(':', 1)\n else:\n heading = field_name\n fields[heading.upper()] = _dotted_itemgetter(field_name)\n\n sortby = list(fields.keys())[0]\n tb = table(fields, nodes, sortby=sortby)\n tb.align['TYPE'] = 'l'\n return tb",
"def insert_all_nodes():\n with open(r'projectI_hetionet\\nodes.tsv')as tsvfile:\n tsvreader = csv.reader(tsvfile, delimiter=\"\\t\")\n tsv_headings = next(tsvreader)\n for line in tsvreader:\n aline = aline + 1\n if \"Anatomy\" in line:\n createAnatomy_node = f\"\"\"CREATE ( Anatomy : Anatomy {{ id : \"{line[0]}\", name : \"{line[1]}\", kind : \"{line[2]}\" }})\"\"\"\n a_query = f\"\"\"insert into hetionet.anatomy1(id, kind, name) values( '{line[0]}' , '{line[1]}' , '{line[2]}');\"\"\"\n cqlsh(a_query)\n insert_query(createAnatomy_node)\n\n if \"Compound\" in line:\n createCompound_node = f\"\"\"CREATE ( Compound : Compound {{ id : \"{line[0]}\", name : \"{line[1]}\", kind : \"{line[2]}\" }})\"\"\"\n c_query = f\"\"\"insert into hetionet.compound1(id, kind, name) values( '{line[0]}' , '{line[1]}' , '{line[2]}');\"\"\"\n cqlsh(c_query)\n insert_query(createCompound_node)\n\n if \"Disease\" in line:\n createDisease_node = f\"\"\"CREATE ( Disease : Disease {{ id : \"{line[0]}\", name : \"{line[1]}\", kind : \"{line[2]}\" }})\"\"\"\n d_query = f\"\"\"insert into hetionet.disease1(id, kind, name) values( '{line[0]}' , '{line[1]}' , '{line[2]}');\"\"\"\n cqlsh(d_query)\n insert_query(createDisease_node)\n\n if\"Gene\" in line:\n createGene_node = f\"\"\"CREATE ( Gene : Gene {{ id : \"{line[0]}\", name : \"{line[1]}\", kind : \"{line[2]}\" }})\"\"\"\n q_query = f\"\"\"insert into hetionet.gene1(id, kind, name) values( '{line[0]}' , '{line[1]}' , '{line[2]}');\"\"\"\n cqlsh(q_query)\n insert_query(createGene_node)\n\n print(\"Numbers of nodes => \", aline, \"\\nNodes inserted successfully!\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates links between nodes that are adjacent to the left, right, up, and down. Additionally, each DancingNode is given a reference to its ColumnNode
|
def _create_links_between_nodes(self, nodes):
for node in nodes:
node.left = self._get_left(node.row_id, node.column_id)
node.right = self._get_right(node.row_id, node.column_id)
# header node does not need up or down links
if node.value != 'H':
node.up = self._get_up(node.row_id, node.column_id)
node.down = self._get_down(node.row_id, node.column_id)
# create reference to column header
if node.value == 1:
node.column_header = self._get_column_header(node.column_id)
node.column_header.size += 1
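A hedged sketch of how the helpers above might be driven (the function name and the dlx parameter are assumptions; only the private methods appear in the source):

def build_dancing_links(dlx):
    # dlx is expected to expose the helper methods shown above
    dlx._pad_matrix()                            # prepend header row and leading 0 column
    nodes = dlx._create_nodes()                  # wrap headers and 1s in node objects
    dlx._create_links_between_nodes(nodes)       # wire the left/right/up/down pointers
    return dlx._get_column_header(0)             # the 'H' root node of the linked structure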
|
[
"def __link_nodes(self):\n def __link_north(node):\n if node.x is 0:\n return\n\n pos = (node.x - 1, node.y)\n step = 0\n\n while self.maze.array[pos[0]][pos[1]] is not 0:\n step = step + 1\n\n if str(pos[0]) + str(pos[1]) in self.graph:\n node.connect(self.graph[str(pos[0]) + str(pos[1])], step)\n break\n pos = (pos[0] - 1, pos[1])\n\n def __link_south(node):\n if node.x is self.maze.height - 1:\n return\n\n try:\n pos = (node.x + 1, node.y)\n step = 0\n\n while self.maze.array[pos[0]][pos[1]] is not 0:\n step = step + 1\n\n if str(pos[0]) + str(pos[1]) in self.graph:\n node.connect(self.graph[str(pos[0]) + str(pos[1])], step)\n break\n pos = (pos[0] + 1, pos[1])\n except IndexError:\n return\n\n def __link_east(node):\n pos = (node.x, node.y + 1)\n step = 0\n\n while self.maze.array[pos[0]][pos[1]] is not 0:\n step = step + 1\n\n if str(pos[0]) + str(pos[1]) in self.graph:\n node.connect(self.graph[str(pos[0]) + str(pos[1])], step)\n break\n pos = (pos[0], pos[1] + 1)\n\n def __link_west(node):\n pos = (node.x, node.y - 1)\n step = 0\n\n while self.maze.array[pos[0]][pos[1]] is not 0:\n step = step + 1\n\n if str(pos[0]) + str(pos[1]) in self.graph:\n node.connect(self.graph[str(pos[0]) + str(pos[1])], step)\n break\n pos = (pos[0], pos[1] - 1)\n\n for node in self.graph.values():\n __link_south(node)\n __link_north(node)\n __link_east(node)\n __link_west(node)",
"def make_links(self, node0, node1):\r\n Link(node0, node1)\r\n Link(node1, node0)",
"def _parse_nodes(self):\n self.column_width = int((len(self.raw_maze) - ((self.num_columns + 1) * self.wall_thickness)) / self.num_columns)\n self.row_height = int((len(self.raw_maze[0]) - ((self.num_rows + 1) * self.wall_thickness)) / self.num_rows)\n\n curr_row, curr_column = 0, 0\n previous_left_index = None\n previous_above_indexes = [None] * self.num_columns # Store list of previous vertical nodes\n self._update_location(curr_column, curr_row)\n while curr_row < self.num_rows:\n curr_column = 0\n self._update_location(curr_column, curr_row)\n while curr_column < self.num_columns:\n walls = self._get_walls(self.location[\"x\"], self.location[\"y\"])\n \"\"\"\n If the current cell has no top/bottom walls it is a Node, as it will need to connect to something later. \n \"\"\"\n if (not walls[\"top\"] or not walls[\"bottom\"]) or not (walls[\"left\"] and walls[\"right\"]):\n curr_index = (curr_column, curr_row)\n new_node = node.Node(curr_index, self.location)\n if previous_left_index:\n # Connect to the node to the left\n new_node.left = previous_left_index\n # Inform that node that you're its neighbor (doubly-linked-list)\n self.maze[previous_left_index].right = curr_index\n # Set yourself as the previous horizontal neighbor\n previous_left_index = curr_index\n\n if previous_above_indexes[curr_column]:\n # Connect to the node above\n new_node.above = previous_above_indexes[curr_column]\n # Inform that node that you're its neighbor (doubly-linked-list)\n self.maze[previous_above_indexes[curr_column]].below = curr_index\n # Set yourself as the previous vertical neighbor in this column\n previous_above_indexes[curr_column] = curr_index\n self.maze[(curr_column, curr_row)] = new_node\n\n if walls[\"right\"]:\n # Overwrite the previous horizontal neighbor if there are walls\n previous_left_index = None\n if walls[\"bottom\"]:\n # Overwrite the previous vertical neighbor if there are walls\n previous_above_indexes[curr_column] = None\n\n # Special cases for start and stop \"neighbors\"\n if curr_row == 0 and not walls[\"top\"]:\n self.maze[(curr_column, curr_row)].neighbors[\"start\"] = \"start\"\n if curr_row == self.num_rows - 1 and not walls[\"bottom\"]:\n self.maze[(curr_column, curr_row)].neighbors[\"end\"] = \"end\"\n\n curr_column += 1\n self._update_location(curr_column, curr_row)\n curr_row += 1\n self._update_location(curr_column, curr_row)\n self._remove_deadends()",
"def _create_nodes(self):\n nodes = []\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[i])):\n value = self.matrix[i][j]\n\n # Nothing to do\n if value == 0:\n continue\n\n node = None\n\n # convert all 1's to DancingNode\n if value == 1:\n node = DancingNode(value)\n\n # convert all column headers to ColumnNode\n if value != 1 and value != 0:\n node = ColumnNode(value)\n\n node.row_id = i\n node.column_id = j\n nodes.append(node)\n self.matrix[i][j] = node\n\n return nodes",
"def createnodes(self):\n i = 0\n for j in range(0, self.width):\n for k in range(0, self.height):\n n = node.Node(j, k, i)\n self.nodes[int(n.guid)] = n\n i += 1\n for nds in self.nodes:\n self.nodes[nds].walkable = True\n self.nodes[nds].neighbors = helpers.get_neighbors(self.nodes[nds], self)",
"def switch_nodes(self):\n\n\t\t# Get current info\n\t\tnode_A=self.node_A\n\t\tport_A=self.port_A\n\t\tapp_id_A=self.app_id_A\n\t\tnode_B=self.node_B\n\t\tport_B=self.port_B\n\t\tapp_id_B=self.app_id_B\n\t\tDF=self.DF\n\n\t\t# Update\n\t\tself.node_A=node_B\n\t\tself.port_A=port_B\n\t\tself.app_id_A=app_id_B\n\t\tself.node_B=node_A\n\t\tself.port_B=port_A\n\t\tself.app_id_B=app_id_A\n\t\tif DF==0:\n\t\t\tself.DF=0\n\t\telif DF==1:\n\t\t\tself.DF=2\n\t\telif DF==2:\n\t\t\tself.DF=1\n\t\telse:\n\t\t\tlogging.warning(\"Unknown directionality flag\")\n\t\t\tself.DF=DF",
"def make_link(self, node0, node1):\r\n Link(node0, node1)",
"def _connect_nodes(self, node_a, node_b):\n self._graph[node_a].append(node_b)\n self._graph[node_b].append(node_a)",
"def connect(cell1, cell2):\n if cell1.pos.x == cell2.pos.x:\n if cell1.pos.y == cell2.pos.y + 1:\n cell1.down = cell2\n cell2.up = cell1\n elif cell1.pos.y == cell2.pos.y - 1:\n cell1.up = cell2\n cell2.down = cell1\n if cell1.pos.y == cell2.pos.y:\n if cell1.pos.x == cell2.pos.x + 1:\n cell1.left = cell2\n cell2.right = cell1\n elif cell1.pos.x == cell2.pos.x - 1:\n cell1.right = cell2\n cell2.left = cell1",
"def __node_place(self):\n\n for row in range(1, self.maze.height - 1):\n for column in range(1, self.maze.width - 1):\n if self.__rule_check(row, column):\n self.graph[str(row) + str(column)] = (Node(row, column))\n self.maze.paint_solved(column, row, (0, 255, 0))",
"def _closure_createlink(self):\n linkparents = self._closure_model.objects.filter(\n child__pk=self._closure_parent_pk\n ).values(\"parent\", \"depth\")\n linkchildren = self._closure_model.objects.filter(\n parent__pk=self.pk\n ).values(\"child\", \"depth\")\n newlinks = [self._closure_model(\n parent_id=p['parent'],\n child_id=c['child'],\n depth=p['depth']+c['depth']+1\n ) for p in linkparents for c in linkchildren]\n self._closure_model.objects.bulk_create(newlinks)",
"def link_nodes(self, node_list):\n for nd in node_list:\n if nd.name_ == \"SplitterNode\":\n self.sp_node_ = nd",
"def _move_words_to_links(self):\n\n visited = {self.initial_node.id}\n\n def visit_link(link):\n \"\"\"A function that is called recursively to move a word from the\n link end node to the link.\n \"\"\"\n end_node = link.end_node\n if hasattr(end_node, 'word'):\n if link.word is None:\n link.word = end_node.word\n else:\n raise InputError(\"SLF lattice contains words both in nodes \"\n \"and links.\")\n if end_node.id not in visited:\n visited.add(end_node.id)\n for next_link in end_node.out_links:\n visit_link(next_link)\n\n for link in self.initial_node.out_links:\n visit_link(link)\n\n for node in self.nodes:\n if hasattr(node, 'word'):\n del node.word",
"def _evaluate_link(self):\n\n #\n # Read and validate parameters\n #\n\n column_name = self.id\n\n definition = self.column_json\n\n main_table = self.table\n\n main_keys = definition.get('keys', [])\n if not all_columns_exist(main_keys, main_table.data):\n log.error(\"Not all key columns available in the link column definition.\".format())\n return\n\n linked_table_name = definition.get('linked_table', '')\n linked_table = self.table.workflow.get_table(linked_table_name)\n if not linked_table:\n log.error(\"Linked table '{0}' cannot be found in the link column definition..\".format(linked_table))\n return\n\n linked_keys = definition.get('linked_keys', [])\n if not all_columns_exist(linked_keys, linked_table.data):\n log.error(\"Not all linked key columns available in the link column definition.\".format())\n return\n\n #\n # 1. Create a column with index values in the target table with the name of the link column.\n #\n \"\"\"\n INFO:\n df['index1'] = df.index # Copy\n # Use df.reset_index for converting existing index to a column. After that, we AGAIN create an index.\n # The goal is to preserve the old index even if it is not a continuous range\n df.reset_index().set_index('index', drop=False)\n # Or\n df.reset_index(inplace=True)\n df.set_index('index', drop=False, inplace=True)\n # Or\n df = df.rename_axis('index1').reset_index() # New index1 column will be created\n \"\"\"\n\n index_column_name = '__row_id__' # It could be 'id', 'index' or whatever other convention\n linked_table.data[index_column_name] = linked_table.data.index\n\n #\n # 2. Create left join on the specified keys\n #\n\n linked_prefix = column_name+'::' # It will prepended to each linked (secondary) column name\n\n out_df = pd.merge(\n main_table.data, # This table\n linked_table.data.rename(columns=lambda x: linked_prefix + x, inplace=False), # Target table to link to. We rename columns (not in place - the original frame preserves column names)\n how='left', # This (main) table is not changed - we attach target records\n left_on=main_keys, # List of main table key columns\n right_on= [linked_prefix + x for x in linked_keys], # List of target table key columns. Note that we renamed them above so we use modified names.\n left_index=False,\n right_index=False,\n #suffixes=('', linked_suffix), # We do not use suffixes because they cannot be enforced (they are used only in the case of equal column names)\n sort=False # Sorting decreases performance\n )\n # Here we get linked column names like 'Prefix::OriginalName'\n\n #\n # 3. Rename according to our convention and store the result\n #\n\n # Rename our link column by using only specified column name\n out_df.rename({column_name+'::'+index_column_name: column_name}, axis='columns', inplace=True)\n\n out = out_df[column_name]\n\n # Store the result df with all target columns (in the case they are used in other definitions)\n #main_table.data = out_df\n # ??? If the result df includes all columns of this df, then why not to simply replace this df by the new df?\n # ??? What if this df already has some linked (tareget) columns from another table attached before?\n # ??? What if the target table already has linked (target) columns from its own target table (recursive)?\n\n return out",
"def order_nodes(self, uplink, downlinks):\n id_to_name = {}\n dl_map = {} # downlink -> uplink port\n for p in self.G.node[uplink][\"ports\"]:\n id_to_name[p[\"id\"]] = sorted(p[\"ports\"], key=split_alnum)[0]\n for dl in downlinks:\n for p in self.G.edges[uplink, dl][\"ports\"]:\n if p in id_to_name:\n dl_map[dl] = id_to_name[p]\n break\n return sorted(dl_map, key=lambda x: split_alnum(dl_map[x]))",
"def _setup_nodes(self):\n\n # Hard Coded connections based on indices.\n # ([Tiles], [Neighbors])\n Connection = namedtuple('Connection', ['tiles', 'neighbors'])\n connections = {\n 0: Connection([0], [3, 4]),\n 1: Connection([1], [4, 5]),\n 2: Connection([2], [5, 6]),\n 3: Connection([0], [0, 7]),\n 4: Connection([0, 1], [0, 1, 8]),\n 5: Connection([1, 2], [1, 2, 9]),\n 6: Connection([2], [2, 10]),\n 7: Connection([0, 3], [3, 11, 12]),\n 8: Connection([0, 1, 4], [4, 12, 13]),\n 9: Connection([1, 2, 5], [5, 13, 14]),\n 10: Connection([2, 6], [6, 14, 15]),\n 11: Connection([3], [7, 16]),\n 12: Connection([0, 3, 4], [7, 8, 17]),\n 13: Connection([1, 4, 5], [8, 9, 18]),\n 14: Connection([2, 5, 6], [9, 10, 19]),\n 15: Connection([6], [10, 20]),\n 16: Connection([3, 7], [11, 21, 22]),\n 17: Connection([3, 4, 8], [12, 22, 23]),\n 18: Connection([4, 5, 9], [13, 23, 24]),\n 19: Connection([5, 6, 10], [14, 24, 25]),\n 20: Connection([6, 11], [15, 25, 26]),\n 21: Connection([7], [16, 27]),\n 22: Connection([3, 7, 8], [16, 17, 28]),\n 23: Connection([4, 8, 9], [17, 18, 29]),\n 24: Connection([5, 9, 10], [18, 19, 30]),\n 25: Connection([6, 10, 11], [19, 20, 31]),\n 26: Connection([11], [20, 32]),\n 27: Connection([7], [21, 33]),\n 28: Connection([7, 8, 12], [22, 33, 34]),\n 29: Connection([8, 9, 13], [23, 34, 35]),\n 30: Connection([9, 10, 14], [24, 35, 36]),\n 31: Connection([10, 11, 15], [25, 36, 37]),\n 32: Connection([11], [26, 37]),\n 33: Connection([7, 12], [27, 28, 38]),\n 34: Connection([8, 12, 13], [28, 29, 39]),\n 35: Connection([9, 13, 14], [29, 30, 40]),\n 36: Connection([10, 14, 15], [30, 31, 41]),\n 37: Connection([11, 15], [31, 32, 42]),\n 38: Connection([12], [33, 43]),\n 39: Connection([12, 13, 16], [34, 43, 44]),\n 40: Connection([13, 14, 17], [35, 44, 45]),\n 41: Connection([14, 15, 18], [36, 45, 46]),\n 42: Connection([15], [37, 46]),\n 43: Connection([12, 16], [38, 39, 47]),\n 44: Connection([13, 16, 17], [39, 40, 48]),\n 45: Connection([14, 17, 18], [40, 41, 49]),\n 46: Connection([15, 18], [41, 42, 50]),\n 47: Connection([16], [43, 51]),\n 48: Connection([16, 17], [44, 51, 52]),\n 49: Connection([17, 18], [45, 52, 53]),\n 50: Connection([18], [46, 53]),\n 51: Connection([16], [47, 48]),\n 52: Connection([17], [48, 49]),\n 53: Connection([18], [49, 50])\n }\n\n # Setup nodes w/ tiles.\n for i in range(54):\n self.nodes[i].tiles = [self.tiles[j]\n for j\n in connections[i].tiles]\n\n # Connect nodes to each other\n for i in range(54):\n self.nodes[i].neighbors = [self.nodes[j]\n for j\n in connections[i].neighbors]",
"def dataArrangeLink(self):\n selectList = self.currentSelectionModel().uniqueBranches()\n children = []\n for node in selectList:\n children.extend(node.childList)\n fieldList = self.model.formats.commonFields(children)\n if not fieldList:\n QtGui.QMessageBox.warning(self.activeWindow, 'TreeLine',\n _('Cannot expand without common fields'))\n return\n linkField, ok = QtGui.QInputDialog.getItem(self.activeWindow,\n _('Link Field'),\n _('Select field with links '\n 'to parents'), fieldList,\n 0, False)\n if not ok:\n return\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n undo.BranchUndo(self.model.undoList, selectList)\n for node in selectList:\n node.arrangeByLink(linkField)\n self.updateAll()\n QtGui.QApplication.restoreOverrideCursor()",
"def link(self, node1, node2):\n if node1 != None and node2 != None:\n node1['next'] = node2\n node2['previous'] = node1\n elif node1 != None and node2 == None:\n node1['next'] = None\n elif node1 == None and node2 != None:\n node2['previous'] = None",
"def make_move(self, row1, col1, row2, col2):\n\n child = BirdsOfAFeatherNode.from_parent(self)\n\n child.prev_move = repr(child.grid[row1][col1]) + '-' + repr(child.grid[row2][col2])\n\n child.grid[row2][col2] = child.grid[row1][col1]\n\n child.grid[row1][col1] = None\n\n return child"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the column header of the node at column
|
def _get_column_header(self, column):
    return self.matrix[0][column]
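A quick aside on the convention this method relies on: row 0 of the matrix holds the headers. A minimal, self-contained sketch follows; the HeaderMatrix class and its sample data are hypothetical, standing in for whatever object owns self.matrix.

class HeaderMatrix:
    def __init__(self, matrix):
        self.matrix = matrix

    def _get_column_header(self, column):
        # Row 0 is the header row; index into it by column position.
        return self.matrix[0][column]

m = HeaderMatrix([["name", "age"], ["alice", 30], ["bob", 25]])
print(m._get_column_header(1))  # -> "age"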
|
[
"def _get_header_column_letter(self, title):\n return self._to_letter(self._get_header_index(title))",
"def get_header_cell(self):\n return self.heading.center(self.width)[:self.width]",
"def _get_header_column_number(self, title):\n return self._get_header_index(title) + 1",
"def _get_column_name(self, column):\n return column",
"def as_dataframe_column_header(self):\n return f\"{self.period.tag} front-{self.front} {self.field.tag}\"",
"def fieldnames(self):\n column_headers = []\n with open(self.data, 'r+') as csv_file:\n # make csv_reader and get first row from iterator with next()\n csv_reader = csv.reader(csv_file)\n header_row = next(csv_reader)\n for column_header in header_row:\n column_headers.append(column_header)\n return column_headers",
"def name (self):\n return self._column;",
"def columns ( frame ) :\n names = [ str(c) for c in frame.GetColumnNames() ]\n if ( 6 , 16 ) <= root_info : \n names += [ str(c) for c in frame.GetDefinedColumnNames() ] \n return tuple ( sorted ( set ( names ) ) )",
"def _colHeader(strIn):\n return \" & \".join(strIn) + \"\\\\\\\\\\n\"",
"def _GetColumnHeaders(self):\n return [\n \"account_name\",\n \"transaction_date\",\n \"transaction_description\",\n \"transaction_amount\",\n \"category\",\n \"display_name\"\n ]",
"def _GetColumn(data, token):\n last_newline = data.rfind('\\n', 0, token.lexpos)\n if last_newline < 0:\n last_newline = 0\n column = token.lexpos - last_newline\n return column",
"def get_column_name(self):\r\n columns = list(self.all_data.columns)\r\n # Note: Excludes Year, Month, Day\r\n columns.remove(self._year)\r\n columns.remove(self._month)\r\n columns.remove(self._day_of_week)\r\n index = 1\r\n for col in columns:\r\n print(f'{index}. {col}')\r\n index += 1\r\n \r\n col_number = int(input('Please select column number: '))\r\n while col_number not in [1, 2, 3, 4]:\r\n col_number = int(input('Please select column number: '))\r\n return columns[ col_number - 1]",
"def get_header(self, taskmanager_id, generation_id, key):\n\n cols = [(x.split())[0] for x in SQLite3DB.tables.get(SQLite3DB.header_table)]\n return self._get_table_row(SQLite3DB.header_table, taskmanager_id,\n generation_id, key, cols)",
"def get_header(self, colwidth=18):\n kk = list(self.__dict__.keys())\n kk.sort()\n # build the format specifier\n fmt = '{{:<{:d}s}}'.format(colwidth)\n out = ''\n for key in kk:\n out += fmt.format(key)\n return out",
"def get_column_names(self):\n columns = list(self.table_content.keys())\n return columns",
"def getColumnName(self, columnIndex): \n return self.columnNames[columnIndex]",
"def _mheader(self):\n return self.get_info(0)[1]",
"def get_level0000_column_headers(self):\n try: self.level0000_column_headers\n except:\n self.set_level0000_standards()\n return self.level0000_column_headers",
"def get_column_cell_names(self, title, row=1):\r\n cells = self.get_column_cells(title, row)\r\n try:\r\n cell_names = [\"{}{}\".format(cell.column, cell.row)\r\n for cell in cells]\r\n return cell_names\r\n except Execption as traceback_error:\r\n statement = \"Problem building cell name from {} column\".format(title)\r\n error_logger.logger(statement, traceback_error)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Transform input .py file if provided, otherwise run simple_demo. Argument is assumed to be a syntactically valid Python module.
|
def main():
    if len(sys.argv) < 2:
        transform_module(EXAMPLE_BODY)
    else:
        module = sys.argv[1]
        # Read the source of the module path given on the command line.
        with open(module, 'r') as f:
            transform_module(f.read(), module)
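To exercise the snippet above on its own, a hedged stub of the two names it assumes, EXAMPLE_BODY and transform_module (both hypothetical stand-ins; the real definitions live elsewhere in the original module):

EXAMPLE_BODY = "x = 1\nprint(x)\n"  # hypothetical sample source

def transform_module(source, name="<demo>"):
    # Hypothetical stand-in: the real function presumably parses and rewrites
    # the given Python source; here we only report what it would receive.
    print(f"transforming {name}: {len(source)} bytes of source")

# With these stubs in place, main() can be exercised as:
#   python demo.py            -> transforms EXAMPLE_BODY
#   python demo.py target.py  -> transforms the contents of target.py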
|
[
"def runpy(self, name, contents):\n NAME = \"t_run\"\n f = open(\"%s.py\" % name, \"w\")\n f.write(contents)\n f.close()\n\n import importlib\n m = importlib.import_module(name)\n return m # the module instance",
"def main():\n # type: (str) -> None\n set_test_cache()\n fetch.DATA_SOURCES = config.data_sources\n\n if len(sys.argv) < 2:\n print(\"usage: python regression.py datafile\")\n sys.exit()\n try:\n if sys.argv[1].endswith('.json'):\n play_file(sys.argv[1])\n else:\n replay_file(sys.argv[1])\n sys.exit(0)\n except Exception as exc:\n traceback.print_exc()\n sys.exit(1)",
"def run(self):\n\n # Index for main file and key for main function\n file_index = 0\n function_key = \"0\"\n\n # All the code will start with 1 tab indent\n indent = 1\n\n # Source: https://www.mattlayman.com/blog/2018/decipher-python-ast/\n with open(self.script_path, \"r\") as py_source:\n tree = ast.parse(py_source.read())\n py_source.seek(0)\n all_lines = py_source.read().splitlines()\n\n analyzer = pyanalyzer.PyAnalyzer(self.output_files, all_lines)\n analyzer.analyze(tree.body, file_index, function_key, indent)\n\n self.apply_variable_types()\n self.ingest_comments(all_lines)\n self.write_cpp_files()",
"def loadPython(fstr, toSimple=True):\n\n if os.path.isfile(fstr):\n lp = LoadPython.loadFile(fstr)\n else: #it's a command\n lp = LoadPython.loadCommand(fstr)\n\n lp.doPrepare()\n if toSimple: lp.doTransform()\n return lp.getContent()",
"def run_from_file(filename, input_data = None, do_print = True):\n\twith open(filename) as f:\n\t\tcode = f.read()\n\treturn run(code, input_data = input_data, do_print = do_print)",
"def main(pyfile, overrides={}, initial_options=copy.copy(default_options), \r\n global_allowed_types=allowed_types):\r\n # Beware of passing by reference. We need to make copies of options as\r\n # Much as possible to avoid histerisis effects:\r\n options = copy.copy(initial_options)\r\n allowed_types = global_allowed_types.copy()\r\n\r\n # Options used to start the parsing:\r\n parsing_options = copy.copy(options)\r\n parsing_options._update_loose(overrides)\r\n # Slice the input file into code blocks\r\n block_list = code_hasher.iterblocks(pyfile)\r\n # FIXME: Need to deal with the script's options\r\n script_options = {}\r\n\r\n # Override the options given by the script by the command line switch\r\n script_options.update(overrides)\r\n # And now merge this to the default options (! this a not a dict)\r\n options._update_loose(script_options)\r\n options = guess_names_and_types(options, allowed_types=allowed_types)\r\n\r\n # Process the blocks\r\n output_list = execute_block_list(block_list, options)\r\n DEBUGwrite( output_list, 'output_list')\r\n\r\n open_outfile(options)\r\n \r\n output_list = shape_output_list(output_list, options)\r\n \r\n global compilers\r\n compiler = compilers.get(options.outtype, TexCompiler)(options)\r\n compiler.compile( output_list, options.outfile, options)",
"def main(pyfile, overrides={}, initial_options=copy.copy(default_options), \n global_allowed_types=allowed_types):\n # Beware of passing by reference. We need to make copies of options as\n # Much as possible to avoid histerisis effects:\n options = copy.copy(initial_options)\n allowed_types = global_allowed_types.copy()\n\n # Options used to start the parsing:\n parsing_options = copy.copy(options)\n parsing_options._update_loose(overrides)\n # Slice the input file into code blocks\n block_list = code_hasher.iterblocks(pyfile)\n # FIXME: Need to deal with the script's options\n script_options = {}\n\n # Override the options given by the script by the command line switch\n script_options.update(overrides)\n # And now merge this to the default options (! this a not a dict)\n options._update_loose(script_options)\n options = guess_names_and_types(options, allowed_types=allowed_types)\n\n # Process the blocks\n output_list = execute_block_list(block_list, options)\n DEBUGwrite( output_list, 'output_list')\n\n open_outfile(options)\n \n output_list = shape_output_list(output_list, options)\n \n global compilers\n compiler = compilers.get(options.outtype, TexCompiler)(options)\n compiler.compile( output_list, options.outfile, options)",
"def sample_module(\n # The input/output port are defined using the following 4 annotations.\n # Note that you need to register data type using\n # DataType.create_data_type(ws, 'MyDirectory', description=description, is_directory=True)\n # DataType.create_data_type(ws, 'MyFile', description=description, is_directory=False)\n # See https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.graph.datatype?view=azure-ml-py#create-data-type-workspace--name--description--is-directory--parent-datatypes-none-\n output_dir: OutputDirectory(type='MyDirectory'),\n output_file: OutputFile(type='MyFile'),\n input_dir: InputDirectory(type='MyDirectory') = None,\n input_file: InputFile(type='MyFile') = None,\n # The parameter with default values will be considered as annotated with such type,\n # Now we support the following 5 types: str, int, float, bool, enum\n str_param='abc',\n int_param=1,\n float_param=0.1,\n bool_param=False,\n enum_param=MyEnum.Enum0,\n # If the default value is None without annotation, it will be treated as str.\n none_param=None,\n):\n print(f\"Arg 'input_dir' = '{input_dir}', type='{type(input_dir)}'\")\n if input_dir:\n print(f\"Contents of input directory:\")\n print('\\n'.join(f.name for f in Path(input_dir).iterdir()))\n print(f\"Arg 'input_file' = {input_file}, type='{type(input_file)}'\")\n print(f\"Arg 'output_dir' = {output_dir}, type='{type(output_dir)}'\")\n print(f\"Arg 'output_file' = {output_file}, type='{type(output_file)}'\")\n print(f\"Arg 'str_param' = {str_param}, type='{type(str_param)}'\")\n print(f\"Arg 'int_param' = {int_param}, type='{type(int_param)}'\")\n print(f\"Arg 'float_param' = {float_param}, type='{type(float_param)}'\")\n print(f\"Arg 'bool_param' = {bool_param}, type='{type(bool_param)}'\")\n print(f\"Arg 'enum_param' = {enum_param}, type='{type(enum_param)}'\")\n print(f\"Arg 'none_param' = {none_param}, type='{type(none_param)}'\")\n\n data = str_param\n if input_file:\n with open(input_file, 'r') as fin:\n data = fin.read()\n print(\"Content of input file:\", data)\n if input_dir:\n shutil.copytree(input_dir, output_dir)\n else:\n os.makedirs(output_dir, exist_ok=True)\n with open(os.path.join(output_dir, \"test.txt\"), 'w') as fout:\n fout.write(data)\n with open(output_file, 'w') as fout:\n fout.write(data)",
"def run_sample():\n from autumn.projects.covid_19.mixing_optimisation.sample_code import run_sample_code\n\n run_sample_code()",
"def main():\n parser = create_parser()\n args = parser.parse_args()\n if args.version:\n print(\"{} version {}\".format(__prog_name__, __version__))\n return\n source = \"\".join(fileinput.input(args.input))\n tree = parse(source)\n json = export_json(tree, args.pretty)\n print(json)",
"def execute(self):\n if self._cli_arguments['cfn']:\n generate_sample_cfn_module(self.env_root)\n elif self._cli_arguments['sls']:\n generate_sample_sls_module(self.env_root)\n elif self._cli_arguments['sls-tsc']:\n generate_sample_sls_tsc_module(self.env_root)\n elif self._cli_arguments['stacker']:\n generate_sample_stacker_module(self.env_root)\n elif self._cli_arguments['tf']:\n generate_sample_tf_module(self.env_root)\n elif self._cli_arguments['cdk-tsc']:\n generate_sample_cdk_tsc_module(self.env_root)\n elif self._cli_arguments['cdk-py']:\n generate_sample_cdk_py_module(self.env_root)\n elif self._cli_arguments['cdk-csharp']:\n generate_sample_cdk_cs_module(self.env_root)",
"def _preprocess_in_module_mode():\n if len(sys.argv) <= 1:\n print(\"Please set filename\")\n print(\"example:\")\n print(\" $ python -m shape_commentator filename arg1 arg2\")\n exit()\n for i in range(len(sys.argv)-1):\n sys.argv[i] = sys.argv[i+1]\n del sys.argv[len(sys.argv)-1]",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str, help='The file to process')\n parser.add_argument('-i', '--inplace', action='store_true',\n help='Process in place')\n args = parser.parse_args()\n\n convert_file(args.file, args.inplace)\n\n return 0",
"def run_suite():\n print(\"*Input*\")\n raw_text = \"\" # the to-be-encrypted/decrypted text \n if(type_is_file()):\n filename = prompt_for_filename()\n while(not os.path.exists(filename)):\n filename = prompt_for_filename()\n raw_text = read_from_file(filename).upper()\n else:\n raw_text = input(\"Enter the string to encrypt or decrypt: \").upper()\n\n print(\"*Transform*\")\n operation = transformation_type()\n tool = encryption_method() \n message = transform_text(raw_text, operation, tool)\n print(\"...\")\n\n print(\"*Output*\") \n if(type_is_file()):\n write_to_file(prompt_for_filename(), message)\n else:\n print(message)",
"def main():\n\n # file-specific constants\n section_header = 'Python Scikit-learn Models'\n table_header_list = ['Model Name', 'Model Description', 'Data Name',\n 'Data Description', 'Performance Metric 1',\n 'Performance Metric 2']\n\n # determine output markdown filename from current filename\n current_path = re.split(r'[\\\\/]', inspect.getfile(inspect.currentframe()))\n current_fname_prefix = current_path[-1].split('.')[0]\n out_txt_fname = current_fname_prefix + '.txt'\n\n # run benchmark models\n models = run_models()\n\n # generate markdown\n gen_table_md(models, section_header, table_header_list, out_txt_fname)",
"def main():\n description = 'Generate \"Lorem ipsum\" text'\n args = _parser(description).parse_args()\n print(generate(**vars(args)))",
"def test_cli_compiles_source_file(monkeypatch):\n params = [\"overreact\", \"--compile\", \"data/ethane/B97-3c/model.k\"]\n monkeypatch.setattr(\"sys.argv\", params)\n cli.main()",
"def XXXtest_hello_py(self):\n PYNAME = \"t_hello\"\n SRC = \\\n\"\"\"\nprint(\"## hello from python ##\")\n\"\"\"\n self.runpy(PYNAME, SRC)",
"def sample_programs(dsl):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Send invitation to phone number. The confirmation code is deterministic based on team info.
|
def post(user):
    data = request.get_json()
    try:
        number = parse(data["phone_number"], "US")
    except NumberParseException:
        message = "The number supplied does not seem to be valid. Please try again."
        print(message)
        return make_response(jsonify({"message": message}), 400)
    number = f"+{number.country_code}{number.national_number}"
    # generate a confirmation code
    team = db.session.query(Team).filter(Team.id == data["team_id"]).one()
    code = encode(team)
    # format message
    message = f"{user.username} invited you to join their team {team.name} on the Bricks app."
    # send message to number with Twilio
    recipient = {"phone_number": number}
    send_message(recipient, message)
    send_message(recipient, "Download the app here: https://itunes.apple.com/us/app/stack-a-brick/id1456194944#?platform=iphone")
    send_message(recipient, "Use this code to join their team:")
    send_message(recipient, code)
    # add invitation to db
    invitation = Invitation(
        user=user,
        team=team,
        invitee_phone=number,
        code=code
    )
    db.session.add(invitation)
    db.session.commit()
    db.session.close()
    message = f"Invitation sent to {number}"
    return make_response(jsonify({"message": message}), 200)
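The encode(team) helper is not part of this row; since the description says the confirmation code is deterministic and derived from team info, here is a hedged sketch of what such a helper could look like (hypothetical, not the app's actual scheme):

import hashlib

def encode(team):
    # Hypothetical: hash stable team fields so the same team always yields
    # the same short, human-typable invitation code.
    digest = hashlib.sha256(f"{team.id}:{team.name}".encode("utf-8")).hexdigest()
    return digest[:6].upper()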
|
[
"def send_invite(recipient, recipient_name, sender, sender_name, base_url, id):\n\n url = base_url.strip('/') + '/' + id\n invite_msg = \"\"\"\nDear {recp_name}:\n\n{sender_name} is inviting you to use Minion ({url}). Minion is a security testing framework \\\nbuilt by Mozilla to bridge the gap between developers and security testers. Once you signup,\nyou can scan your projects and receive friendly web security assessment.\n\nThank you.\n\nSincerely,\nSecurity Assurance Team at Mozilla\n\n\"\"\".format(recp_name=recipient_name, sender_name=sender_name, url=url)\n\n config = backend_config()\n smtp = config['invitation']\n subject = \"{sender_name} is inviting you to use Minion!\".format(sender_name=sender_name)\n\n # we have the option to send this invitation \n # via user's email (admin's own account) or\n # the email account specified by the config.\n # This option allows us to send invite by any\n # user in the future (if we wish to enabled that).\n # For now, we can assume admin's persona account\n # is passed.\n if sender is None:\n fromaddr = smtp['sender']\n else:\n fromaddr = sender\n toaddrs = ', '.join((recipient,))\n invite_msg = invite_msg.format(recp=recipient, url=url)\n body = (\"From: %s\\r\\nTo: %s\\r\\nSubject: %s\\r\\n%s\"\n %(fromaddr, toaddrs, subject, invite_msg))\n server = smtplib.SMTP(smtp['host'], smtp['port'])\n server.sendmail(fromaddr, toaddrs, body)\n server.quit()",
"def send_code(self, request, verification_obj, method):\n try:\n verification_obj.send_code(method)\n except TwilioRestException:\n msg = 'Sending verification code failed for phone number {}.'.format(\n verification_obj.phone)\n self.message_user(request, msg, messages.WARNING)\n else:\n msg = 'Phone verification code for phone number {} will be delivered by {}'.format(\n verification_obj.phone, method.capitalize())\n self.message_user(request, msg, messages.SUCCESS)",
"def resend_code():\n messagebox.showinfo(\"OTP Resent\", \"New OTP has been sent. Please, check on your mobile phone.\")\n send_code()",
"def invite_code(self) -> str | None:\n if self.type == Type.Individual and self.is_valid():\n invite_code = _invite_hex_sub(f\"{self:32x}\")\n split_idx = len(invite_code) // 2\n return invite_code if split_idx == 0 else f\"{invite_code[:split_idx]}-{invite_code[split_idx:]}\"",
"def send_verification_text(self, country_code=settings.TWILIO_US_COUNTRY_CODE):\n get_authy_client().phones.verification_start(\n self.phone_number,\n country_code,\n via=TWILIO_VIA_SMS\n )\n if settings.DEBUG:\n print(\"DEBUG MODE ON; NO VERIFICATION MESSAGE SENT\")",
"def send_reservation_confirm(\n to_email, to_name, date_reservation, restaurant_name, number_seat\n):\n subject = \"Reservation confirmed\"\n body = (\n \"Hi {toName},<br>\"\n \"we are glad to confirm your table for {numberSeat} people \"\n 'at restaurant \"{restaurantName}\" in date {dateReservation}<br> '\n \"<br>See you soon!<br> \"\n )\n body = body.replace(\"{toName}\", to_name)\n body = body.replace(\"{restaurantName}\", restaurant_name)\n body = body.replace(\"{dateReservation}\", date_reservation)\n body = body.replace(\"{numberSeat}\", str(number_seat))\n send_email(subject, body, to_email)",
"def send_moji_sms_invitation(actor_member_id, emoji, invitee_sms_number, when):\n LOGGER.info(\"Event received: send_moji_sms_invitation(%s, %s, %s, %s)\",\n actor_member_id, emoji, invitee_sms_number, when)\n\n recipient = validate_sms_number(invitee_sms_number)\n if not recipient:\n LOGGER.debug(\"SMS target '%s' is invalid\", invitee_sms_number)\n return\n\n sending_member = member.models.Member.objects.get(pk=actor_member_id)\n if not sending_member:\n LOGGER.debug(\"Actor '%s' does not exist\", actor_member_id)\n return\n\n # TODO - Check datetime\n\n # TODO: Localize\n template = Template(settings.MOJI_SMS_INVITE_TEXT)\n message = template.substitute(senderName=sending_member.name,\n emoji=emoji)\n result = send_unicode_message(recipient, message)\n LOGGER.info(\"Event handled: send_moji_sms_invitation(%s, %s) %s\",\n recipient, message, result)",
"def send_invitation_email(self, *args, **kwargs):\n\n if not self.invitation_key_expired():\n subject, email = self.invitation_email_message(*args, **kwargs)\n self.mail_guest(subject, email)\n if not self.sent_invite:\n self.sent_invite = True\n self.save()",
"def test_invitation_email(self):\n self.beta_valid.invite()\n self.assertEqual(len(mail.outbox), 2)\n self.assertTrue(self.beta_valid.invited)",
"def generate_invite_code(self, user, num=3):\n result_list = []\n for i in range(num):\n code = code_generator(size=settings.INVITE_CODE_SIZE)\n invitation = self.model(owner=user,invite_code=code)\n invitation.save()\n result_list.append(invitation)\n return result_list",
"def SendMailVerificationCode(send_to):\n sent_from = settings.EMAIL_USER\n to = [send_to]\n subject = 'Verification code [Accommodating]'\n length = 6\n verify_sample = random.sample(init_chars, length)\n verification_code = ''.join(verify_sample)\n body = f\"Here is your verification code!\"\n msg = EmailMessage()\n email_text = f\"\"\" Hi,\n {body}\n\n {verification_code}\n \"\"\"\n msg.set_content(email_text)\n msg['Subject'] = subject\n msg['From'] = sent_from\n msg['To'] = send_to\n try:\n if settings.EMAIL_SERVER_TYPE == 'SSL':\n server = smtplib.SMTP_SSL(settings.EMAIL_SERVER, settings.EMAIL_SERVER_PORT)\n else:\n server = smtplib.SMTP(settings.EMAIL_SERVER, settings.EMAIL_SERVER_PORT)\n server.ehlo()\n server.login(settings.EMAIL_USER, settings.EMAIL_PASSWORD)\n server.send_message(msg)\n server.close()\n return verification_code\n except:\n return None",
"def test_sign_up_with_invitation_token(self):\n pass",
"async def inv(self, ctx):\n invite = await self.bot.create_invite(ctx.message.server)\n await self.bot.say(invite)",
"def sendForPeerReview(intent_request):\r\n\r\n #Intent fulfillment\r\n slots = get_slots(intent_request)\r\n source = intent_request['invocationSource']\r\n\r\n applicationNumber = slots['applicationNumber']\r\n peer = {}\r\n peer['firstName'] = slots['peerFirstName'].capitalize()\r\n peer['lastName'] = slots['peerLastName'].capitalize()\r\n\r\n applicationNumberVal = validate_applicationNumber(applicationNumber)\r\n if not applicationNumberVal['isValid']:\r\n slots[applicationNumberVal['violatedSlot']] = None\r\n\r\n return elicit_slot(intent_request['sessionAttributes'],\r\n intent_request['currentIntent']['name'],\r\n slots,\r\n applicationNumberVal['violatedSlot'],\r\n applicationNumberVal['message'])\r\n\r\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\r\n return delegate(output_session_attributes, get_slots(intent_request))\r\n\r\n peerVal = validate_peer(peer['firstName'],peer['lastName'])\r\n if not peerVal['isValid']:\r\n slots[peerVal['violatedSlot']] = None\r\n\r\n return elicit_slot(intent_request['sessionAttributes'],\r\n intent_request['currentIntent']['name'],\r\n slots,\r\n peerVal['violatedSlot'],\r\n peerVal['message'])\r\n\r\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\r\n return delegate(output_session_attributes, get_slots(intent_request))\r\n\r\n application = applicationsRead.getDetails(applicationNumber,'pullUpEverything')\r\n\r\n if ('y' in application['details']) and (application['details']['y'] is not None):\r\n return close(intent_request['sessionAttributes'],\r\n 'Fulfilled',\r\n {'contentType': 'PlainText',\r\n 'content': 'Done! I\\'ve sent application number {} to your colleague {} for a review.'.format(applicationNumber,peer['firstName'])})\r\n elif ('y' not in application['details']):\r\n return close(intent_request['sessionAttributes'],\r\n 'Fulfilled',\r\n {'contentType': 'PlainText',\r\n 'content': 'Application number {} does not seem to be evaluated for a risk score yet. Are you sure you want to send it to your colleague {} for a review?'.format(applicationNumber,peer['firstName'])})\r\n else:\r\n return close(intent_request['sessionAttributes'],\r\n 'Fulfilled',\r\n {'contentType': 'PlainText',\r\n 'content': 'Sorry, I could not send application {} to {}.'.format(applicationNumber,peer['firstName'])})",
"def test_sendInstantInvitation(pk: str = CONTACT_PK, channel_id: str = CHANNEL_ID) -> json:\r\n\r\n # Action\r\n status, result = u.sendInstantInvitation(pk, channel_id,\r\n \"Python invite description\", \"Python invite comment\")\r\n\r\n # Assertion\r\n AssertNotEmptyOrError(status, result)",
"def validate_phone_number(driver):\n code = input(\"Enter The code you got on your phone : \")\n code_input_element = driver.find_element(By.NAME, \"OTP\")\n code_validate_button = driver.find_element(\n By.CLASS_NAME, \"ms-Button-label\")\n code_input_element.send_keys(str(code))\n code_validate_button.click()",
"def do_invite(self, args):\n enter = Enter()\n acct1 = enter.account_name(1)\n if not db.get_user_token(acct1):\n enter.new_account(acct1)\n acct2 = enter.account_name(0)\n per = enter.percentage(acct1)\n ratio = enter.ratio(acct1, acct2, per, 1)\n dur = enter.duration()\n memoid = db.add_invite(acct1, acct2,\n per, ratio, dur)\n if memoid:\n msg.message('An invite has been created. To '\n + 'authorize this exchange and to send '\n + 'the invite please send any amount of '\n + 'SBD to @steem-ax along with the following '\n + 'memo message. Your SBD will be forwarded '\n + 'to the invitee:\\n\\n '\n + '{}:start'.format(memoid))\n else:\n msg.message(\"An invite could not be created.\")",
"async def _send_confirmation(\n ctx: Context,\n on_success: str,\n reminder_id: str | int\n ) -> None:\n embed = discord.Embed(\n description=on_success,\n colour=discord.Colour.green(),\n title=random.choice(POSITIVE_REPLIES)\n )\n\n footer_str = f\"ID: {reminder_id}\"\n\n embed.set_footer(text=footer_str)\n\n await ctx.send(embed=embed)",
"def send_confirm_challenge_mail(self):\n\n # TODO: better solution here. Maybe use sites framework?\n # domain = self.request.META['HTTP_HOST']\n domain = \"localhost:8000\"\n link = (\n \"https://\"\n + domain\n + \"/confirm/\"\n + self.__class__.__name__\n + \"/\"\n + self.confirmation_token\n )\n\n context = {\"confirmation_link\": link}\n msg = render_to_string(self.template_name, context)\n\n send_mail(\n \"Bekreft XYZ stemme\",\n msg,\n \"webkom@nabla.ntnu.no\",\n [self.username + \"@stud.ntnu.no\"],\n fail_silently=False,\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|