query (string, 9 – 9.05k chars) | document (string, 10 – 222k chars) | negatives (list, 19 – 20 items) | metadata (dict)
---|---|---|---
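Each row pairs a natural-language query with a matching code snippet (the document) and a list of hard-negative snippets; the metadata column declares a single "triplet" objective over those three columns. The sketch below is one assumed way such a row could be expanded into (anchor, positive, negative) training triples — it presumes rows are already available as Python dicts keyed by the column names above, since this preview does not specify a loading mechanism.

from typing import Dict, List, Tuple

def expand_triplets(row: Dict) -> List[Tuple[str, str, str]]:
    # The metadata's "triplet" entry lists column mappings such as
    # ["query", "document", "negatives"]; expand each mapping into flat triples.
    triples: List[Tuple[str, str, str]] = []
    for anchor_col, positive_col, negatives_col in row["metadata"]["objective"]["triplet"]:
        anchor = row[anchor_col]          # natural-language request
        positive = row[positive_col]      # the code snippet that answers it
        for negative in row[negatives_col]:   # superficially similar but non-matching snippets
            triples.append((anchor, positive, negative))
    return triples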
Retrieve the most recent rebalance submission's approval status
|
def get_rebalance_approval_status(self) -> str:
last_approval = GsIndexApi.last_rebalance_approval(self.id)
return get(last_approval, 'status')
|
[
"def _calculate_approval(self):\n from reviewboard.extensions.hooks import ReviewRequestApprovalHook\n\n approved = True\n failure = None\n\n if self.shipit_count == 0:\n approved = False\n failure = 'The review request has not been marked \"Ship It!\"'\n elif self.issue_open_count > 0:\n approved = False\n failure = 'The review request has open issues.'\n elif self.issue_verifying_count > 0:\n approved = False\n failure = 'The review request has unverified issues.'\n\n for hook in ReviewRequestApprovalHook.hooks:\n try:\n result = hook.is_approved(self, approved, failure)\n\n if isinstance(result, tuple):\n approved, failure = result\n elif isinstance(result, bool):\n approved = result\n else:\n raise ValueError('%r returned an invalid value %r from '\n 'is_approved'\n % (hook, result))\n\n if approved:\n failure = None\n except Exception as e:\n extension = hook.extension\n logger.error('Error when running ReviewRequestApprovalHook.'\n 'is_approved function in extension \"%s\": %s',\n extension.id, e, exc_info=True)\n\n self._approval_failure = failure\n self._approved = approved",
"async def status_for_repo_last_pr(*, github_access_token, repo_info, release_pr):\n if release_pr:\n if repo_info.project_type == LIBRARY_TYPE:\n if release_pr.open:\n return LIBRARY_PR_WAITING_FOR_MERGE\n else:\n labels = {\n label.lower()\n for label in await get_labels(\n github_access_token=github_access_token,\n repo_url=repo_info.repo_url,\n pr_number=release_pr.number,\n )\n }\n for label in BLOCKER_LABELS:\n if label.lower() in labels:\n return label.lower() if release_pr.open else None\n\n if not release_pr.open and WAITING_FOR_CHECKBOXES.lower() in labels:\n # If a PR is closed and the label is 'waiting for checkboxes', just ignore it\n # Maybe a user closed the PR, or the label was incorrectly updated\n return None\n\n for label in RELEASE_LABELS:\n if label.lower() in labels:\n return label.lower()\n\n return None",
"def get_ap_policy_approval(self, apgrp_name = 'System Default', ap_model = 'zf7982'):\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_ACCESS_POINT)\n\n checkbox = self.info['loc_cfg_appolicy_allow_all_checkbox']\n approval = self.s.is_checked(checkbox)\n \n max_clients = ap_group.get_ap_model_max_client_by_name(self, apgrp_name, ap_model)\n return {'approval':approval, 'max_clients':max_clients}",
"def update_approval_status(request):\n resp, success = {}, False\n status, user = request.GET.get(\"status\"), request.GET.get(\"user\")\n remarks = request.GET.get(\"remarks\")\n if status and user and remarks:\n try:\n approval = HRManagement.objects.get(id=int(user))\n approval.approval_status = int(status)\n approval.remarks = remarks\n approval.approved_by = request.user\n approval.save()\n isu = InterviewSchedule.objects.get(\n candidate=approval.resume,\n resume_status=4)\n isu.status = status\n isu.save()\n\n success = True\n except (HRManagement.DoesNotExist,\n InterviewSchedule.DoesNotExist,\n AttributeError):\n approval = None\n resp[\"success\"] = success\n return HttpResponse(json.dumps(resp), content_type=\"application/json\")",
"def approval_failure(self):\n if not hasattr(self, '_approval_failure'):\n self._calculate_approval()\n\n return self._approval_failure",
"def latest_activity(self):\n if self.approved_date is not None and self.approved_date > self.date:\n return {\n \"translation\": self,\n \"date\": self.approved_date,\n \"user\": self.approved_user,\n \"type\": \"approved\",\n }\n else:\n return {\n \"translation\": self,\n \"date\": self.date,\n \"user\": self.user,\n \"type\": \"submitted\",\n }",
"def pending_reviews(self):\n pending = QUORUM\n comments = self.get_comments()\n for comment in comments:\n username = comment['user']['login']\n if (approve_regex.search(comment['body'])\n and (username in QUORUM_USERS or len(QUORUM_USERS) == 0)):\n pending = pending - 1\n return pending",
"def view_status():\n pending_names = SHEET.worksheet('pending').col_values(1)\n approved_names = SHEET.worksheet('approved').col_values(1)\n rejected_names = SHEET.worksheet('rejected').col_values(1)\n validate_payroll_num()\n if name in pending_names:\n print(colored('Your application is currently under review.',\n 'cyan', attrs=['bold']))\n print(colored('Please contact HR if 5 days have passed since your '\n 'application was submitted\\n', 'cyan',\n attrs=['bold']))\n exit()\n elif name in approved_names:\n print('\\nYour application has been approved\\n')\n exit()\n elif name in rejected_names:\n print(colored('Unfortunately your application has been rejected',\n 'cyan', attrs=['bold']))\n print(colored('Please speak to HR for further information\\n', 'cyan',\n attrs=['bold']))\n exit()\n else:\n print(colored('\\nYour application has not been received', 'cyan',\n attrs=['bold']))\n print('\\nWould you like to submit an application?')\n apply = input('Please enter Y to calculate your reduncancy '\n 'or any other key to exit:\\n')\n if apply.lower() == 'y':\n calculate_redundancy()\n else:\n staff_logout()\n exit()",
"def approval_program(self) -> MaybeValue:\n return AppParam.approvalProgram(self._app)",
"def approval_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PatchBaselineApprovalRuleArgs']]]]:\n return pulumi.get(self, \"approval_rules\")",
"def overallstatus(self):\n return self._overallstatus",
"def get_approved(self):\n return self.filter(verified=True, blacklisted=False,\n flags__lte=SyllabusFlag.LIMIT)",
"def get_security_status(self):\n return self.latest_status",
"def last_submitted_report(self, submission_bpk_private='ignore'):\n assert self.ensure_one(), _(\"last_submitted_report() works only for one record at a time!\")\n\n # HINT: Since imported donation reports have no submission_id we can not use it in the domain!\n domain = [('submission_env', '=', self.submission_env),\n ('bpk_company_id', '=', self.bpk_company_id.id),\n ('partner_id', '=', self.partner_id.id),\n ('meldungs_jahr', '=', self.meldungs_jahr),\n ('state', '!=', False),\n ('state', 'not in', ['new', 'skipped', 'disabled', 'error']),\n #('submission_id', '!=', False),\n #('submission_id_datetime', '!=', False),\n ('id', '!=', self.id)]\n\n # ATTENTION: If submission_bpk_private is set the lsr FOR THIS BPK will be returned!\n if submission_bpk_private != 'ignore':\n domain += [('submission_bpk_private', '=', submission_bpk_private)]\n\n # ATTENTION: Make sure the inverse order is used for the XML record generation in the submission!\n # HINT: Since imported donation reports have no submission_id_datetime we can not use it for the sort order!\n # lsr = self.sudo().search(domain,\n # order=\"submission_id_datetime DESC, anlage_am_um DESC, create_date DESC\",\n # limit=1)\n lsr = self.sudo().search(domain,\n order=\"anlage_am_um DESC, create_date DESC\",\n limit=1)\n\n # Return the empty record set if no lsr was found\n if not lsr:\n return lsr\n\n # Check that submission_id and submission_id_datetime are set for non imported donation reports\n if not lsr.imported:\n if not lsr.submission_id or not lsr.submission_id_datetime:\n raise ValidationError(_(\"Non imported and submitted donation report (ID %s) is not linked to a valid\"\n \"donation report submission!\") % lsr.id)\n\n # Return the lsr also on an ERR-U-008\n # ATTENTION: ERR-U-008 error means that there was already an 'Erstmeldung' for this donation report\n # e.g.: if the customer did a manual 'Spendenmeldung' in the FinanzOnline Website\n if lsr.state == 'response_nok' and 'ERR-U-008' in (lsr.response_error_code or ''):\n return lsr\n\n # Return the lsr also on an ERR-U-006 AND ERR-007\n # ATTENTION: ERR-U-006/7 error means that the lsr was an \"Aenderungsmeldung\" but that there was no previous\n # donation report with for the RefNr (submission_refnr) of the Aenderungsmeldung\n # (or the ZR or Env changed). This may only happen if donation reports where submitted by other\n # systems or we have a bug so that we calculated RefNr of the Aenderungsmeldung instead of taking\n # it from the former lsr.\n # HINT: This may also happen on cancellation reports if a donation report is missing in FinanzOnline\n if lsr.state == 'response_nok' and any(\n ecode in (lsr.response_error_code or '') for ecode in ('ERR-U-006', 'ERR-U-007')):\n return lsr\n\n # ATTENTION: If the state is 'submitted' or 'unexpected_response' we do not know if the lsr donation report\n # was accepted by FinanzOnline or not! Therefore we throw an exception!\n if lsr.state != \"response_ok\":\n raise ValidationError(_(\"Submitted donation report (ID %s) is in state '%s' but should be \"\n \"in state 'response_ok'.\") % (lsr.id, lsr.state))\n\n # An lsr was found with state 'response_ok'\n return lsr",
"def status_pick_up(self):\n return self._status_pick_up",
"def grant_status(grant_duration: DateRange) -> GrantStatus:\n if grant_duration.upper_inf:\n return GrantStatus.Active\n\n if grant_duration.upper > date.today():\n return GrantStatus.Active\n\n return GrantStatus.Closed",
"def merge_requests_author_approval(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"merge_requests_author_approval\")",
"def initial_tag_review_status(feature_type):\n if feature_type == core_enums.FEATURE_TYPE_INCUBATE_ID:\n return core_enums.REVIEW_PENDING\n return core_enums.REVIEW_NA",
"def posts_queued_for_approval(self):\n\n query = \"\"\"select post_url from managerPostApproval\n where platform = ? and username = ? and approved = 0 and sent_request = 1\n \"\"\"\n \n return [p[0] for p in self.db.query(query, (self.platform, self.username))]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
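The row above maps the approval-status query to Basket.get_rebalance_approval_status. A minimal usage sketch, mirroring the Basket.get("GSMBXXXX") pattern that appears in the other queries in this preview (the ticker is a placeholder, the returned status values are assumed, and an authenticated GsSession is assumed to exist):

from gs_quant.markets.baskets import Basket

basket = Basket.get("GSMBXXXX")                    # placeholder basket ticker from the source examples
status = basket.get_rebalance_approval_status()    # assumed to return a status string such as 'approved'
print(status)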
Cancel the most recent rebalance submission Usage Cancel the basket's most recent rebalance submission if it has not yet been approved Examples Cancel the basket's most recent rebalance submission >>> from gs_quant.markets.baskets import Basket >>> >>> basket = Basket.get("GSMBXXXX") >>> basket.cancel_rebalance() See also
|
def cancel_rebalance(self) -> Dict:
return GsIndexApi.cancel_rebalance(self.id, CustomBasketsRebalanceAction.default_instance())
|
[
"def request_cancel(self, *args, **kwargs) -> None:\n self.connection.request_cancel_workflow_execution(self.domain.name, self.workflow_id, run_id=self.run_id)",
"def cancel_request(self, requestid):\n # TODO: return to SedmDb.py because of how much sql \"understanding\" it requires?\n self.db.update_request({'id': requestid, 'status': 'CANCELED'})\n # cancel the associated atomicrequests\n # TODO: allow more nuanced update function inputs (e.g. add a where_dict)?\n self.db.execute_sql(\"UPDATE atomicrequest SET status='CANCELED' WHERE request_id='%s'\" % (requestid,))\n return (0, \"Request canceled\")",
"def do_cancel(self, args):\n acct = Enter().account_name(1)\n memoid = Enter().memo_id(acct)\n if not db.verify_memoid(acct, memoid):\n return\n if db.cancel(acct, memoid):\n msg.message(\"The exchange has been canceled\")",
"def action_cancel(self):\n # TDE DUMB: why is cancel_procuremetn in ctx we do quite nothing ?? like not updating the move ??\n if any(move.state == 'done' for move in self):\n raise UserError(_('You cannot cancel a stock move that has been set to \\'Done\\'.'))\n\n procurements = self.env['procurement.order']\n for move in self:\n if move.reserved_quant_ids:\n move.quants_unreserve()\n if self.env.context.get('cancel_procurement'):\n if move.propagate:\n pass\n # procurements.search([('move_dest_id', '=', move.id)]).cancel()\n else:\n if move.move_dest_id:\n if move.propagate and move.move_dest_id.state!='done':\n move.move_dest_id.action_cancel()\n elif move.move_dest_id.state == 'waiting':\n # If waiting, the chain will be broken and we are not sure if we can still wait for it (=> could take from stock instead)\n move.move_dest_id.write({'state': 'confirmed'})\n if move.procurement_id:\n procurements |= move.procurement_id\n\n self.write({'state': 'cancel', 'move_dest_id': False})\n if procurements:\n procurements.check()\n return True",
"async def cancel(ctx: commands.Context):\n actives = state[\"active-pickbans-by-user\"]\n process = actives.get(ctx.author)\n if not process:\n await ctx.send(\n \"You do not have an active pick/ban process. Start one with the `pickban` command.\"\n )\n return\n captain1, captain2 = process[\"captains\"]\n actives.pop(captain1, None)\n actives.pop(captain2, None)\n await ctx.send(\n \"Cancelled pick/ban process for {} and {}.\".format(\n captain1.mention, captain2.mention\n )\n )",
"def cancel(self,\n headers=None,\n **query_parameters):\n return self.cancel_backup(\n headers=headers,\n **query_parameters\n )",
"def cancel_bancor_fee(self, cancel_bancor_fee):\n\n self._cancel_bancor_fee = cancel_bancor_fee",
"def cancel(self, user):\n for moderation_request in self.moderation_requests.filter(is_active=True):\n moderation_request.update_status(\n action=constants.ACTION_CANCELLED,\n by_user=user,\n message=_(\"Cancelled collection\"),\n )\n self.status = constants.CANCELLED\n self.save(update_fields=[\"status\"])",
"async def cancel_order(self, **params):\r\n return await self.client_helper(\"cancel_order\", **params)",
"def cancel(self):\n if self._jobid == -1:\n return\n\n os_ext.run_command('scancel %s' % self._jobid,\n check=True, timeout=settings.job_submit_timeout)\n self._is_cancelling = True\n self.wait()",
"def cancel(self, data):\n process_cancelling(self, data['user'], data['text'])\n\n if not self.shift.is_started:\n postponed_applications = self.shift.applications.filter(\n state=ApplicationStateEnum.POSTPONED)\n\n for application in postponed_applications:\n application.renew()\n application.save()\n\n if self.state == ApplicationStateEnum.APPROVED:\n return ApplicationStateEnum.CANCELLED\n\n return ApplicationStateEnum.FAILED",
"def cancel(self):\n payload = {\n \"command\": \"cancel\",\n \"options\": {}\n }\n self.log('INFO', 'Cancel workflow (instance ID: {})...'.format(self.instanceId))\n rsp = self.rest_put(self.uri+\"/action\", payload)\n\n if rsp.get('status', None) != 202:\n raise Exception('Cancel workflow (instance ID: {}) fail, http status: {}, response: {}'.\n format(self.instanceId, rsp.get('status', None), rsp.get('text', '')))\n else:\n self.log('INFO', 'Cancel workflow (instance ID: {}) done'.\n format(self.instanceId))",
"async def futures_cancel_order(self, **params):\r\n return await self.client_helper(\"futures_cancel_order\", **params)",
"def cancel(self, _id):\n connection_factory = factory.connection_factory(self.connection_factory_type)\n try:\n with connection_factory.get_connection() as client:\n _filter = {\"_id\": ObjectId(_id)}\n update = {\"state\": BasketState.CANCELED.name}\n document = client.farmers.basket.find_one_and_update(_filter,\n {\"$set\": update},\n return_document=ReturnDocument.AFTER)\n if document:\n return True\n except errors.DuplicateKeyError as duplicate_key_error:\n self.logger.error(duplicate_key_error)\n return False\n self.logger.error(\"Could not set basket to canceled state!\")\n return False",
"def cancel_subtasks(self, analysis_pk):\n\n from .models import Analysis\n analysis = Analysis.objects.get(pk=analysis_pk)\n _now = timezone.now()\n\n subtask_qs = analysis.sub_task_statuses.filter(\n status__in=[\n AnalysisTaskStatus.status_choices.PENDING,\n AnalysisTaskStatus.status_choices.QUEUED,\n AnalysisTaskStatus.status_choices.STARTED]\n )\n\n for subtask in subtask_qs:\n task_id = subtask.task_id\n status = subtask.status\n logger.info(f'subtask revoked: analysis_id={analysis_pk}, task_id={task_id}, status={status}')\n if task_id:\n self.app.control.revoke(task_id, terminate=True, signal='SIGTERM')\n self.update_state(task_id=task_id, state='REVOKED')\n subtask_qs.update(status=AnalysisTaskStatus.status_choices.CANCELLED, end_time=_now)",
"def send_cancel(self) -> None:\n\n state = self.get_current_state()\n if state != \"CANCELING\" and state not in TERMINAL_STATES:\n # If it's not obvious we shouldn't cancel, cancel.\n\n # If we end up in CANCELING but the workflow runner task isn't around,\n # or we signal it at the wrong time, we will stay there forever,\n # because it's responsible for setting the state to anything else.\n # So, we save a timestamp, and if we see a CANCELING status and an old\n # timestamp, we move on.\n self._store.set(\"cancel_time\", get_iso_time())\n # Set state after time, because having the state but no time is an error.\n self._store.set(\"state\", \"CANCELING\")",
"def cancel_subnet(self, subnet_id):\n subnet = self.get_subnet(subnet_id, mask='id, billingItem.id')\n if \"billingItem\" not in subnet:\n raise exceptions.SoftLayerError(\"subnet %s can not be cancelled\"\n \" \" % subnet_id)\n billing_id = subnet['billingItem']['id']\n return self.client['Billing_Item'].cancelService(id=billing_id)",
"def cancel(self):\n self.sa_session.rollback()",
"def do_cancel(self):\n return self.case_cancel()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve corporate actions for a basket across a date range
|
def get_corporate_actions(self,
start: dt.date = DateLimit.LOW_LIMIT.value,
end: dt.date = dt.date.today() + dt.timedelta(days=10),
ca_type: List[CorporateActionType] = CorporateActionType.to_list()) -> pd.DataFrame:
where = dict(assetId=self.id, corporateActionType=ca_type)
query = DataQuery(where=where, start_date=start, end_date=end)
response = GsDataApi.query_data(query=query, dataset_id=IndicesDatasets.CORPORATE_ACTIONS.value)
return pd.DataFrame(response)
|
[
"def apply_corporate_actions(stock, corporate):\n stock[\"Date\"] = pd.to_datetime(stock[\"Date\"])\n corporate[\"Ex Date\"] = pd.to_datetime(\n corporate[\"Ex Date\"], errors='coerce')\n # corporate[\"BC Start Date\"] = pd.to_datetime(corporate[\"BC Start Date\"],errors='coerce')\n # corporate[\" BC End Date\\t\"] = pd.to_datetime(corporate[\" BC End Date\\t\"],errors='coerce')\n # corporate[\"ND Start Date\"] = pd.to_datetime(corporate[\"ND Start Date\"],errors='coerce')\n # corporate[\"ND End Date\"] = pd.to_datetime(corporate[\"ND End Date\"],errors='coerce')\n\n bonus_df = corporate[corporate['Purpose'].str.contains(\"Bonus\")]\n for index, row in bonus_df.iterrows():\n try:\n start_date = bonus_df.loc[index, \"Ex Date\"]\n ratio = bonus_df.loc[index, \"Purpose\"]\n r1, r2 = re.findall(r\"\\d+\", ratio)\n r1, r2 = int(r1), int(r2)\n end_date = stock.tail(1)[\"Date\"].values[0]\n stock = bonus_issue(stock, start_date, end_date, r1, r2)\n except:\n pass\n\n stock_split_df = corporate[corporate['Purpose'].str.contains(\"Stock\")]\n for index, row in stock_split_df.iterrows():\n try:\n start_date = stock_split_df.loc[index, \"Ex Date\"]\n ratio = stock_split_df.loc[index, \"Purpose\"]\n r1, r2 = re.findall(r\"\\d+\", ratio)\n r1, r2 = int(r1), int(r2)\n end_date = stock.tail(1)[\"Date\"].values[0]\n stock = stock_split(stock, start_date, end_date, r1, r2)\n except:\n pass\n stock = create_dividend(stock, corporate)\n\n return stock",
"def _get_all_contracts(self, date_from, date_to, states=['open']):\n return self.search([])._get_contracts(date_from, date_to, states=states)",
"def get_historical_disclosure_list(self, fromdate = datetime.today().date() - timedelta(days = 365), todate=datetime.today().date(),disclosure_type=\"FR\", subject =\"4028328c594bfdca01594c0af9aa0057\"):\n data = {\n \"fromDate\": str(fromdate),\n \"toDate\": str(todate),\n \"year\": \"\", \"prd\": \"\",\n \"term\": \"\", \"ruleType\": \"\",\n \"bdkReview\": \"\",\n \"disclosureClass\": disclosure_type,\n \"index\": \"\", \"market\": \"\",\n \"isLate\": \"\", \"subjectList\": [subject],\n \"mkkMemberOidList\": [self.company_id],\n \"inactiveMkkMemberOidList\": [],\n \"bdkMemberOidList\": [],\n \"mainSector\": \"\", \"sector\": \"\",\n \"subSector\": \"\", \"memberType\": \"IGS\",\n \"fromSrc\": \"N\", \"srcCategory\": \"\",\n \"discIndex\": []}\n response = requests.post(url=\"https://www.kap.org.tr/tr/api/memberDisclosureQuery\", json=data)\n return json.loads(response.text)",
"def withEachCalendarHomeDo(action, batchSize=None): # @NoSelf",
"def _get_contracts(self, date_from, date_to, states=['open'], kanban_state=False):\n state_domain = [('state', 'in', states)]\n if kanban_state:\n state_domain = expression.AND([state_domain, [('kanban_state', 'in', kanban_state)]])\n\n return self.env['hr.contract'].search(\n expression.AND([[('employee_id', 'in', self.ids)],\n state_domain,\n [('date_start', '<=', date_to),\n '|',\n ('date_end', '=', False),\n ('date_end', '>=', date_from)]]))",
"def decide_actions(last_used, now):\n if 'LastUsedDate' in last_used:\n days_since_last_use = (now - last_used['LastUsedDate'].replace(tzinfo=None)).days\n for days_limit, actions in days_and_actions:\n if days_limit <= days_since_last_use:\n return actions\n\n return []",
"def get_items_by_date(month, year):",
"def test_get_by_month(self):\n self.add_expected()\n # given the user_list\n self.user.transactions = self.expected_list\n\n october = datetime.now()\n # make the date match october 2021\n october = october.replace(year=2021, month=10).date()\n user_history = self.user.get_records_by_date(october, True)\n # the records should match october 2021\n expected_transactions = self.create_transaction()\n # filter everything that is Oct-2021\n expected_transactions[self.user.spend_categories[0]] = [self.oct_01, self.oct_10]\n assert user_history == expected_transactions",
"def get_attacks_between_dates(self, start_date, end_date):",
"def test_get_by_day(self):\n\n self.add_expected()\n # given the user_list\n self.user.transactions = self.expected_list\n\n october = datetime.now()\n # make the date match october 2021\n october = october.replace(year=2021, month=10, day=1).date()\n user_history = self.user.get_records_by_date(october, False)\n # the records should match october 2021\n expected_transactions = self.create_transaction()\n # filter everything that is Oct-2021\n expected_transactions[self.user.spend_categories[0]] = [self.oct_01]\n assert user_history == expected_transactions",
"def test_expenses_categories_accounting_get(self):\n pass",
"def dateRangeExpenses():\n\n year1 = int(request.args['year1'])\n month1 = int(request.args['month1'])\n day1 = int(request.args['day1'])\n year2 = int(request.args['year2'])\n month2 = int(request.args['month2'])\n day2 = int(request.args['day2'])\n selectedDate1 = date(year1, month1, day1)\n selectedDate2 = date(year2, month2, day2)\n\n date_range_expenses = getDateRangeTotalExpenses(selectedDate1, selectedDate2, current_user.id)\n\n resp = jsonify(status_code=200,\n savings = date_range_expenses)\n return resp",
"def requestActions(logger, owAPIHost, owb64APIKey):\n logger.debug(LOG_PREFIX + \"requestActions: '%(owAPIHost)s' '%(owb64APIKey)s'\"\n %\n {\n 'owAPIHost': owAPIHost,\n 'owb64APIKey': owb64APIKey\n })\n\n headers = {'Authorization' : 'Basic %s' % owb64APIKey}\n\n r = requests.get(\n '%(owAPIHost)s/api/v1/namespaces/_/actions?limit=100&skip=0' %\n {\n 'owAPIHost': owAPIHost\n }, headers=headers, verify=False)\n\n logger.info(LOG_PREFIX + \"requestActions: Received: %s\" % r.text)\n _raise_if_api_exception(r)\n\n # r.raise_for_status()\n return r",
"def get_sales(start_date: datetime.datetime, end_date: datetime.datetime, seller_skus: set) -> List:\n\n print(\"getting sales data...\")\n interval = create_date_interval(start_date, end_date)\n\n return _get_sales(interval, Granularity.HOUR, seller_skus)",
"def getBookingBrains(self, start_date=None, end_date=None, **kwargs):\n\n # Initialize\n ctool = getToolByName(self, 'portal_catalog')\n btool = getToolByName(self, 'portal_booking')\n center_obj = self.getBookingCenter()\n center_path = '/'.join(center_obj.getPhysicalPath())\n query_args = {}\n start_ts = None\n end_ts = None\n\n try:\n start_date = btool.ts2zdt(int(start_date))\n end_date = btool.ts2zdt(int(end_date))\n except:\n pass\n\n if start_date is not None:\n start_ts = btool.zdt2ts(start_date)\n if end_date is not None:\n end_ts = btool.zdt2ts(end_date)\n\n # Add default query args\n query_args['path'] = center_path\n query_args['portal_type'] = 'Booking'\n query_args['sort_on'] = 'start'\n\n # Add query args specific to start date and end date\n start_end_ranges = []\n if start_date is not None:\n # Start date between booking start and end date\n start_end_ranges.append((start_date, 'max', start_date, 'min'))\n if end_date is not None:\n # End date between booking start and end date\n start_end_ranges.append((end_date, 'max', end_date, 'min'))\n # Booking in start date and end date\n start_end_ranges.append((start_date, 'min', end_date, 'max'))\n\n # Update query_args\n for k,v in kwargs.items():\n if v:\n query_args.update({k:v})\n # Get brains\n brains = []\n brain_rids = []\n for sdate, srange, edate, erange in start_end_ranges:\n query_args_copy = query_args.copy()\n if sdate is not None:\n query_args_copy['start'] = {'query' : sdate, 'range' : srange}\n\n if edate is not None:\n query_args_copy['end'] = {'query' : edate, 'range' : erange}\n\n new_brains = ctool.searchResults(**query_args_copy)\n for brain in new_brains:\n brain_rid = brain.getRID()\n\n if brain_rid in brain_rids:\n continue\n\n brain_rids.append(brain_rid)\n brain_start_dt = DateTime(brain.start)\n brain_start_ts = btool.zdt2ts(brain_start_dt)\n if brain_start_ts == end_ts:\n continue\n brain_end_dt = DateTime(brain.end)\n brain_end_ts = btool.zdt2ts(brain_end_dt)\n if brain_end_ts == start_ts:\n continue\n brains.append(brain)\n return brains",
"def influence_access_timeline(active_users, first_monday, last_monday):\n\n txns = []\n x_data_txns = []\n events_list = []\n feeds_list = []\n for t in TechCashTransaction.objects.filter(timestamp__range=(first_monday, last_monday), location__type=Location.EATERY, user__in=active_users).order_by(\"timestamp\"):\n x_data_txns.append(t.timestamp)\n txns.append(t)\n\n x_data_events = []\n for e in Event.objects.filter(timestamp__range=(first_monday, last_monday)).filter(user__in=active_users).order_by(\"timestamp\"):\n # plot all events by color\n x_data_events.append(e.timestamp)\n events_list.append(e.action) \n \n # find number of unique users that have events\n num_mobile_users = Event.objects.filter(timestamp__range=(first_monday, last_monday)).order_by('user').values('user').distinct().count()\n for u in active_users:\n print \"User %d: %d\"%(u, Event.objects.filter(timestamp__range=(first_monday, last_monday)).filter(user__id=u).count())\n \n x_data_feeds = []\n for e in FeedEvent.objects.filter(timestamp__range=(first_monday, last_monday)).filter(user__in=active_users).order_by(\"timestamp\"):\n x_data_feeds.append(e.timestamp)\n feeds_list.append(e.action)\n\n # find number of unique users that looked at feeds \n num_feed_users = FeedEvent.objects.filter(timestamp__range=(first_monday, last_monday)).order_by('user').values('user').distinct().count()\n for u in active_users:\n print \"User %d: %d\"%(u, FeedEvent.objects.filter(timestamp__range=(first_monday, last_monday)).filter(user__id=u).count())\n \n print \"Num mobile users:\", num_mobile_users\n print \"Num feed users:\", num_feed_users\n\n years = mdates.YearLocator()\n fridays = mdates.WeekdayLocator(byweekday=mdates.FR)\n months = mdates.MonthLocator() # every month\n weekFmt = mdates.DateFormatter('%b-%d')\n days = mdates.DayLocator()\n\n fig1 = plt.figure(figsize=(15,10))\n fig1.subplots_adjust(hspace=0.3)\n ax1 = fig1.add_subplot(211)\n ax1.plot(x_data_events, events_list, \".\")\n ax1.set_title(\"Access event time line\")\n # format the ticks\n labels = ax1.get_xticklabels() \n for label in labels: \n label.set_rotation(45) \n ax1.xaxis.set_major_locator(fridays)\n ax1.xaxis.set_major_formatter(weekFmt)\n ax1.xaxis.set_minor_locator(days)\n ax1.autoscale_view()\n # format the coords message box\n #def price(x): return '$%1.2f'%x\n #ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')\n #ax.fmt_ydata = price\n ax1.grid(True)\n\n ax2 = fig1.add_subplot(212)\n ax2.plot(x_data_feeds, feeds_list, \".\")\n ax2.set_title(\"Feed access time line\")\n # format the ticks\n labels = ax2.get_xticklabels() \n for label in labels: \n label.set_rotation(45) \n ax2.xaxis.set_major_locator(fridays)\n ax2.xaxis.set_major_formatter(weekFmt)\n ax2.xaxis.set_minor_locator(days)\n ax2.autoscale_view()\n # format the coords message box\n #def price(x): return '$%1.2f'%x\n #ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')\n #ax.fmt_ydata = price\n ax2.grid(True)\n\n #fig.autofmt_xdate()\n fig1.savefig(PREFIX_IMG+\"influence_timeline.%s\"%img_type, bbox_inches=\"tight\")\n\n fig1.show()",
"def test_date_search_from_items(self):\n self._request_valid(\n f\"collections/{self.tested_product_type}/items?bbox=0,43,1,44\",\n expected_search_kwargs=dict(\n productType=self.tested_product_type,\n page=1,\n items_per_page=DEFAULT_ITEMS_PER_PAGE,\n raise_errors=True,\n geom=box(0, 43, 1, 44, ccw=False),\n ),\n )\n self._request_valid(\n f\"collections/{self.tested_product_type}/items?bbox=0,43,1,44&datetime=2018-01-20/2018-01-25\",\n expected_search_kwargs=dict(\n productType=self.tested_product_type,\n page=1,\n items_per_page=DEFAULT_ITEMS_PER_PAGE,\n raise_errors=True,\n start=\"2018-01-20T00:00:00\",\n end=\"2018-01-25T00:00:00\",\n geom=box(0, 43, 1, 44, ccw=False),\n ),\n )",
"def test_expenses_categories_business_get(self):\n pass",
"def test_date_search_from_catalog_items(self):\n results = self._request_valid(\n f\"catalogs/{self.tested_product_type}/year/2018/month/01/items?bbox=0,43,1,44\",\n expected_search_kwargs=dict(\n productType=self.tested_product_type,\n page=1,\n items_per_page=DEFAULT_ITEMS_PER_PAGE,\n raise_errors=True,\n start=\"2018-01-01T00:00:00\",\n end=\"2018-02-01T00:00:00\",\n geom=box(0, 43, 1, 44, ccw=False),\n ),\n )\n self.assertEqual(len(results.features), 2)\n\n results = self._request_valid(\n f\"catalogs/{self.tested_product_type}/year/2018/month/01/items\"\n \"?bbox=0,43,1,44&datetime=2018-01-20/2018-01-25\",\n expected_search_kwargs=dict(\n productType=self.tested_product_type,\n page=1,\n items_per_page=DEFAULT_ITEMS_PER_PAGE,\n raise_errors=True,\n start=\"2018-01-20T00:00:00\",\n end=\"2018-01-25T00:00:00\",\n geom=box(0, 43, 1, 44, ccw=False),\n ),\n )\n self.assertEqual(len(results.features), 2)\n\n results = self._request_valid(\n f\"catalogs/{self.tested_product_type}/year/2018/month/01/items\"\n \"?bbox=0,43,1,44&datetime=2018-01-20/2019-01-01\",\n expected_search_kwargs=dict(\n productType=self.tested_product_type,\n page=1,\n items_per_page=DEFAULT_ITEMS_PER_PAGE,\n raise_errors=True,\n start=\"2018-01-20T00:00:00\",\n end=\"2018-02-01T00:00:00\",\n geom=box(0, 43, 1, 44, ccw=False),\n ),\n )\n self.assertEqual(len(results.features), 2)\n\n results = self._request_valid(\n f\"catalogs/{self.tested_product_type}/year/2018/month/01/items\"\n \"?bbox=0,43,1,44&datetime=2019-01-01/2019-01-31\",\n )\n self.assertEqual(len(results.features), 0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
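The corporate-actions row above documents Basket.get_corporate_actions(start, end, ca_type), which queries the CORPORATE_ACTIONS dataset and returns a pandas DataFrame. A hedged usage sketch in the same style as the other examples in this preview (ticker and dates are placeholders; an authenticated GsSession is assumed):

import datetime as dt

from gs_quant.markets.baskets import Basket

basket = Basket.get("GSMBXXXX")                    # placeholder basket ticker
actions = basket.get_corporate_actions(
    start=dt.date(2021, 1, 1),                     # placeholder date range; ca_type keeps its default
    end=dt.date(2021, 6, 30),
)                                                  # returns a pandas DataFrame of corporate actions
print(actions.head())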
Retrieve fundamentals data for a basket across a date range
|
def get_fundamentals(self,
start: dt.date = DateLimit.LOW_LIMIT.value,
end: dt.date = dt.date.today(),
period: DataMeasure = DataMeasure.ONE_YEAR.value,
direction: DataMeasure = DataMeasure.FORWARD.value,
metrics: List[DataMeasure] = DataMeasure.list_fundamentals()) -> pd.DataFrame:
where = dict(assetId=self.id, period=period, periodDirection=direction, metric=metrics)
query = DataQuery(where=where, start_date=start, end_date=end)
response = GsDataApi.query_data(query=query, dataset_id=IndicesDatasets.BASKET_FUNDAMENTALS.value)
return pd.DataFrame(response)
|
[
"def request_fundamentals(stock_index):\n items = [\n ['l1', 'Last Price'],\n ['y', 'Dividend Yield'],\n ['r', 'Price/Earnings'],\n ['e', 'Earnings/Share'],\n ['b4', 'Book Value'],\n ['j', '52 week low'],\n ['k', '52 week high'],\n ['j1', 'Market Cap'],\n ['j4', 'EBITDA'],\n ['p5', 'Price/Sales'],\n ['p6', 'Price/Book'],\n ['f6','Float']\n ] \n params = ''.join([ x[0] for x in items ])\n url = 'http://download.finance.yahoo.com/d/quotes.csv?'\n #edgar = 'http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK='\n\n reader = csv.reader(open(data_dir + stock_index +'/'+ stock_index +'_atoms.csv'))\n outrows = [ row for row in reader ]\n symbols = [ row[0] for row in outrows[1:] ]\n\n #outrows[0] += [ item[1] for item in items ] + ['SEC Filings']\n outrows[0] += [ item[1] for item in items ]\n \n print('Getting fundamentals of stocks in {}'.format(stock_index))\n for idx in range(0,len(symbols),20):\n query = url + 's=' + '+'.join(symbols[idx:idx+20]) + '&f=' + params\n fo = urlopen(query)\n tmpcsv = csv.reader(fo)\n rows = [ row for row in tmpcsv ]\n for count, row in enumerate(rows):\n realidx = idx + count + 1\n # change n/a to empty cell\n row = [ x.replace('N/A', '') for x in row ]\n # market cap and ebitda have 'B' or 'M' in them sometimes\n row[7] = correctToBillions(row[7])\n row[8] = correctToBillions(row[8])\n # add the edgar link\n #row.append(edgar + symbols[realidx-1])\n outrows[realidx] = outrows[realidx] + row\n #print('Processed: %s rows' % (idx + 20))\n\n output_dir = data_dir + stock_index + '/' + todays_date_mmddyy() + '/'\n fo = open(output_dir + 'fundm_'+ todays_date_mmddyy() +'.csv', 'w')\n writer = csv.writer(fo, lineterminator='\\n')\n writer.writerows(outrows)\n fo.close()",
"def gather(self):\n quandl.ApiConfig.api_key = self.apikey\n # this would be where I would construct it's own api call, using quandl's get_table method instead\n #base = quandl.ApiConfig.api_base\n #base += '/datatables/' + querypattern + '&api_key=' + apikey\n #data = requests.get(base)\n metadata = quandl.Dataset('WIKI/' + self.ticker)\n date = metadata['newest_available_date']\n data = quandl.get_table('WIKI/PRICES',\n ticker=self.ticker,\n date={'gte':(date - relativedelta(years=5))})\n return(data)",
"def fundings(bank):\n conn = engine.connect()\n loanPie_df = pd.read_sql(\"select ApprovalYear as FundingYear, sum(grossApproval) as TotalApproval from bank_data2 group by 1,2\", conn)\n all_fundings = loanPie_df.to_json(orient='records')\n\n return all_fundings",
"def get_fund_histories(self, begin: str = '-1m', end: str = arrow.now(), size: int = 1024):\n begin = len(begin)>5 and arrow.get(begin).format('YYYY-MM-DD') \\\n or self._str2date(begin).format('YYYY-MM-DD')\n end = arrow.get(end).format('YYYY-MM-DD')\n resp = sess.get(api.fund_history % (self.code, begin, end, size))\n df = pd.DataFrame(\n re.findall(api.x_fund_history, resp.text),\n columns=['date','nav','cnav','percent'])\n df['date'] = pd.to_datetime(df['date'])\n self.fund_history = df.set_index('date').sort_index(axis=0)",
"def getDeparting(start_date, end_date):\n cursor = conn.cursor()\n query = \"\"\"SELECT departure_odate, MIN(total_usd)\n FROM flights\n WHERE date(departure_odate) BETWEEN date('{0}') and date('{1}')\n GROUP BY departure_odate\n ORDER BY departure_odate;\n \"\"\".format(start_date, end_date)\n cursor.execute(query)\n data = cursor.fetchall()\n updateJSON(data, '')",
"def get_historical_disclosure_list(self, fromdate = datetime.today().date() - timedelta(days = 365), todate=datetime.today().date(),disclosure_type=\"FR\", subject =\"4028328c594bfdca01594c0af9aa0057\"):\n data = {\n \"fromDate\": str(fromdate),\n \"toDate\": str(todate),\n \"year\": \"\", \"prd\": \"\",\n \"term\": \"\", \"ruleType\": \"\",\n \"bdkReview\": \"\",\n \"disclosureClass\": disclosure_type,\n \"index\": \"\", \"market\": \"\",\n \"isLate\": \"\", \"subjectList\": [subject],\n \"mkkMemberOidList\": [self.company_id],\n \"inactiveMkkMemberOidList\": [],\n \"bdkMemberOidList\": [],\n \"mainSector\": \"\", \"sector\": \"\",\n \"subSector\": \"\", \"memberType\": \"IGS\",\n \"fromSrc\": \"N\", \"srcCategory\": \"\",\n \"discIndex\": []}\n response = requests.post(url=\"https://www.kap.org.tr/tr/api/memberDisclosureQuery\", json=data)\n return json.loads(response.text)",
"def getFinancialConditions(self, startDate, endDate):\n from staffing.models import FinancialCondition\n fc = FinancialCondition.objects.filter(consultant=self,\n consultant__timesheet__charge__gt=0, # exclude null charge\n consultant__timesheet__working_date__gte=startDate,\n consultant__timesheet__working_date__lt=endDate,\n consultant__timesheet=F(\"mission__timesheet\")) # Join to avoid duplicate entries\n fc = fc.values(\"daily_rate\").annotate(Sum(\"consultant__timesheet__charge\")) # nb days at this rate group by timesheet\n fc = fc.values_list(\"daily_rate\", \"consultant__timesheet__charge__sum\")\n fc = fc.order_by(\"daily_rate\")\n return fc",
"def getFoodItems(periodID):\n headers = {\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-US,en;q=0.9',\n 'Host': 'api.dineoncampus.com',\n 'Origin': 'https://dineoncampus.com',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'\n }\n today = date.today()\n url = \"https://api.dineoncampus.com/v1/location/5880da183191a21e8af61adc/periods/\" + periodID + \"?platform=0&date=\" + str(today)\n\n response = requests.get(url, headers=headers) # send request wuth headers\n #print(response.text)\n json_data = json.loads(response.text) # decode json\n #print(json_data)\n if json_data[\"status\"] != \"success\":\n raise Exception(\"Status not successful\") # raise exception if status returned is not successful\n\n bfList = []\n for category in json_data['menu']['periods']['categories']:\n for foodName in category['items']: # assign attributes to each food item on the menu\n nutrients = [dict[\"name\"] + \":\" + dict[\"value\"] + \"(\" + dict[\"uom\"] + \")\" for dict in foodName['nutrients']]\n filters = [dict[\"type\"] + \":\" + dict[\"name\"] for dict in foodName['filters']]\n bfList.append(Food(foodName['id'], foodName['name'], nutrients=nutrients, ingredients=foodName['ingredients'], filters= filters, calories=foodName['calories']))\n\n print(len(bfList))\n #for item in bfList:\n #print(str(item))\n #pass\n #print(bfList[0].nutrients)\n #print(bfList[0].ingredients)\n #print(bfList[0].filters)\n return bfList",
"def get_all_supply(self, lower_date, upper_date, category, name):\n return (self.get_supply('available', lower_date, upper_date, category, name)+\n self.get_supply('sold', lower_date, upper_date, category, name) +\n self.get_supply('wasted', lower_date, upper_date, category, name))",
"def get_google_trends_palestine_between_dates(self, start_date, end_date):",
"def test_get_table_stock_data(self):\n fields = [\"ticker\", \"date\", \"adj_close\"]\n tickers = [\"AAPL\", \"MSFT\"]\n start_date = \"2015-01-01\"\n end_date = \"2016-01-01\"\n quandl.ApiConfig.api_key = BaseConfig.QUANDL_KEY\n data = quandl.get_table(\n \"WIKI/PRICES\",\n ticker=tickers,\n qopts={\"columns\": fields},\n date={\"gte\": start_date, \"lte\": end_date},\n paginate=True,\n )\n assert type(data) == pd.DataFrame\n assert sorted(data.columns.tolist()) == sorted(fields)\n assert sorted(data[\"ticker\"].unique().tolist()) == sorted(tickers)",
"def freq_view_data(self, service=False, aggregate=True, councils=[], startdate=\"\", enddate=\"\"):\n engine = db.create_engine(self.dbString)\n\n if service:\n df = pd.read_sql_query(\"SELECT requesttype, createddate, closeddate, servicedate, nc, ncname FROM %s\" % self.table, con=engine)\n df['servicedate'] = pd.to_datetime(df['servicedate'])\n\n else:\n df = pd.read_sql_query(\"SELECT requesttype, createddate, closeddate, nc, ncname FROM %s\" % self.table, con=engine)\n\n df['closeddate'] = pd.to_datetime(df['closeddate'])\n\n if councils != []:\n df = df[df.nc.isin(councils)]\n \n if startdate != \"\":\n start = pd.to_datetime(startdate)\n df = df[(df['createddate'] >= start)]\n\n if enddate != \"\":\n end = pd.to_datetime(enddate)\n df = df[df['createddate'] <= end]\n\n df = df.sort_values(by=['requesttype', 'nc', 'createddate', 'closeddate'])\n df_json = json.loads(df.to_json(orient=\"records\"))\n\n if aggregate:\n summary = self.freq_aggregate(df)\n json_data = []\n json_data.append(json.loads(summary))\n json_data.append(df_json)\n return json_data\n\n return df_json",
"def get_sales(start_date: datetime.datetime, end_date: datetime.datetime, seller_skus: set) -> List:\n\n print(\"getting sales data...\")\n interval = create_date_interval(start_date, end_date)\n\n return _get_sales(interval, Granularity.HOUR, seller_skus)",
"def get_profits(self, start_date, end_date, ticker, investment=1000000):\n print('Calculating..')\n step = datetime.timedelta(1)\n start_date = start_date + step\n '''Init'''\n return_movements = []\n income_movements = []\n while start_date != end_date:\n last_day = start_date - step\n '''Do Calculation'''\n if not ticker: # portfolio mod\n income, profit_rate = self.profit_unit(last_day, start_date, None, investment=investment,\n ) # profit between a day\n else:\n income, profit_rate = self.profit_unit(last_day, start_date, ticker, investment=investment,\n ) # profit between a day\n '''Process'''\n investment = income # Income becomes investment for next turn\n income = round(income, 5)\n profit_rate = round(profit_rate, 5)\n '''Records changes of profit and gain'''\n return_movements.append([profit_rate, start_date])\n income_movements.append([income, start_date])\n '''Continue'''\n start_date += step\n print('Calculation Done..')\n return return_movements, income_movements",
"def compute_income(userid, session):\n t = session.query(Transaction).order_by(Transaction.t_date.desc()).limit(1).first()\n if t is None:\n return\n endate = (t.t_date)\n ##get date 4 months ago\n startdate = mkFirstOfMonth(endate) - relativedelta(months=MONTHS_MEASURED)\n\n # get all the user's item ids\n item_ids = []\n itemrecs = session.query(Item).filter(Item.user_id.like(userid)).all()\n if itemrecs is None:\n return\n for ir in itemrecs:\n item_ids.append(ir.item_id)\n if len(item_ids) < 1:\n return\n\n allincomes = []\n daybreaks = mkDayBreaks(startdate) # [1st, 9th, 17th, 25th, 1st]\n while daybreaks[4] <= endate: # handle a month at a time\n periodincomes = []\n for i in range(4): # handle a pillarperiod at a time\n queries = []\n queries.append( Transaction.t_date >= daybreaks[i] )\n queries.append( Transaction.t_date < daybreaks[i+1] )\n queries.append( Transaction.amount < 0 )\n queries.append( Transaction.item_id.in_(item_ids) )\n # count anything in Plaid category Transfer > Deposit as income\n queries.append( Transaction.category_uid.like(\"21007000\") )\n # q = session.query(Transaction.category_uid, func.sum(Transaction.amount))\n q = session.query( func.sum(Transaction.amount) )\n q = q.filter(*queries)#.group_by(Transaction.category_uid).all()\n sumrec = q.first()\n # from sqlalchemy.dialects import postgresql\n # statement = q.statement\n # print(\"SQL: \")\n # print(statement.compile(dialect=postgresql.dialect()))\n inc = 0\n if sumrec[0] is not None:\n inc = -1 * int(sumrec[0]) # make the amount positive\n print('Income from %s to %s: %d' % (formatDate(daybreaks[i]), formatDate(daybreaks[i+1]), inc) )\n # put period's income into table\n periodincomes.append( inc )\n # save period's income to DB\n a = ActualMonthIncome( user_id=userid, start_date=daybreaks[i], amount=inc, period=i+1 )\n session.merge( a )\n\n allincomes.append( periodincomes )\n daybreaks = mkDayBreaks( daybreaks[4] )\n session.commit()\n\n # Compute average monthly income by period using allincomes list\n # (which is a 4 periods x 4 MONTHS_MEASURED list) and save to DB\n # DROP HIGHEST AND LOWEST MONTH AND AVERAGE THE OTHERS\n total_avg_inc = 0\n for i in range(MONTHS_MEASURED):\n periodincs = []\n for inc in allincomes:\n periodincs.append(inc[i])\n periodincs.sort()\n periodincs = periodincs[ 1:(len(periodincs)-1) ]\n\n periodinc = sum( periodincs )\n avg_inc = int( periodinc / (MONTHS_MEASURED-2) )\n a = AverageMonthIncome( user_id=userid, amount=avg_inc, period=i+1 )\n session.merge( a )\n total_avg_inc += avg_inc\n\n # Save average monthly income to user profile\n user = session.query(User).get(userid)\n user.income = int(total_avg_inc)\n user.income_update = datetime.datetime.today()\n session.commit()\n\n return True",
"def usage(self, date_range='7D', as_df=True):\r\n end_date = None\r\n if end_date is None:\r\n end_date = datetime.now()\r\n params = {\r\n 'f' : 'json',\r\n 'startTime': None,\r\n 'endTime': int(end_date.timestamp() * 1000),\r\n \"period\": '',\r\n 'vars': 'num',\r\n 'groupby': 'name',\r\n 'etype': 'svcusg',\r\n 'name': self.itemid,\r\n\r\n }\r\n from datetime import timedelta\r\n if self.type == 'Feature Service':\r\n params['stype'] = 'features'\r\n params['name'] = os.path.basename(os.path.dirname(self.layers[0].container._url))\r\n if date_range.lower() in ['24h', '1d']:\r\n params['period'] = '1h'\r\n params['startTime'] = int((end_date - timedelta(days=1)).timestamp() * 1000)\r\n elif date_range.lower() == '7d':\r\n params['period'] = '1d'\r\n params['startTime'] = int((end_date - timedelta(days=7)).timestamp() * 1000)\r\n elif date_range.lower() == '14d':\r\n params['period'] = '1d'\r\n params['startTime'] = int((end_date - timedelta(days=14)).timestamp() * 1000)\r\n elif date_range.lower() == '30d':\r\n params['period'] = '1d'\r\n params['startTime'] = int((end_date - timedelta(days=30)).timestamp() * 1000)\r\n elif date_range.lower() == '60d':\r\n params['period'] = '1d'\r\n params['startTime'] = int((end_date - timedelta(days=60)).timestamp() * 1000)\r\n elif date_range.lower() == '6m':\r\n sd = end_date - timedelta(days=int(365/2))\r\n ranges = {\r\n \"1\" : [sd, sd + timedelta(days=60)],\r\n \"2\" : [sd + timedelta(days=61), sd + timedelta(days=120)],\r\n \"3\" : [sd + timedelta(days=121), sd + timedelta(days=180)],\r\n \"4\" : [sd + timedelta(days=181), end_date + timedelta(days=1)]\r\n }\r\n params['period'] = '1d'\r\n url = \"%s/portals/%s/usage\" % (self._portal.resturl, self._gis.properties.id)\r\n results = []\r\n for k,v in ranges.items():\r\n sd = int(v[0].timestamp() * 1000)\r\n ed = int(v[1].timestamp() * 1000)\r\n params['startTime'] = sd\r\n params['endTime'] = ed\r\n res = self._portal.con.post(url, params)\r\n if as_df:\r\n import pandas as pd\r\n\r\n res = pd.DataFrame(res['data'][0]['num'],\r\n columns=['Date', 'Usage'])\r\n res.Date = res.astype(float) / 1000\r\n res.Date = res.Date.apply(lambda x : datetime.fromtimestamp(x))\r\n res.Usage = res.Usage.astype(int)\r\n results.append(res)\r\n del k,v\r\n if as_df:\r\n return (pd.concat(results)\r\n .reset_index(drop=True)\r\n .drop_duplicates(keep='first',\r\n inplace=False))\r\n else:\r\n return results\r\n elif date_range.lower() in ['12m', '1y']:\r\n sd = end_date - timedelta(days=int(365))\r\n ranges = {\r\n \"1\" : [sd, sd + timedelta(days=60)],\r\n \"2\" : [sd + timedelta(days=61), sd + timedelta(days=120)],\r\n \"3\" : [sd + timedelta(days=121), sd + timedelta(days=180)],\r\n \"4\" : [sd + timedelta(days=181), sd + timedelta(days=240)],\r\n \"5\" : [sd + timedelta(days=241), sd + timedelta(days=320)],\r\n \"6\" : [sd + timedelta(days=321), sd + timedelta(days=366)]\r\n }\r\n params['period'] = '1d'\r\n url = \"%s/portals/%s/usage\" % (self._portal.resturl, self._gis.properties.id)\r\n results = []\r\n for k,v in ranges.items():\r\n sd = int(v[0].timestamp() * 1000)\r\n ed = int(v[1].timestamp() * 1000)\r\n params['startTime'] = sd\r\n params['endTime'] = ed\r\n res = self._portal.con.post(url, params)\r\n if as_df:\r\n import pandas as pd\r\n\r\n res = pd.DataFrame(res['data'][0]['num'],\r\n columns=['Date', 'Usage'])\r\n res.Date = res.astype(float) / 1000\r\n res.Date = res.Date.apply(lambda x : datetime.fromtimestamp(x))\r\n res.Usage = res.Usage.astype(int)\r\n\r\n results.append(res)\r\n del 
k,v\r\n\r\n if as_df:\r\n return (pd.concat(results)\r\n .reset_index(drop=True)\r\n .drop_duplicates(keep='first',\r\n inplace=False))\r\n else:\r\n return results\r\n else:\r\n raise ValueError(\"Invalid date range.\")\r\n\r\n url = \"%sportals/%s/usage\" % (self._portal.resturl, self._gis.properties.id)\r\n try:\r\n res = self._portal.con.post(url, params)\r\n if as_df:\r\n import pandas as pd\r\n df = pd.DataFrame(res['data'][0]['num'],\r\n columns=['Date', 'Usage'])\r\n df.Date = df.astype(float) / 1000\r\n df.Date = df.Date.apply(lambda x : datetime.fromtimestamp(x))\r\n df.Usage = df.Usage.astype(int)\r\n return df\r\n return res\r\n except:\r\n return None",
"def get_daily_expense(self, start_time: date, end_time: date):\n with sessionmaker(self.engine)() as session:\n cursor = session.execute(('SELECT '\n 'document_date as by_time, sum(amount_total) as total '\n 'FROM invoice_info '\n 'WHERE '\n '(amount_due IS null OR amount_due = 0) '\n 'and document_date >= :start_time '\n 'and document_date <= :end_time '\n 'GROUP BY document_date'),\n {'start_time': start_time, 'end_time': end_time})\n return [ExpenseByTime(by_time=it[0], total=it[1]) for it in cursor]",
"def get_items_by_date(month, year):",
"def compute_baskets(basket_specs, aggregates):\n index = pd.MultiIndex.from_product([basket_specs, aggregates.index.unique(level=1)])\n baskets = pd.Series(index=index)\n for name, currencies in basket_specs.items():\n components = aggregates.loc[pd.IndexSlice[currencies, :]]\n # scale down to avoid numerical instability\n price = components.xs(\"price\", level=1) @ CIRCULATING_SUPPLY[currencies] / 1e9\n volume = components.xs(\"volume\", level=1).sum()\n baskets.loc[pd.IndexSlice[name, :]] = (price, volume)\n return baskets"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
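Similarly, the fundamentals row above documents Basket.get_fundamentals, which takes a date range plus period, direction, and metrics arguments (all defaulted) and returns a DataFrame from the BASKET_FUNDAMENTALS dataset. A usage sketch with the defaults, under the same placeholder and session assumptions:

import datetime as dt

from gs_quant.markets.baskets import Basket

basket = Basket.get("GSMBXXXX")                    # placeholder basket ticker
fundamentals = basket.get_fundamentals(
    start=dt.date(2021, 1, 1),                     # placeholder dates; period/direction/metrics keep their defaults
    end=dt.date(2021, 12, 31),
)
print(fundamentals.head())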
Retrieve basket's live date Usage Retrieve basket's live date Examples >>> from gs_quant.markets.baskets import Basket >>> >>> basket = Basket.get("GSMBXXXX") >>> basket.get_live_date()
|
def get_live_date(self) -> Optional[dt.date]:
return self.__live_date
|
[
"def time_to_live(self) -> Optional[str]:\n return pulumi.get(self, \"time_to_live\")",
"def getnow(self):\n print()\n print(\"Current date:\")\n print(datetime.date.today())",
"def test_get_product_live_time_details_success():\n expected_data = {\n 'product_id': 12,\n 'time_of_day_product': '20:30:00',\n 'time_zone': 'GMT',\n 'store_id': 1\n }\n db.insert_product_live_time_data()\n response = product_live_time.get_product_live_time_details('12')\n assert response.message == expected_data",
"def get_today_date():\n \n today = datetime.utcnow()\n print(\"Today's date: %s \" % today)\n return today",
"def getDate(self):\r\n print(\"Date TVA\")\r\n return self.date_tva",
"def get_latest_release():\n return json.load(urllib.request.urlopen(_LATEST_URL))",
"def latest_date():\n \n # start by trying today's date\n try_date = datetime.date(datetime.now())\n \n # the function will iterate until it finds a date with information \n date = find_usable_date(try_date)\n \n return jsonify(str(date))",
"def get_sell_date(self) -> datetime:\n return self.sell_date",
"def get_livefeed(game_id):\n randomnum = random.randint(1000, 9999)\n logging.info(\"Live Feed requested (random cache - %s)!\", randomnum)\n api_endpoint = f\"game/{game_id}/feed/live?{randomnum}\"\n response = api.nhl_api(api_endpoint).json()\n return response",
"def get_recent_item(date):\n logger.debug(\"Requested the recent item added on %s\", date)\n return spark_query_engine.get_recent_item_api(date)",
"def get_today_weather(self):\n return self.data['weather1']",
"def date_debut(self):\n return self.__date_debut",
"def get_last_update(name: str) -> float:\n global _feeds\n return _feeds[name]['last_update']",
"def getPublishDateOfLastReleaseData(self):\n sql = \"SELECT date FROM public.deter_publish_date\"\n \n return self.__execSQL(sql)",
"def detect_latest_dates(source, user, passwd):\n\n soup = retrieve_url(source, user, passwd)\n dates = [d.text[:-1] for d in soup.find_all(\"a\", href=re.compile(\"..-..-..\"))]\n print \"Latest date: {}\".format(dates[0])\n return dates",
"def today(_, message: CallbackQuery):\n _2day = requests.get(URL).json()\n message.message.edit_text(Msg.json_to_msg(_2day))",
"def get_yesterdays_date():\n\n return date.today() - timedelta(days=1)",
"def live_ended_at(self):\n if self.live_info and self.live_info.get(\"stopped_at\"):\n return int(self.live_info.get(\"stopped_at\"))\n\n if self.live_state in (RUNNING, STOPPING):\n return int(to_timestamp(timezone.now()))\n\n return None",
"def latest_info():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve url to basket's product page in Marquee Usage Retrieve url to basket's product page in Marquee Examples >>> from gs_quant.markets.baskets import Basket >>> >>> basket = Basket.get("GSMBXXXX") >>> basket.get_url()
|
def get_url(self) -> str:
env = '-dev' if 'dev' in get(GsSession, 'current.domain', '') else ''
env = '-qa' if 'qa' in get(GsSession, 'current.domain', '') else env
return f'https://marquee{env}.gs.com/s/products/{self.id}/summary'
|
[
"def get_url_page(self, product):\n return product.get('url')",
"def getProductUrl(productId):\r\n return baseUrl + productId",
"def item_url(self):\n return self.get_url(item=True)",
"def getQueueURL():\n q = SQS.get_queue_url(QueueName='RestaurantRequest').get(QUEUE_URL)\n logger.debug(\"Queue URL is %s\", QUEUE_URL)\n return q",
"def product_url(self, product):\n url = 'product/%s' % product\n return posixpath.join(self.url, url)",
"def _getURL(self):\n return \"http://%s.%s\" % (self.key, self.baseurl)",
"def get_url(self, index):\n\n\t\treturn 'https://www.cardmarket.com/en/YuGiOh/Products/Singles/' + self.db.loc[index, 'set_url_name'] + '/' + self.db.loc[index, 'url_name']",
"def test_get_url(self):\n package = make_package()\n self.request.app_url.side_effect = lambda *x: '/'.join(x)\n url = self.storage.get_url(package)\n expected = 'api/package/%s/%s/download/%s' % (package.name,\n package.version,\n package.filename)\n self.assertEqual(url, expected)",
"def queue_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"queue_url\")",
"def get_page(self):\n\n product = self.product_name.replace(\" \", \"+\")\n\n URL = f\"https://www.amazon.pl/s?k={product}\"\n USER_AGENT = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36\"\n LANGUAGE = \"en-US,en;q=0.5\"\n\n session = requests.Session()\n session.headers[\"User-Agent\"] = USER_AGENT\n session.headers[\"Accept-Language\"] = LANGUAGE\n session.headers[\"Content-Language\"] = LANGUAGE\n\n return session.get(URL).text",
"def market_url(self):\n return self._market_url",
"def get_url(self, job: Job):\n urls = self.get_urls(job)\n return urls.get(\"ws\") if urls else None",
"def test_get_url(self):\n package = make_package()\n url = self.storage.get_url(package)\n self.assertEqual(package.url, url)\n self.assertIsNotNone(package.expire)\n\n parts = urlparse(url)\n self.assertEqual(parts.scheme, 'https')\n self.assertEqual(parts.netloc, 'mybucket.s3.amazonaws.com')\n self.assertEqual(parts.path, '/' + package.path)\n query = parse_qs(parts.query)\n self.assertItemsEqual(query.keys(), ['Expires', 'Signature',\n 'AWSAccessKeyId'])\n actual_expire = (time.mktime(package.expire.timetuple()) +\n self.storage.buffer_time)\n self.assertEqual(int(query['Expires'][0]), actual_expire)\n self.assertEqual(query['AWSAccessKeyId'][0],\n self.settings['aws.access_key'])",
"def getQuickViewUrl(productId):\r\n return 'http://www.drivethrurpg.com/demo_xml/' + productId + '.xml'",
"def get_full_link(self, genre, chart):\n return 'http://www.billboard.com' + self.get_link_of_chart(genre, chart)",
"def _get_url(self):\n return self.config[\"rest_url\"] if len(self.config[\"rest_url\"]) > 0 \\\n else get_hostname(self.connect.advertise_addr)",
"def getProductHref(self):\n productClass = self.productClass()\n if productClass:\n return productClass.getPrimaryHref()\n return ''",
"def geturl(self):\n return 'https://requester.mturk.com/batches/%s/' % self.id",
"def get_external_url():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create and schedule a new factor risk report for your basket
|
def add_factor_risk_report(self, risk_model_id: str, fx_hedged: bool):
payload = CustomBasketRiskParams(risk_model=risk_model_id, fx_hedged=fx_hedged)
return GsIndexApi.update_risk_reports(payload)
|
[
"def scrum_report(ctx): # pylint: disable=unused-argument\n generate_scrum_report()",
"def generate_report():",
"def ticket(self,args,groupby='nite'):\n try:\n args.dataframe\n except:\n print(\"Must specify input data!\")\n sys.exit(1)\n \n if args.ignore_jira:\n args.dataframe['user'] = args.user \n else:\n args.dataframe['user'] = args.jira_user\n group = args.dataframe.groupby(by=[groupby])\n for name,vals in group:\n # create JIRA ticket per nite and add jira_id,reqnum to dataframe\n index = args.dataframe[args.dataframe[groupby] == name].index\n \n if args.jira_summary:\n jira_summary = args.jira_summary \n else:\n jira_summary = str(name)\n if args.reqnum:\n reqnum = args.reqnum\n else:\n reqnum = None\n if args.jira_parent:\n jira_parent = args.jira_parent\n else:\n jira_parent = None\n if args.ignore_jira:\n new_reqnum,new_jira_parent = (reqnum,jira_parent)\n else:\n # Create JIRA ticket\n new_reqnum,new_jira_parent = jira_utils.create_ticket(args.jira_section,args.jira_user,\n description=args.jira_description,\n summary=jira_summary,\n ticket=reqnum,parent=jira_parent,\n use_existing=True)\n \n # Update dataframe with reqnum, jira_id\n # If row exists replace value, if not insert new column/value\n try:\n args.dataframe.loc[index,('reqnum')] = new_reqnum\n except:\n args.dataframe.insert(len(args.dataframe.columns),'reqnum',None)\n args.dataframe.loc[index,'reqnum'] = new_reqnum\n try:\n args.dataframe.loc[index,('jira_parent')] = new_jira_parent\n except:\n args.dataframe.insert(len(args.dataframe.columns),'jira_parent',None)\n args.dataframe.loc[index,'jira_parent'] = new_jira_parent\n\n return args.dataframe",
"def test_create_report_schedule(self):\n pass",
"def create_report(self):\n\n Supervisor.show_complaint(self)\n result = Supervisor.input_create_report_data(self)\n Queries.create(self, 'Report',\n (int(result[0]), Supervisor.team_id, result[1], result[2], int(result[3]), int(result[4]))\n )\n Supervisor.supervisor_tasks(self)",
"def submit_jira(self, que):\n # Get the reasons need to submit of today.\n _only_crash_id_l = self.match_reason() #[{tableid:rowid}, ROWID, 1, CRASH_ID, PROJECT, REASON),()]\n if _only_crash_id_l.__len__() > 0:\n conn, cursor = sqlite_base.sqlite_connect()\n for index, _crash_id in enumerate(_only_crash_id_l):\n que.put('<h4>\\t%d/%d Submitting...</h4>' % (index + 1, _only_crash_id_l.__len__()))\n # Get the log after parse from report table.\n _log_finally = sqlite_base.search(conn, cursor,\n end=False,\n columns='PROJECT, VERSION, CALL, LOG',\n table_name='report',\n condition=\"where CRASH_ID = '%s'\" % _crash_id[-3])\n if _log_finally:\n _rowid = int()\n # Transfer log data type to List.\n _log_l = _log_finally[0][-1].split('\\n')\n\n # Get the first 7 lines of environment data to submit. Formart.\n _env = '\\n'.join(_log_l[1:7])\n\n # Get version code.\n _ver = _log_finally[0][1]\n # Stitching the summary wait to submit.\n _summary = 'Crash Analysis: ' + _log_finally[0][-2] + '[Frequency:%s]' % _crash_id[2]\n\n # submit to JIRA server.\n _jira_id = self.jirahandler.create(pjname=_log_finally[0][0],\n summary=_summary,\n environment=_env,\n description=_log_finally[0][-1].replace('<pre>', '').replace(\n '</pre>', ''),\n version=_ver,\n priority='urgen')\n\n if isinstance(_jira_id, jira.resources.Issue):\n # If ==, means submit success.\n if regular_common.jira_id(_jira_id.key).group(0) == _jira_id.key:\n # Insert data to reasons table.\n _rowid = sqlite_base.insert(conn, cursor,\n end=False,\n table_name='reasons',\n reason=_crash_id[-1],\n frequency=_crash_id[2],\n project=_log_finally[0][0],\n jiraid=_jira_id.key)\n else:\n LOG.cri(' %-20s ]-[ Submit to JIRA error: %s .' % (LOG.get_function_name(), _jira_id.key))\n else:\n LOG.cri(' %-20s ]-[ Submit to JIRA error: %s .' % (LOG.get_function_name(), _jira_id))\n # Treverse table id list.\n for _tb_id in _crash_id[0].keys():\n conditions = 'WHERE '\n # Get row id list with given table id.\n _row_l = _crash_id[0][_tb_id]\n # Stitching the update condition command.\n for key, _row_id in enumerate(_row_l):\n if key >= 1:\n conditions += ' or '\n conditions += 'ROWID = %d' % _row_id\n # Update reason id to backtrack tables.\n sqlite_base.update(conn, cursor,\n end=False,\n table_name='backtrack_%s' % str(_tb_id),\n columns=['REASON_ID'],\n values=[_rowid],\n condition=conditions)\n\n else:\n LOG.error(' %-20s ]-[ Table report cat not find CRASH_ID %s .' %\n (LOG.get_function_name(), _crash_id[-2]))\n cursor.close()\n conn.close()\n else:\n LOG.info(' %-20s ]-[ Look like all crash has been logged: %s' %\n (LOG.get_function_name(), ReportGenerator.get_yesterday_timestamp()))",
"def scheduled_stocktake_reports():\n\n # Sleep a random number of seconds to prevent worker conflict\n time.sleep(random.randint(1, 5))\n\n # First let's delete any old stocktake reports\n delete_n_days = int(common.models.InvenTreeSetting.get_setting('STOCKTAKE_DELETE_REPORT_DAYS', 30, cache=False))\n threshold = datetime.now() - timedelta(days=delete_n_days)\n old_reports = part.models.PartStocktakeReport.objects.filter(date__lt=threshold)\n\n if old_reports.count() > 0:\n logger.info(f\"Deleting {old_reports.count()} stale stocktake reports\")\n old_reports.delete()\n\n # Next, check if stocktake functionality is enabled\n if not common.models.InvenTreeSetting.get_setting('STOCKTAKE_ENABLE', False, cache=False):\n logger.info(\"Stocktake functionality is not enabled - exiting\")\n return\n\n report_n_days = int(common.models.InvenTreeSetting.get_setting('STOCKTAKE_AUTO_DAYS', 0, cache=False))\n\n if report_n_days < 1:\n logger.info(\"Stocktake auto reports are disabled, exiting\")\n return\n\n if not check_daily_holdoff('STOCKTAKE_RECENT_REPORT', report_n_days):\n logger.info(\"Stocktake report was recently generated - exiting\")\n return\n\n # Let's start a new stocktake report for all parts\n part.stocktake.generate_stocktake_report(update_parts=True)\n\n # Record the date of this report\n record_task_success('STOCKTAKE_RECENT_REPORT')",
"def generate_scenario(risk_factor_id, shock_value):\n #print parameters\n print (\"Generate Scenario\")\n print (\"Risk factor: \" + risk_factor_id)\n print (\"Shock Value: \" + str(shock_value))\n\n #call the url\n baseurl = 'https://fss-analytics.mybluemix.net/api/v1/scenario/generate_predictive'\n headers = {\n 'X-IBM-Access-Token': access_token,\n 'Content-Type': \"application/json\"\n }\n data = {\n 'market_change': {\n 'risk_factor': risk_factor_id,\n 'shock': shock_value\n }\n }\n get_data = requests.post(baseurl, headers=headers, data=json.dumps(data))\n status = get_data.status_code\n print(\"Predictive Market Scenario status: \" + str(status))\n\n #if the status is not success, return with status\n if status != 200:\n return status\n\n #create csv file\n data = get_data.text\n f = open(\"output_PMS.csv\", \"w\")\n f.write(data)\n f.close()\n\n #print and return the status\n print (os.path.exists(\"output_PMS.csv\"))\n print(\"Created output_PMS.csv\")\n return status",
"def make_report(self):\n self._report.append(\n date = self._current_date,\n account = self._caccount / 100,\n property_value = self._property_value / 100\n )",
"def chw_calendar_submit_report(request, username, template_name=\"pactcarehq/chw_calendar_submit_report.html\"):\n context = RequestContext(request)\n all_patients = request.GET.get(\"all_patients\", False)\n context['username'] = username\n user = User.objects.get(username=username)\n total_interval = 7\n if request.GET.has_key('interval'):\n try:\n total_interval = int(request.GET['interval'])\n except:\n pass\n\n ret, patients, total_scheduled, total_visited= _get_schedule_tally(username, total_interval)\n nowdate = datetime.now()\n\n context['date_arr'] = ret\n context['total_scheduled'] = total_scheduled\n context['total_visited'] = total_visited\n #context['total_visited'] = total_visited\n context['start_date'] = ret[0][0]\n context['end_date'] = ret[-1][0]\n\n if request.GET.get('getcsv', None) != None:\n csvdata = []\n csvdata.append(','.join(['visit_date','assigned_chw','pact_id','is_scheduled','contact_type', 'visit_type','visit_kept', 'submitted_by','visit_id']))\n for date, pt_visit in ret:\n if len(pt_visit) > 0:\n for cpt, v in pt_visit:\n rowdata = [date.strftime('%Y-%m-%d'), username, cpt.pact_id]\n if v != None:\n\n #is scheduled\n if v.form['scheduled'] == 'yes':\n rowdata.append('scheduled')\n else:\n rowdata.append('unscheduled')\n #contact_type\n rowdata.append(v.form['contact_type'])\n\n #visit type\n rowdata.append(v.form['visit_type'])\n\n #visit kept\n rowdata.append(v.form['visit_kept'])\n\n rowdata.append(v.form['Meta']['username'])\n if v.form['Meta']['username'] == username:\n rowdata.append('assigned')\n else:\n rowdata.append('covered')\n rowdata.append(v.get_id)\n else:\n rowdata.append('novisit')\n csvdata.append(','.join(rowdata))\n else:\n csvdata.append(','.join([date.strftime('%Y-%m-%d'),'nopatients']))\n\n resp = HttpResponse()\n\n resp['Content-Disposition'] = 'attachment; filename=chw_schedule_%s-%s_to_%s.csv' % (username, datetime.now().strftime(\"%Y-%m-%d\"), (nowdate - timedelta(days=total_interval)).strftime(\"%Y-%m-%d\"))\n resp.write('\\n'.join(csvdata))\n return resp\n\n else:\n return render_to_response(template_name, context_instance=context)",
"async def raffle_create(self, ctx, cost: int = 10, totalTickets: int = 0):\n id = await self.get_next_schedule_num()\n title = \"New Raffle!\"\n description = \"\"\n embed = self.createRaffleEmbed(title, description, id, totalTickets, cost)\n msg = await ctx.send(embed=embed)\n\n if totalTickets == 0:\n self.bot.mdb['raffle'].insert_one({\"id\": int(id), \"msgId\": f\"{msg.id}\", \"channelId\": f\"{ctx.message.channel.id}\", \"serverId\": f\"{ctx.guild.id}\", \"title\": title, \"description\": description, \"cost\": cost, \"boughtTickets\": 0, \"ended\": 0})\n else:\n self.bot.mdb['raffle'].insert_one({\"id\": int(id), \"msgId\": f\"{msg.id}\", \"channelId\": f\"{ctx.message.channel.id}\", \"serverId\": f\"{ctx.guild.id}\", \"title\": title, \"description\": description, \"cost\": cost, \"totalTickets\": totalTickets, \"boughtTickets\": 0, \"ended\": 0})",
"def _get_schedule_tally(username, total_interval, override_date=None):\n if override_date == None:\n nowdate = datetime.now()\n chw_schedule = schedule.get_schedule(username)\n else:\n nowdate = override_date\n chw_schedule = schedule.get_schedule(username, override_date = nowdate)\n #got the chw schedule\n #now let's walk through the date range, and get the scheduled CHWs per this date.visit_dates = []\n ret = [] #where it's going to be an array of tuples:\n #(date, scheduled[], submissions[] - that line up with the scheduled)\n\n total_scheduled=0\n total_visited=0\n\n for n in range(0, total_interval):\n td = timedelta(days=n)\n visit_date = nowdate-td\n scheduled_pactids = chw_schedule.get_scheduled(visit_date)\n patients = []\n visited = []\n for pact_id in scheduled_pactids:\n if pact_id == None:\n continue\n try:\n total_scheduled += 1\n cpatient = getpatient(pact_id) #TODO: this is a total waste of queries, doubly getting the cpatient, then getting the django object again\n# patients.append(Patient.objects.get(id=cpatient.django_uuid))\n patients.append(cpatient)\n except:\n #print \"skipping patient %s: %s, %s\" % (cpatient.pact_id, cpatient.last_name, cpatient.first_name)\n continue\n\n #inefficient, but we need to get the patients in alpha order\n patients = sorted(patients, key=lambda x: x.last_name)\n for patient in patients:\n pact_id = patient.pact_id\n searchkey = [str(username), str(pact_id), visit_date.year, visit_date.month, visit_date.day]\n #print searchkey\n submissions = XFormInstance.view('pactcarehq/submits_by_chw_per_patient_date', key=searchkey, include_docs=True).all()\n #print len(submissions)\n if len(submissions) > 0:\n visited.append(submissions[0])\n total_visited+= 1\n else:\n #ok, so no submission from this chw, let's see if there's ANY from anyone on this day.\n other_submissions = XFormInstance.view('pactcarehq/all_submits_by_patient_date', key=[str(pact_id), visit_date.year, visit_date.month, visit_date.day, 'http://dev.commcarehq.org/pact/dots_form' ], include_docs=True).all()\n if len(other_submissions) > 0:\n visited.append(other_submissions[0])\n total_visited+= 1\n else:\n visited.append(None)\n\n #print (visit_date, patients, visited)\n ret.append((visit_date, zip(patients, visited)))\n return ret, patients, total_scheduled, total_visited",
"def create_budget(request):\n from stalker_pyramid.views import get_logged_in_user, milliseconds_since_epoch\n logged_in_user = get_logged_in_user(request)\n utc_now = datetime.datetime.now(pytz.utc)\n\n project_id = request.params.get('project_id', None)\n project = Project.query.filter(Project.id == project_id).first()\n\n if not project:\n return Response('There is no project with id: %s' % project_id, 500)\n\n name = request.params.get('name', None)\n type_id = request.params.get('type_id', None)\n type_ = Type.query.filter(Type.id == type_id).first()\n description = request.params.get('description', \"\")\n\n logger.debug(\"type_id : %s\" % type_id)\n logger.debug(\"name : %s\" % name)\n logger.debug(\"description : %s\" % description)\n\n if not name:\n return Response('Please supply a name', 500)\n\n if not type_:\n return Response('There is no type with id: %s' % type_id, 500)\n\n\n status = Status.query.filter(Status.name == 'Planning').first()\n\n generic_data = {\n 'approved_total_price': 0,\n 'total_price': 0,\n 'total_msrp': 0,\n 'total_cost': 0,\n 'realized_total_price': 0,\n 'milestones': [],\n 'folders': [],\n 'links': [],\n 'calendar_editing': 'OFF',\n 'start_date': milliseconds_since_epoch(project.start),\n 'end_date': milliseconds_since_epoch(project.end),\n 'related_budgets': []\n }\n\n budget = Budget(\n project=project,\n name=name,\n type=type_,\n status=status,\n description=description,\n created_by=logged_in_user,\n date_created=utc_now,\n date_updated=utc_now,\n generic_text=json.dumps(generic_data)\n )\n DBSession.add(budget)\n transaction.commit()\n budget = Budget.query.filter(Budget.name == name).first()\n new_budget_id = budget.id\n\n # related_budgets = budget.get_generic_text_attr('related_budgets')\n # related_budgets.append(budget.id)\n # budget.set_generic_text_attr('related_budgets', related_budgets)\n\n return Response(\"/budgets/%s/view\" % new_budget_id)",
"def testPerformanceReport(self):\n self.cur.execute('''CREATE TABLE performance_reports\n (Academic_year, Academic_staff_id, Report_date, Report_status, Support_staff_id, Created_at, Created_by, Total_score, Standard_performance_score, Developed_performance_score, Core_values_score)\n ''')\n \n self.con.commit()",
"def generate_report(self):\n output_path = get_run_artifact_path(self.fips, \"backtest_result\")\n pdf = matplotlib.backends.backend_pdf.PdfPages(output_path)\n self.plot_backtest_results(self.backtest_results, pdf)\n self.plot_historical_predictions(self.historical_predictions, self.observations, pdf)\n pdf.close()",
"def _create_sprint_backlog(self):\n sprint = self.teh.create_sprint(\"Test\")\n s1 = self.teh.create_ticket(Type.USER_STORY, \n props={Key.STORY_POINTS: '3', \n Key.SPRINT: sprint.name})\n self.assert_true(s1.link_to(self.teh.create_ticket(Type.TASK, \n props={Key.REMAINING_TIME: '4',\n Key.SPRINT: sprint.name})))\n self.assert_true(s1.link_to(self.teh.create_ticket(Type.TASK,\n props={Key.REMAINING_TIME: '8',\n Key.SPRINT: sprint.name})))\n self.assert_true(s1.link_to(self.teh.create_ticket(Type.TASK, \n props={Key.REMAINING_TIME: '4'})))\n s2 = self.teh.create_ticket(Type.USER_STORY, props={Key.STORY_POINTS: '5', \n Key.SPRINT: sprint.name})\n self.assert_true(s2.link_to(self.teh.create_ticket(Type.TASK, \n props={Key.REMAINING_TIME: '2',\n Key.SPRINT: sprint.name})))\n self.assert_true(s2.link_to(self.teh.create_ticket(Type.TASK, \n props={Key.REMAINING_TIME: '3'})))\n sprint_backlog = self.bmm.get(name=\"Sprint Backlog\", scope=sprint.name)\n self.assert_contains(s1, sprint_backlog)\n self.assert_contains(s2, sprint_backlog)\n self.assert_length(5, sprint_backlog)\n return sprint_backlog",
"def _report_created(self):\n statsreporter.stats().incr('new_task_created_' + self.task_name)",
"def _create_product_backlog(self):\n def _create_story(props):\n \"\"\"Creates a ticket of type story and returns it\"\"\"\n return self.teh.create_ticket(Type.USER_STORY, props=props)\n \n r1 = self.teh.create_ticket(Type.REQUIREMENT, props={Key.BUSINESS_VALUE: '3000'})\n self.assert_true(r1.link_to(_create_story({Key.STORY_PRIORITY: 'Linear'})))\n self.assert_true(r1.link_to(_create_story({Key.STORY_PRIORITY: 'Exciter'})))\n self.assert_true(r1.link_to(_create_story({Key.STORY_PRIORITY: 'Mandatory'})))\n r2 = self.teh.create_ticket(Type.REQUIREMENT, props={Key.BUSINESS_VALUE: '1200'})\n self.assert_true(r2.link_to(_create_story({Key.STORY_PRIORITY: 'Mandatory'})))\n self.assert_true(r2.link_to(_create_story({Key.STORY_PRIORITY: 'Exciter'})))\n r3 = self.teh.create_ticket(Type.REQUIREMENT, props={Key.BUSINESS_VALUE: '2000'})\n self.assert_true(r3.link_to(_create_story({Key.STORY_PRIORITY: 'Mandatory'})))\n r4 = self.teh.create_ticket(Type.REQUIREMENT, props={Key.BUSINESS_VALUE: '800'})\n self.assert_true(r4.link_to(_create_story({Key.STORY_PRIORITY: 'Linear'})))\n r5 = self.teh.create_ticket(Type.REQUIREMENT, props={Key.BUSINESS_VALUE: '3000'})\n self.assert_true(r5.link_to(_create_story({Key.STORY_PRIORITY: 'Exciter'})))\n self.assert_true(r5.link_to(_create_story({Key.STORY_PRIORITY: 'Mandatory'})))\n product_backlog = self.bmm.get(name=\"Product Backlog\")\n self.assert_equals(len(product_backlog), 14)\n return product_backlog",
"def testPerformanceReport(self):\n self.cur.execute('''CREATE TABLE performance_reports\n (Academic_year INTEGER, Academic_staff_id INTEGER, Report_date TEXT, Report_status TEXT, Support_staff_id INTEGER, Created_at TEXT, Created_by INTEGER, Total_score REAL, Standard_performance_score REAL, Developed_performance_score REAL, Core_values_score REAL)\n ''')\n \n self.con.commit()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
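A minimal usage sketch for the method above, assuming an authenticated GsSession and that Basket.get fetches an existing basket by identifier; the ticker and risk model ID are placeholders, not real values:

from gs_quant.markets.baskets import Basket

basket = Basket.get('GSMBXXXX')  # placeholder ticker for an existing basket
# schedule a factor risk report against a placeholder risk model id
basket.add_factor_risk_report(risk_model_id='AXIOMA_AXUS4M', fx_hedged=True)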
|
Delete an existing factor risk report for your basket
|
def delete_factor_risk_report(self, risk_model_id: str):
payload = CustomBasketRiskParams(risk_model=risk_model_id, delete=True)
return GsIndexApi.update_risk_reports(payload)
|
[
"def delete(self, crash_report_id):\n pass",
"def test_delete_report(self):\n report = dict(_id=\"report_uuid\")\n self.database.reports.find_one.return_value = report\n self.assertEqual(dict(ok=True), delete_report(\"report_uuid\", self.database))",
"def delete_record():",
"def remove_expenditure(id):\n\n from models import Budget, Expenditure, User, Category\n from utils import expenditure_total_amount_and_avg, budget_totals, get_dates_for_budget, get_progress, get_budget_per_category, connect_to_db \n\n expenditure_at_hand = Expenditure.query.filter_by(id=id).first()\n db.session.delete(expenditure_at_hand)\n db.session.commit()\n return jsonify({\"expenditure_id\": id})",
"def test_delete(self, init_db, new_risk_type):\n new_risk_type.delete()\n assert RiskType.get(new_risk_type.id) is None",
"def test_delete(self):\n pk = self.rpt.pk\n r=self.client.get(reverse('makeReports:delete-rpt',kwargs={'pk':self.rpt.pk}))\n self.assertEquals(r.status_code,200)\n self.client.post(reverse('makeReports:delete-rpt',kwargs={'pk':self.rpt.pk}))\n num = Report.objects.filter(pk=pk).count()\n self.assertEquals(num,0)",
"def doDelete(self):\n if self.factor_id is None:\n raise ValueError(\n \"Financial Factor cannot be in Database and not have \"\n \"a FactorID\")\n connection = self.CreateConnection()\n cursor = connection.cursor()\n cursor.execute(\n \"DELETE FROM FinancialFactor WHERE FactorID = %s\",\n (self.factor_id,))\n connection.commit()",
"def reports_delete_block_report(request):\n return reports_delete_report(request, block=True)",
"def delete_ballotsub(debate):\n debate.ballotsubmission_set.all().delete()",
"def remove_budget(id):\n\n from models import User, Expenditure, Budget\n\n budget_at_hand = Budget.query.filter_by(id=id).first()\n user_id = session.get('id')\n\n if user_id == budget_at_hand.budget_userid:\n db.session.delete(budget_at_hand)\n db.session.commit()\n return redirect(url_for('dashboard', id=user_id))",
"def del_review(user, product):\n try:\n Reviewing.objects.get(user=user, product=product).delete()\n except:\n pass",
"def delete_issue(request, pk):\n issue_for_deletion = Issue.objects.get(pk=pk)\n issue_for_deletion.delete()\n messages.success(request, \"You have successfully deleted this issue.\")\n return redirect('index')",
"def delete_survey(self,iSurveyID):",
"def delete(self,delete_records):\n \n # sub = categories.find_subcategories(delete_records)\n \n # temp = list(filter(lambda n : n.category in sub ,self._records))\n # print(\"Date\"+\" \"*10+\"Categories\"+\" \"*5+\"Description\"+\" \"*5+\"Amount\")\n # print(\"=================================\")\n # for j,i in enumerate(temp):\n # print(f\"{i.date:<20s}{i.category:<20s} {i.description:^20s} {str(i.amount):>20s} {str(j):>10} \")\n # #print(i[0]+\" \"*10+i[1]+\" \"*10+str(i[2])+' '+str(j)+')')\n # print(\"=================================\")\n # ask = int(input('Which index do you want to delete?'))\n \n self._initial_money = self._initial_money - int(self._records[delete_records].amount)\n self._records.pop(delete_records)",
"def delete_cicd_defect_reporter(self, cicd_id):\n return super().request('DELETE', '/cicd/defectReporting/' + str(cicd_id) + '/delete')",
"def remove(self, ticket):\n ticket_id = str(ticket.id)\n if ticket_id in self.cart:\n del self.cart[ticket_id]\n self.save()",
"def delete_basket():\n basket_id = request.form.get('basket_id')\n fruit_baskets.delete_one({'_id': ObjectId(basket_id)})\n return redirect(url_for('index'))",
"def delete_element(request):\n if request.method == \"POST\":\n try:\n report = load_report(request.session)\n report.delete_element(request.POST[\"type\"], int(request.POST[\"index\"])-1)\n return HttpResponse(\"\")\n except:\n raise Http404()\n raise Http404()",
"def unlink(self, cr, uid, ids, context=None):\n for e in self.browse(cr, uid, ids):\n check_reference = self.pool.get(\"hr.employee.violation\").search(cr, uid, [('punishment_id', '=', e.id)])\n if check_reference:\n raise osv.except_osv(_('Warning!'), _('You Cannot Delete This Punishment Record Which Is Referenced!'))\n return super(osv.osv, self).unlink(cr, uid, ids, context)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
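Likewise, a hedged sketch for removing a previously scheduled report, under the same assumptions as the sketch above (placeholder ticker and risk model ID):

from gs_quant.markets.baskets import Basket

basket = Basket.get('GSMBXXXX')  # placeholder ticker for an existing basket
# remove the factor risk report tied to the placeholder risk model id
basket.delete_factor_risk_report(risk_model_id='AXIOMA_AXUS4M')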
|
If the basket should be backcast using the current composition

|
def default_backcast(self) -> Optional[bool]:
return self.__default_backcast
|
[
"def process_basket(self, basket: BaseBasket, request: HttpRequest) -> None:",
"def test_copyBasket(self):\n basket1 = self.createBasket()\n basket1.addItem(\"beans\")\n basket1.addItem(\"spaghetti hoops\")\n\n basket2 = self.createBasket()\n basket2.copyFrom(basket1)\n\n self.assertEqual(basket1.total(), basket2.total())\n self.assertEqual(basket2.total(), beans.price() + spaghettiHoops.price())\n self.assertEqual(basket1.savings(), basket2.savings())",
"def __init__(self, inner_basket):\n self._basket = inner_basket\n self._price_adjusters = []",
"def bidirectional(self):\n return self.bw_layer is not None",
"def setup_basket(self, basket: BaseBasket, request: HttpRequest) -> None:",
"def is_mix(breed):\n if \"Mix\" in breed:\n return True\n else:\n return False",
"def is_blanc(self):\n \n return self.binning is None",
"def get_composition_bag(self):\n if self._bag:\n for name, quantity in self._bag.items():\n price = MyWarehouse._get_price_for_order(name) # получаем цену на товар из класса MyWarehouse\n print(f'{name}, price - {price:.2f}, quantity - {quantity}, total amount - {price * quantity:.2f}')\n else:\n print('Shopping bag is empty')",
"def check_baking_condition(self, action_type):\n if action_type == 'transform':\n has_obj_cst = check_object_constraint(self.blender_object)\n has_pose_cst = check_pose_constraint(self.blender_object)\n has_non_inherit_bone = False\n if isinstance(self.blender_object.data, bpy.types.Armature):\n for rbone in self.blender_object.data.bones:\n if (rbone.use_inherit_rotation is False or\n rbone.use_inherit_scale is False):\n has_non_inherit_bone = True\n break\n self.need_baking = (\n has_obj_cst or has_pose_cst or has_non_inherit_bone)",
"def __add__(self, other):\n if other in Product.store:\n self.basket.append(other)\n for cle, valeur in Product.store.items():\n if cle == other:\n self.totals += valeur",
"def absorb(self, other: \"Stack\", maximum=None):\n\n if other.get_item().get_id() == self.get_item().get_id():\n if maximum is None:\n quantity = other.get_quantity()\n else:\n quantity = maximum\n other.subtract(self.add(quantity))\n if other._quantity <= 0:\n return True\n return False",
"def __contains__(self, item):\n return item in self.__bag",
"def clone(self):\n position_set = deepcopy(self.position_set)\n return Basket(position_set=position_set, clone_parent_id=self.id, parent_basket=self.ticker)",
"def test_bothOffers(self):\n basket = self.createBasket()\n\n basket.addItem(\"beans\")\n basket.addItem(\"beans\")\n basket.addItem(\"beans\")\n basket.addItem(\"chickpeas\")\n basket.addItem(\"chickpeas\")\n basket.addItem(\"spaghetti hoops\")\n\n # Basket contains one of each offer\n self.assertEqual(basket.total(), 2 * beans.price() + chickpeas.price() + spaghettiHoops.price())\n self.assertEqual(basket.savings(), beans.price() + chickpeas.price())",
"def test_integration_processing_bag(self):\n subprocess.call(\n \"python {} -b {} {}\".format(\n self.SCRIPT_PATH, TSK_FIXTURE_PATH, self.OUTPUT_DIR\n ),\n shell=True,\n )\n bag = bagit.Bag(self.SIP_DIR)\n self.assertTrue(bag.validate())",
"def is_satisfied(self, item: Product):",
"def is_backbone(self):\n return self.atomType in backbone",
"def isDerivedFrom(self, type: 'SoType') -> \"SbBool\":\n return _coin.SoType_isDerivedFrom(self, type)",
"def inBone(self, model):\n uz = self.uz\n n_i = model.layers[self.layer].n # current layer\n n_t = model.layers[self.layer].nBone # new layer\n # calculate reflectance\n r, uzNew = self.calcFresnel(n_i, n_t, abs(uz))\n if np.random.random() > r: # transmitted to bone\n self.ux *= (n_i/n_t)\n self.uy *= (n_i/n_t)\n inside = True\n if uz > 0:\n self.uz = uzNew\n else:\n self.uz = -uzNew\n else:\n self.uz = -uz # reflected\n inside = False\n return inside"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If the basket is flagship (internal only)
|
def flagship(self) -> Optional[bool]:
return self.__flagship
|
[
"def is_shippable(sender, **kwargs):\n rental_item = kwargs.get('instance')\n if rental_item.id:\n return\n\n if rental_item.shipping_method:\n rental_item.is_shippable = True",
"def is_flagged(self, key: Key) -> bool:\n return self.get_rank(key) == Rank.FLAG",
"def is_gift(self):\n return self._is_gift",
"def shipping_cost(fruit):\n\n if is_berry(fruit) is True:\n return 0\n if is_berry(fruit) is False:\n return 5",
"def __contains__(self, item):\n return item in self.__bag",
"def is_flagged(self):\n return self._flagged",
"def get_is_fedex_shipping(self, name):\n return self.carrier and \\\n self.carrier.carrier_cost_method == 'fedex' or False",
"def is_basket_empty(basket):\n if not basket:\n return True\n return False",
"def set_as_ship(self):\n self.is_ship = True",
"def has_ship(data, coord, b = False):\n #print(coord)\n #print(ord(coord[0]) - ord('A'))\n if b:\n if data[coord[0]][coord[1]] == ' ':\n return False\n else:\n return True\n if data[ord(coord[0]) - ord('A')][coord[1]-1] == '*' or data[ord(coord[0]) - ord('A')][coord[1]-1] == 'X':\n return True\n elif data[ord(coord[0]) - ord('A')][coord[1]-1] == ' ':\n return False",
"def ship(self) -> bool:\n desired_date = self.order.desired_shipment_date\n if desired_date is None or desired_date <= timezone.now():\n self.order.item.ship(to=self.order.user, order=self.order)\n\n return True\n\n return False",
"def is_allergic_to(self, item):\n idx = Allergies.allergies.index(item)\n\n score = 2 ** idx\n\n if score & self.patient_allergies:\n return True\n\n return False",
"def is_reserved(self):\n return bool(self.current_cart())",
"def _is_stackable(self):\n if (\n self.acceptance_off is None\n or self.acceptance is None\n or self.counts_off is None\n ):\n return False\n else:\n return True",
"def billing_same_as_shipping():",
"def okay_for_stuff_delivery(zip_type):\n return zip_type is not ZipType.PO_BOXES_ONLY \\\n and zip_type is not ZipType.INTERNAL",
"def isin_bond(self):\n return 'bond' in self.flags",
"def is_blanc(self):\n \n return self.binning is None",
"def sagittalFlag(): \n slicingDim = params.WhichExperiment.Dataset.slicingInfo.slicingDim\n nucleus_index = params.WhichExperiment.Nucleus.Index[0]\n return (nucleus_index == 1) and (slicingDim == 2)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initial price the basket should start ticking at
|
def initial_price(self) -> Optional[float]:
return self.__initial_price
|
[
"def set_next_price(bundle_item):\r\n prev_price = bundle_item",
"def _get_base_price(self) -> int:\n pass",
"def set_prev_price(bundle_item):\r\n prev_price = bundle_item",
"def get_base_price(self):\n # in progress\n # day = datetime.date.weekday()\n # print day\n # time = datetime.time()\n # print time\n base_price = random.randint(5, 9)\n\n return base_price",
"def simulate_fundamental_price(self, events):\n price_changes = int_zeros(self.max_t)\n price_changes[events == EVENT_PRICE_CHANGE_DOWN] = -1\n price_changes[events == EVENT_PRICE_CHANGE_UP] = +1\n return self.initial_price + np.cumsum(price_changes)",
"def test_simple_stock_zero_initial_half_step(self):\n with mn.model(timestep=0.5) as m:\n S = mn.stock('S', 5)\n self.assertEqual(S[''], 0)\n m.step()\n self.assertEqual(S[''], 2.5)\n m.step()\n self.assertEqual(S[''], 5)\n m.reset()\n self.assertEqual(S[''], 0)\n m.step(3)\n self.assertEqual(S[''], 7.5)",
"def __init__(self, inner_basket):\n self._basket = inner_basket\n self._price_adjusters = []",
"def get_base_price(self):\n\n #randint(0,6)--range weekday\n #randint(0,24)\n base_price = randint(5, 9)\n today = datetime.datetime.today()\n weekday = int(today.weekday())\n is_rush_hours = int(today.hour)\n if weekday in range(0, 4) and is_rush_hours in range(8, 11):\n base_price += 4\n\n return base_price",
"def starting_balance(self) -> Decimal:\n raise NotImplementedError",
"def sellPrice(self):\n return self.initial_btcprice * (1 + FEE + self.strategy)",
"def roundToMinTick(self, price):\n self.log.debug(__name__ + \": \" + 'roundToMinTick price: ' + str(price) +\n str(type(price)) + str(type(self.minTick)))\n return int(price / self.minTick) * self.minTick",
"def set_initial_leverage():\n leverage = get_relevant_leverage()\n if leverage < conf.leverage_low:\n set_leverage(conf.leverage_default)\n return True",
"def afford_amount(self, market_prices, product):\n return int(min(market_prices[product][1],\n self.gold // market_prices[product][0]))",
"def get_item_base_price(self, item):\n return item.price",
"def add_prices(self):\n for i in range(self.parameters[\"number_of_products\"]):\n self.product_space.nodes[i][\"price\"] = \\\n self.product_space.nodes[i][\"delta\"] / max(\n self.product_space.nodes[i][\"firms\"], 1)",
"def roundToMinTick(self, price, minTick=0.01):\n if price < 0.0:\n self.log.error(__name__ + '::roundToMinTick price: EXIT, negtive price =' + str(price))\n self.end()\n rounded = int(price / minTick) * minTick\n self.log.debug(__name__ + '::roundToMinTick price: round ' + str(price) + 'to ' + str(rounded))\n return rounded",
"def change_price_precent(self):\n stock_firstday = self.closeprice[0]\n self.dataframe['stock_%chg'] = (self.closeprice - stock_firstday)/stock_firstday\n change_price_precent = self.dataframe['stock_%chg']\n return change_price_precent",
"def test_simple_stock_with_varying_initial(self):\n with mn.model(treatments=['As is', 'To be']) as m:\n S = mn.stock('S', 1, mn.PerTreatment({'As is': 22, 'To be': 23}))\n self.assertEqual(S['As is'], 22)\n self.assertEqual(S['To be'], 23)\n m.step()\n self.assertEqual(S['As is'], 23)\n self.assertEqual(S['To be'], 24)\n m.step()\n self.assertEqual(S['As is'], 24)\n self.assertEqual(S['To be'], 25)\n m.reset()\n self.assertEqual(S['As is'], 22)\n self.assertEqual(S['To be'], 23)\n m.step(3)\n self.assertEqual(S['As is'], 25)\n self.assertEqual(S['To be'], 26)",
"def sale_price(self) :\n if self.sold_on is not None :\n return 0.0 # Already sold\n return 5000.0 * self.wheels"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
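A hedged sketch of where initial_price would typically be supplied, assuming the Basket constructor forwards these keyword arguments (as the defaults helper later in this section suggests) and that Position/PositionSet are available from gs_quant.markets.position_set; tickers, names and weights below are placeholders:

from gs_quant.markets.baskets import Basket
from gs_quant.markets.position_set import Position, PositionSet

positions = PositionSet(positions=[Position(identifier='AAPL UW', weight=0.5),
                                   Position(identifier='MSFT UW', weight=0.5)])
my_basket = Basket(ticker='GSMBMYBASKET', name='My Basket', position_set=positions,
                   initial_price=100)  # defaults to 100 when no divisor is supplied
my_basket.create()  # submits the basket for creation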
|
If the basket should be published to Bloomberg
|
def publish_to_bloomberg(self) -> Optional[bool]:
return self.__publish_to_bloomberg
|
[
"def has_published_version(self, xblock):\n pass # lint-amnesty, pylint: disable=unnecessary-pass",
"def is_available_bsin(cls, bsin):\n try:\n cls.objects.get(bsin=bsin)\n return False\n except Brand.DoesNotExist:\n return True",
"def PublishingTo3DDwf(self) -> bool:",
"def is_basket_empty(basket):\n if not basket:\n return True\n return False",
"def may_publish(ctx):\n\n ctx.exit(0 if is_publish_branch(ctx) else 1)",
"def totest_is_publishing(self):\n\n url = self.api.makeurl(\n ['source', 'openSUSE:%s:ToTest' % self.project, '_meta'])\n f = self.api.retried_GET(url)\n root = ET.parse(f).getroot()\n if not root.find('publish'): # default true\n return True\n\n for flag in root.find('publish'):\n if flag.get('repository', None) or flag.get('arch', None):\n continue\n if flag.tag == 'enable':\n return True\n return False",
"def is_good_buy(self, ticker: str) -> bool:\n pass",
"def test_i_decide_not_to_buy_the_product():",
"def publish_to_factset(self) -> Optional[bool]:\n return self.__publish_to_factset",
"def brokerInUse(self,bid):\n found = False\n for tLayer in self.getTLayers().itervalues():\n found = found or (bid == tLayer.getBroker().id())\n return found",
"def bnb_resupply(self):\n\n if not BUY_BNB:\n return\n\n try:\n bnb_current_quantity = 1.0\n bnb_quantity_to_buy = 0.0\n balance = self.client.futures_account_balance()\n \n for dictionary in balance:\n if dictionary['asset'] == BNB:\n bnb_current_quantity = float(dictionary['balance'])\n break\n\n bnb_current_quantity = round(bnb_current_quantity, 2)\n\n if bnb_current_quantity == NOTHING:\n usdt_spot_quantity = self.get_spot_coin_balance(symbol=USDT)\n available_usdt = self.futures_get_available_tether()\n bnb_mark_price = self.futures_get_mark_price(\"BNBUSDT\")\n usdt_quantity_to_transfer = available_usdt * 0.01\n\n\n if available_usdt < BNB_BUY_MIN:\n self.print_log(f\"Can't buy bnb. Need at least $10. Available usdt is ${available_usdt}\")\n return\n\n if bnb_mark_price == 0.0:\n self.print_log(f\"Can't buy bnb. Mark price of BNB is ${bnb_mark_price}\")\n return\n\n\n if usdt_quantity_to_transfer <= BNB_BUY_MIN:\n usdt_quantity_to_transfer = BNB_BUY_MIN\n self.print_log(\"USDT transfer too low, setting transfer amount to $11.00\")\n\n if usdt_spot_quantity < BNB_BUY_MIN:\n # transfer money to spot account\n self.futures_to_spot_transfer(usdt_quantity_to_transfer)\n\n # check if the transfer from futures to spot was successful\n usdt_spot_quantity = self.get_spot_coin_balance(symbol=USDT)\n\n if usdt_spot_quantity < usdt_quantity_to_transfer:\n self.print_log(f\"Can't buy bnb. Spot transfer failed\")\n return\n\n bnb_quantity_to_buy = (usdt_quantity_to_transfer - 0.90) / bnb_mark_price\n bnb_quantity_to_buy = round(bnb_quantity_to_buy, 4)\n\n # min amount of BNB to buy is BNB_BUY_MIN\n notional = self.get_notional_value(bnb_mark_price, bnb_quantity_to_buy, BNB_MIN)\n\n if bnb_quantity_to_buy < notional:\n bnb_quantity_to_buy = notional\n\n self.client.order_market(\n symbol = \"BNBUSDT\",\n side = SideType.SIDE_BUY,\n quantity = bnb_quantity_to_buy,\n recvWindow = RECV_WINDOW)\n\n self.print_log(f\"Bought BNB/USDT {bnb_quantity_to_buy}\")\n\n # check the quantity of bnb we just bought\n bnb_spot_qty = self.get_spot_coin_balance(symbol=BNB)\n\n # transfer BNB back in futures\n self.spot_to_futures_transfer(asset=BNB, amount=bnb_spot_qty)\n except Exception as e:\n self.handle_exception(e, \"Could not resupply bnb\")",
"def is_buy(self) -> bool:\n return self.side == TradeSide.BUY",
"def is_bid_line(self, params):\n if len(params) > 1 and params[2] == 'BID':\n return True \n return False",
"def test_buctetlist_has_one_item(self):\n self.blist.add_item_to_bucket(\"going to glide\")\n self.assertEqual([\"going to glide\"], self.blist.display_list())",
"def _should_automatically_send(business_process):\n return BrokerNoteBulkGeneral.should_automatically_send()",
"def is_seller(self) -> bool:\n keywords = ['budget']\n for word in keywords:\n if word in self.content.lower():\n return False\n return True",
"def setup_basket(self, basket: BaseBasket, request: HttpRequest) -> None:",
"def is_published(self):\n return self.article.stage == STAGE_PUBLISHED",
"def soldout():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If the basket should be published to Factset
|
def publish_to_factset(self) -> Optional[bool]:
return self.__publish_to_factset
|
[
"def publish_to_bloomberg(self) -> Optional[bool]:\n return self.__publish_to_bloomberg",
"def is_published(self):\n return self.article.stage == STAGE_PUBLISHED",
"def is_satisfied(self, item: Product):",
"def is_basket_empty(basket):\n if not basket:\n return True\n return False",
"def requires_republish(self) -> bool:\n return typing.cast(\n bool,\n self._properties.get(\"requiresRepublish\"),\n )",
"def requires_republish(self, value: bool):\n self._properties[\"requiresRepublish\"] = value",
"def PublishingTo3DDwf(self) -> bool:",
"def totest_is_publishing(self):\n\n url = self.api.makeurl(\n ['source', 'openSUSE:%s:ToTest' % self.project, '_meta'])\n f = self.api.retried_GET(url)\n root = ET.parse(f).getroot()\n if not root.find('publish'): # default true\n return True\n\n for flag in root.find('publish'):\n if flag.get('repository', None) or flag.get('arch', None):\n continue\n if flag.tag == 'enable':\n return True\n return False",
"def test_album_has_published(self):\n one_album = Album.objects.get(title='first')\n self.assertEqual(one_album.published, 'PRIVATE')",
"def is_published(self, request=None):\n if request is not None:\n if request.user.is_staff or request.user == self.user:\n return True\n return (self.publish_date <= now() and\n self.status == CONTENT_STATUS_PUBLISHED)",
"def setup_basket(self, basket: BaseBasket, request: HttpRequest) -> None:",
"def test_already_published(self):\n # Create some published objects\n for i in range(0, 3):\n ConcretePublishableFactory(published=True)\n # Some mock objects\n mock_modeladmin = mock.Mock()\n mock_request = mock.Mock()\n\n # Call make_published() on the published objects\n make_published(mock_modeladmin, mock_request, ConcretePublishableModel.objects.all())\n\n # All of the objects are still published\n for model in ConcretePublishableModel.objects.all():\n self.assertTrue(model.published)",
"def test_publishable_queryset(self):\n unpublished = self.object\n published = ConcretePublishableFactory(published=True)\n\n qs = PublishableQuerySet(self.object_class)\n # Currently, there are 2 objects, 1 published and 1 unpublished\n self.assertEqual(self.object_class.objects.count(), 2)\n self.assertEqual(\n set(self.object_class.objects.filter(published=True)),\n set([published])\n )\n self.assertEqual(\n set(self.object_class.objects.filter(published=False)),\n set([unpublished])\n )\n\n with self.subTest('unpublished() method'):\n # The .unpublished() method returns all the unpublished objects\n self.assertEqual(\n set(qs.unpublished()),\n set(self.object_class.objects.filter(published=False))\n )\n\n with self.subTest('published() method with PUBLISH_FILTER_ENABLED==True'):\n # The .published() method returns all the published objects when\n # PUBLISH_FILTER_ENABLED is True\n self.assertTrue(settings.PUBLISH_FILTER_ENABLED)\n self.assertEqual(\n set(qs.published()),\n set(self.object_class.objects.filter(published=True))\n )\n\n with self.subTest('published() method with PUBLISH_FILTER_ENABLED==False'):\n # The .published() method returns all of the objects when\n # PUBLISH_FILTER_ENABLED is False\n with self.settings(PUBLISH_FILTER_ENABLED=False):\n self.assertEqual(\n set(qs.published()),\n set(self.object_class.objects.all())\n )",
"def publish_items(self, request, queryset):\n # We should exclude any draft copies: these can only be published \n # through merging.\n original_length = len(queryset)\n rows_updated = queryset.filter(copy_of__exact=None).update(\n status=PUBLISHED_STATE\n )\n if rows_updated == 1:\n message = \"One item was successfully published.\"\n else:\n message = \"%d items were successfully published.\" % rows_updated\n if original_length != rows_updated:\n message += (\n \" Any draft copies selected were not published; to publish \"\n \" these, merge them into the original.\"\n )\n self.message_user(request, message)",
"def has_stored_food(self) -> bool:\n return self.has_fat_tissue and (self.stored_fat_food > 0)",
"def has_a_product(obj):\n return \"products\" in obj and len(obj[\"products\"]) > 0",
"def isIsomerization(self):\n return len(self.reactants) == 1 and len(self.products) == 1",
"def is_published(self):\n\n now = datetime.now()\n\n return ((not self.publish_from or self.publish_from < now)\n and\n (not self.publish_to or self.publish_to > now))",
"def cart_contains_item_needing_delivery(request):\n cart = Cart(request)\n for item in cart:\n if item[\"item\"].delivery_required:\n return True\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If the basket should be published to Reuters
|
def publish_to_reuters(self) -> Optional[bool]:
return self.__publish_to_reuters
|
[
"def publish_to_bloomberg(self) -> Optional[bool]:\n return self.__publish_to_bloomberg",
"def requires_republish(self) -> bool:\n return typing.cast(\n bool,\n self._properties.get(\"requiresRepublish\"),\n )",
"def publish_to_factset(self) -> Optional[bool]:\n return self.__publish_to_factset",
"def requires_republish(self, value: bool):\n self._properties[\"requiresRepublish\"] = value",
"def runs_pubd(self):\n return self.is_root or not (self.is_hosted or only_one_pubd)",
"def is_published(self):\n\n now = datetime.now()\n\n return ((not self.publish_from or self.publish_from < now)\n and\n (not self.publish_to or self.publish_to > now))",
"def can_publish(self, user=None):\n\n if user is None:\n user = get_request().user\n\n return user.has_privilege(NEWS_PUBLIC)",
"def mptt_can_publish(self):\n try:\n public_parent = self.parent.public\n except ObjectDoesNotExist:\n return False\n except AttributeError:\n pass\n return True",
"def should_run(self):\n if self.amount_to_recycle > 0 and self.item_to_recycle is not None:\n return True\n return False",
"def is_transcribed(self) -> bool:\n raise NotImplementedError",
"def totest_is_publishing(self):\n\n url = self.api.makeurl(\n ['source', 'openSUSE:%s:ToTest' % self.project, '_meta'])\n f = self.api.retried_GET(url)\n root = ET.parse(f).getroot()\n if not root.find('publish'): # default true\n return True\n\n for flag in root.find('publish'):\n if flag.get('repository', None) or flag.get('arch', None):\n continue\n if flag.tag == 'enable':\n return True\n return False",
"def publish_items(self, request, queryset):\n # We should exclude any draft copies: these can only be published \n # through merging.\n original_length = len(queryset)\n rows_updated = queryset.filter(copy_of__exact=None).update(\n status=PUBLISHED_STATE\n )\n if rows_updated == 1:\n message = \"One item was successfully published.\"\n else:\n message = \"%d items were successfully published.\" % rows_updated\n if original_length != rows_updated:\n message += (\n \" Any draft copies selected were not published; to publish \"\n \" these, merge them into the original.\"\n )\n self.message_user(request, message)",
"def is_published(self, request=None):\n if request is not None:\n if request.user.is_staff or request.user == self.user:\n return True\n return (self.publish_date <= now() and\n self.status == CONTENT_STATUS_PUBLISHED)",
"def is_seller(self) -> bool:\n keywords = ['budget']\n for word in keywords:\n if word in self.content.lower():\n return False\n return True",
"def PublishingTo3DDwf(self) -> bool:",
"def supportsPublisherAffiliation():",
"def is_published(self):\n return self.article.stage == STAGE_PUBLISHED",
"def may_publish(ctx):\n\n ctx.exit(0 if is_publish_branch(ctx) else 1)",
"def has_perm_publish_data(user):\n has_perm_publish(user, rights.publish_data)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If updates require both an edit and a rebalance, the rebalance will not be scheduled unless the edit report succeeds
|
def __edit_and_rebalance(self, edit_inputs: CustomBasketsEditInputs,
rebal_inputs: CustomBasketsRebalanceInputs) -> CustomBasketsResponse:
_logger.info('Current update request requires multiple reports. Your rebalance request will be submitted \
once the edit report has completed. Submitting basket edits now...')
response = GsIndexApi.edit(self.id, edit_inputs)
report_id = response.report_id
self.__latest_create_report = GsReportApi.get_report(response.report_id)
report_status = self.poll_report(report_id, timeout=600, step=15)
if report_status != ReportStatus.done:
raise MqError(f'The basket edit report\'s status is {report_status}. The current rebalance request will \
not be submitted in the meantime.')
        _logger.info('Your basket edits have completed successfully. Submitting rebalance request now...')
response = GsIndexApi.rebalance(self.id, rebal_inputs)
return response
|
[
"def run_scheduler(self, cr, uid, context=None): \n self.update_crm(cr, uid, context)\n return True",
"def test_successful_update(self):\n\n manager = SchedulerManager()\n manager.sync_with_database()",
"def if_trigger_update(self):\n\n if self.status != st.ComponentStatus.OPERATIONAL:\n self.current_fails = self.current_fails + 1\n self.logger.warning(f\"Failure #{self.current_fails} with threshold set to {self.allowed_fails}\")\n if self.current_fails <= self.allowed_fails:\n self.trigger_update = False\n return\n self.current_fails = 0\n self.trigger_update = True",
"def _cron(self):\n while True:\n self.check_update()\n sleep(60)",
"def test_pricing_updated_on_assignee_updated(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n assignee = order.assignees.first()\n assignee.estimated_time += 100\n assignee.save()\n\n order.refresh_from_db()\n assert order.total_cost > 0\n post_update_total_cost = order.total_cost\n\n assert pre_update_total_cost != post_update_total_cost",
"def test_billing_recurring_update(self):\n pass",
"def test_updating_account_bad(self) -> None:\n\n self.assertFalse(self.user.is_active)\n updating_account(uid='sefbnsefnlwefnlkewfjnlkewjfelwi', user_id=1, action='activate')\n self.user.refresh_from_db()\n self.assertFalse(self.user.is_active)",
"async def run(self):\n last_update = await self._get_last_update()\n if not last_update or last_update['created_at'].date() != datetime.datetime.utcnow().date():\n await self._update_prices()\n else:\n self._schedule_next_update()",
"def check_update_is_requested_and_apply(self):\n # check\n self.assertTrue(self.plot.update_required)\n self.assertTrue(self.plot.plot_updater.active)\n # update\n self.plot._check_scheduled_updates()",
"def test_patch_with_reschedule(self):\n return_dts = timezone.now()\n Run.objects.update(enqueue_dts=timezone.now())\n response = self.patch(\n '/api/v1/run/1/',\n {\n 'return_dts': return_dts.isoformat(' '),\n 'return_success': True,\n }\n )\n\n self.assertEqual(202, response.status_code)\n self.assertEqual(2, Run.objects.filter(job_id=1).count())\n self.assertEqual(\n return_dts, Run.objects.filter(job_id=1)[0].return_dts)",
"def do_update(self):\n pass",
"def test_patch_with_reschedule(self):\n Run.objects.update(enqueue_dts=timezone.now())\n response = self.patch(\n '/api/v1/run/1/',\n {\n 'return_dts': timezone.now().isoformat(' '),\n 'return_success': True,\n }\n )\n\n self.assertEqual(202, response.status_code)\n self.assertEqual(2, Job.objects.get(pk=1).run_set.count())\n self.assertEqual(1, Job.objects.get(pk=3).run_set.count())",
"def update(self, force_add=False):\n user = self.get_target_user()\n if not can_use_temp_charge(user.pk):\n self.error(_('you are not authorized to charge'), True)\n uid = user.pk\n self.__validate(uid)\n max_data = self.get_max_charges(uid)\n credit = 0\n days = 0\n if max_data[0] > 0:\n credit = self.get_int('c')\n if max_data[1] > 0:\n days = self.get_int('d')\n if credit == 0 and days == 0:\n self.error(_('please select charge or days'), True)\n if credit > max_data[0]:\n credit = max_data[0]\n elif credit < 0:\n credit = 0\n if days > max_data[1]:\n days = max_data[1]\n elif days < 0:\n days = 0\n t = TempChargeState.objects.filter(user=uid).first()\n i = self.gen_invoice(uid, credit, days, t.is_locked)\n self.update_state(uid, credit * -1, days * -1)\n if t.is_locked:\n return True, i\n pi = PayInvoice(use_discount=False, invoice=i, is_online=False, is_system=True,\n price=0, ref_code='-', default_less_subject=read_config('invoice_temp_charge_subject', 5),\n request=self.req)\n pi.pay()\n pi.commit()\n return True, 0",
"def inline_update_budget(request):\n\n logger.debug('INLINE UPDATE BUDGET IS RUNNING')\n\n from stalker_pyramid.views import get_logged_in_user, \\\n get_date_range, milliseconds_since_epoch\n logged_in_user = get_logged_in_user(request)\n utc_now = datetime.datetime.now(pytz.utc)\n\n # *************************************************************************\n # collect data\n attr_name = request.params.get('attr_name', None)\n attr_value = request.params.get('attr_value', None)\n\n logger.debug('attr_name %s', attr_name)\n logger.debug('attr_value %s', attr_value)\n\n # get task\n budget_id = request.matchdict.get('id', -1)\n budget = Budget.query.filter(Budget.id == budget_id).first()\n\n # update the task\n if not budget:\n transaction.abort()\n return Response(\"No budget found with id : %s\" % budget_id, 500)\n\n if attr_name and attr_value:\n\n logger.debug('attr_name %s', attr_name)\n\n if attr_name == 'start_and_end_dates':\n logger.debug('attr_name %s', attr_name)\n start, end = attr_value.split(' - ')\n\n budget.set_generic_text_attr('start_date', int(start))\n budget.set_generic_text_attr('end_date', int(end))\n\n logger.debug(\"int(start) : %s\" % budget.get_generic_text_attr('start_date'))\n logger.debug(\"int(end) : %s\" % budget.get_generic_text_attr('end_date'))\n\n check_project_start_end_date(budget.project)\n\n budget.updated_by = logged_in_user\n budget.date_updated = utc_now\n else:\n setattr(budget, 'attr_name', attr_value)\n\n else:\n logger.debug('not updating')\n return Response(\"MISSING PARAMETERS\", 500)\n\n return Response(\n 'Budget updated successfully %s %s' % (attr_name, attr_value)\n )",
"def test_update_escalation(self):\n pass",
"def rebalance(self):\n rebalance_table = PrettyTable()\n rebalance_table.field_names = ['Name', 'ID', 'Price', 'Percentage Diff', 'Required Purchase/Sales', 'Status']\n for s in self.stocks:\n status = None\n s.rebalance(self.total_asset)\n if (abs(s.percentage_difference()) > 3.8) & (abs(s.purchase_sales()) > 1.0):\n status = \"ACT\"\n else:\n status = \"\"\n rebalance_table.add_row([s.stock_name(), s.stock_id(), s.price(), s.percentage_difference(), s.purchase_sales(), status])\n check = input(\"Click any key to view rebalance information...\")\n os.system(\"clear\")\n print(\"Rebalancing Information:\")\n print(rebalance_table)",
"def test_stale_update_after(self):\n try:\n self.view001(stale='update_after')\n except Exception as err:\n self.fail('An unexpected error was encountered:' +str(err))",
"def data_updater():\n # This is a daemon thread so no need to explicitly\n # poll for any shutdown events.\n sleep_time = 0\n while True:\n interval = wallet['update_info']['interval']\n if time.time() > sleep_time + interval or \\\n wallet['update_info']['in_need']:\n do_update()\n sleep_time = time.time()\n time.sleep(1)",
"def schedule_full_hist_refresh_maybe(self):\n parent = self.weakParent()\n if self._full_refresh_ctr > 60:\n # Too many retries. Give up.\n self.print_error(\n \"History tab: Full refresh scheduler timed out.. wallet hasn't settled\"\n \" in 1 minute. Giving up.\"\n )\n self.full_hist_refresh_timer.stop()\n elif parent and parent.history_list.has_unknown_balances:\n # Still have 'Unknown' balance. Check if wallet is settled.\n if self.need_process_v or not parent.wallet.is_fully_settled_down():\n # Wallet not fully settled down yet... schedule this function to run later\n self.print_error(\n \"History tab: Wallet not yet settled.. will try again in 1\"\n \" second...\"\n )\n else:\n # Wallet has settled. Schedule an update. Note this function may be called again\n # in 1 second to check if the 'Unknown' situation has corrected itself.\n self.print_error(\n \"History tab: Wallet has settled down, latching need_update to true\"\n )\n parent.need_update.set()\n self._full_refresh_ctr += 1\n else:\n # No more polling is required. 'Unknown' balance disappeared from\n # GUI (or parent window was just closed).\n self.full_hist_refresh_timer.stop()\n self._full_refresh_ctr = 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
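This private helper is normally reached through the public update flow; a hedged sketch of a combined metadata-and-composition change that would route through it, assuming Basket.get, the description and position_set setters, and an update() entry point exist on the public surface (ticker and position values are placeholders):

from gs_quant.markets.baskets import Basket
from gs_quant.markets.position_set import Position, PositionSet

basket = Basket.get('GSMBXXXX')  # placeholder ticker for an existing basket
basket.description = 'Updated basket description'  # metadata change: requires an edit report
basket.position_set = PositionSet(positions=[Position(identifier='AAPL UW', weight=1.0)])  # composition change: requires a rebalance
basket.update()  # the edit report runs first; the rebalance is only submitted once it completes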
|
Current basket settings for an existing basket
|
def __populate_current_attributes_for_existing_basket(self, gs_asset: GsAsset):
self.__clone_parent_id = get(gs_asset, 'parameters.cloneParentId')
self.__default_backcast = get(gs_asset, 'parameters.defaultBackcast')
self.__description = get(gs_asset, 'description')
self.__flagship = get(gs_asset, 'parameters.flagship')
self.__gs_asset_type = get(gs_asset, 'type')
self.__hedge_id = get(gs_asset, 'parameters.hedgeId')
self.__include_price_history = False
self.__live_date = get(gs_asset, 'liveDate')
self.__return_type = get(gs_asset, 'parameters.indexCalculationType')
self.__ticker = get(gs_asset, 'xref.ticker')
self.__initial_state = {}
for prop in CustomBasketsEditInputs.properties().union(CustomBasketsRebalanceInputs.properties(),
CustomBasketsPricingParameters.properties(),
PublishParameters.properties()):
set_(self.__initial_state, prop, get(self, prop))
|
[
"def setup_basket(self, basket: BaseBasket, request: HttpRequest) -> None:",
"def show_product(self):\n return self.baskets",
"def GetCurrentSettings():\n return Setting.get_by_id('current_settings')",
"def get_shipping_settings(self):\n return self.client.execute(\"product/get-shipping-setting\", \"GET\")",
"def getCurrentSettings(self):\n self.listener.sendData(BluetoothLampCommand.ACTION_GET_LAMP_CURRENT_SETTINGS)",
"def __populate_default_attributes_for_new_basket(self, **kwargs):\n self.__allow_ca_restricted_assets = get(kwargs, 'allow_ca_restricted_assets')\n self.__allow_limited_access_assets = get(kwargs, 'allow_limited_access_assets')\n self.__clone_parent_id = get(kwargs, 'clone_parent_id')\n self.__currency = get(kwargs, 'currency')\n self.__default_backcast = get(kwargs, 'default_backcast', True)\n self.__description = get(kwargs, 'description')\n self.__divisor = get(kwargs, 'divisor')\n self.__hedge_id = get(kwargs, 'hedge_id')\n self.__include_price_history = get(kwargs, 'include_price_history', False)\n self.__initial_price = get(kwargs, 'initial_price', 100) if self.__divisor is None else None\n self.__name = get(kwargs, 'name')\n self.__parent_basket = get(kwargs, 'parent_basket')\n if self.__parent_basket is not None and self.__clone_parent_id is None:\n self.__clone_parent_id = get(__get_gs_asset(self.__parent_basket), 'id')\n self.__position_set = get(kwargs, 'position_set')\n self.__publish_to_bloomberg = get(kwargs, 'publish_to_bloomberg', True)\n self.__publish_to_factset = get(kwargs, 'publish_to_factset', False)\n self.__publish_to_reuters = get(kwargs, 'publish_to_reuters', False)\n self.__return_type = get(kwargs, 'return_type')\n self.__target_notional = get(kwargs, 'target_notional', 10000000)\n self.__ticker = get(kwargs, 'ticker')",
"def get_current(self) -> SiteConfiguration:\n return self.get_for_site_id(Site.objects.get_current().pk)",
"def get_shop_basket_id(request):\n if request.user.is_authenticated:\n if request.user.basket_id:\n return request.user.basket_id\n\n return longclawbasket.utils.basket_id(request)",
"def loadBasket():\n if isUser(session.get(\"UserID\")):\n return getBasketAsJsonString(session[\"UserID\"])\n return {}",
"def process_basket(self, basket: BaseBasket, request: HttpRequest) -> None:",
"def get_settings():\n return settings",
"def settings(self) -> pulumi.Output['outputs.ExchangeSettings']:\n return pulumi.get(self, \"settings\")",
"def bims_preferences(request):\n return {\n 'bims_preferences': settings.BIMS_PREFERENCES\n }",
"def OperationSettings(self):\n return self.__OperationSettings",
"def get(ctx, setting):\n print(f\"{ctx.obj.config.dump(setting)}\")",
"def _get_settings(self):\n settings = ''\n for rotor in self.rotors:\n settings += rotor.get_setting()\n return settings",
"def get_iphone_iTunes_settings(self):\n return self.parsed_info_file['iTunes Settings']",
"def __init__(self, inner_basket):\n self._basket = inner_basket\n self._price_adjusters = []",
"def system_settings(self):\n return self._system_settings"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Default basket settings prior to creation
|
def __populate_default_attributes_for_new_basket(self, **kwargs):
self.__allow_ca_restricted_assets = get(kwargs, 'allow_ca_restricted_assets')
self.__allow_limited_access_assets = get(kwargs, 'allow_limited_access_assets')
self.__clone_parent_id = get(kwargs, 'clone_parent_id')
self.__currency = get(kwargs, 'currency')
self.__default_backcast = get(kwargs, 'default_backcast', True)
self.__description = get(kwargs, 'description')
self.__divisor = get(kwargs, 'divisor')
self.__hedge_id = get(kwargs, 'hedge_id')
self.__include_price_history = get(kwargs, 'include_price_history', False)
self.__initial_price = get(kwargs, 'initial_price', 100) if self.__divisor is None else None
self.__name = get(kwargs, 'name')
self.__parent_basket = get(kwargs, 'parent_basket')
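        # if a parent basket is given without an explicit clone id, derive the
        # clone parent id from the parent basket's GS asset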
if self.__parent_basket is not None and self.__clone_parent_id is None:
self.__clone_parent_id = get(__get_gs_asset(self.__parent_basket), 'id')
self.__position_set = get(kwargs, 'position_set')
self.__publish_to_bloomberg = get(kwargs, 'publish_to_bloomberg', True)
self.__publish_to_factset = get(kwargs, 'publish_to_factset', False)
self.__publish_to_reuters = get(kwargs, 'publish_to_reuters', False)
self.__return_type = get(kwargs, 'return_type')
self.__target_notional = get(kwargs, 'target_notional', 10000000)
self.__ticker = get(kwargs, 'ticker')
|
[
"def setup_basket(self, basket: BaseBasket, request: HttpRequest) -> None:",
"def __populate_current_attributes_for_existing_basket(self, gs_asset: GsAsset):\n self.__clone_parent_id = get(gs_asset, 'parameters.cloneParentId')\n self.__default_backcast = get(gs_asset, 'parameters.defaultBackcast')\n self.__description = get(gs_asset, 'description')\n self.__flagship = get(gs_asset, 'parameters.flagship')\n self.__gs_asset_type = get(gs_asset, 'type')\n self.__hedge_id = get(gs_asset, 'parameters.hedgeId')\n self.__include_price_history = False\n self.__live_date = get(gs_asset, 'liveDate')\n self.__return_type = get(gs_asset, 'parameters.indexCalculationType')\n self.__ticker = get(gs_asset, 'xref.ticker')\n\n self.__initial_state = {}\n for prop in CustomBasketsEditInputs.properties().union(CustomBasketsRebalanceInputs.properties(),\n CustomBasketsPricingParameters.properties(),\n PublishParameters.properties()):\n set_(self.__initial_state, prop, get(self, prop))",
"def __defaults__(self): \n self.tag = 'weights'\n \n self.vehicle = Data()\n self.settings = Data()",
"def setup_item(self, item: BaseBasketItem, request: HttpRequest) -> None:",
"def config_defaults(self):\n return {\n \"ingredients\": [data_ingredient, builder_ingredient],\n \"run_config\": copy(cd.run_config),\n \"loader_config\": copy(cd.loader_config),\n \"builder_config\": copy(cd.builder_config),\n \"tb_config\": copy(cd.tb_config),\n \"lr_config\": copy(cd.lr_config),\n }",
"def set_default_market(self):\n index = int(self.ctl.get_parameter_value(14)) - 1\n self.set_combo_selection(index, self.gui.cmb_market_code)",
"def restore_defaults(self):\n\n pass",
"def _default_options(cls):\n pass",
"def __init__(self, inner_basket):\n self._basket = inner_basket\n self._price_adjusters = []",
"def addDefaults(cls):\n dic = cls.getAll()\n dic.update(cls.DEFAULT_HELIXTYPES)\n pymol.plugins.pref_set('BETAFAB_HELIXTYPES', dic)\n pymol.plugins.pref_save(quiet=True)",
"def install_defaults():\n normal = Level.get_severity(Level.normal)\n warning = Level.get_severity(Level.warning)\n error = Level.get_severity(Level.error)\n\n d = Status(name=\"Down\", slug=\"down\", image=\"cross-circle\", severity=error, \\\n description=\"The service is currently down\")\n u = Status(name=\"Up\", slug=\"up\", image=\"tick-circle\", severity=normal, \\\n description=\"The service is up\")\n w = Status(name=\"Warning\", slug=\"warning\", image=\"exclamation\", severity=warning, \\\n description=\"The service is experiencing intermittent problems\")\n\n d.put()\n u.put()\n w.put()\n\n s = Setting(name=\"installed_defaults\")\n s.put()",
"def setDefaults():\n user_defaults = NSUserDefaults.standardUserDefaults()\n pref_dict = {\n Preferences.ordering_key: Preferences.ordering_default,\n Preferences.update_interval_key: Preferences.update_interval_default,\n Preferences.subreddit_key: Preferences.subreddit_default,\n Preferences.limit_key: Preferences.limit_default\n\n }\n nspref_dict = NSDictionary.dictionaryWithDictionary_(pref_dict)\n user_defaults.registerDefaults_(nspref_dict)",
"def get_default_options(self):\n return {}",
"def make_default_settings():\n default_settings = {\n 'height': 24, \n 'width': 24, \n 'max_box_height': 7,\n 'max_box_width': 7,\n 'max_container_height': 5,\n 'max_container_width': 9,\n 'default_num_samples': 20,\n 'fixed_floor': False,\n 'floor_height': 3,\n 'infinite_position_domain': False,\n 'frame': False, # indicates presence of PixelWorld frame\n 'frame_color': PURPLE,\n 'padding': 0, # padding around outside edge\n 'colors': COLORS.values(), \n 'check_overlap': True,\n 'allow_pushable': False, # Whether to allow objects the option of being pushable\n 'allow_targets': False, # Whether to allow use of the is_target attribute\n 'add_self': True,\n 'make_self_red_pixel': True,\n 'self_color_is_unique': False,\n 'objects_are_white': False,\n 'objects_are_small_blobs': False,\n 'self_grips': False, # True if the self can grip/ungrip other objects\n }\n return default_settings",
"def __init__(self, name, quantity, shoppinglist=None):\n super().__init__()\n self.name = name\n self.quantity = quantity\n if shoppinglist:\n self.shoppinglist = shoppinglist",
"def __init__(self, all_products_packs={}, user_product_qty={}):\n self.all_products_packs = all_products_packs\n self.user_product_qty = user_product_qty\n self.order = {}\n self.get_order(printing=False)",
"def restore_defaults(self):\n\n # Set default values for each of the pysat provided values. Set\n # all but the last parameter directly. Set last using __setitem__\n # to trigger a file write.\n keys = list(self.defaults.keys())\n for key in keys:\n self.data[key] = self.defaults[key]\n\n # Trigger a file write\n self.store()\n\n return",
"def get_default_investing_settings(self):\n investing = copy.deepcopy(self.default_investing)\n investing['filters'] = Filter()\n return investing",
"def set_defaults( ):\n __param=__default"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Build and deploy the project and give the log to Search4Ejb
|
def add_components(cls, project_path):
old_path = os.getcwd()
os.chdir(project_path)
# print("begin mvn clean package"+absoluteProjectPath)
# subprocess.call(["mvn", "clean", "package"], shell = True)
# print("end mvn clean package")
print("Veuillez deployer l'ear")
log = input("Saisissez le chemin vers le fichier de log : ")
f = open(log, "r")
content = f.read()
f.close()
os.chdir(old_path)
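        # parse the deployment log with Search4Ejb for each known component
        # file path, then update the JNDI names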
for path in cls.componentsFilePath:
Search4Ejb.parse_log(content, path)
WebMigration.update_jndi()
|
[
"def deploy_eis_app():",
"def build():\n if not os.path.exists(\"build\"):\n os.mkdir(\"build\")\n local(\"date >> build/log\")\n local(\"python setup.py sdist >> build/log\")\n local(\"python setup.py bdist_wheel >> build/log\")",
"def main():\n \n jobInfo = jenkinsBase(jenkinsUrl)\n dbConn,cursor = mysqlConnect()\n \n updateProjectIndB(jobInfo, dbConn, cursor)\n buildsInfo(jobInfo, dbConn, cursor)",
"def build_engine(setting):\n logger.info('Starting building of new engine')\n\n logger.info('Finish building of new engine')",
"def deploy_project():\n virtualenv.virtualenv_create()\n make_clone()\n\n virtualenv.pip_install(env.conf.PIP_REQUIREMENTS, restart=False)\n\n setup_web_server()\n update_django_config()\n\n dj_cmd.syncdb()\n dj_cmd.migrate()",
"def build(self):\n\n build_dir = os.path.join(self.path, 'build')\n\n self.logger.info('Build started...')\n\n self.run_command('rm -rf %s' % self.path)\n self.run_command('git clone %s %s' % (GIT, self.path))\n self.run_command('mkdir -p %s' % build_dir)\n self.run_command('cmake ..', build_dir)\n self.run_command('make procam -j4', build_dir)",
"def main():\n\n parser = argparse.ArgumentParser(\n description='Perform initial loading of build database from manifests'\n )\n parser.add_argument('-c', '--config', dest='add_proj_config',\n help='Configuration file for build database loader',\n default='build_db_loader_conf.ini')\n\n args = parser.parse_args()\n\n # Check configuration file information\n add_proj_config = configparser.ConfigParser()\n add_proj_config.read(args.add_proj_config)\n\n if any(key not in add_proj_config for key in ['build_db', 'repos']):\n print(\n f'Invalid or unable to read config file {args.add_proj_config}'\n )\n sys.exit(1)\n\n db_info = add_proj_config['build_db']\n db_required_keys = ['db_uri', 'username', 'password']\n\n if any(key not in db_info for key in db_required_keys):\n print(\n f'One of the following DB keys is missing in the config file:\\n'\n f' {\", \".join(db_required_keys)}'\n )\n sys.exit(1)\n\n repo_info = add_proj_config['repos']\n repo_required_keys = ['manifest_dir', 'manifest_url', 'repo_basedir']\n\n if any(key not in repo_info for key in repo_required_keys):\n print(\n f'One of the following repo keys is missing in the '\n f'config file:\\n {\", \".join(repo_required_keys)}'\n )\n sys.exit(1)\n\n # Now run through all the manifests in build-manifests and update\n # the database with new project documents\n add_projects = AddProject(db_info, repo_info)\n last_manifest = [] # Start from beginning\n manifest_repo = repo_info['manifest_dir']\n\n print('Checking out/updating the build-manifests repo...')\n cbutil_git.checkout_repo(manifest_repo, repo_info['manifest_url'])\n\n manifest_walker = cbutil_git.ManifestWalker(manifest_repo, last_manifest)\n\n for commit_info, manifest_xml in manifest_walker.walk():\n try:\n manifest_info = add_projects.get_manifest_info(manifest_xml)\n except mf_parse.InvalidManifest as exc:\n # If the file is not an XML file, simply move to next one\n print(f'{commit_info[0]}: {exc}, skipping...')\n continue\n\n add_projects.update_project_documents(manifest_info)",
"def deploy():\n test()\n require('hosts', provided_by=servers)\n require('path')\n env.release = time.strftime('%Y-%m-%d-%H-%M')\n upload_tar_from_git()\n install_requirements()\n install_site()\n symlink_current_release()\n migrate()\n collect_static()\n restart_webserver()\n remove_remote_package()",
"def deploy_pypi():\n test()\n register_pypi()\n deploy_src()\n deploy_eggs()",
"def _buildLog(self, result):\r\n if result[0] == \"Build Success\":\r\n if self._buildWindow is not None:\r\n self._buildWindow.destroy()\r\n self._log(result[1])",
"def deploy_webapp():\r\n # require('settings', provided_by=[production, staging])\r\n # require('branch', provided_by=[stable, master, branch])\r\n\r\n # with settings(warn_only=True):\r\n # maintenance_up()\r\n\r\n upload_and_explode_code_bundle()\r\n # Apply requirements.txt, if it exists\r\n # _install_pip_requirements()\r\n \r\n # Restart the web server with the latest code\r\n stop_webserver()\r\n symlink_current_release()\r\n # maintenance_down()\r\n start_webserver()",
"def deploy():\n local('appcfg.py update src', capture=False)",
"def construct(self):\n slab_logger.log(15, 'Constructing the repo project')\n try:\n if self.check():\n return\n self.create_project()\n self.download_template()\n self.instantiate_template()\n self.create_nimbus()\n except Exception:\n raise",
"def deploy(tag=None):\n require('hosts')\n require('path')\n\n git_update_repo(tag)\n setup_virtualenv()\n sync_db()\n deploy_static()",
"def run():\n\n filename = modfile.get('modid') + '-' + modfile.get('version') + '.jar'\n compiled_file = OUTPUT_FOLDER + 'build/libs/' + filename\n\n log(f\"Compiling {filename}...\")\n os.system(f'cd {OUTPUT_FOLDER} && gradlew build --warn')\n\n try:\n filepath = JAR_FOLDER + filename\n os.makedirs(JAR_FOLDER, exist_ok=True)\n if os.path.exists(filepath):\n os.remove(filepath)\n os.rename(compiled_file, filepath)\n log(\"Successfully compiled \" + filename)\n log(\"Find file in ./\" + filepath)\n except:\n log(\"Compilation failed.\")",
"def build():\n script_dir = os.path.dirname(os.path.abspath(__file__))\n\n ext_dir = os.path.join(script_dir, \"ext\")\n if not os.path.exists(ext_dir):\n os.makedirs(ext_dir)\n tools.runInDirectory(ext_dir, initGitModules)\n\n rv_monitor_dir = os.path.join(ext_dir, \"rv-monitor\")\n if not os.path.exists(os.path.join(rv_monitor_dir, \"target\", \"release\", \"rv-monitor\", \"lib\", \"rv-monitor.jar\")):\n tools.runInDirectory(rv_monitor_dir, buildRvMonitor)",
"def deploy(version_tag=None):\n supervised_process = SITE_SETTINGS['supervised_process']\n\n #dust()\n stop(supervised_process)\n update(commit=version_tag)\n setup()\n collectstatic()\n start(supervised_process)\n #undust()",
"def build(self):\n self.create_dir()\n self.create_init()\n self.create_config()\n self.build_code()\n self.build_xml()",
"def main():\n\n parser = argparse.ArgumentParser(\n description='Update documents in build database'\n )\n parser.add_argument('-c', '--config', dest='check_build_config',\n help='Configuration file for build database loader',\n default='check_builds.ini')\n parser.add_argument('metadata_dir', type=Path,\n help='Path to product-metadata directory')\n parser.add_argument('-n', '--dryrun', action='store_true',\n help=\"Only check, don't update database or send email\")\n parser.add_argument('-v', '--verbose', action='store_true',\n help=\"Enable additional debug output\")\n args = parser.parse_args()\n\n if args.verbose:\n logger.setLevel(logging.DEBUG)\n dryrun = args.dryrun\n metadata_dir = args.metadata_dir\n if not metadata_dir.exists():\n logger.error(\n f'product-metadata path {metadata_dir} does not exist'\n )\n sys.exit(1)\n\n # Check configuration file information\n check_build_config = configparser.ConfigParser()\n check_build_config.read(args.check_build_config)\n\n if any(key not in check_build_config\n for key in ['build_db', 'missing_builds']):\n logger.error(\n f'Invalid or unable to read config file {args.check_build_config}'\n )\n sys.exit(1)\n\n db_info = check_build_config['build_db']\n db_required_keys = ['db_uri', 'username', 'password']\n\n if any(key not in db_info for key in db_required_keys):\n logger.error(\n f'One of the following DB keys is missing in the config file:\\n'\n f' {\", \".join(db_required_keys)}'\n )\n sys.exit(1)\n\n miss_info = check_build_config['missing_builds']\n miss_required_keys = [\n 'receivers', 'lb_base_dir', 'lb_base_url', 'smtp_server'\n ]\n\n if any(key not in miss_info for key in miss_required_keys):\n logger.error(\n f'One of the following DB keys is missing in the config file:\\n'\n f' {\", \".join(miss_required_keys)}'\n )\n sys.exit(1)\n\n # Find builds to check\n db = cbutil_db.CouchbaseDB(db_info)\n builds = db.query_documents(\n 'build',\n where_clause=\"ifmissingornull(metadata.builds_complete, 'n/a')='n/a'\"\n )\n\n # Go through builds and based on age and whether certain metadata\n # values (builds_complete and email_notification) are set, determine\n # proper course of action. 
The basic process is as follows:\n # - Get age of build\n # - If build age is over 28 days old, simply mark as unknown\n # (files already gone from latestbuilds)\n # - If the product isn't in the product config data, skip\n # - Generate necessary file list, then get current file list from\n # latestbuilds (mounted via NFS)\n # - Check to see if any files in needed list aren't in current list:\n # - If not, mark build complete and continue\n # - Else if there are and build age is over 2 hours, check to\n # see if email's been sent previously and send notification\n # if not, marking email as sent\n # - And if there are and build age is also over 12 hours, mark\n # as incomplete and continue\n for build in builds:\n build_age = int(time.time()) - build.timestamp\n\n if build_age > 28 * 24 * 60 * 60: # 28 days\n dryrun or build.set_metadata('builds_complete', 'unknown')\n continue\n\n template_dir = metadata_dir / build.product / \"check_builds\"\n if not template_dir.exists():\n logger.debug(f\"Skipping build for unknown product {build.product}\")\n continue\n\n prodver_path = f'{build.product}/{build.release}/{build.build_num}'\n lb_dir = f'{miss_info[\"lb_base_dir\"]}/{prodver_path}/'\n lb_url = f'{miss_info[\"lb_base_url\"]}/{prodver_path}/'\n\n templates = list(\n filter(\n lambda x: x.name.endswith(('.yaml.j2', '.json')),\n template_dir.glob(\"pkg_data.*\")\n )\n )\n if len(templates) < 1:\n logger.error(f\"Product {build.product} has no pkg_data templates\")\n sys.exit(1)\n if len(templates) > 1:\n logger.error(f\"Found multiple possible pkg_data files for {build.product}!\")\n sys.exit(1)\n logger.debug(f\"Using template {templates[0]} for {build.product}\")\n\n logger.info(f\"***** Checking {build.product} {build.release} build {build.version}-{build.build_num} ({build_age} seconds old)\")\n\n needed_files = generate_filelist(\n build.product, build.release, build.version, build.build_num,\n templates[0]\n )\n try:\n existing_files = set(os.listdir(lb_dir))\n except FileNotFoundError:\n existing_files = set()\n missing_files = list(needed_files.difference(existing_files))\n\n if not missing_files:\n logger.info(\"All expected files found - build complete!\")\n dryrun or build.set_metadata('builds_complete', 'complete')\n continue\n\n if build_age > 2 * 60 * 60: # 2 hours\n logger.info(\"Still incomplete after 2 hours; missing files:\")\n for missing in missing_files:\n logger.info(f\" - {missing}\")\n if not build.metadata.setdefault('email_notification', False):\n curr_bld = \\\n f'{build.product}-{build.version}-{build.build_num}'\n message = {\n 'subject': f'Build {curr_bld} not complete after 2 hours',\n 'body': generate_mail_body(lb_url, missing_files)\n }\n receivers = miss_info['receivers'].split(',')\n send_email(miss_info['smtp_server'], receivers, message, dryrun)\n dryrun or build.set_metadata('email_notification', True)\n else:\n logger.info(\"Email previously sent\")\n else:\n logger.info(\"Incomplete but less than 2 hours old\")\n\n if build_age > 12 * 60 * 60: # 12 hours\n logger.info(\"Build incomplete after 12 hours - marking incomplete\")\n dryrun or build.set_metadata('builds_complete', 'incomplete')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that get_repo_data raises ValueError exceptions for invalid input
|
def test_input_validation(self):
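        # a whitespace-only or non-existent user ID should raise ValueError,
        # while a valid user ID should return data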
with self.assertRaises(ValueError):
get_repo_data(" ")
with self.assertRaises(ValueError):
get_repo_data("nonExistentUserID")
self.assertTrue(get_repo_data("derobertsw"))
|
[
"def test_get_repo_data(self):\n self.assertEqual(get_repo_data(\"derobertsw\"),\n [('GitHubAPI567', 4), ('home', 2), ('ssw567_hw2_triangle', 9), ('Student-Repository', 30)])",
"def test_repo_get_contents(self):\n pass",
"def test_invalid_language(self):\n datasite = self.get_repo()\n with self.assertRaises(ValueError):\n datasite.search_entities('abc', 'invalidlanguage')",
"def test_api_v3_repositories_get(self):\n pass",
"def test_decode_data_errors(self):\n self.assert_raises(ValueError, self.import_cls.decode_data, 'hello', None)",
"def test_organization_valueerror_not_mutate():\n organization = Organization()\n data = {\n 'is_organization': False\n }\n assert_raises(ValueError, organization.format_data_set, data)",
"def test_repository_with_invalid_repo(self):\n self.instance.repository(\"user\", None)\n\n assert self.session.get.called is False",
"def test_exception_raised_if_repo_is_not_a_git_repository(tmp_path):\n with pytest.raises(InvalidGitRepositoryError):\n zenml.core.git_wrapper.GitWrapper(tmp_path)",
"def test_pull_git_repository_and_refresh_data_with_valid_data(self, mock_git_repo):\n with tempfile.TemporaryDirectory() as tempdir:\n with self.settings(GIT_ROOT=tempdir):\n\n def populate_repo(path, _url):\n os.makedirs(path)\n # Load device-types data for git-repository\n os.makedirs(os.path.join(path, \"device-types\"))\n os.makedirs(os.path.join(path, \"device-types\", \"Cisco\"))\n with open(os.path.join(path, \"device-types\", \"Cisco\", \"fake.yaml\"), \"w\") as file:\n yaml.dump(\n {\"manufacturer\": \"Cisco\", \"model\": \"Fake Model\"},\n file,\n )\n return mock.DEFAULT\n\n mock_git_repo.side_effect = populate_repo\n mock_git_repo.return_value.checkout.return_value = self.COMMIT_HEXSHA\n\n pull_git_repository_and_refresh_data(self.repo.pk, self.dummy_request, self.job_result)\n\n self.assertEqual(\n self.job_result.status,\n JobResultStatusChoices.STATUS_COMPLETED,\n self.job_result.data,\n )\n\n # Make sure ManufacturerImport was successfully loaded from file\n manufacturer_import = ManufacturerImport.objects.get(name=\"Cisco\")\n self.assertIsNotNone(manufacturer_import)\n\n # Make sure DeviceTypeImport was successfully loaded from file\n device_type = DeviceTypeImport.objects.get(filename=\"fake.yaml\")\n self.assertIsNotNone(device_type)\n self.assertEqual(device_type.name, \"Fake Model\")\n self.assertEqual(device_type.manufacturer, manufacturer_import)\n\n # Delete the GitRepository (this is a noop)\n self.repo.delete()",
"def test_failing_command_data(self):\n output = StringIO()\n self.assertEqual(Company.objects.count(), 0)\n\n kwarguments = {\"filepath\": DATA_FILE, \"stdout\": output}\n\n with patch(\n \"companies.management.commands.import_companies.Command._collect_data\",\n return_value=None,\n ):\n call_command(\"import_companies\", **kwarguments)\n\n self.assertEqual(Company.objects.count(), 0)\n self.assertIn(\n f\"Could not collect data from {DATA_FILE} properly or the \"\n f\"file is empty\",\n output.getvalue(),\n )",
"def test_get_repositories(self):\n unknown_repo = Repository.objects.create(\n name=\"my-org/some-repo\", organization_id=self.organization.id\n )\n # create a legacy repo tied to a plugin, not integration\n Repository.objects.create(\n name=\"plugin-repo\", provider=\"github\", organization_id=self.organization.id\n )\n self.assert_setup_flow()\n integration = Integration.objects.get(provider=self.provider.key)\n installation = integration.get_installation(self.organization.id)\n repos = installation.get_repositories()\n\n assert repos == [{\"name\": \"my-org/some-repo\", \"identifier\": str(unknown_repo.id)}]",
"def test_dig_absent_data(hello_data):\n project = 'test_project'\n dataset = 'test_dataset'\n version = 'v0'\n\n # Bucket just initialised\n with pytest.raises(exceptions.VersionDoesNotExist):\n shovel.dig(project, dataset, version, str(hello_data))\n shovel.bury(project, dataset, version, str(hello_data))\n hello_data.remove()\n shovel.dig(project, dataset, version, str(hello_data))\n\n # Missing project\n with pytest.raises(exceptions.VersionDoesNotExist):\n shovel.dig(project + '_x', dataset, version, str(hello_data))\n shovel.bury(project + '_x', dataset, version, str(hello_data))\n hello_data.remove()\n shovel.dig(project + '_x', dataset, version, str(hello_data))\n\n # Missing dataset\n with pytest.raises(exceptions.VersionDoesNotExist):\n shovel.dig(project, dataset + '_x', version, str(hello_data))\n shovel.bury(project, dataset + '_x', version, str(hello_data))\n hello_data.remove()\n shovel.dig(project, dataset + '_x', version, str(hello_data))\n\n # Missing version\n with pytest.raises(exceptions.VersionDoesNotExist):\n shovel.dig(project, dataset, 'v1', str(hello_data))\n shovel.bury(project, dataset, 'v1', str(hello_data))\n hello_data.remove()\n shovel.dig(project, dataset, 'v1', str(hello_data))",
"def test_repository_with_invalid_user_and_repo(self):\n self.instance.repository(None, None)\n\n assert self.session.get.called is False",
"def test_person_valueerror_not_mutate():\n person = Person()\n data = {\n 'is_organization': True\n }\n assert_raises(ValueError, person.format_data_set, data)",
"def test_read_dataset_config_not_found(self):\n\n\n #--------------------------------------------------------------------------------------------------------\n # Assert that reading not existent dataset config raises an exception\n #--------------------------------------------------------------------------------------------------------\n\n with self.assertRaises(NotFound):\n dataset_config = self.ingestion_cli.read_dataset_config('abracadabra')",
"def test_multiple_repos_bad_default() -> None:\n from valiant.config import Config\n from valiant.repositories import RepositoryConfiguration\n\n with pytest.raises(ValueError):\n Config.prepare_repository_configurations(\n repository_configurations=[\n RepositoryConfiguration(\n name=\"test\", base_url=\"https://www.example.com\"\n ),\n RepositoryConfiguration(\n name=\"test2\", base_url=\"https://www.differentexample.com\"\n ),\n ],\n default_repository=\"otter\",\n )",
"def test_load_non_existant_data(self):\n dm = DataManager(data_endpoint='test/data/basic.csv', target_column='species')\n\n # Should raise an exception when trying to load a dataset that doesn't exist.\n with self.assertRaises(botocore.exceptions.ClientError):\n dm.load()",
"def test_none_input_data(self):\n invalid_struct = self.struct \n invalid_struct.diffusion_data = np.array([])\n invalid_struct.structural_data = np.array([])\n self.assertRaises(ValueError, module_01.run_module, invalid_struct)",
"def test_with_scmclient_errors_from_get_repository_info(self):\n tempdir = make_tempdir()\n git_dir = os.path.realpath(os.path.join(tempdir, 'git-repo'))\n\n e = Exception('oh no')\n\n execute(['git', 'init', git_dir])\n\n self.spy_on(GitClient.get_repository_info,\n owner=GitClient,\n op=kgb.SpyOpRaise(e))\n\n scan_result = scan_scmclients_for_path(\n path=git_dir,\n scmclient_kwargs={\n 'options': {},\n })\n\n self.assertFalse(scan_result.found)\n self.assertIsNone(scan_result.local_path)\n self.assertIsNone(scan_result.scmclient)\n\n # Check the candidates.\n self.assertEqual(len(scan_result.candidates), 1)\n\n candidate = scan_result.candidates[0]\n self.assertEqual(candidate.local_path, git_dir)\n self.assertIsInstance(candidate.scmclient, GitClient)\n\n # Check the errors.\n self.assertEqual(scan_result.scmclient_errors, {\n 'git': e,\n })"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that get_repo_data returns correct data
|
def test_get_repo_data(self):
self.assertEqual(get_repo_data("derobertsw"),
[('GitHubAPI567', 4), ('home', 2), ('ssw567_hw2_triangle', 9), ('Student-Repository', 30)])
|
[
"def test_repo_get_contents(self):\n pass",
"def test_api_v3_repositories_get(self):\n pass",
"def test_LocalRepo_get_data():\n\n # Create module to upload\n local_repo = cpenv.LocalRepo(\"test_modules\", data_path(\"modules\"))\n spec = local_repo.find(\"testmod-0.2.0\")[0]\n\n data = local_repo.get_data(spec)\n\n assert data[\"name\"] == \"testmod\"\n assert data[\"version\"] == \"0.2.0\"\n assert data[\"description\"] == \"A test module\"",
"def test_repo_get_contents_list(self):\n pass",
"def test_repo_get_git_hook(self):\n pass",
"def test_pull_git_repository_and_refresh_data_with_valid_data(self, mock_git_repo):\n with tempfile.TemporaryDirectory() as tempdir:\n with self.settings(GIT_ROOT=tempdir):\n\n def populate_repo(path, _url):\n os.makedirs(path)\n # Load device-types data for git-repository\n os.makedirs(os.path.join(path, \"device-types\"))\n os.makedirs(os.path.join(path, \"device-types\", \"Cisco\"))\n with open(os.path.join(path, \"device-types\", \"Cisco\", \"fake.yaml\"), \"w\") as file:\n yaml.dump(\n {\"manufacturer\": \"Cisco\", \"model\": \"Fake Model\"},\n file,\n )\n return mock.DEFAULT\n\n mock_git_repo.side_effect = populate_repo\n mock_git_repo.return_value.checkout.return_value = self.COMMIT_HEXSHA\n\n pull_git_repository_and_refresh_data(self.repo.pk, self.dummy_request, self.job_result)\n\n self.assertEqual(\n self.job_result.status,\n JobResultStatusChoices.STATUS_COMPLETED,\n self.job_result.data,\n )\n\n # Make sure ManufacturerImport was successfully loaded from file\n manufacturer_import = ManufacturerImport.objects.get(name=\"Cisco\")\n self.assertIsNotNone(manufacturer_import)\n\n # Make sure DeviceTypeImport was successfully loaded from file\n device_type = DeviceTypeImport.objects.get(filename=\"fake.yaml\")\n self.assertIsNotNone(device_type)\n self.assertEqual(device_type.name, \"Fake Model\")\n self.assertEqual(device_type.manufacturer, manufacturer_import)\n\n # Delete the GitRepository (this is a noop)\n self.repo.delete()",
"def test_get_repositories(self):\n unknown_repo = Repository.objects.create(\n name=\"my-org/some-repo\", organization_id=self.organization.id\n )\n # create a legacy repo tied to a plugin, not integration\n Repository.objects.create(\n name=\"plugin-repo\", provider=\"github\", organization_id=self.organization.id\n )\n self.assert_setup_flow()\n integration = Integration.objects.get(provider=self.provider.key)\n installation = integration.get_installation(self.organization.id)\n repos = installation.get_repositories()\n\n assert repos == [{\"name\": \"my-org/some-repo\", \"identifier\": str(unknown_repo.id)}]",
"def test_get_data(self):\n\n\t\t# Test to go here when best approach is decided for making requests.",
"def test_repo_transfer(self):\n pass",
"def test_repo_edit(self):\n pass",
"def test_get_by_repo(self):\n # Comment from a public repository.\n repo1 = self.create_repository(name='repo1', public=True)\n review_request1 = self.create_review_request(publish=True,\n repository=repo1)\n review1 = self.create_review(review_request1, publish=True)\n comment1 = self._create_diff_comment(review_request1, review1)\n\n # Comment from a private repository that the requester has\n # access to from being listed in the repository's users list.\n repo2 = self.create_repository(name='repo2', public=False)\n repo2.users.add(self.user)\n review_request2 = self.create_review_request(publish=True,\n repository=repo2)\n review2 = self.create_review(review_request2, publish=True)\n comment2 = self._create_diff_comment(review_request2, review2)\n\n # An invite-only review group that the requester has access to.\n group_accessible = self.create_review_group(invite_only=True)\n group_accessible.users.add(self.user)\n\n # Comment from a private repository that the requester has\n # access to through being a member of a targeted review group.\n repo3 = self.create_repository(name='repo3', public=False)\n repo3.review_groups.add(group_accessible)\n review_request3 = self.create_review_request(publish=True,\n repository=repo3)\n review3 = self.create_review(review_request3, publish=True)\n comment3 = self._create_diff_comment(review_request3, review3)\n\n # Comment from a private repository that the requester does\n # not have access to.\n repo4 = self.create_repository(name='repo4', public=False)\n review_request4 = self.create_review_request(publish=True,\n repository=repo4)\n review4 = self.create_review(review_request4, publish=True)\n self._create_diff_comment(review_request4, review4)\n\n # Comment from a private repository that the requester has access\n # to through being a member of a targeted review group and\n # being listed on the repository's users list.\n repo5 = self.create_repository(name='repo5', public=False)\n repo5.review_groups.add(group_accessible)\n repo5.users.add(self.user)\n review_request5 = self.create_review_request(publish=True,\n repository=repo5)\n review5 = self.create_review(review_request5, publish=True)\n comment5 = self._create_diff_comment(review_request5, review5)\n\n # An invite-only review group that the requester does not have\n # access to.\n group_inaccessible = self.create_review_group(invite_only=True)\n\n # Comment from a private repository that targets an invite-only review\n # group, but that the requester has access to from being listed in the\n # repository's users list.\n repo6 = self.create_repository(name='repo6', public=False)\n repo6.review_groups.add(group_inaccessible)\n repo6.users.add(self.user)\n review_request6 = self.create_review_request(publish=True,\n repository=repo6)\n review6 = self.create_review(review_request6, publish=True)\n comment6 = self._create_diff_comment(review_request6, review6)\n\n # Comment from a private repository that targets an invite-only review\n # group and that the requester does not have access to.\n repo7 = self.create_repository(name='repo7', public=False)\n repo7.review_groups.add(group_inaccessible)\n review_request7 = self.create_review_request(publish=True,\n repository=repo7)\n review7 = self.create_review(review_request7, publish=True)\n self._create_diff_comment(review_request7, review7)\n\n rsp = self.api_get(get_root_diff_comment_list_url(), {},\n expected_mimetype=review_diff_comment_list_mimetype)\n rsp_items = rsp[self.resource.list_result_key]\n\n self.assertEqual(rsp['stat'], 'ok')\n 
self.assertEqual(rsp['total_results'], 5)\n self.compare_item(rsp_items[0], comment1)\n self.compare_item(rsp_items[1], comment2)\n self.compare_item(rsp_items[2], comment3)\n self.compare_item(rsp_items[3], comment5)\n self.compare_item(rsp_items[4], comment6)",
"def test_user_list_repos(self):\n pass",
"def test_api_v3_repositories_repo_public_id_get(self):\n pass",
"def test_get_repo_build_super_user(self):\n pass",
"def getGitData(username):\n\n # fetch access token for given username\n conn = create_connection('test.db')\n query = f\"SELECT token from Token WHERE g_username='{username}';\"\n result = execute_read_query(conn, query)\n token = (result[0])[0]\n \n # appropriate header for GitHub API '/usr' endpoint\n headers = {'Authorization': f\"token {token}\"}\n usrUrl = \"https://api.github.com/user\"\n res = requests.get(url=usrUrl, headers=headers)\n res = res.json()\n\n # fetch required details from response\n response = {}\n response['id'] = res['login']\n response['followers'] = res['followers']\n response['public_repos'] = res['public_repos']\n\n # request for fetching repository details\n repoUrl = f\"https://api.github.com/users/{username}/repos\"\n res = requests.get(url=repoUrl, headers=headers)\n repo_data = res.json()\n\n # store all repository details in lst\n lst = []\n stars = 0\n languages = {}\n for repo in repo_data:\n obj = {}\n obj['name'] = repo['name']\n obj['stars'] = repo['stargazers_count']\n obj['language'] = repo['language']\n obj['description'] = repo['description']\n obj['forks_count'] = repo['forks_count']\n\n key = repo['language']\n if key is not None:\n key = str(repo['language'])\n if key in languages:\n languages[key] += 1\n else:\n languages[key] = 0\n stars += obj['stars']\n lst.append(obj)\n\n # sort all repos on number of stars\n def func(item): return item[1]\n languages_list = [k for k, v in sorted(languages.items(), key=func)]\n languages_list.reverse()\n response['stars'] = stars\n response['repo_data'] = lst\n response['languages'] = languages_list\n\n return response",
"def test_admrepodata_from_local():\n strRepoPath = \"./test_repo\"\n strDataset=\"test_dataset\"\n strVersion=\"v1\"\n fVerbose = False\n\n admrepo = AutodatamanRepoDataMD()\n admrepo.from_local_repo(strRepoPath,strDataset,strVersion,fVerbose)\n\n with open(\"test_repo/test_dataset/v1/data.json\",\"r\") as ifs:\n jmeta = json.load(ifs)\n\n assert admrepo.num_files() == len(jmeta[\"_FILES\"])\n assert admrepo.m_strVersion == jmeta[\"_DATA\"][\"version\"]\n assert admrepo.m_strDate == jmeta[\"_DATA\"][\"date\"]\n assert admrepo.m_strSource == jmeta[\"_DATA\"][\"source\"]",
"def test_full(self):\n github.input = lambda x: \"xmaayy\"\n data = get_hub(\"xmaayy\")\n assert data != []\n print(data[1].keys())\n repolist = [repo[\"name\"] for repo in data]\n assert repolist == [\n \"Alexandria\",\n \"cipher\",\n \"CUHackAPIO\",\n \"DarkBot\",\n \"megacrypt.js\",\n \"MGAN\",\n \"NoFace\",\n \"polybar\",\n \"SFMLinstall\",\n \"SubHelper\",\n \"UpStat\",\n \"Website\",\n \"xmaayy.github.io\",\n ]",
"async def test_include_repository(self):\n self.set_source_parameter(\"repositories_to_include\", \"other_repo\")\n response = await self.collect_data()\n self.assert_measurement(response, value=\"0\", entities=[])",
"async def test_api_store_repositories(api_client: TestClient, repository: Repository):\n resp = await api_client.get(\"/store/repositories\")\n result = await resp.json()\n\n assert result[\"data\"][0][\"slug\"] == repository.slug"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize logging for the repository update. Logging is quite important, as a full global update can take many hours and be totally unattended (run from cron, etc.), so if something goes wrong we need to be able to find out what went wrong.
|
def init_logging(log_folder_path):
# first try to make sure the logging folder actually exists
if not utils.createFolderPath(log_folder_path):
print("ERROR: failed to create logging folder in: %s" % log_folder_path)
return
else:
print("initializing logging")
# create main logger for the repo update
logger = logging.getLogger('repo')
logger.setLevel(logging.DEBUG)
# create a summary logger that logs everything
# (this is mainly done so that we can easily correlate
# what happened when during the repository update)
summary_file_log = logging.FileHandler(os.path.join(log_folder_path, SUMMARY_LOG))
summary_file_log.setLevel(logging.DEBUG)
# create console handler that also logs everything
# (this will be in the default tmux pane, the other panes will have
# tail -f running on the pool-specific log files)
summary_console_log = logging.StreamHandler()
summary_console_log.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
summary_file_log.setFormatter(formatter)
summary_console_log.setFormatter(formatter)
    # add the handlers to the summary logger
logger.addHandler(summary_file_log)
logger.addHandler(summary_console_log)
# next we create the pool specific loggers and add a file handler for each of them
source_logger = logging.getLogger('repo.source')
process_logger = logging.getLogger('repo.process')
package_logger = logging.getLogger('repo.package')
publish_logger = logging.getLogger('repo.publish')
source_file_log = logging.FileHandler(os.path.join(log_folder_path, SOURCE_LOG))
process_file_log = logging.FileHandler(os.path.join(log_folder_path, PROCESSING_LOG))
package_file_log = logging.FileHandler(os.path.join(log_folder_path, PACKAGING_LOG))
publish_file_log = logging.FileHandler(os.path.join(log_folder_path, PUBLISHING_LOG))
source_file_log.setLevel(logging.DEBUG)
process_file_log.setLevel(logging.DEBUG)
package_file_log.setLevel(logging.DEBUG)
publish_file_log.setLevel(logging.DEBUG)
source_file_log.setFormatter(formatter)
process_file_log.setFormatter(formatter)
package_file_log.setFormatter(formatter)
publish_file_log.setFormatter(formatter)
source_logger.addHandler(source_file_log)
process_logger.addHandler(process_file_log)
package_logger.addHandler(package_file_log)
publish_logger.addHandler(publish_file_log)
logger.info("logging initialized")
|
[
"def init_logger(self):\n logger.Reinitialize(level=self.log_level, logToFileAtSpecifiedPath=self.log_file)",
"def _init_logger(self):\n #self._logger = logger_factory.make_logger(__name__)",
"def logging_init():\n # Default logging levels. These can be overridden when the config file is loaded.\n logging.getLogger().setLevel(logging.WARNING)\n logging.getLogger('neocommon').setLevel(logging.INFO)\n logging.getLogger('fetch').setLevel(logging.INFO)\n\n # Add logging handlers\n logging.getLogger().addHandler(_LOG_HANDLER)",
"def log_init(self):\n try:\n self._logfile = open('/var/log/pnpuppettester', \"a\")\n self._logfile.write(\"Log stream opened at %s\\n\" % self.time())\n return 0\n except (OSError, IOError):\n self.logging = False\n self.message('error',\n 'Log file could not be opened, disabling logging')\n return -1",
"def __init__(self):\n self._logger = logging.getLogger(__name__)",
"def _set_logging(self):\n logging.basicConfig(**self.settings[\"general\"][\"logging\"])\n log.info(\n \"Setting logging config: {!r}\".format(self.settings[\"general\"][\"logging\"])\n )",
"def update_logging(self):\n # custom loggers passed into tcex would not have log_info method\n if not hasattr(self.tcex.logger, 'log_info'):\n return\n\n if self._default_args.tc_log_level is None:\n # some Apps use logging while other us tc_log_level. ensure tc_log_level is always\n # available.\n self._default_args.tc_log_level = self._default_args.logging\n\n self.tcex.logger.log_info(self._default_args)\n\n # add api handler\n if self._default_args.tc_token is not None and self._default_args.tc_log_to_api:\n self.tcex.logger.add_api_handler(level=self.tcex.default_args.tc_log_level)\n\n # add rotating log handler\n self.tcex.logger.add_rotating_file_handler(\n name='rfh',\n filename=self._default_args.tc_log_file,\n path=self._default_args.tc_log_path,\n backup_count=self._default_args.tc_log_backup_count,\n max_bytes=self._default_args.tc_log_max_bytes,\n level=self.tcex.default_args.tc_log_level,\n )\n\n # replay cached log events\n self.tcex.logger.replay_cached_events(handler_name='cache')",
"def init_logging():\n manager = qr.Manager().log_conf\n try:\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n if size == 1:\n manager.is_serial = True\n else:\n manager.is_serial = False\n manager.log_file_appendix = \".\"+str(rank)\n except:\n manager.is_serial = True\n manager.initialized = True",
"def setup_upgrade_logging():\n try:\n rootLogger = logging.getLogger()\n rootLogger.setLevel(logging.DEBUG)\n fh = RotatingFileHandler(LOGGING_FILENAME, maxBytes=LOGGING_MAX_BYTES, backupCount=LOGGING_MAX_FILES)\n fh.setFormatter(logging.Formatter('[%(asctime)s %(levelname)s %(name)s - %(threadName)s] %(message)s'))\n rootLogger.addHandler(fh)\n except:\n pass",
"def init_log(self):\n LOG.log('INFO', \"GAME STARTING!\")\n LOG.log('INFO', \"----Initial Pygame parameters----\")\n LOG.log('INFO', \"Game initial frames per second: \", self.fps)\n LOG.log('INFO', \"RESOLUTION: \", self.display.get_size())",
"def _init_log():\n global log\n\n orig_logger_cls = logging.getLoggerClass()\n logging.setLoggerClass(EyeLogger)\n try:\n log = logging.getLogger('eye')\n log._set_defaults()\n finally:\n logging.setLoggerClass(orig_logger_cls)\n\n return log",
"def setup(self):\n from Utilities.movoto.logger import MLogger\n self._mlogger = MLogger().getLogger(*self._args, **self._kw)",
"def setup_logging():\n client = logging.Client()\n client.get_default_handler()\n client.setup_logging()",
"def _init_logger(self):\n ProctorLoggerFactory.init('proctor',\n ProctorConfig.get_config_value('Proctor', 'console_log_level'),\n ProctorConfig.get_proctor_working_dir(),\n ProctorConfig.get_config_value('Proctor', 'logfile_name'))\n self._logger = ProctorLoggerFactory.getLogger()",
"def _initialize(self):\n if not os.path.exists(self._logdir):\n raise CollectorError(\"Log directory %s not exists\" % self._logdir)\n\n self.logger.info(\"Collector started, taking %s as parent directory\"\n \"for all job logs.\" % self._logdir)\n\n # clear old records\n JobRecord.objects.filter().delete()\n TrialRecord.objects.filter().delete()\n ResultRecord.objects.filter().delete()",
"def _init_logging(self):\n if sys.stdout.isatty():\n # We only use colorized logging when writing to TTYs, so we don't\n # bother initializing it then.\n colorama.init()\n\n root = logging.getLogger()\n\n if self.options.debug:\n handler = logging.StreamHandler()\n handler.setFormatter(self._create_formatter(\n 'DEBUG', '{color}>>>{reset} %(message)s'))\n handler.setLevel(logging.DEBUG)\n handler.addFilter(LogLevelFilter(logging.DEBUG))\n root.addHandler(handler)\n\n root.setLevel(logging.DEBUG)\n else:\n root.setLevel(logging.INFO)\n\n # Handler for info messages. We'll treat these like prints.\n handler = logging.StreamHandler()\n handler.setFormatter(self._create_formatter(\n 'INFO', '{color}%(message)s{reset}'))\n\n handler.setLevel(logging.INFO)\n handler.addFilter(LogLevelFilter(logging.INFO))\n root.addHandler(handler)\n\n # Handlers for warnings, errors, and criticals. They'll show the\n # level prefix and the message.\n levels = (\n ('WARNING', logging.WARNING),\n ('ERROR', logging.ERROR),\n ('CRITICAL', logging.CRITICAL),\n )\n\n for level_name, level in levels:\n handler = logging.StreamHandler()\n handler.setFormatter(self._create_formatter(\n level_name, '{color}%(levelname)s:{reset} %(message)s'))\n handler.addFilter(LogLevelFilter(level))\n handler.setLevel(level)\n root.addHandler(handler)\n\n logging.debug('RBTools %s', get_version_string())\n logging.debug('Python %s', sys.version)\n logging.debug('Running on %s', platform.platform())\n logging.debug('Home = %s', get_home_path())\n logging.debug('Current directory = %s', os.getcwd())",
"def __init__(self):\n self.logger = logging.getLogger('TwitterLogger')\n self.fh = logging.FileHandler('scraper_logs.log')\n self.set_level()\n self.set_formatter()\n self.add_handler()",
"def init(log_dir=None):\n global logger\n global file_handler\n global stderr_handler\n global orig_except_hook_handler\n logger.removeHandler(stderr_handler)\n logger.removeHandler(file_handler) \n\n # Here we get the flags using the FLAGS.FlagDict(), because the FLAGS [] op \n # imposes a requirement that flags have been parsed. We need to support\n # users who won't parse any command line args and need the default values.\n d = FLAGS.FlagDict()\n \n \n logtostderr = d['logtostderr'].value\n stderrthreshold = d['stderrthreshold'].value\n minloglevel = d['minloglevel'].value\n if log_dir is None:\n log_dir = d['log_dir'].value\n\n # If we redirect all logging to stderr, the --minloglevel flags controls \n # how much is output just like it did for the file output mode\n if logtostderr:\n stderr_handler = logging.StreamHandler(stream=sys.stderr)\n level = _glog_to_python_level(minloglevel)\n init_handler(stderr_handler, level) \n # In interactive mode, file logging and stderr logging levels can be set \n # independently by --minloglevel and --stderrthreshold, respectively \n else:\n filename = logfile_name(log_dir)\n file_handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1e6, backupCount=4, delay=True)\n level = _glog_to_python_level(minloglevel)\n init_handler(file_handler, level) \n stderr_handler = logging.StreamHandler(stream=sys.stderr)\n level = _glog_to_python_level(stderrthreshold)\n init_handler(stderr_handler, level)\n \n logger.setLevel(logging.DEBUG) # delegate filtering to each output handler\n orig_except_hook_handler = sys.excepthook\n sys.excepthook = log_uncaught_exceptions\n return",
"def init():\n global current_status\n global events\n events = [{'service_name': \"tontoapp\", 'status':'healthy', 'updated': datetime.now()}]\n logging.basicConfig(level=loglevel, format='%(asctime)s|%(levelname)s|%(funcName)s|%(message)s')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Move through the whole hit_id list and attempt to expire the HITs
|
def expire_all_unassigned_hits(self):
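        # only expire HITs that this run launched and that have not yet been completed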
for hit in view.all():
if not hit.complete and hit.hit_id in self.hit_ids:
print(hit.hit_id)
mturk_utils.expire_hit(mturk_config['is_sandbox'], hit.hit_id)
|
[
"def expire_and_dispose_hits(\n client: MTurkClient, hits: List[Dict[str, Any]], quiet: bool = False\n) -> List[Dict[str, Any]]:\n non_disposed_hits = []\n for h in tqdm(hits, disable=quiet):\n try:\n client.delete_hit(HITId=h[\"HITId\"])\n except Exception as e:\n client.update_expiration_for_hit(HITId=h[\"HITId\"], ExpireAt=datetime(2015, 1, 1))\n h[\"dispose_exception\"] = e\n non_disposed_hits.append(h)\n return non_disposed_hits",
"def update_expiration_for_hit(HITId=None, ExpireAt=None):\n pass",
"def delete_hit(HITId=None):\n pass",
"def __clean_up_expired_sessions():\n\n for session_id, session in sessions.items():\n if session.IsExpired():\n del sessions[session_id]",
"def remove_expired(self):\n self._tickets = {t for t in self._tickets if not self._is_expired(t)}",
"def remove_expired(self):\n while len(self.insertedTimes) != 0 and self.is_expired(self.insertedTimes[0]):\n key = self.insertedTimes[0].key\n if self.insertedTimesDict[key] == self.insertedTimes[0].insertion_time:\n del self.data[key]\n del self.insertedTimesDict[key]\n self.insertedTimes.popleft()",
"def evict_expired_access_tokens():\n # give a 5-minute buffer\n now = now_in_utc() - timedelta(minutes=5)\n RedditAccessToken.objects.filter(token_expires_at__lt=now).delete()",
"def _clean_cache(self):\n query = _AppEngineUtilities_Cache.all()\n query.filter('timeout < ', datetime.datetime.now())\n results = query.fetch(self.max_hits_to_clean)\n db.delete(results)\n #for result in results:\n # result.delete()",
"def test_hits_too_old(self):\n h1 = HitTracker().open()\n url = '/test_hits_too_old.py'\n\n ten_minutes_ago = datetime.datetime.now() - timedelta(minutes=11)\n for i in range(0, 5): # adding 5 hits from 10 minutes ago\n h1.add_hit(url, ts=ten_minutes_ago.timestamp())\n\n self.assertTrue(h1.num_hits_last_mins(10, url) == 0)\n h1.close()",
"def invalidate(self, pages):\n for p in pages:\n self.cache.invalidate(p)",
"def expire_routes(self):\n # TODO: fill this in!\n hosts_to_delete = []\n\n for host,entry in self.table.items():\n if entry.expire_time <= api.current_time(): #delete if equal to expiry time as well.\n hosts_to_delete.append(host)\n\n for host in hosts_to_delete:\n if self.POISON_EXPIRED: # added during poison expired update (stage 9)\n self.table[host] = TableEntry(dst=self.table[host].dst, port=self.table[host].port, latency=INFINITY,\n expire_time=self.table[host].expire_time)\n else:\n del self.table[host]\n self.s_log(\"Removed route to {} has expire time {}, time is {}\".format(host, entry.expire_time, api.current_time()))",
"def expire_data(self):\n expiry_time = current_time()-self.expire_locations_seconds\n Client._expire_from(self.locations, 'end_time', expiry_time)\n Client._expire_from(self.daily_ids_used, 'last_used', expiry_time)\n Client._expire_from(self.id_alerts, 'received_at', expiry_time)",
"def invalidate_caches():",
"def clean_expired(self):\n\t\tl_time = datetime.datetime.now() - datetime.timedelta(seconds = 600)\n\t\tself.get_query_set().filter(last_update__lt=l_time).delete()",
"def purge(self):\n for key, (expiry, _) in list(self._items.items()):\n if expiry < time():\n self._log.debug('Purging expired item %s', key)\n self._items.pop(key, None)",
"def garbageCollector(self):\n tcutoff = self.latest_event - TimeSpan(self.expirationtime)\n for evID in self.event_dict.keys():\n evt = self.cache.get(seiscomp3.DataModel.Event, evID)\n if self.event_dict[evID]['timestamp'] < tcutoff:\n self.event_dict.pop(evID)",
"def reclaim_id(self,object_id):\n self.recycled_ids.add(object_id)\n return self.recycled_ids",
"def expire_routes(self):\n # TODO: fill this in!\n toDelete = []\n if self.POISON_EXPIRED is True:\n for host, entry in self.table.items():\n if entry.has_expired:\n self.table[host] = TableEntry(host, entry.port, INFINITY, api.current_time())\n else:\n for host, entry in self.table.items():\n if entry.has_expired:\n toDelete.append(host)\n self.deleteRoutes(toDelete)",
"def timer_dead(self):\n self.stop_timer_dead()\n logger.debug(\"[%s] - Remove dead entries in cache\", self.__class__.__name__)\n try:\n now = datetime.datetime.now()\n dead_time = now - datetime.timedelta(seconds=self._cache_dead_ttl)\n for key in list(self._cache.keys()):\n self._lock.acquire()\n if 'last_update' not in self._cache[key]:\n self._cache[key]['last_update'] = now\n try:\n if key in self._cache and self._cache[key]['last_update'] < dead_time:\n logger.debug(\"[%s] - Remove dead entries in cache : %s\", self.__class__.__name__, key)\n self.remove_rrd_from_list(key)\n del self._cache[key]\n except Exception:\n logger.exception(\"[%s] - Exception when removing dead entry %s in cache\", self.__class__.__name__, key)\n finally:\n self._lock.release()\n except Exception:\n logger.exception(\"[%s] - Exception when removing dead entries\", self.__class__.__name__)\n self.start_timer_dead()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
approve work for a given assignment through the mturk client.
|
def approve_work(self, assignment_id, override_rejection=False):
client = mturk_utils.get_mturk_client(mturk_config['is_sandbox'])
assignment_status = None
approve_attempt_num = 0
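        # look up the assignment's status first; if the lookup fails, schedule a
        # retry on a background Timer and let that attempt finish the approval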
if assignment_status != SUBMIT_STATUS and approve_attempt_num < APPROVE_TIME_LIMIT:
try:
response = client.get_assignment(AssignmentId=assignment_id)
if response:
assignment_status = response['Assignment']['AssignmentStatus']
except Exception as error:
approve_attempt_num += 1
timer = Timer(10.0, self.approve_work, [assignment_id, override_rejection])
timer.start()
                return  # the Timer retries approve_work on a new thread, so end this attempt here
try:
client.approve_assignment(
AssignmentId=assignment_id, OverrideRejection=override_rejection
)
            print('Assignment {} approved.'.format(assignment_id))
except Exception as error:
print(error)
client = mturk_utils.get_mturk_client(mturk_config['is_sandbox'])
|
[
"def approve_work(client: MTurkClient, assignment_id: str, override_rejection: bool = False) -> None:\n try:\n client.approve_assignment(AssignmentId=assignment_id, OverrideRejection=override_rejection)\n except Exception as e:\n logger.exception(\n f\"Approving MTurk assignment failed, likely because it has auto-approved. Details: {e}\",\n exc_info=True,\n )",
"def approve_assignment(AssignmentId=None, RequesterFeedback=None, OverrideRejection=None):\n pass",
"def approve_assignments_for_hit(client: MTurkClient, hit_id: str, override_rejection: bool = False):\n assignments = get_assignments_for_hit(client, hit_id)\n for assignment in assignments:\n assignment_id = assignment[\"AssignmentId\"]\n client.approve_assignment(AssignmentId=assignment_id, OverrideRejection=override_rejection)",
"def accept_assignment(self):\n if self.status == StatusEnum.submitted.value:\n client = TolokaClient(self.sandbox)\n resp = client.accept_assignment(self.assignment)\n self.status = resp.status\n self.save()\n return dict(error=False, **resp) # send toloka accept request here\n else:\n return dict(error=True)",
"def approve(self, approver, signatory_name, signatory_email=None, ):\n self.convert_to_voucher(signatory_name, signatory_email,)\n tmpl = get_template('vouchers/emails/request_approval_admin.txt')\n ctx = Context({\n 'approver': approver,\n 'request': self,\n 'approval_type': 'voucher',\n })\n body = tmpl.render(ctx)\n mail_admins(\n 'Request approval: %s approved $%s [voucher]' % (\n approver,\n self.amount,\n ),\n body,\n )",
"def reject_work(client: MTurkClient, assignment_id: str, reason: str) -> None:\n try:\n client.reject_assignment(AssignmentId=assignment_id, RequesterFeedback=reason)\n except Exception as e:\n logger.exception(\n f\"Rejecting MTurk assignment failed, likely because it has auto-approved. Details:{e}\",\n exc_info=True,\n )",
"def approveRequest(self, requestName):\n if self.verbose:\n print \" approveRequest(): Attempting to approve '%s'.\" % requestName\n \n # Check to see if the request exists and is in the correct state\n requestInfo = self.requestInfo(requestName)\n if requestInfo == None:\n return False\n elif requestInfo[\"RequestStatus\"] != \"new\":\n if self.verbose:\n print \" approveRequest(): Request in wrong state '%s'.\" % requestInfo[\"RequestStatus\"]\n return False\n\n reqParams = {\"requestName\": requestName, \"status\": \"assignment-approved\"}\n reqHeaders = {\"Content-type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"application/json\"}\n try:\n self.requestor = self.createConnection(self.reqMgrURL)\n self.requestor.request(\"PUT\", \"/reqmgr/reqMgr/request\",\n urllib.urlencode(reqParams), reqHeaders)\n except socket.error, ex:\n if self.verbose:\n print \" Error connecting to ReqMgr: %s\" % ex.strerror \n sys.exit(-1)\n \n reqMgrResponse = self.requestor.getresponse()\n if reqMgrResponse.status != 200:\n print \" approveRequest(): Approval failed, status: %d.\" % reqMgrResponse.status \n return False\n \n reqMgrResponseString = reqMgrResponse.read()\n return True",
"def setWorkflowApproved(url, workflowname):\n params = {\"requestName\" : workflowname,\"status\" : \"assignment-approved\"}\n data = requestManagerPut(url,\"/reqmgr/reqMgr/request\", params)\n return data",
"def approve_tool(self):\n\n self.approve.click()",
"def test_team_request_approve(self):\r\n self.test_team_request()\r\n url = reverse('team_request_approve', args=[self.project.slug, self.language_ar.code])\r\n resp = self.client['maintainer'].post(url, {'team_request_approve':'Approve'}, follow=True)\r\n self.assertContains(resp, 'You approved the', status_code=200)",
"def approve_selected_request(self):\n self.click_on_element_by_css(adpl.APPROVE_REQUEST_BUTTON)\n self.find_element_by_css(adpl.SUCCESSFUL_ALERT)",
"def prompt_for_approval(daprClient: DaprClient):\n daprClient.raise_workflow_event(instance_id=_id, workflow_component=workflow_component, \n event_name=\"manager_approval\", event_data={'approval': True})",
"def admin_approve(reqid):\n if g.role == \"Admin\":\n isexist = requestObj.fetch_by_id(reqid)\n\n if not isexist:\n return jsonify(response=\"Request doesnt exists\"), 404\n else:\n if isexist['isresolved'] is True:\n return jsonify({\"request\": isexist, \"response\": \"Request is already resolved\"}), 409\n elif isexist['status'] != \"Pending\":\n return jsonify({\"request\": isexist, \"response\": \"Request is already approved\"}), 409\n else:\n try:\n resp = requestObj.approve(reqid)\n return jsonify({\"message\": \"Approved Successfully\", \"Request\": resp}), 200\n except Exception as error:\n # an error occured when trying to update request\n response = {'message': str(error)}\n return jsonify(response), 401\n else:\n return jsonify(response=\"Sorry you don't have enough \\\n rights to view this resource\"), 401",
"def approve(self, task_id):\n data = {'approved': True}\n url = 'tasks/%s' % task_id\n return self._post(url, data)",
"def application_approve(self, application_approve):\n\n self._application_approve = application_approve",
"def approve_selected(modeladmin, request, queryset):\n\n qs_active = Q(status_detail='active')\n qs_expired = Q(status_detail='expired')\n qs_archived = Q(status_detail='archive')\n\n # exclude already approved memberships\n memberships = queryset.exclude(\n status=True,\n application_approved_dt__isnull=False,\n ).exclude(qs_active | qs_expired | qs_archived)\n\n for membership in memberships:\n is_renewal = membership.is_renewal()\n membership.approve(request_user=request.user)\n membership.send_email(request, ('approve_renewal' if is_renewal else 'approve'))\n if membership.corporate_membership_id:\n # notify corp reps\n membership.email_corp_reps(request)",
"def approve_quali(self, cr, uid, ids, context=None):\n return self.write(cr, uid, ids, { 'state' : 'approved' }, context=context)",
"def approve(self, request, pk):\n if not has_perm(request.user, 'memberships.approve_membershipdefault'):\n raise Http403\n\n m = get_object_or_404(MembershipDefault, pk=pk)\n is_renewal = m.is_renewal()\n m.approve(request_user=request.user)\n m.send_email(request, ('approve_renewal' if is_renewal else 'approve'))\n if m.corporate_membership_id:\n # notify corp reps\n m.email_corp_reps(request)\n\n messages.add_message(\n request,\n messages.SUCCESS,\n _('Successfully Approved')\n )\n\n return redirect(reverse(\n 'admin:memberships_membershipdefault_change',\n args=[pk],\n ))",
"def approval_handler(sender, **kwargs):\n \n entry = kwargs['entry']\n user = kwargs['user']\n\n try:\n from content_management.models import ManualEntry\n if entry.status in [-1, 2] and not (entry.send_to_industryfeed_buyers.exists() or entry.industry_rejected_news):\n mentry = ManualEntry.objects.get(id = entry.id)\n mentry.appproved_on = datetime.datetime.today()\n mentry.approved_by = user\n mentry.save()\n\n except ManualEntry.DoesNotExist:\n # not an instance of ManualEntry, ignore and move on\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Give a worker a particular qualification.
|
def give_worker_qualification(self, worker_id, qual_name, qual_value=None):
    qual_id = mturk_utils.find_or_create_qualification(
        qual_name, 'Worker has done this task', mturk_config['is_sandbox']
    )
    if qual_id is False or qual_id is None:
        print(
            'Could not give worker {} qualification {}, as the '
            'qualification could not be found to exist.'
            ''.format(worker_id, qual_name)
        )
        return
    mturk_utils.give_worker_qualification(
        worker_id, qual_id, qual_value, mturk_config['is_sandbox']
    )
    print('gave {} qualification {}'.format(worker_id, qual_name))
|
[
"def give_worker_qualification(\n client: MTurkClient,\n worker_id: str,\n qualification_id: str,\n value: Optional[int] = None,\n) -> None:\n if value is not None:\n client.associate_qualification_with_worker(\n QualificationTypeId=qualification_id,\n WorkerId=worker_id,\n IntegerValue=value,\n SendNotification=False,\n )\n else:\n client.associate_qualification_with_worker(\n QualificationTypeId=qualification_id,\n WorkerId=worker_id,\n IntegerValue=1,\n SendNotification=False,\n )",
"def associate_qualification_with_worker(QualificationTypeId=None, WorkerId=None, IntegerValue=None, SendNotification=None):\n pass",
"def create_requirement(conn, workers, qualid, performance_thresh):\n # qualification type must already exist, for now.\n # qual_id = '28EKH1Q6SVQD54NMWRXLEOBVCK22L4' \n for worker in workers:\n try:\n conn.assign_qualification(qualid, worker, value=100, send_notification=False)\n except Exception, e:\n print 'Worker qualification already exists.'\n\n\n req = Requirement(qualification_type_id=qualid, comparator='GreaterThan',\n integer_value=performance_thresh)\n qual = Qualifications()\n qual.add(req)\n return qual",
"def disassociate_qualification_from_worker(WorkerId=None, QualificationTypeId=None, Reason=None):\n pass",
"def list_workers_with_qualification_type(QualificationTypeId=None, Status=None, NextToken=None, MaxResults=None):\n pass",
"def get_qualification_score(QualificationTypeId=None, WorkerId=None):\n pass",
"def setWorkerClass(self, iCollectorWorker):",
"def assign_worker(self, worker):\n self.worker = worker\n self.worker.busy = True",
"def assign_work(workorder):\n\n # get cert techs\n elig_workers = get_eligible_workers(workorder['equipment_type'])\n\n # get facility location and calculate travel time and note if full\n wo_fac = data_tools.get_facility_detail(workorder['facility'])\n gmaps_tools.get_drive_time(wo_fac['latit'], wo_fac['longit'])\n\n # need to figure out how to store current time left of job -- feature in workers?\n\n # check priority\n #",
"def worker_id(self, w):\n self._worker_id = w",
"def queue_worker(task):\n taskqueue.add(url='/workers/bake-assignee-description',\n params={ 'task': task.identifier(),\n 'domain': task.domain_identifier()})",
"def start_ep_priority_worker(**kwargs):\n for attr in [\"txQ\", \"rxQ\", \"prQ\", \"bcastQ\", \"bcastPrQ\", \"fabric\", \"wid\"]:\n if attr not in kwargs:\n logger.error(\"missing required attribute: %s\" % attr)\n return\n ep_p_worker = EPPriorityWorker(\n kwargs.get(\"txQ\"), kwargs.get(\"rxQ\"), kwargs.get(\"prQ\"), \n kwargs.get(\"bcastQ\"), kwargs.get(\"bcastPrQ\"), kwargs.get(\"fabric\"), \n kwargs.get(\"wid\")\n )\n ep_p_worker.start()",
"def allocate_worker(self):\n raise NotImplementedError",
"def workRequirement(world, action):",
"def worker(self, worker):\n if worker is None:\n raise ValueError(\"Invalid value for `worker`, must not be `None`\") # noqa: E501\n\n self._worker = worker",
"def isWorker(self, unitId: UnitTypeId):\n return race_worker[self.race] == unitId",
"def product_workers(self, product_workers):\n\n self._product_workers = product_workers",
"def add_worker(self, worker, count):\n\n # We record the multiple counts as different workers\n for idx in xrange(count):\n self._workers.put(worker)\n self._cpus[worker] = count\n self._active[worker] = 0",
"def worker(self, worker_id):\n return self._wrap_get('/workers/worker-{}'.format(worker_id))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SurveyQuestionGroupScore - a model defined in Swagger
|
def __init__(self):
    self.swagger_types = {
        'question_group_id': 'str',
        'total_score': 'float',
        'max_total_score': 'float',
        'marked_na': 'bool',
        'question_scores': 'list[SurveyQuestionScore]'
    }
    self.attribute_map = {
        'question_group_id': 'questionGroupId',
        'total_score': 'totalScore',
        'max_total_score': 'maxTotalScore',
        'marked_na': 'markedNA',
        'question_scores': 'questionScores'
    }
    self._question_group_id = None
    self._total_score = None
    self._max_total_score = None
    self._marked_na = None
    self._question_scores = None
|
[
"def _get_scores(self, obj):\n if not hasattr(obj, '_scores'):\n obj._scores = QuestionScore.objects.filter(question=obj)\n\n return obj._scores",
"def score_answer(self, answer, answer_spec):\n raise NotImplementedError",
"def question_scores(self):\n return self._question_scores",
"def build(self):\n if not self._package.resources:\n raise ValidationError(\"At least one data resource is required.\")\n\n resource = self._package.resources[0]\n if 'schema' not in resource.descriptor:\n raise ValidationError(\"The 'schema' object is missing in resource\")\n if 'questions' not in resource.descriptor['schema']:\n raise ValidationError(\n \"The 'questions' object is missing from schema\")\n\n questions = resource.descriptor['schema']['questions']\n if isinstance(questions, dict):\n question_keys = list(questions.keys())\n question_keys.sort()\n for name in question_keys:\n xform_from_floip_dict(self._survey, name, questions[name])\n elif isinstance(questions, list):\n for question in questions:\n for name in question:\n xform_from_floip_dict(self._survey, name, question[name])\n else:\n raise ValidationError(\n \"Expecting 'questions' to be an object or array\")\n\n meta_dict = {\n \"name\": \"meta\",\n \"type\": \"group\",\n \"control\": {\n \"bodyless\": True\n },\n \"children\": [{\n \"name\": \"instanceID\",\n \"type\": \"calculate\",\n \"bind\": {\n \"calculate\": \"concat('uuid:', uuid())\"\n }\n }, {\n \"name\": \"contactID\",\n \"type\": \"string\",\n }, {\n \"name\": \"sessionID\",\n \"type\": \"string\",\n }]\n } # yapf: disable\n self._survey.add_child(create_survey_element_from_dict(meta_dict))\n self._survey.validate()\n\n # check that we can recreate the survey object from the survey JSON\n create_survey_element_from_dict(self._survey.to_json_dict())",
"def unpack_score(score, **kwargs):\n model_name = kwargs.get('model_name')\n if 'attributeScores' in score:\n model_score = round(\n score['attributeScores'][model_name]['summaryScore']['value'] * 100)\n else:\n model_score = 0\n return model_score",
"def setup_score(embedding_model_name):\n global dic_score\n if not embedding_model_name in dic_score:\n dic_score[embedding_model_name] = {}\n\n dic_score[embedding_model_name][\"davies_bouldin\"] = []\n dic_score[embedding_model_name][\"calinski_harabasz\"] = []\n dic_score[embedding_model_name][\"silhouette_score\"] = []",
"def test_box_score_by_scoreid_v(self):\n pass",
"def mi_score(res):\r\n res = res.sort_values('ID')\r\n score = skm.adjusted_mutual_info_score(meta['Group'], res['Group'])\r\n a = {'score': score, 'nc': len(res.groupby('Group'))}\r\n return a",
"def score_individual(request, student_id, round):\n\n # Iterate questions and get answers\n student = home.models.Student.objects.filter(id=student_id).first()\n answers = []\n question_answer = []\n for question in round.questions.order_by(\"number\").all():\n answer = models.Answer.objects.filter(student=student, question=question).first()\n if not answer:\n answer = models.Answer(student=student, question=question)\n answer.save()\n answers.append(answer)\n question_answer.append((question, answer))\n\n # Update the answers\n if request.method == \"POST\":\n update_answers(request, answers)\n return redirect(\"student_view\")\n\n # Render the grading view\n return render(request, \"grading/grader.html\", {\n \"name\": student.name,\n \"division\": student.team.get_division_display,\n \"round\": round,\n \"question_answer\": question_answer,\n \"mode\": \"student\"})",
"def test_speech_score_options(self):\n response = self.client.open(\n '/NLP/speech/score',\n method='OPTIONS')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def question_scores(self, question_scores):\n \n self._question_scores = question_scores",
"def get_score(self, student_answers):\n student_answer = student_answers[self.answer_id]\n student_option = self._get_submitted_option_id(student_answer)\n\n scoring = self.scoring_map[self.answer_id]\n is_valid = student_option is not None and student_option in list(scoring.keys(\n ))\n\n (correctness, points) = ('incorrect', None)\n if is_valid:\n correctness = scoring[student_option]['correctness']\n points = scoring[student_option]['points']\n\n return CorrectMap(self.answer_id, correctness=correctness, npoints=points)",
"def test_get_score(self):\n student_answers = {'1_2_1': 'abcd'}\n correct_map = CorrectMap(answer_id='1_2_1', correctness=\"correct\", npoints=0.9)\n module = CapaFactory.create(correct=True, override_get_score=False)\n module.lcp.correct_map = correct_map\n module.lcp.student_answers = student_answers\n assert module.get_score().raw_earned == 0.0\n module.set_score(module.score_from_lcp(module.lcp))\n assert module.get_score().raw_earned == 0.9\n\n other_correct_map = CorrectMap(answer_id='1_2_1', correctness=\"incorrect\", npoints=0.1)\n other_module = CapaFactory.create(correct=False, override_get_score=False)\n other_module.lcp.correct_map = other_correct_map\n other_module.lcp.student_answers = student_answers\n assert other_module.get_score().raw_earned == 0.0\n other_module.set_score(other_module.score_from_lcp(other_module.lcp))\n assert other_module.get_score().raw_earned == 0.1",
"def get_survey_results(participant):\n\tdef survey_event_parse(survey):\n\t\t\"\"\"\n\t\tHelper function that parses survey event \n\n\t\tParameters:\n\t\tsurvey (list): contains all questions in particicular survey \n\n\t\tReturns:\n\t\t\tsurvey_result (dict): maps relevant question categories to a score\n\t\t\"\"\"\n\t\tsurvey_result = {}\n\t\tfor event in survey:\n\t\t\tquestion = event['item']\n\t\t\t\n\t\t\t#Check if question in one of the categories\n\t\t\tif question in SurveyQuestionDict:\n\t\t\t\tcategory = SurveyQuestionDict[question]\n\t\t\t\t\n\t\t\t\t#If reverse coded social question, then flip the score\n\t\t\t\tif category == \"Social_Reverse\":\n\t\t\t\t\tcategory = 'Social'\n\t\t\t\t\tscore = 3 - event['value']\n\t\t\t\telif category == 'Medication':\n\t\t\t\t\tscore = 3 - event['value']\n\t\t\t\telse:\n\t\t\t\t\tscore = event['value']\n\t\t\t\t\t\t\t\t \n\t\t\t\tif category in survey_result: survey_result[category].append(score) \n\t\t\t\telse: survey_result[category] = [score]\n\t\t\t\t\t\n\t\t#Take mean for each cat\n\t\tfor cat in survey_result:\n\t\t\tsurvey_result[cat] = float(sum(survey_result[cat])/len(survey_result[cat]))*(10.0/3.0)\n\t\treturn survey_result \n\n\tparticipant_surveys = {} #initialize dict mapping survey_type to occurence of scores\n\tparticipant_results = lamp.result_event.result_event_all_by_participant(participant).data\n\tfor res in participant_results:\n\t\t#Check if its a survey event\n\t\tif 'survey_name' in res['static_data'].keys():\n\t\t\ttry:\n\t\t\t\tsurvey_result = survey_event_parse(res['temporal_events'])\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\t\t\t\tcontinue\n\t\t\t\t\n\t\t\tsurvey_time = res['timestamp']\n\t\t\t#Add to master dictionary\n\t\t\tfor category in survey_result:\n\t\t\t\tif category not in participant_surveys:\n\t\t\t\t\tparticipant_surveys[category] = [(survey_result[category], survey_time)]\n\t\t\t\telse:\n\t\t\t\t\tparticipant_surveys[category].append((survey_result[category], survey_time))\n\t\t\t\t\t\t \n\treturn participant_surveys",
"def get_score(self):\n jira = JiraServer()\n jira_response = jira.make_api_call(self.query)\n return self.process_jira_response(jira_response)",
"def test_valid_post_201(self):\n data = {\"score\": 0, \"reason\": \"√\" * 512}\n request = self.factory.post(\"/\", data)\n request.user = self.user\n resp = post_score(request)\n self.assertEqual(resp.status_code, 201)\n respj = json.loads(resp.content)\n self.assertEqual(respj[\"success\"], True)\n self.assertEqual(respj[\"score\"][\"user\"], self.user.id)\n self.assertEqual(respj[\"score\"][\"score\"], data[\"score\"])\n self.assertEqual(respj[\"score\"][\"group\"], score_group(data[\"score\"]))",
"def normalize_scores(request, quiz_id):\n quiz, quiz_data, category_score_dict, norm_score_dict, questions_list = get_session_data(request, quiz_id)\n categories = quiz.get_quiz_categories()\n\n # Creating a list of the max score for each section, needed to normalize to a score range of 0 - 10\n old_max_list = []\n for i in range(len(category_score_dict)):\n category = categories[i]\n old_max_list.append(len(quiz_data[str(category.category_name)]))\n\n # Normalizing the old scores and populating the session variable session_norm_data\n norm_score_list = [\n (10 / (old_max_list[i] - 0)) * (category_score_dict[str(categories[i].category_name)] - old_max_list[i]) + 10\n for i in range(len(category_score_dict))\n ]\n\n for i in range(len(category_score_dict)):\n category = categories[i]\n if norm_score_list[i] is not None:\n norm_score_dict[str(category.category_name)] = norm_score_list[i]\n request.session[quiz.session_norm_data()] = norm_score_dict\n return request.session[quiz.session_norm_data()]",
"def questions_count(self, request: Request) -> Response:\n return super().list(request)",
"def score(self):\n return score_by_team()[self]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the question_group_id of this SurveyQuestionGroupScore.
|
def question_group_id(self):
    return self._question_group_id
|
[
"def get_group_idx(self) -> int:\n return self.group_idx",
"def question_group_id(self, question_group_id):\n \n self._question_group_id = question_group_id",
"def task_group_id(self):\n return self._task_group_id",
"def google_group_id(self) -> str:\n return pulumi.get(self, \"google_group_id\")",
"def get_process_group_id(self, name: str): \r\n process_group = nipyapi.canvas.get_process_group(name)\r\n return process_group.id",
"def resource_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_group_id\")",
"def workgroup_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workgroup_id\")",
"def placement_group_id(self) -> Optional[str]:\n return pulumi.get(self, \"placement_group_id\")",
"def workgroup_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workgroup_id\")",
"def getQuestionId(self):\n return self._qid",
"def access_group_id(self) -> str:\n return pulumi.get(self, \"access_group_id\")",
"def getgroupid(self, groupname):\n\n request_string = f\"{self.base_url}/groups?$filter=displayName eq '{groupname}'\"\n header = {\n \"Content-type\": \"application/json\",\n \"Authorization\": \"Bearer \" + self.access_token2\n }\n response = requests.get(request_string, headers=header)\n data = response.json()\n return data['value'][0]['id']",
"def master_group_id(self):\n return self.fetch_master_group_id()",
"def get_task_group(self):\n return self._task_group",
"def get_pipeline_group(self, group_name):\n return self.find('pipelines[@group=\"%s\"]' % group_name)",
"def GroupIncrement(self):\n return self._get_attribute('groupIncrement')",
"def operation_group_id(self) -> str:\n return pulumi.get(self, \"operation_group_id\")",
"def find_group(self, group_name):\n ret_val = self._find_group(group_name.encode())\n return ret_val",
"def group(self) -> click.Group:\n if self._group is None:\n self._load_group()\n return self._group"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the question_group_id of this SurveyQuestionGroupScore.
|
def question_group_id(self, question_group_id):
    self._question_group_id = question_group_id
|
[
"def campaign_group_id(self, campaign_group_id):\n\n self._campaign_group_id = campaign_group_id",
"def role_group_id(self, role_group_id):\n\n self._role_group_id = role_group_id",
"def question_group_id(self):\n return self._question_group_id",
"def set_group(self, group: t.Optional[jank.graphics.Group]):",
"def set_group(self, group):\n return _pal.lib.body_base_set_group(self._body_base, c.c_int(group))",
"def set_task_group(self, task_group):\n self._task_group = task_group",
"def set_group(self, group):\n try:\n supports_group = self.supports_group(group)\n if not supports_group:\n self.get_logger().error(f\"{self.name} does not support {group}!\")\n else:\n self._group = group\n except NotImplementedError:\n self.get_logger().warning(f\"{self.name} does not support restricting on groups!\")",
"def work_hours_group_id(self, work_hours_group_id):\n\n self._work_hours_group_id = work_hours_group_id",
"def question_id(self, question_id: int):\n\n self._question_id = question_id",
"def front_end_group_id(self, front_end_group_id):\n self._front_end_group_id = front_end_group_id",
"def project_group_ids(self, project_group_ids):\n\n self._project_group_ids = project_group_ids",
"def group_id(self, group_id, persister=None):\n persister.exec_stmt(MySQLServer.UPDATE_SERVER_GROUP_ID,\n {\"params\":(group_id, str(self.uuid))})\n self.__group_id = group_id",
"def group_id(self, group_id, persister=None):\n persister.exec_stmt(Shards.UPDATE_SHARD,\n {\"params\":(group_id, self.__shard_id)})\n self.__group_id = group_id",
"def group_label(self, group_label):\n\n self._group_label = group_label",
"def azure_group_id(self, azure_group_id):\n\n self._azure_group_id = azure_group_id",
"def _set_group_id(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"group-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/telemetry', defining_module='openconfig-telemetry', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"group_id must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"group-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/telemetry', defining_module='openconfig-telemetry', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__group_id = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_group_id(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"group-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/telemetry', defining_module='openconfig-telemetry', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"group_id must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"group-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/telemetry', defining_module='openconfig-telemetry', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__group_id = t\n if hasattr(self, '_set'):\n self._set()",
"def ap_group(self, ap_group):\n\n self._ap_group = ap_group",
"def put(self, learner_group_id: str) -> None:\n\n assert self.user_id is not None\n assert self.normalized_payload is not None\n title = self.normalized_payload['group_title']\n description = self.normalized_payload['group_description']\n learner_usernames = self.normalized_payload['learner_usernames']\n invited_learner_usernames = self.normalized_payload[\n 'invited_learner_usernames']\n subtopic_page_ids = self.normalized_payload['subtopic_page_ids']\n story_ids = self.normalized_payload['story_ids']\n\n # Check if user is the facilitator of the learner group, as only\n # facilitators have the right to update a learner group.\n is_valid_request = learner_group_services.is_user_facilitator(\n self.user_id, learner_group_id\n )\n if not is_valid_request:\n raise self.UnauthorizedUserException(\n 'You are not a facilitator of this learner group.')\n\n learner_ids = user_services.get_multi_user_ids_from_usernames(\n learner_usernames, strict=True\n )\n invited_learner_ids = user_services.get_multi_user_ids_from_usernames(\n invited_learner_usernames, strict=True\n )\n\n learner_group = learner_group_services.update_learner_group(\n learner_group_id, title, description, [self.user_id],\n learner_ids, invited_learner_ids, subtopic_page_ids, story_ids\n )\n\n self.render_json({\n 'id': learner_group.group_id,\n 'title': learner_group.title,\n 'description': learner_group.description,\n 'facilitator_usernames': user_services.get_usernames(\n learner_group.facilitator_user_ids),\n 'learner_usernames': user_services.get_usernames(\n learner_group.learner_user_ids),\n 'invited_learner_usernames': user_services.get_usernames(\n learner_group.invited_learner_user_ids),\n 'subtopic_page_ids': learner_group.subtopic_page_ids,\n 'story_ids': learner_group.story_ids\n })"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the total_score of this SurveyQuestionGroupScore. Score of all questions in the group
|
def total_score(self):
    return self._total_score
|
[
"def score(self):\n return score_by_team()[self]",
"def _cv_total_score(self):\n scores = self.scores\n numbers = self.number_predicted\n total = sum(numbers)\n number_correct = sum([s*n for s,n in zip(scores,numbers)])\n total_score = number_correct / total\n return total_score",
"def __get_score(self, groups):\n cost = 0\n scores = [PairwiseScore.objects.filter(Q(holder__email__in=map(lambda x: x.email, group))).filter(Q(partner__email__in=map(lambda x: x.email, group))).aggregate(score=Sum('score'))['score'] for group in groups]\n if scores[0]:\n return sum(scores)\n return 0.0",
"def total_score(self, total_score):\n \n self._total_score = total_score",
"def max_total_score(self):\n return self._max_total_score",
"def question_scores(self):\n return self._question_scores",
"def calc_score(self):\n if self.exc:\n return 0\n for set_result in self.arg_sets_res:\n if not set_result.is_correct:\n return 0\n return self.score",
"def calc_score_simple(self):\n return sum([self._calc_hand_score_simple(hand)\n for hand in self.__hands])",
"def qm_score(self):\n return self._qm_score",
"def score_aggregation(self, word_scores):\n score = np.sum(word_scores)\n score *= len(word_scores)**(-self.normalisation_index)\n return score",
"def get_max_score(self):\n return sum(self.maxpoints.values())",
"def mean_score(self):\n pass",
"def similarity_score(self) -> float:\n return self.__score",
"def calculate_score(self, score_data):\n asl = score_data['num_words'] / score_data['num_sentences']\n asw = score_data['num_syllables'] / score_data['num_words']\n return self.SCORE_CONSTANT - (1.015 * asl) - (84.6 * asw)",
"def getRawScore(self):\n\n return self.score",
"def total_score(self):\n\n total = 0\n\n for i in range(len(self.x_align)):\n\n a = self.x_align[i]\n b = self.y_align[i]\n\n if a != \" \" and b != \" \":\n\n total += self.score_table[a][b]\n\n if total > self.min_score:\n\n self.found_alignment = False\n\n if self.matches.count(\"|\") < self.overlap:\n\n self.found_alignment = False",
"def getRelativeScore(self):\n minimum = self.getMinimumScore()\n current = self.getScore()\n # check how many times current score fits into min. score\n return current[1] / minimum",
"def _get_score(self):\n return self.data[self.score].to_numpy()",
"def get_grade_sum(response_score):\n grade = 0\n grade_sum = 0\n if 'score_data' in response_score:\n for score in response_score['score_data']:\n if score != 0:\n grade_sum += grade * score\n grade += 1\n return grade_sum",
"def _get_average_best_score(self):\n return mean([x['best_score'] for x in self._results])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the total_score of this SurveyQuestionGroupScore. Score of all questions in the group
|
def total_score(self, total_score):
    self._total_score = total_score
|
[
"def max_total_score(self, max_total_score):\n \n self._max_total_score = max_total_score",
"def _cv_total_score(self):\n scores = self.scores\n numbers = self.number_predicted\n total = sum(numbers)\n number_correct = sum([s*n for s,n in zip(scores,numbers)])\n total_score = number_correct / total\n return total_score",
"def qm_score(self, qm_score):\n self._qm_score = qm_score",
"def reset_score(self):\n\n self.score = 0",
"def question_scores(self, question_scores):\n \n self._question_scores = question_scores",
"def update_score(self, to_add):\n self.score += to_add",
"def update_score(self, score: float):\n if self.score == score:\n return\n self.score = score\n for edge in self._in_edges:\n edge.top.taint()",
"def reset_score(self):\n self.score = 0\n self._set_score()",
"def __setScore(self, score):\n\t\tself.score = score\n\t\treturn self.score",
"def max_total_score(self):\n return self._max_total_score",
"def score_division(self, score_division: List[float]):\n\n self._score_division = score_division",
"def set_score(self,new_score):\n self.__fitness = new_score",
"def setNodeScore(self, score):\n self.score = score",
"def set_input_score(self, score):\n pass",
"def score(self):\n return score_by_team()[self]",
"def add_score(self, player_score):\n self.score += player_score",
"def slots_total(self, slots_total):\n\n self._slots_total = slots_total",
"def add_score(self, score):\r\n self.add_scores([score])",
"def change_score(self, new_score):\n raise NotImplementedError"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the max_total_score of this SurveyQuestionGroupScore. Maximum possible score of all questions in the group
|
def max_total_score(self):
    return self._max_total_score
|
[
"def get_max_score(self):\n return sum(self.maxpoints.values())",
"def max_total_score(self, max_total_score):\n \n self._max_total_score = max_total_score",
"def max_raw_score(self):\n if self._max_raw_score is None:\n self._max_raw_score = self.matrix.max(axis=0).sum()\n return self._max_raw_score",
"def max_team_score(self):\r\n return self.data.maxTeamObjective",
"def max_objective_score(self):\r\n return self.data.maxObjectivePlayerScore",
"def max_combat_score(self):\r\n return self.data.maxCombatPlayerScore",
"def get_max_ppi_score():\n list_scores_PPI_fk_couple = []\n sqlObj = _PPIpreview_sql_new()\n results = sqlObj.select_all_score_PPI()\n for element in results:\n list_scores_PPI_fk_couple.append(element[2])\n max_value = max(list_scores_PPI_fk_couple)\n return max_value",
"def getBestScore(self):\n return self.bestScore",
"def get_highest_score(self):\n highest_scored_topic = models.Topic.objects.order_by('-score').first()\n if not highest_scored_topic:\n return 0 + self.HIGHEST_SCORE_ADDITION\n else:\n return highest_scored_topic.score + self.HIGHEST_SCORE_ADDITION",
"def personal_best(self) -> int:\n return max(self._scores)",
"def get_max_score(self):\n\n if self.anchor_scores:\n max_val = max(w[1] for v in self.anchor_scores.values() for t in v for w in t)\n return {k: [w for t in v for w in t\n if w[1] == max_val] for k, v in self.anchor_scores.items()\n if any(max_val in w for t in v for w in t)}\n return None",
"def RandomMaxMcGroupCount(self):\n return self._RandomMaxMcGroupCount",
"def highest_score(self):\r\n if len(self.__students) < 5:\r\n raise ValueError(\"at least 5 students are needed\")\r\n self.__students.sort()\r\n return self.__students[:5]",
"def _get_best_score(self):\n a = numpy.array([x['best_scores'][-1] for x in self.results])\n return a",
"def max_total_rps(self):\n\n max_tested = self.max_tested_total_rps\n return max([max_tested, self.highest_recorded_rps, self.max_tested_rps])",
"def _calc_max(self):\n return np.max(self.get_points()) + 1",
"def GetBestScore(self):\n if not self.queue:\n return np.inf\n best_score_and_item = self.GetBest(self.queue)\n return self.denormalize(best_score_and_item[0])",
"def max_tested_total_rps(self):\n\n max_tested = 0\n if len(self.tests) > 0:\n max_tested = max([t.max_rps for t in self.tests])\n return max([max_tested, self.max_tested_rps])",
"def score(self):\n return score_by_team()[self]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the max_total_score of this SurveyQuestionGroupScore. Maximum possible score of all questions in the group
|
def max_total_score(self, max_total_score):
    self._max_total_score = max_total_score
|
[
"def max_total_score(self):\n return self._max_total_score",
"def total_score(self, total_score):\n \n self._total_score = total_score",
"def max_total_recipients(self, max_total_recipients):\n\n self._max_total_recipients = max_total_recipients",
"def get_max_score(self):\n return sum(self.maxpoints.values())",
"def max_team_score(self):\r\n return self.data.maxTeamObjective",
"def max_raw_score(self):\n if self._max_raw_score is None:\n self._max_raw_score = self.matrix.max(axis=0).sum()\n return self._max_raw_score",
"def set_highest_risk_score(self, highest_risk_score, operator):\n if not highest_risk_score:\n raise ApiError(\"Invalid highest risk score\")\n self._update_criteria(\"highest_risk_score\", highest_risk_score, operator)\n return self",
"def max_percentage(self, max_percentage):\n\n self._max_percentage = max_percentage",
"def max_objective_score(self):\r\n return self.data.maxObjectivePlayerScore",
"def fulltext_max_rate(self, fulltext_max_rate):\n\n self._fulltext_max_rate = fulltext_max_rate",
"def maximum_results(self, maximum):\n self.result_set_max = int(math.ceil(maximum)) if (maximum > 0) else 25\n if self.result_set_max > self.result_set_max_cap:\n self.result_set_max = self.result_set_max_cap",
"def percentage_max_power(self, percentage_max_power: float):\n\n self._percentage_max_power = percentage_max_power",
"def set_max(self, max_subs=0):\r\n self.__max_subs = max_subs",
"def set_amax(self, value):\n assert 0 <= value <= 1, 'Invalid scale factor value'\n self._amax = value",
"def max_combat_score(self):\r\n return self.data.maxCombatPlayerScore",
"def MiniGameRhythmExcelAddMaxScore(builder, MaxScore):\n return AddMaxScore(builder, MaxScore)",
"def setMaximumThreshold(self, max_threshold):\n self.max_threshold = max_threshold",
"def maxq(self, maxq):\n\n\n self._maxq = maxq",
"def personal_best(self) -> int:\n return max(self._scores)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the marked_na of this SurveyQuestionGroupScore.
|
def marked_na(self):
    return self._marked_na
|
[
"def marked_na(self, marked_na):\n \n self._marked_na = marked_na",
"def notna(self) -> npt.NDArray[np.bool_]:\n return ~self.isna()",
"def nan(self):\r\n\t\treturn float(\"nan\")",
"def nan(self, x):\n return math.isnan(x)",
"def get_labeled_mask(self):\n return ~np.fromiter((e is None for e in self._y), dtype=bool)",
"def nan_min(self) -> int | float | date | datetime | timedelta | str:\n return self.to_frame().select(pli.col(self.name).nan_min())[0, 0]",
"def getMarkedIcon(self):\r\n\r\n return self.__marked",
"def isna(self) -> npt.NDArray[np.bool_]:\n return self._codes == -1",
"def get_blanking_level(self):\n return self.configuration.get_float('blank', default=np.nan)",
"def missing_marker(data):\n dtype = data.dtype\n if issubclass(dtype.type, np.inexact):\n return np.nan\n else:\n if np.issubdtype(dtype, str):\n return ''\n elif dtype == np.bool_:\n return NotImplemented \n elif dtype == object:\n return None\n else: \n return NotImplemented",
"def label(self):\n if self.score > 0.5:\n return 1\n else:\n return 0",
"def remove_nans(dataset):\n return dataset.fillna(0.0)",
"def get_single_nan_pattern_nos(self):\n return self.get_multi_nan_pattern_nos(multi=False)",
"def value(self):\n for item in self.group:\n log.debug(item)\n if item.get_state() and item.label == \"Yes\":\n return True\n if item.get_state() and item.label == \"No\":\n return False\n if item.get_state():\n return item.label\n return None",
"def pos_marker(self) -> Optional[PositionMarker]:\n for seg in self.segments:\n if seg.pos_marker:\n return seg.pos_marker\n return None",
"def make_mask_from_NaNs(array, ghost_array=dsa.NoneArray, is_cell=False):\n from ..vtkCommonDataModel import vtkDataSetAttributes\n if is_cell:\n mask_value = vtkDataSetAttributes.HIDDENCELL\n else:\n mask_value = vtkDataSetAttributes.HIDDENPOINT\n\n return bitwise_or(isnan(array).astype(numpy.uint8) * mask_value,\n ghost_array)",
"def getNumNaNs( self ):\n return self.numNaNs",
"def nan_max(self) -> int | float | date | datetime | timedelta | str:\n return self.to_frame().select(pli.col(self.name).nan_max())[0, 0]",
"def test_predict_is_nans():\n\tmodel = pf.GASLLT(data=data, family=pf.GASNormal())\n\tx = model.fit()\n\tx.summary()\n\tassert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the marked_na of this SurveyQuestionGroupScore.
|
def marked_na(self, marked_na):
    self._marked_na = marked_na
|
[
"def marked_na(self):\n return self._marked_na",
"def mark_empty_groups(self, mark):\n self._mark_empty_groups(mark)",
"def _fill_nans(dataset, val):\n for k in dataset.keys():\n dataset.values[np.isnan(dataset.values)] = val",
"def _fill_null(self, df):\n invalid_jobs = df.index[df.isnull().sum(axis=1).gt(0)].values\n print(\"Fill %d missing values with feature mean\" % len(invalid_jobs))\n df.fillna(df.mean(), inplace=True)",
"def replace_nan_with(self, value: Any) -> None:\n self.__internal_frequency.fillna(value, inplace=True)",
"def replace_nan(self):\n for column in self.X.columns:\n self.calc_and_fill_mean(column)",
"def _set_nan_via_mask(self, df: pd.DataFrame, nan_mask: np.ndarray) -> pd.DataFrame:\n df[nan_mask] = pd.NA\n return df.astype(\"boolean\")",
"def mark_as_spam(self):\n self.is_spam = True",
"def fill_nan(self, fill_value: int | float | pli.Expr | None) -> Series:",
"def nan(self, indices=None):\n if self.coordinates is None:\n return\n\n if (not isinstance(self.coordinates, np.ndarray)\n or self.coordinates.shape == ()):\n if self.unit is not None:\n self.coordinates = np.nan * self.unit\n else:\n self.coordinates = np.nan\n return\n\n if indices is None:\n self.coordinates.fill(np.nan)\n elif self.coordinates.ndim == 1:\n self.coordinates[indices] = np.nan\n else:\n self.coordinates[:, indices] = np.nan",
"def remove_nans(dataset):\n return dataset.fillna(0.0)",
"def mark_all_groups(self, mark):\n self._mark_all_groups(mark)",
"def none_count(self, none_count):\n\n self._none_count = none_count",
"def correct_for_NaN(satmask, dqmask):\n # If there are NaNs as the saturation values, update those values\n # to ensure there will not be saturation.\n wh_nan = np.isnan(satmask)\n\n if np.any(wh_nan):\n satmask[wh_nan] = HUGE_NUM\n dqmask[wh_nan] |= dqflags.pixel['NO_SAT_CHECK']\n\n log.info(\"Unflagged pixels having saturation values set to NaN were\"\n \" detected in the ref file; for those affected pixels no\"\n \" saturation check will be made.\")",
"def set_num_to_none(self):\n self.m.T0.value = None",
"def imputeNaN(data, newValue):\n\tdata[np.isnan(data)] = newValue; # Se asigno este valor de manera arbitraria para que no marcara un error de validacion por valores muy grandes",
"def nan(self, x):\n return math.isnan(x)",
"def set_default(self, item, state=False):\n for i in self.group:\n if i.label == item:\n i.set_state(state)",
"def mark(self):\n\n self.is_marked = True\n self.show()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the question_scores of this SurveyQuestionGroupScore.
|
def question_scores(self):
    return self._question_scores
|
[
"def _get_scores(self, obj):\n if not hasattr(obj, '_scores'):\n obj._scores = QuestionScore.objects.filter(question=obj)\n\n return obj._scores",
"def _get_scores(self):\n a = numpy.array([x['scores'] for x in self.results])\n return a",
"def score_division(self) -> List[float]:\n return self._score_division",
"def question_scores(self, question_scores):\n \n self._question_scores = question_scores",
"def score(self):\n return score_by_team()[self]",
"def getPlayerScores(self):\n rv = {}\n for cid, client in self.getplayerlist().items():\n data = getattr(client, 'score', None)\n if data:\n rv[cid] = data\n return rv",
"def get_scores(self, player=None):\n #if player is None:\n return self.score\n #else:\n # return self.order_for_player(player, self.score)",
"def scores(self):\n qs = self.lessontestscore_set.all().select_related('student')\n if qs.exists():\n return qs\n return False",
"def get_scores(self, player_id=None):\n if player_id is None:\n return self.score\n else:\n return self.order_for_player(player_id, self.score)",
"def _get_test_scores():\n cursor = connection.cursor()\n cursor.execute(\"\"\"\n SELECT testset_id, score\n FROM (\n SELECT test_option.testset_id, AVG(mco.is_correct) AS score, \n COUNT(*) as n_responses\n FROM (\n SELECT tsr.testset_id, mcr.option_id\n FROM drill_testset_responses AS tsr\n INNER JOIN drill_multiplechoiceresponse AS mcr\n ON tsr.multiplechoiceresponse_id = mcr.response_ptr_id\n ) AS test_option\n INNER JOIN drill_multiplechoiceoption AS mco\n ON test_option.option_id = mco.id\n GROUP BY test_option.testset_id\n ) AS results\n WHERE n_responses > 0\n \"\"\")\n return [(i, float(s)) for (i, s) in cursor.fetchall()]",
"def _get_score(self):\n return self.data[self.score].to_numpy()",
"def get_turn_scores(self):\n\n return self.turn_scores.copy()",
"def _get_best_scores(self):\n a = numpy.array([x['best_scores'] for x in self.results])\n return a",
"def get_score(self, key):\n return Score.objects.get_for(self, key)",
"def scored_questions(self):\n if not self._scored_questions:\n self._scored_questions = [q for q in self._question_list if q.scored]\n return self._scored_questions",
"def get_score(self, student_answers):\n student_answer = student_answers[self.answer_id]\n student_option = self._get_submitted_option_id(student_answer)\n\n scoring = self.scoring_map[self.answer_id]\n is_valid = student_option is not None and student_option in list(scoring.keys(\n ))\n\n (correctness, points) = ('incorrect', None)\n if is_valid:\n correctness = scoring[student_option]['correctness']\n points = scoring[student_option]['points']\n\n return CorrectMap(self.answer_id, correctness=correctness, npoints=points)",
"def get_raw_scores(self):\n return self.get_raw_results()",
"def get_score_models(self, as_tuples:bool=False):\n score_model_list = []\n qs = self.subjects()\n if qs:\n for subject in qs:\n if as_tuples:\n score_model_list.extend(\n subject.get_score_models(as_tuples=True)\n )\n else:\n score_model_list.extend(subject.get_score_models())\n return score_model_list",
"def score_history(self):\n return [player.score_history for player in self.players]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the question_scores of this SurveyQuestionGroupScore.
|
def question_scores(self, question_scores):
    self._question_scores = question_scores
|
[
"def qm_score(self, qm_score):\n self._qm_score = qm_score",
"def score_division(self, score_division: List[float]):\n\n self._score_division = score_division",
"def question_scores(self):\n return self._question_scores",
"def set_input_score(self, score):\n pass",
"def reset_score(self):\n self.scores = []",
"def __init__(self, scores: list[int]) -> None:\n self._scores = scores",
"def questions(self, questions):\n self._questions = questions",
"def __setScore(self, score):\n\t\tself.score = score\n\t\treturn self.score",
"def update_review_scores(self):\n for review in self.reviews.all():\n review.save(update_score=True)",
"def set_score(self, a, b, score):\n\t\tself.match_matrix[(a,b)] = score",
"def set_score_by_set(self, event_id, set_path, score):\n self._rtdb.reference(\n self._db_keywords[\"root\"] + str(int(event_id)) + self._db_keywords[\"score\"] + set_path).set(score)",
"def setNodeScore(self, score):\n self.score = score",
"def resetScores(self):\n self.scores = {self.clientA: 0, self.clientB: 0}\n self.announceScoreTo(self.clientA)\n self.announceScoreTo(self.clientB)",
"def setMinScore(self, value) -> None:\n ...",
"def normalize_scores(request, quiz_id):\n quiz, quiz_data, category_score_dict, norm_score_dict, questions_list = get_session_data(request, quiz_id)\n categories = quiz.get_quiz_categories()\n\n # Creating a list of the max score for each section, needed to normalize to a score range of 0 - 10\n old_max_list = []\n for i in range(len(category_score_dict)):\n category = categories[i]\n old_max_list.append(len(quiz_data[str(category.category_name)]))\n\n # Normalizing the old scores and populating the session variable session_norm_data\n norm_score_list = [\n (10 / (old_max_list[i] - 0)) * (category_score_dict[str(categories[i].category_name)] - old_max_list[i]) + 10\n for i in range(len(category_score_dict))\n ]\n\n for i in range(len(category_score_dict)):\n category = categories[i]\n if norm_score_list[i] is not None:\n norm_score_dict[str(category.category_name)] = norm_score_list[i]\n request.session[quiz.session_norm_data()] = norm_score_dict\n return request.session[quiz.session_norm_data()]",
"def set_score(student, assessment_name, score):\n if not student.scores:\n score_dict = {}\n else:\n score_dict = json.loads(student.scores)\n score_dict[assessment_name] = score\n student.scores = json.dumps(score_dict)",
"def set_score(self,new_score):\n self.__fitness = new_score",
"def update_scores(self, assessment):\n metrics = RiskOfBiasMetric.objects.get_required_metrics(\n assessment, self.study\n ).prefetch_related(\"scores\")\n scores = self.scores.all()\n # add any scores that are required and not currently created\n for metric in metrics:\n if not (metric.scores.all() & scores):\n logging.info(f\"Creating score: {self.study}->{metric}\")\n RiskOfBiasScore.objects.create(riskofbias=self, metric=metric)\n # delete any scores that are no longer required\n for score in scores:\n if score.metric not in metrics:\n logging.info(f\"Deleting score: {self.study}->{score.metric}\")\n score.delete()",
"def update_scores(self):\n\t\tself.score_black.text = str(self.data['score'][1])\n\t\tself.score_white.text = str(self.data['score'][0])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set parameters for animal species.
|
def set_animal_parameters(species, params):
    if species == "Herbivore":
        Herbivore.set_parameters(params)
    if species == "Carnivore":
        Carnivore.set_parameters(params)
|
[
"def set_animal_parameters(self, species, params):",
"def test_set_animal_parameters_callable(self):\n params = {}\n self.biosim.set_animal_parameters('Herbivore', params)",
"def set_parameters(cls, params):\n # cls.params_dict.update(params)\n for parameter in params:\n # cls.params_dict.update(params)\n if parameter in cls.params_dict:\n if params[parameter] < 0:\n raise ValueError(f\"{parameter} cannot be negative.\")\n if parameter == \"DeltaPhiMax\" and params[parameter] <= 0:\n raise ValueError(\"DeltaPhiMax must be larger than zero\")\n if parameter == \"eta\" and not 0 <= params[parameter] <= 1:\n raise ValueError(\"Eta must be greater than zero and smaller than one\")\n cls.params_dict.update(params)\n else:\n raise ValueError(\"Parameter not defined for this animal\")",
"def set_params(self, **values):\n pc, pe = {}, {}\n for k, v in values.items():\n if k.startswith('e_'):\n pe[k[2:]] = v\n elif k.startswith('c_'):\n pc[k[2:]] = v\n else:\n raise ValueError( # pragma: no cover\n f\"Unexpected parameter name '{k}'\")\n self.clus.set_params(**pc)\n self.estimator.set_params(**pe)",
"def set_params(self, **params):\n for key, value in params.items():\n if hasattr(self, key):\n if key == 'layers':\n value = list(value)\n setattr(self, key, value)\n else:\n # accessing deep parameters\n param, sep, param_of_param = key.partition('__')\n if sep != '__':\n raise ValueError(key + ' is an invalid parameter a Theanets estimator')\n if param == 'trainers':\n index, sep, param = param_of_param.partition('_')\n index = int(index)\n if index >= len(self.trainers):\n raise ValueError('{} is an invalid parameter for a Theanets estimator: index '\n 'too big'.format(key))\n if param == '':\n # e.g. trainers__0 = {'optimize': 'sgd', 'learning_rate': 0.3}\n self.trainers[index] = value\n else:\n # e.g. trainers__0_optimize = 'sgd'\n self.trainers[index][param] = value\n elif param == 'layers':\n index = int(param_of_param)\n if index >= len(self.layers):\n raise ValueError('{} is an invalid parameter for a Theanets estimator: index '\n 'too big'.format(key))\n self.layers[index] = value\n elif param == 'scaler':\n try:\n self.scaler.set_params(**{param_of_param: value})\n except Exception:\n raise ValueError('was unable to set parameter {}={} '\n 'to scaler {}'.format(param_of_param, value, self.scaler))\n else:\n raise ValueError(key + ' is an invalid parameter for a Theanets estimator')",
"def set_annot_species(ibs, aid_list, species_list):\n species_list = [species.lower() for species in species_list]\n ibsfuncs.assert_valid_species(ibs, species_list, iswarning=True)\n ibs.set_annot_lblannot_from_value(aid_list, species_list, constants.SPECIES_KEY)",
"def animal_attribute(self, legs, eyes):\r\n self.legs = legs\r\n self.eyes = eyes\r\n print(\" It has \", self.legs, \"legs\")\r\n print(\" It has \", self.eyes, \"eyes\")",
"def setup_parameter(self, parameter, value):\n self.__dict__[parameter] = value",
"def set(**kwargs):\n init()\n \n global tree, variable, weight, cuts, categories, dataset, name, title\n global variables\n \n\n ## require that tree and variable must be set\n if (not (tree or 'tree' in kwargs) or \n (not (variable or 'variable' in kwargs) and not 'variables' in kwargs) or\n (not (variable or 'variable' in kwargs) and not kwargs['variables'])):\n raise RuntimeError, \"Must provide tree and variable.\"\n\n for arg in ('tree variable weight cuts categories dataset name '\n 'title variables').split():\n if arg in kwargs.keys():\n setattr( sys.modules[__name__], arg, kwargs[arg] )\n del kwargs[arg]\n if kwargs.keys():\n raise RuntimeError, \"Unknown argument(s): %s\" % repr( kwargs.keys() )\n\n if name != 'data' and title == 'data':\n title = name\n\n if variable and not variable in variables:\n variables.append(variable)\n \n if not 'variable' in kwargs:\n variable = variables[0]",
"def setSIFTParams(self):\n for x in [self.detector, self.extractor]:\n for idx, (name, setname, getname, val) in enumerate(self.SIFTParams):\n if val is not None:\n getattr(x, setname)(name, val)\n else:\n self.SIFTParams[idx][3] = getattr(x, getname)(name)",
"def set_species(self, value):\n if value is None:\n return\n value = list(value)\n self._species_to_index_dict = {el: i for i, el in enumerate(value)}\n self._species = value[:]\n self._store_elements = {el.Abbreviation: el for el in value}",
"def setParams(self, stages=[]):\n kwargs = self.setParams._input_kwargs\n return self._set_params(**kwargs)",
"def set_parameters(self, params):\r\n #raise NotImplementedError(\"You need to write this part!\")\r\n i = 0\r\n for param in self.network.parameters():\r\n param = prams[i]\r\n i = i+1",
"def set_param_values(self, x):\n\n for name, value in x.iteritems():\n self.theano_params[name].set_value(value)",
"def _update_params(self, param_values, param_names):\n if param_names is None:\n param_names = [\"sigma\"]\n for name, value in zip(param_names, param_values):\n setattr(self, name, value)",
"def set_parameters(parameters):\n if parameters:\n for p_name, p_value in parameters.items():\n setattr(DataHelper, p_name, p_value)",
"def set_species_initial_value(self, species_initial_value):\n self.listOfSpecies['S'].initial_value = species_initial_value[0]\n self.listOfSpecies['I'].initial_value = species_initial_value[1]\n self.listOfSpecies['R'].initial_value = species_initial_value[2]",
"def setWoodType(self, species):\n getHandle().setType(getBoatType(species))",
"def set_compartment_param(self, compartment, name, value, mechanismname):\n if name == 'CM':\n compartment.Cm = value*math.pi*compartment.diameter*compartment.length\n elif name == 'RM':\n compartment.Rm = value/(math.pi*compartment.diameter*compartment.length)\n elif name == 'RA':\n compartment.Ra = value*compartment.length/(math.pi*(compartment.diameter/2.0)**2)\n elif name == 'Em':\n compartment.Em = value\n elif name == 'initVm':\n compartment.initVm = value\n elif name == 'inject':\n print compartment.name, 'inject', value, 'A.'\n compartment.inject = value\n elif mechanismname is 'synapse': # synapse being added to the compartment\n ## these are potential locations, we do not actually make synapses.\n #synapse = self.context.deepCopy(self.context.pathToId('/library/'+value),\\\n # self.context.pathToId(compartment.path),value) # value contains name of synapse i.e. synapse_type\n #moose.connect(compartment,\"channel\", synapse, \"channel\")\n ## I assume below that compartment name has _segid at its end\n segid = string.split(compartment.name,'_')[-1] # get segment id from compartment name\n self.segDict[segid][5].append(value)\n elif mechanismname is 'spikegen': # spikegen being added to the compartment\n ## these are potential locations, we do not actually make the spikegens.\n ## spikegens for different synapses can have different thresholds,\n ## hence include synapse_type in its name\n ## value contains name of synapse i.e. synapse_type\n #spikegen = moose.SpikeGen(compartment.path+'/'+value+'_spikegen')\n #moose.connect(compartment,\"VmSrc\",spikegen,\"Vm\")\n pass\n elif mechanismname is not None:\n ## if mechanism is not present in compartment, deep copy from library\n if not moose.exists(compartment.path+'/'+mechanismname):\n ## if channel does not exist in library load it from xml file\n if not moose.exists(\"/library/\"+mechanismname):\n cmlR = ChannelML(self.nml_params)\n model_filename = mechanismname+'.xml'\n model_path = neuroml_utils.find_first_file(model_filename,self.model_dir)\n if model_path is not None:\n cmlR.readChannelMLFromFile(model_path)\n else:\n raise IOError(\n 'For mechanism {0}: files {1} not found under {2}.'.format(\n mechanismname, model_filename, self.model_dir\n )\n )\n\n neutralObj = moose.Neutral(\"/library/\"+mechanismname)\n if 'CaConc' == neutralObj.className: # Ion concentration pool\n libcaconc = moose.CaConc(\"/library/\"+mechanismname)\n ## deep copies the library caconc under the compartment\n caconc = moose.copy(libcaconc,compartment,mechanismname)\n caconc = moose.CaConc(caconc)\n ## CaConc connections are made later using connect_CaConc()\n ## Later, when calling connect_CaConc,\n ## B is set for caconc based on thickness of Ca shell and compartment l and dia\n ## OR based on the Mstring phi under CaConc path.\n channel = None\n elif 'HHChannel2D' == neutralObj.className : ## HHChannel2D\n libchannel = moose.HHChannel2D(\"/library/\"+mechanismname)\n ## deep copies the library channel under the compartment\n channel = moose.copy(libchannel,compartment,mechanismname)\n channel = moose.HHChannel2D(channel)\n moose.connect(channel,'channel',compartment,'channel')\n elif 'HHChannel' == neutralObj.className : ## HHChannel\n libchannel = moose.HHChannel(\"/library/\"+mechanismname)\n ## deep copies the library channel under the compartment\n channel = moose.copy(libchannel,compartment,mechanismname)\n channel = moose.HHChannel(channel)\n moose.connect(channel,'channel',compartment,'channel')\n ## if mechanism is present in compartment, just 
wrap it\n else:\n neutralObj = moose.Neutral(compartment.path+'/'+mechanismname)\n if 'CaConc' == neutralObj.className: # Ion concentration pool\n caconc = moose.CaConc(compartment.path+'/'+mechanismname) # wraps existing channel\n channel = None\n elif 'HHChannel2D' == neutralObj.className : ## HHChannel2D\n channel = moose.HHChannel2D(compartment.path+'/'+mechanismname) # wraps existing channel\n elif 'HHChannel' == neutralObj.className : ## HHChannel\n channel = moose.HHChannel(compartment.path+'/'+mechanismname) # wraps existing channel\n if name == 'Gbar':\n if channel is None: # if CaConc, neuroConstruct uses gbar for thickness or phi\n ## If child Mstring 'phi' is present, set gbar as phi\n ## BUT, value has been multiplied by Gfactor as a Gbar,\n ## SI or physiological not known here,\n ## ignoring Gbar for CaConc, instead of passing units here\n child = moose_utils.get_child_Mstring(caconc,'phi')\n if child is not None:\n #child.value = value\n pass\n else:\n #caconc.thick = value\n pass\n else: # if ion channel, usual Gbar\n channel.Gbar = value*math.pi*compartment.diameter*compartment.length\n elif name == 'Ek':\n channel.Ek = value\n elif name == 'thick': # thick seems to be NEURON's extension to NeuroML level 2.\n caconc.thick = value ## JUST THIS WILL NOT DO - HAVE TO SET B based on this thick!\n ## Later, when calling connect_CaConc,\n ## B is set for caconc based on thickness of Ca shell and compartment l and dia.\n ## OR based on the Mstring phi under CaConc path.\n if neuroml_utils.neuroml_debug: print \"Setting \",name,\" for \",compartment.path,\" value \",value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set parameters for landscape type.
|
def set_landscape_parameters(landscape, params):
if landscape == "L":
Lowland.set_parameters(params)
elif landscape == "H":
Highland.set_parameters(params)
else:
        raise ValueError('Lowland and Highland are the '
'only ones that can have different parameters')
|
[
"def set_landscape_parameters(self, landscape, params):",
"def test_set_landscape_parameters(self):\n params1 = {'alpha': 0.4}\n params2 = {'f_max': 500}\n self.biosim.set_landscape_parameters('S', params1)\n self.biosim.set_landscape_parameters('J', params2)\n assert Savannah.alpha == 0.4\n assert Jungle.f_max == 500",
"def to_landscape(self) -> None:\n if self.is_portrait:\n self.width, self.height = self.height, self.width",
"def change_orientation_landscape(self):\n\n self.mob_conn.orientation = 'LANDSCAPE'\n return self",
"def setUpOrientation(node, parmname, defaultup):\n pass",
"def to_portrait(self) -> None:\n if self.is_landscape:\n self.width, self.height = self.height, self.width",
"def set_horizontal(self) -> None:\n self.orientation = constants.HORIZONTAL",
"def set_orientation(self, value):\n self._selenium_web_driver().orientation = value.upper()",
"def set_rotation(self, rotation: float):",
"def setHorizontalFieldOfView(*args, **kwargs):\n \n pass",
"def set_model_params(self, params):",
"def setPlane(*args, **kwargs):\n \n pass",
"def test_write_page_setup_landscape(self):\n\n self.worksheet.set_landscape()\n\n self.worksheet._write_page_setup()\n\n exp = \"\"\"<pageSetup orientation=\"landscape\"/>\"\"\"\n got = self.fh.getvalue()\n\n self.assertEqual(got, exp)",
"def set_new_hw(self,height,width):\n self.im_out_h = height\n self.im_out_w = width",
"def set_hyper_parameters(self, x):\n self.set_n_active_points(x[0])\n self.set_n_rejection_samples(x[1])\n self.set_enlargement_factor(x[2])\n self.set_ellipsoid_update_gap(x[3])\n self.set_dynamic_enlargement_factor(x[4])\n self.set_alpha(x[5])",
"def setScanArea(self, left=0.0, top=0.0, width=8.267, height=11.693):\n #((left, top, width, height) document_number, page_number, frame_number)\n width = float(width)\n height = float(height)\n left = float(left)\n top = float(top)\n self.scanner.SetImageLayout((left, top, width, height), 1, 1, 1)",
"def set_parameters(self, params):\r\n #raise NotImplementedError(\"You need to write this part!\")\r\n i = 0\r\n for param in self.network.parameters():\r\n param = prams[i]\r\n i = i+1",
"def set_parameters(self, params: List[Union[torch.Tensor, list]]) -> None:\n self.means = params[0]\n self.precs = params[1]",
"def set_orient(self, new_orient):\n self[:2, :2] = new_orient"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a population to each cell on the island
|
def add_population(self, population):
for cell_coord in population:
x, y = cell_coord.get('loc')
self.island[x][y].place_animals(cell_coord.get('pop'))
|
[
"def add_population(self, population):",
"def appendPopulation(self, population):\n for index in range(population.size()):\n gene = population.getGene(index)\n fitness = population.getFitness(index)\n self.appendGene(gene, fitness)",
"def populateCells(self):\n for p in self.Points:\n self.Cells[self.findInd(p)].append(p)",
"def InsertPopulation(self, Population ):\n\t\tself.populations.append(Population)\n\t\tfor Individual in Population:\n\t\t\tself.InsertIndividual(Individual)",
"def create_population(self):\r\n self.generation = 0\r\n for genome_num in range(self.pop_size):\r\n genome = Genome()\r\n genome.mutate()\r\n self.genomes.append(genome)\r\n txt = self.data_loc(self.generation, genome_num)\r\n savetxt(txt, genome.node_net, fmt=\"%f\")",
"def update_grid(self):\r\n if self.game.bacteria_mode:\r\n over_population_limit = 4\r\n else:\r\n over_population_limit = 3\r\n for row in range(1, self.game.num_of_rows + 1):\r\n for col in range(1, self.game.num_of_cols + 1):\r\n curr_cell = self.grid[row][col]\r\n adj_list = [self.grid[row-1][col-1], self.grid[row-1][col], self.grid[row-1][col+1],\r\n self.grid[row][col-1], self.grid[row][col+1], self.grid[row+1][col-1],\r\n self.grid[row+1][col], self.grid[row+1][col+1]]\r\n\r\n # act upon adj_list\r\n adj_live_count = 0\r\n for cell in adj_list:\r\n if cell.alive:\r\n adj_live_count += 1\r\n\r\n if curr_cell.alive:\r\n if adj_live_count < 2:\r\n curr_cell.alive_next_round = False # loneliness\r\n elif adj_live_count > over_population_limit:\r\n curr_cell.alive_next_round = False # overpopulation\r\n else:\r\n curr_cell.alive_next_round = True # same same\r\n else:\r\n if adj_live_count == 3:\r\n curr_cell.alive_next_round = True # A cell is born\r\n else:\r\n curr_cell.alive_next_round = False # same same\r",
"def mutate(self):\n for i in range(len(self.population)):\n self.population[i].mutate(individual=i)",
"def generate_random_population(self):\n for i in range(POPULATION_SIZE):\n Chromosome = individual()\n Chromosome.generate()\n self.populationlist.append(Chromosome)",
"def generation_initial_population():\n pass",
"def addPopulation(self, population):\n #assert isinstance(population, Population) # allow other Population-like objects?\n assert population.label not in self.populations # refuse to overwrite existing population with same label\n self.populations.append(population)\n return population",
"def add_pop_to_nodes():\n path = os.path.join(DATA_INTERMEDIATE, 'pop_by_elec_node.csv')\n pop_data = pd.read_csv(path)\n\n path = os.path.join(DATA_INTERMEDIATE, 'elec_distribution.shp')\n elec_nodes = gpd.read_file(path, crs='epsg:27700')\n\n output = []\n\n for idx, item in pop_data.iterrows():\n for idx, elec_node in elec_nodes.iterrows():\n if item['id'] == elec_node['id']:\n output.append({\n 'geometry': elec_node['geometry'],\n 'properties': {\n 'id': elec_node['id'],\n 'population': item['population'],\n },\n })\n\n output = gpd.GeoDataFrame.from_features(output)\n\n path = os.path.join(DATA_INTERMEDIATE, 'elec_distribution.shp')\n output.to_file(path, crs='epsg:27700')",
"def create_population(self):\n global maxid\n self.population= []\n #.....0th individual is the initial guess if there is\n ind= Individual(0,self.ngene,self.murate,self.func,self.args)\n genes=[]\n for ig in range(self.ngene):\n g= Gene(self.nbitlen,self.vars[ig]\n ,min=self.vranges[ig,0],max=self.vranges[ig,1])\n genes.append(g)\n ind.set_genes(genes)\n self.population.append(ind)\n #.....other individuals whose genes are randomly distributed\n for i in range(self.nindv-1):\n ind= Individual(i+1,self.ngene,self.murate,self.func,self.args)\n maxid= i+1\n genes= []\n for ig in range(self.ngene):\n g= Gene(self.nbitlen,self.vars[ig]\n ,min=self.vranges[ig,0],max=self.vranges[ig,1])\n #.....randomize by mutating with high rate\n g.mutate(0.25)\n genes.append(g)\n ind.set_genes(genes)\n self.population.append(ind)",
"def register(self, *items):\n for item in items:\n if isinstance(item, (common.BasePopulation, common.Assembly)):\n if item.celltype.injectable: # don't do memb_init() on spike sources\n self.population_list.append(item)\n else:\n if hasattr(item._cell, \"memb_init\"):\n self.cell_list.append(item)",
"def test_add_population_callable(self):\n self.biosim.add_population(self.population)",
"def populate(self):\n self.population = []\n \n self.population = np.random.multivariate_normal(self.mu, self.S, size = self.n)",
"def createPopulation(self, *args, **kwargs):\n return self.addPopulation(Population(*args, **kwargs))",
"def init_population(self, pop_size):\n # population\n self.pop_size = pop_size\n self.population.extend([MLP2(self.in_dim, self.hid_dim1, self.hid_dim2, self.out_dim)\n for _ in range(pop_size)])\n # roulette wheel\n for i in range(pop_size - self.num_elitism):\n self.fitness_roulette.extend([i] * (i + 1))",
"def initialize_population(self):\n # Convert number of genes to the largest integer under the binary\n # representation with that number of bits.\n # Do this here instead of in self.__init__() because this is specific to\n # the representation, and this method should be overridden when\n # subclassing\n self._indiv_size = pow(2, self._num_genes) - 1\n pop = [self._random.randint(0, self._indiv_size) for _ in\n range(self._pop_size)]\n self._pop = self._rank_pop(pop)",
"def set_population(feature, distance):\n geo = ee.Geometry.Point([feature.get('longitude'), feature.get('latitude')])\n disk = geo.buffer(ee.Number(distance).multiply(1000))\n count = pop.reduceRegion(reducer='sum', geometry=disk)\n count = ee.Number(count.get('population')).toInt()\n return feature.set({f'population_within_{distance}km': count})"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes the number of animals throughout the island and sums them up
|
def heat_num_animals(self):
total_herbivores = sum(sum(self.heatmap_of_population()['Herbivore']))
total_carnivores = sum(sum(self.heatmap_of_population()['Carnivore']))
animal_count_dict = {"Herbivore": total_herbivores, "Carnivore": total_carnivores}
return animal_count_dict
|
[
"def num_animals(self):\n val_sum = 0\n for key, values in self.heat_num_animals.items():\n val_sum += values\n return val_sum",
"def num_animals(self):",
"def num_animals_per_species(self):",
"def population(self):\n return sum([len(s) for s in self.__species])",
"def calculate_total_bags(graph):\n value = 0\n for node in graph:\n value += int(node[\"count\"]) + int(node[\"count\"]) * calculate_total_bags(\n node[\"inside\"]\n )\n return value",
"def num_jewels_in_stones_ul(jewel_stones, all_stones) -> int:\n\n return sum(list(map(lambda jewel_stone: all_stones.count(jewel_stone), jewel_stones)))",
"def count_bags(bag, data):\n if data[bag] == {'no other bags':0}:\n return 0\n\n count = 0\n for content in data[bag]:\n print(content, data[bag][content])\n count += int(data[bag][content]) + int(data[bag][content]) * count_bags(content, data)\n return count",
"def get_current_number_sharks(self) -> int:\n number_sharks = 0\n for coord, animal in self._grid.items():\n if animal.animal_type == Animal.Shark:\n number_sharks += 1\n return number_sharks",
"def test_num_carn():\n jung = Jungle()\n jung.add_population([{'species': 'Herbivore', 'weight': 14, 'age': 0},\n {'species': 'Herbivore', 'weight': 54, 'age': 0},\n {'species': 'Carnivore', 'weight': 20, 'age': 13}])\n nt.assert_equal(1, jung.total_num_animals(species='carnivore'),\n \"Wrong number of carnivores\")",
"def getEpisodesTotal(self):\r\n totalepisodes = 0\r\n for seasons in self.seasonsepisodedict:\r\n totalepisodes += self.seasonsepisodedict[seasons]\r\n return totalepisodes",
"def getAverageSizeOfCity(self):\n cities = self.graph.nodes # get nodes(port)\n total_population = 0\n cities_num = 0\n for code in cities:\n city = cities[code]\n city_info = city.info\n # increase total number of population\n total_population += city_info[\"population\"]\n # increase total number of cities\n cities_num += 1\n self.average_population = total_population / cities_num",
"def LocationCount(animal, FileName, Distance, Lat, Lon):\r\n \r\n animal_type = str(animal)\r\n with open(FileName, \"r\") as FIn:\r\n \r\n MammalList = FIn.readlines()\r\n ancount = 0\r\n\r\n for x in range(len(MammalList)): # For loop to check if the distance between the given Lat and Lon is <= to Distance\r\n \r\n alist = LineToList(MammalList[x])\r\n \r\n dista = dist(alist[1], alist[2], Lat, Lon) # dist class call\r\n Dis = dista.latlon()\r\n \r\n if Dis <= Distance: # Checking if calculated distance is <= to given distance.\r\n ancount += 1\r\n FIn.close()\r\n \r\n print('\\n%s:' %animal_type)\r\n print(\"Number of %s within %skm: %d\" %(animal_type, Distance, ancount))\r\n return ancount",
"def total(self) -> int:\n\n return sum([entry.reps for entry in self.entries])",
"def test_num_herb():\n jung = Jungle()\n jung.add_population([{'species': 'Herbivore', 'weight': 14, 'age': 0},\n {'species': 'Herbivore', 'weight': 54, 'age': 0},\n {'species': 'Carnivore', 'weight': 20, 'age': 13}])\n nt.assert_equal(2, jung.total_num_animals(species='herbivore'),\n \"Wrong number of herbivores\")",
"def test_add_animals(self, landscape_data):\n lowland = landscape_data[\"L\"]\n assert len(lowland.fauna_dict['Herbivore']) == 2\n herb3 = Herbivore()\n lowland.add_animal(herb3)\n assert len(lowland.fauna_dict['Herbivore']) == 3",
"def count_individuals(self):\n if self.count_total is None:\n if self.sample_map.file_name in [None, \"none\", \"null\"]:\n ds_array = np.ones(shape=(self.fine_map.x_size, self.fine_map.y_size))\n else:\n ds = gdal.Open(self.sample_map.file_name)\n ds_array = ds.GetRasterBand(1).ReadAsArray()\n self.count_total = np.sum(ds_array) * self.sample_size * self.get_average_density()\n if len(self.times_list) > 0:\n self.count_total *= len(self.times_list)\n return self.count_total\n else:\n return self.count_total",
"def average_peng(icebergs):\n if not icebergs:\n return 0\n return int(sum([i.penguin_amount for i in icebergs])/len(icebergs))",
"def number_of_articles():",
"def average_calories(foods):\r\n avg = 0\r\n tot = 0\r\n \r\n for i in range(len(foods)):\r\n tot += foods[i].calories\r\n \r\n avg = tot/(len(foods))\r\n\r\n return avg"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Total number of animals on the island.
|
def num_animals(self):
val_sum = 0
for key, values in self.heat_num_animals.items():
val_sum += values
return val_sum
|
[
"def num_animals(self):",
"def num_animals_per_species(self):",
"def heat_num_animals(self):\n total_herbivores = sum(sum(self.heatmap_of_population()['Herbivore']))\n total_carnivores = sum(sum(self.heatmap_of_population()['Carnivore']))\n animal_count_dict = {\"Herbivore\": total_herbivores, \"Carnivore\": total_carnivores}\n return animal_count_dict",
"def get_current_number_sharks(self) -> int:\n number_sharks = 0\n for coord, animal in self._grid.items():\n if animal.animal_type == Animal.Shark:\n number_sharks += 1\n return number_sharks",
"def population(self):\n return sum([len(s) for s in self.__species])",
"def count_asteroids(self):\n count = 0\n for obj in self.game_objects:\n if type(obj) == Asteroid:\n count += 1\n return count",
"def get_number_territories(self):\n territories_total = 0\n for data in self.country_data.values():\n if data[\"number_of_users\"] > 0:\n territories_total += 1\n\n return territories_total",
"def count_individuals(self):\n if self.count_total is None:\n if self.sample_map.file_name in [None, \"none\", \"null\"]:\n ds_array = np.ones(shape=(self.fine_map.x_size, self.fine_map.y_size))\n else:\n ds = gdal.Open(self.sample_map.file_name)\n ds_array = ds.GetRasterBand(1).ReadAsArray()\n self.count_total = np.sum(ds_array) * self.sample_size * self.get_average_density()\n if len(self.times_list) > 0:\n self.count_total *= len(self.times_list)\n return self.count_total\n else:\n return self.count_total",
"def get_total_num_homes(self):\n return self._total_num_homes",
"def number_of_individuals(self):\n return self._number_of_individuals",
"def number_of_articles():",
"def get_num_species(self):\n return len(self.get_all_species())",
"def towns_count(self):\n return len(self.town_map)",
"def LocationCount(animal, FileName, Distance, Lat, Lon):\r\n \r\n animal_type = str(animal)\r\n with open(FileName, \"r\") as FIn:\r\n \r\n MammalList = FIn.readlines()\r\n ancount = 0\r\n\r\n for x in range(len(MammalList)): # For loop to check if the distance between the given Lat and Lon is <= to Distance\r\n \r\n alist = LineToList(MammalList[x])\r\n \r\n dista = dist(alist[1], alist[2], Lat, Lon) # dist class call\r\n Dis = dista.latlon()\r\n \r\n if Dis <= Distance: # Checking if calculated distance is <= to given distance.\r\n ancount += 1\r\n FIn.close()\r\n \r\n print('\\n%s:' %animal_type)\r\n print(\"Number of %s within %skm: %d\" %(animal_type, Distance, ancount))\r\n return ancount",
"def getEpisodesTotal(self):\r\n totalepisodes = 0\r\n for seasons in self.seasonsepisodedict:\r\n totalepisodes += self.seasonsepisodedict[seasons]\r\n return totalepisodes",
"def calories(self) -> int:\n raise NotImplementedError(\"Hey, you need to override this\")",
"def get_number_locations(self):\n return len(self.locations)",
"def count_alive(self):\n\n # set the alive count to 0\n num_alive = 0\n\n # run through all cells in the map array, and if \n # the cell is a space cell, increment the alive \n # value.\n for j in range(0, self.height):\n for i in range(0, self.width):\n if (self.map[j][i] == self.space_val):\n num_alive += 1\n\n # check the map contents, and return the alive val.\n self.assert_array_size('count_alive', self.map)\n return num_alive",
"def test_num_carn():\n jung = Jungle()\n jung.add_population([{'species': 'Herbivore', 'weight': 14, 'age': 0},\n {'species': 'Herbivore', 'weight': 54, 'age': 0},\n {'species': 'Carnivore', 'weight': 20, 'age': 13}])\n nt.assert_equal(1, jung.total_num_animals(species='carnivore'),\n \"Wrong number of carnivores\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds a keyword argument 'level' to the augmentation function. The level policy is a function that translates a level value to appropriate arguments of the augmentation.
|
def randaug(level_policy):
def decorator(aug):
@functools.wraps(aug)
def wrapper(image, *args, **kwargs):
if 'level' in kwargs:
args, kwargs = level_policy(image, kwargs['level'])
return aug(image, *args, **kwargs)
else:
return aug(image, *args, **kwargs)
return wrapper
return decorator
|
[
"def set_new_level(self, level):\r\n\r\n self.property_set(\"level\",\r\n Sample(0, int(level), unit=\"%\"))",
"def addLevel(self, level: 'SoNode') -> \"void\":\n return _coin.SoVRMLLOD_addLevel(self, level)",
"def SetLevel(self, level):\n self.level = level",
"def with_level(self, level):\n def decorator(fn):\n \"\"\"Return a wrapped function in which the log level is set to the desired value.\"\"\"\n @wraps(fn)\n def wrapper(*args, **kwargs):\n \"\"\"The wrapper.\"\"\"\n old_level = self._level\n self.set_level(level)\n try:\n result = fn(*args, **kwargs)\n finally:\n self.set_level(old_level)\n return result\n return wrapper\n return decorator",
"def insertLevel(self, *args):\n return _coin.SoVRMLLOD_insertLevel(self, *args)",
"def opt_level(level: int) -> None:",
"def add_level(request):\n try:\n level = get_level_for(request.user.get_profile())\n except:\n level = None\n return {'level': level}",
"def insertLevel(self, level: 'SoNode', idx: 'int') -> \"void\":\n return _coin.SoVRMLLOD_insertLevel(self, level, idx)",
"def _set_stats_at_level_(self, level):\n self.current_level = level\n self.hp = self._use_growth_formula(self.hp_min, self.hp_max, self.hp_scale)\n self.hp += self.hp_bonus\n self.hp_base = self.hp\n self._set_stats_with_pluses_('hp', self.hp_plus)\n self.base_atk = self._use_growth_formula(self.base_atk_min, self.base_atk_max, self.base_atk_scale)\n self.base_atk += self.base_atk_bonus\n self.base_base_atk = self.base_atk\n self._set_stats_with_pluses_('atk', self.base_atk_plus)\n self.rcv = self._use_growth_formula(self.rcv_min, self.rcv_max, self.rcv_scale)\n self.rcv += self.rcv_bonus\n self.rcv_base = self.rcv\n self._set_stats_with_pluses_('rcv', self.rcv_plus)",
"def update_level(character, add_experience):\n character_stats = character.to_dict()\n start_level = character_stats['level']\n experience = character_stats['experience'] + add_experience\n thresholds = [100]\n i = 1\n while experience >= thresholds[-1]:\n thresholds.append(int(thresholds[i-1] * 1.8))\n i += 1\n level = len(thresholds)\n free_stats = (level - start_level) * 5\n character.edit(experience=experience, level=level, free_stats=free_stats)",
"def apply_logging_args(args):\n global default_level\n default_level = logging.getLevelName(args.log_level.upper())",
"def addLevelName(lvl, levelName):\n _acquireLock()\n try: # unlikely to cause an exception, but you never know...\n _levelNames[lvl] = levelName\n _levelNames[levelName] = lvl\n finally:\n _releaseLock()",
"def player_gained_level(self, player):\n params = (self._b(self._c(\"DING!\", \"yellow\")),\n self._get_unit_title(player),\n self._b(player.level))\n\n announcement_msg = \"%s %s ascends to level %s!\" % params\n\n \"\"\" TODO: check if level changed and show new title \"\"\"\n\n self.announce(announcement_msg)",
"def SchoolDungeonStageExcelAddRecommandLevel(builder, RecommandLevel):\n return AddRecommandLevel(builder, RecommandLevel)",
"def setLevelAttribute(self, level: 'char const *') -> \"void\":\n return _coin.ScXMLLogElt_setLevelAttribute(self, level)",
"async def on_level_rated(self, level: Level) -> Any:\r\n pass",
"def _setupLevelPreferenceHook():\n\n pass",
"def update_var_levels(view, edit, line, amount=+1):\n match = __level__.match(view.substr(line))\n if not match:\n return\n start = match.start(1)\n end = match.end(1)\n level_string = match.group(1)\n new_level = int(level_string, base=10) + amount\n if new_level < 1:\n new_level = 1\n new_level_string = str(new_level)\n level_region = sublime.Region(line.begin() + start, line.begin() + end)\n view.replace(edit, level_region, new_level_string)",
"def advapi32_SaferCreateLevel(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwScopeId\", \"dwLevelId\", \"OpenFlags\", \"pLevelHandle\", \"lpReserved\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns image with an extra channel set to all 1s.
|
def _wrap(image):
ones = torch.ones_like(image[:,:1,:,:])
return torch.cat([image, ones], 1)
|
[
"def make_binary_image(im):",
"def add_alpha_channel(img):\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n _, alpha = cv2.threshold(gray_img, 25, 255, cv2.THRESH_BINARY)\n \n b, g, r = cv2.split(img)\n rgba = [b, g, r, alpha]\n alpha_img = cv2.merge(rgba, 4)\n \n return alpha_img",
"def example_filter( image ):\n new_image = image.copy()\n num_rows, num_cols, num_chans = new_image.shape\n for row in range(num_rows):\n for col in range(num_cols):\n r, g, b = image[row,col]\n new_image[row,col] = [b, b, b] # [0,0,b]\n return new_image",
"def get_quant_image(self):\n new_img = np.zeros_like(self.img)\n for lx in np.unique(self.labels):\n mask = (self.labels == lx)[:, :, None]\n vals = mask*self.img\n col = vals.reshape((-1, 3)).sum(0) / mask.reshape((-1,)).sum(0)\n new_img += (mask*col).astype(np.uint8)\n \n return new_img",
"def green(image):\n return image[:, :, 1]",
"def multichannelmask(self):\n # Each class gets one channel\n # Fill background channel with 1s\n chans = [np.ones(S.SAMPLESHAPE[:2], dtype=np.uint8)]\n top = 6 if \"post\" in self.img_name else S.N_CLASSES\n for i in range(1, top):\n chan = np.zeros(S.SAMPLESHAPE[:2], dtype=np.uint8)\n chans.append(chan)\n\n # For each building, set pixels according to (x,y) coordinates; they end up\n # being a one-hot-encoded vector corresponding to class for each (x,y) location\n for b in self.buildings:\n coords = b.coords()\n # \"un-classified\" buildings will cause an error during evaluation, so don't train\n # for them\n color = b.color()# if b.color() != 5 else 1\n if len(coords) > 0:\n # Set the pixels at coordinates in this class' channel to 1\n cv2.fillPoly(chans[color], np.array([coords]), 1)\n # Zero out the background pixels for the same coordinates\n cv2.fillPoly(chans[0], np.array([coords]), 0)\n img = np.dstack(chans)\n return img",
"def green_channel(image):\n new_image = copy(image)\n \n for (x, y, (r, g, b)) in image:\n green_colour = create_color(0,g,0)\n set_color(new_image, x, y, green_colour)\n \n return new_image",
"def addGrayLayer(image):\n gray = cv2.cvtColor(image, cv2.CV_HLS2GRAY)\n return np.concatenate((image, gray.reshape((image.shape[0], image.shape[1], 1))), axis=2)",
"def extract_green(image):\n # Return green channel, all rows, columns\n return np.copy(image[:, :, 1])",
"def __generate_masked_image(self) -> numpy.ndarray:\n modified_image = self.__image.copy()\n\n for channel_index in range(modified_image.shape[2]):\n channel = modified_image[:, :, channel_index]\n channel[self.__mask == 255] = numpy.nan\n modified_image[:, :, channel_index] = channel\n\n return modified_image",
"def getGrayscaleBits(self) -> retval:\n ...",
"def getEmpty(self, channels = 3):\n\n\n bitmap = cv.CreateImage(self.size(), cv.IPL_DEPTH_8U, channels)\n cv.SetZero(bitmap)\n return bitmap",
"def convert(self):\r\n\t\tself.image.convert_alpha()",
"def color_channel_combined(image):\n s_binary = hls_select(image,(120,255))\n b_binary = LAB_select(image, (155,200))\n l_binary = LUV_select(image,(195,255))\n color_combined_output = np.zeros_like(s_binary)\n color_combined_output[(s_binary ==1) |(b_binary ==1 )|(l_binary ==1 ) ] = 1\n return color_combined_output",
"def apply_noise_single_im(im, eps, grads):\n sign = grads.sign()\n noised_image = im + (eps * sign)\n noised_image = torch.clamp(noised_image, 0, 1)\n return noised_image",
"def fancyConvert(image):",
"def encode_image(image):\n\timage.encoded_channels = []\n\tfor i in range(3):\n\t\timage.encoded_channels.append([])\n\tfor pixel in image:\n\t\tx, y, (r, g, b) = pixel\n\t\timage.encoded_channels[0].append(chr(r))\n\t\timage.encoded_channels[1].append(chr(g))\n\t\timage.encoded_channels[2].append(chr(b))\n\t\tr = 0\n\t\tg = 0\n\t\tb = 0\n\t\tcol = create_color(r, g, b)\n\t\tset_color(image, x, y, col)",
"def convert_1_to_3_channels(image):\n stacked_img = np.stack((image,)*3, axis=-1)\n return stacked_img",
"def add_alpha(image):\n new_image = image.copy()\n mask = PILImage.new(GRAYSCALE_MODE, new_image.size, color=255)\n width, height = new_image.size\n ImageDraw.Draw(mask).rectangle(\n (width / 2.0, height / 2.0, width, height),\n fill=0,\n )\n new_image.putalpha(mask)\n return new_image"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Unwraps an image produced by wrap, filling each channel with the replacement values wherever the wrapper channel is zero.
|
def _unwrap(image, replace):
image, alpha = image[:,:3,...], image[:,3:4,...]
b, c, h, w = image.shape
replace = replace.to(image.dtype)
replace = replace.to(image.device)
if replace.dim() == 2:
replace = replace.view(-1,c,1,1)
else:
replace = replace.view(-1,1,1,1)
return torch.where(alpha == 1, image, replace)
|
[
"def _wrap_image(self, im, border=7):\n # We should throw an exception if the image is smaller than 'border', since at this point\n # this process doesn't make sense.\n if im.bounds.xmax - im.bounds.xmin < border:\n raise RuntimeError(\"Periodic wrapping does not work with images this small!\")\n expanded_bounds = galsim.BoundsI(im.bounds.xmin-border, im.bounds.xmax+border,\n im.bounds.ymin-border, im.bounds.xmax+border)\n # Make new image with those bounds.\n im_new = galsim.ImageD(expanded_bounds)\n # Make the central subarray equal to what we want.\n im_new[im.bounds] = galsim.Image(im)\n # Set the empty bits around the center properly. There are four strips around the edge, and\n # 4 corner squares that need to be filled in. Surely there must be a smarter python-y way\n # of doing this, but I'm not clever enough to figure it out. This is basically the grossest\n # code I've ever written, but it works properly. Anyone who wants is welcome to fix it.\n #\n # Mike suggested a way to optimize it slightly, if we find that speed is an issue later on:\n # We can make just 4 copies, corresponding to\n # * Strip along left side.\n # * Upper left and strip along top can be done together.\n # * Lower left and strip along bottom can be done together.\n # * Upper right, strip along right, and lower right can be done together.\n # The code will also be a bit neater this way.\n #\n ## Strip along left-hand side\n b1 = border-1\n im_new[galsim.BoundsI(expanded_bounds.xmin, im.bounds.xmin-1,\n im.bounds.ymin, im.bounds.ymax)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmax-b1,im.bounds.xmax,\n im.bounds.ymin, im.bounds.ymax)])\n ## Strip along right-hand side\n im_new[galsim.BoundsI(im.bounds.xmax+1, expanded_bounds.xmax,\n im.bounds.ymin, im.bounds.ymax)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmin, im.bounds.xmin+b1,\n im.bounds.ymin, im.bounds.ymax)])\n ## Strip along the bottom\n im_new[galsim.BoundsI(im.bounds.xmin, im.bounds.xmax,\n expanded_bounds.ymin, im.bounds.ymin-1)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmin, im.bounds.xmax,\n im.bounds.ymax-b1, im.bounds.ymax)])\n ## Strip along the top\n im_new[galsim.BoundsI(im.bounds.xmin, im.bounds.xmax,\n im.bounds.ymax+1, expanded_bounds.ymax)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmin, im.bounds.xmax,\n im.bounds.ymin, im.bounds.ymin+b1)])\n ## Lower-left corner\n im_new[galsim.BoundsI(expanded_bounds.xmin, im.bounds.xmin-1,\n expanded_bounds.ymin, im.bounds.ymin-1)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmax-b1, im.bounds.xmax,\n im.bounds.ymax-b1, im.bounds.ymax)])\n ## Upper-right corner\n im_new[galsim.BoundsI(im.bounds.xmax+1, expanded_bounds.xmax,\n im.bounds.ymax+1, expanded_bounds.ymax)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmin, im.bounds.xmin+b1,\n im.bounds.ymin, im.bounds.ymin+b1)])\n ## Upper-left corner\n im_new[galsim.BoundsI(expanded_bounds.xmin, im.bounds.xmin-1,\n im.bounds.ymax+1, expanded_bounds.ymax)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmax-b1, im.bounds.xmax,\n im.bounds.ymin, im.bounds.ymin+b1)])\n ## Lower-right corner\n im_new[galsim.BoundsI(im.bounds.xmax+1, expanded_bounds.xmax,\n expanded_bounds.ymin, im.bounds.ymin-1)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmin, im.bounds.xmin+b1,\n im.bounds.ymax-b1, im.bounds.ymax)])\n return im_new",
"def test_wrap():\n # Start with a fairly simple test where the image is 4 copies of the same data:\n im_orig = galsim.Image([[ 11., 12., 13., 14., 11., 12., 13., 14. ],\n [ 21., 22., 23., 24., 21., 22., 23., 24. ],\n [ 31., 32., 33., 34., 31., 32., 33., 34. ],\n [ 41., 42., 43., 44., 41., 42., 43., 44. ],\n [ 11., 12., 13., 14., 11., 12., 13., 14. ],\n [ 21., 22., 23., 24., 21., 22., 23., 24. ],\n [ 31., 32., 33., 34., 31., 32., 33., 34. ],\n [ 41., 42., 43., 44., 41., 42., 43., 44. ]])\n im = im_orig.copy()\n b = galsim.BoundsI(1,4,1,4)\n im_quad = im_orig[b]\n im_wrap = im.wrap(b)\n np.testing.assert_almost_equal(im_wrap.array, 4.*im_quad.array, 12,\n \"image.wrap() into first quadrant did not match expectation\")\n\n # The same thing should work no matter where the lower left corner is:\n for xmin, ymin in ( (1,5), (5,1), (5,5), (2,3), (4,1) ):\n b = galsim.BoundsI(xmin, xmin+3, ymin, ymin+3)\n im_quad = im_orig[b]\n im = im_orig.copy()\n im_wrap = im.wrap(b)\n np.testing.assert_almost_equal(im_wrap.array, 4.*im_quad.array, 12,\n \"image.wrap(%s) did not match expectation\"%b)\n np.testing.assert_array_equal(im_wrap.array, im[b].array,\n \"image.wrap(%s) did not return the right subimage\")\n im[b].fill(0)\n np.testing.assert_array_equal(im_wrap.array, im[b].array,\n \"image.wrap(%s) did not return a view of the original\")\n\n # Now test where the subimage is not a simple fraction of the original, and all the\n # sizes are different.\n im = galsim.ImageD(17, 23, xmin=0, ymin=0)\n b = galsim.BoundsI(7,9,11,18)\n im_test = galsim.ImageD(b, init_value=0)\n for i in range(17):\n for j in range(23):\n val = np.exp(i/7.3) + (j/12.9)**3 # Something randomly complicated...\n im[i,j] = val\n # Find the location in the sub-image for this point.\n ii = (i-b.xmin) % (b.xmax-b.xmin+1) + b.xmin\n jj = (j-b.ymin) % (b.ymax-b.ymin+1) + b.ymin\n im_test.addValue(ii,jj,val)\n im_wrap = im.wrap(b)\n np.testing.assert_almost_equal(im_wrap.array, im_test.array, 12,\n \"image.wrap(%s) did not match expectation\"%b)\n np.testing.assert_array_equal(im_wrap.array, im[b].array,\n \"image.wrap(%s) did not return the right subimage\")\n np.testing.assert_equal(im_wrap.bounds, b,\n \"image.wrap(%s) does not have the correct bounds\")\n\n # For complex images (in particular k-space images), we often want the image to be implicitly\n # Hermitian, so we only need to keep around half of it.\n M = 38\n N = 25\n K = 8\n L = 5\n im = galsim.ImageCD(2*M+1, 2*N+1, xmin=-M, ymin=-N) # Explicitly Hermitian\n im2 = galsim.ImageCD(2*M+1, N+1, xmin=-M, ymin=0) # Implicitly Hermitian across y axis\n im3 = galsim.ImageCD(M+1, 2*N+1, xmin=0, ymin=-N) # Implicitly Hermitian across x axis\n #print('im = ',im)\n #print('im2 = ',im2)\n #print('im3 = ',im3)\n b = galsim.BoundsI(-K+1,K,-L+1,L)\n b2 = galsim.BoundsI(-K+1,K,0,L)\n b3 = galsim.BoundsI(0,K,-L+1,L)\n im_test = galsim.ImageCD(b, init_value=0)\n for i in range(-M,M+1):\n for j in range(-N,N+1):\n # An arbitrary, complicated Hermitian function.\n val = np.exp((i/(2.3*M))**2 + 1j*(2.8*i-1.3*j)) + ((2 + 3j*j)/(1.9*N))**3\n #val = 2*(i-j)**2 + 3j*(i+j)\n\n im[i,j] = val\n if j >= 0:\n im2[i,j] = val\n if i >= 0:\n im3[i,j] = val\n\n ii = (i-b.xmin) % (b.xmax-b.xmin+1) + b.xmin\n jj = (j-b.ymin) % (b.ymax-b.ymin+1) + b.ymin\n im_test.addValue(ii,jj,val)\n #print(\"im = \",im.array)\n\n # Confirm that the image is Hermitian.\n for i in range(-M,M+1):\n for j in range(-N,N+1):\n assert im(i,j) == im(-i,-j).conjugate()\n\n im_wrap = im.wrap(b)\n #print(\"im_wrap = \",im_wrap.array)\n 
np.testing.assert_almost_equal(im_wrap.array, im_test.array, 12,\n \"image.wrap(%s) did not match expectation\"%b)\n np.testing.assert_array_equal(im_wrap.array, im[b].array,\n \"image.wrap(%s) did not return the right subimage\")\n np.testing.assert_equal(im_wrap.bounds, b,\n \"image.wrap(%s) does not have the correct bounds\")\n\n im2_wrap = im2.wrap(b2, hermitian='y')\n #print('im_test = ',im_test[b2].array)\n #print('im2_wrap = ',im2_wrap.array)\n #print('diff = ',im2_wrap.array-im_test[b2].array)\n np.testing.assert_almost_equal(im2_wrap.array, im_test[b2].array, 12,\n \"image.wrap(%s) did not match expectation\"%b)\n np.testing.assert_array_equal(im2_wrap.array, im2[b2].array,\n \"image.wrap(%s) did not return the right subimage\")\n np.testing.assert_equal(im2_wrap.bounds, b2,\n \"image.wrap(%s) does not have the correct bounds\")\n\n im3_wrap = im3.wrap(b3, hermitian='x')\n #print('im_test = ',im_test[b3].array)\n #print('im3_wrap = ',im3_wrap.array)\n #print('diff = ',im3_wrap.array-im_test[b3].array)\n np.testing.assert_almost_equal(im3_wrap.array, im_test[b3].array, 12,\n \"image.wrap(%s) did not match expectation\"%b)\n np.testing.assert_array_equal(im3_wrap.array, im3[b3].array,\n \"image.wrap(%s) did not return the right subimage\")\n np.testing.assert_equal(im3_wrap.bounds, b3,\n \"image.wrap(%s) does not have the correct bounds\")\n\n b = galsim.BoundsI(-K+1,K,-L+1,L)\n b2 = galsim.BoundsI(-K+1,K,0,L)\n b3 = galsim.BoundsI(0,K,-L+1,L)\n assert_raises(TypeError, im.wrap, bounds=None)\n assert_raises(ValueError, im3.wrap, b, hermitian='x')\n assert_raises(ValueError, im3.wrap, b2, hermitian='x')\n assert_raises(ValueError, im.wrap, b3, hermitian='x')\n assert_raises(ValueError, im2.wrap, b, hermitian='y')\n assert_raises(ValueError, im2.wrap, b3, hermitian='y')\n assert_raises(ValueError, im.wrap, b2, hermitian='y')\n assert_raises(ValueError, im.wrap, b, hermitian='invalid')\n assert_raises(ValueError, im2.wrap, b2, hermitian='invalid')\n assert_raises(ValueError, im3.wrap, b3, hermitian='invalid')",
"def _wrap(image):\n ones = torch.ones_like(image[:,:1,:,:])\n return torch.cat([image, ones], 1)",
"def wipealldata(self):\n\n\t\t\"\"\" Goes through all the pixels and makes the LSB 0. It runs through 2 for loops, and just does the changes accordingly using the same methods for encrypting and decrypting\"\"\"\n\t\timgarr = self.img.size\n\t\tgrysc = 0\n\t\tif(type(self.pic[0,0]) is int):\n\t\t\tgrysc = 1\n\t\tfor x in range(0, imgarr[0]):\n\t\t\tfor y in range(0,imgarr[1]):\n\t\t\t\tif(grysc == 1):\n\t\t\t\t\tnewrgb = []\n\t\t\t\t\tdig = []\n\t\t\t\t\tdig.append(self.pic[x,y])\n\t\t\t\t\tnewrgb.append(self.pic[x,y])\n\t\t\t\telse:\n\t\t\t\t\tnewrgb = list(self.pic[x, y])\n\t\t\t\t\tdig = self.pic[x, y]\n\t\t\t\tfor z in range(0, len(newrgb)):\n\t\t\t\t\tif((dig[z] % 2) == 1):\n\t\t\t\t\t\tnewrgb[z] -= 1\n\t\t\t\tif(grysc == 1):\n\t\t\t\t\tself.pic[x,y] = newrgb[0]\n\t\t\t\t\tdel dig\n\t\t\t\t\tdel newrgb\n\t\t\t\telse:\n\t\t\t\t\tself.pic[x,y] = tuple(newrgb)\n\t\t\t\t\tdel dig\n\t\t\t\t\tdel newrgb\n\t\tself.img.save(self.imgname)",
"def reconstructAll(img,h,step) :\n \n allPatch=getAllPatch(img,h,step)\n\n while len(allPatch[\"noisyPatch\"])>0 :\n \n patchTarget=list(allPatch[\"noisyPatch\"].items())[0]\n _,newPatch = approximePatch(patchTarget,allPatch[\"goodPatch\"])\n img = newPatchInImage(img,newPatch)\n del allPatch[\"noisyPatch\"][newPatch[0]]\n return img",
"def __generate_masked_image(self) -> numpy.ndarray:\n modified_image = self.__image.copy()\n\n for channel_index in range(modified_image.shape[2]):\n channel = modified_image[:, :, channel_index]\n channel[self.__mask == 255] = numpy.nan\n modified_image[:, :, channel_index] = channel\n\n return modified_image",
"def invert(self):\n if self._pixels is None:\n self._pixels = [[3]*TILESIZE for _ in range(TILESIZE)]\n else:\n self._pixels = [ [ (3-val) for val in row] for row in self._pixels ]",
"def replace_fast(self, img, dst_clr):\n img[535:750, :290, :] = dst_clr #h(y) w(x) c\n img[575:705, 900:, :] = dst_clr\n return img",
"def replace(self, img, dst_clr):\n for i in range(80, 340): #x1 x2\n for j in range(500, 800): #y1 y2\n img[j][i] = dst_clr\n return img",
"def remove_channel(src: MyImage, red: bool = False, green: bool = False, blue: bool = False) -> MyImage:\r\n\r\n x, y = src.size\r\n\r\n copy = MyImage((x, y), src.pointer)\r\n\r\n for i in range(x):\r\n for j in range(y): # iterating over every pixel, and setting the bool values to zero\r\n\r\n pix = src.get(j, i)\r\n r, g, b = pix\r\n\r\n if red:\r\n r = 0\r\n\r\n if blue:\r\n b = 0\r\n\r\n if green:\r\n g = 0\r\n if not green and not blue and not red:\r\n r = 0\r\n pix = (r, g, b)\r\n copy.set(j, i, pix)\r\n\r\n return copy",
"def zero_fill_2d(dst):\n for i in range(dst.shape[0]):\n for j in range(dst.shape[1]):\n dst[i, j] = 0",
"def shrinkwrap(self):\n \n before = np.sum(self.support)\n \n if self.shrinkwrap_sigma > 0.0:\n im = ndimage.filters.gaussian_filter(self.image.real, \n self.shrinkwrap_sigma,\n mode='constant')\n if self.shrinkwrap_sigma > 1.5:\n self.shrinkwrap_sigma *= 0.99\n else:\n im = self.image.real\n \n t = self.shrinkwrap_threshold * np.max(im)\n self.support = (im > t) * self.support\n \n if self.inflate > 0:\n self.support = ndimage.morphology.binary_dilation(self.support, \n iterations=self.inflate)\n \n \n if self.fill:\n self.support = ndimage.binary_fill_holes(self.support)\n \n after = np.sum(self.support)\n print \"shrinkwrap'd support %d --> %d voxels\" % (before, after)\n \n return",
"def fill_holes(image):\n rec = binary_fill_holes(image)\n return rec",
"def nifti_capable(wrapped):\n @functools.wraps(wrapped)\n def wrapper(data, *args, **kwargs):\n if isinstance(data, nib.Nifti1Image):\n return nib.Nifti1Image(wrapper(np.copy(np.asanyarray(data.dataobj)), *args, **kwargs), data.affine)\n return wrapped(data, *args, **kwargs)\n\n return wrapper",
"def fill_holes_per_blob(self, labeled_mask):\n image_cleaned = np.zeros_like(labeled_mask)\n for i in range(1, labeled_mask.max() + 1):\n mask = np.where(labeled_mask == i, 1, 0)\n mask = ndi.binary_fill_holes(mask)\n image_cleaned = image_cleaned + mask * i\n return image_cleaned",
"def nested_encrypt(): # Closure\n nonlocal x, y\n for jpg in png_change(new_image.getdata(), message):\n\n new_image.putpixel((x, y), jpg)\n if x == width - 1:\n x = 0\n y += 1\n else:\n x += 1",
"def imstep(width, height): \n img = np.zeros( (height, width, 1), dtype=np.float32 )\n img[:, -width//2:] = 1\n return vipy.image.Image(array=img)",
"def test_Image_inplace_divide():\n for i in range(ntypes):\n decimal = 4 if types[i] == np.complex64 else 12\n # First try using the dictionary-type Image init\n image1 = galsim.Image((2 * (ref_array + 1)**2).astype(types[i]))\n image2 = galsim.Image((ref_array + 1).astype(types[i]))\n image1 /= image2\n np.testing.assert_almost_equal((2 * (ref_array + 1)).astype(types[i]), image1.array,\n decimal=decimal,\n err_msg=\"Inplace divide in Image class (dictionary call) does\"\n +\" not match reference for dtype = \"+str(types[i]))\n\n # Check a calculation where we should expect some rounding errors.\n image3 = galsim.Image((2 * (ref_array + 1)**2).astype(types[i]))\n image3 /= (image2 / 17) * 17\n np.testing.assert_allclose(image3.array, image1.array)\n\n # Then try using the eval command to mimic use via ImageD, ImageF etc.\n image_init_func = eval(\"galsim.Image\"+tchar[i])\n slice_array = (2*(large_array+1)**2).astype(types[i])[::3,::2]\n image1 = image_init_func(slice_array)\n image2 = image_init_func((ref_array + 1).astype(types[i]))\n image1 /= image2\n np.testing.assert_almost_equal((2 * (ref_array + 1)).astype(types[i]), image1.array,\n decimal=decimal,\n err_msg=\"Inplace divide in Image class does not match reference for dtype = \"\n +str(types[i]))\n\n # Test image.invertSelf()\n # Intentionally make some elements zero, so we test that 1/0 -> 0.\n image1 = galsim.Image((ref_array // 11 - 3).astype(types[i]))\n image2 = image1.copy()\n mask1 = image1.array == 0\n mask2 = image1.array != 0\n image2.invertSelf()\n np.testing.assert_array_equal(image2.array[mask1], 0,\n err_msg=\"invertSelf did not do 1/0 -> 0.\")\n np.testing.assert_array_equal(image2.array[mask2],\n (1./image1.array[mask2]).astype(types[i]),\n err_msg=\"invertSelf gave wrong answer for non-zero elements\")\n\n for j in range(i): # Only divide simpler types into this one.\n decimal = 4 if (types[i] == np.complex64 or types[j] == np.complex64) else 12\n image2_init_func = eval(\"galsim.Image\"+tchar[j])\n slice_array = (2*(large_array+1)**2).astype(types[i])[::3,::2]\n image1 = image_init_func(slice_array)\n image2 = image2_init_func((ref_array+1).astype(types[j]))\n image1 /= image2\n np.testing.assert_almost_equal((2 * (ref_array+1)).astype(types[i]), image1.array,\n decimal=decimal,\n err_msg=\"Inplace divide in Image class does not match reference for dtypes = \"\n +str(types[i])+\" and \"+str(types[j]))\n\n with assert_raises(ValueError):\n image1 /= image1.subImage(galsim.BoundsI(0,4,0,4))",
"def pull_out_L_channel(img_lab):\n img_l = img_lab[:, :, 0]\n return img_l"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a constant to all values below the threshold. The constant should be between -128 and 128.
|
def solarize_add(image, constant, threshold=128):
added_image = image.to(torch.int64) + constant
added_image = torch.clamp(added_image, 0, 255)
return torch.where(image >= threshold, image, added_image)
|
[
"def thresholding(self, thval=130):\n self.thval = thval\n self.temp_img[self.temp_img < thval] = thval",
"def setWhiteThreshold(self, value) -> None:\n ...",
"def assign_thresholds(self):\n self.thresholds=np.hstack([2.0,np.flip(np.linspace(0.,1.,10000))])",
"def _apply_signed_threshold(value, min_thr=None, max_thr=None):\n\tsat_val = value\n\t\n\tif min_thr is not None:\n\t\tif sat_val < 0:\n\t\t\tsat_val = max(sat_val, -min_thr)\n\t\telse:\n\t\t\tsat_val = min(sat_val, min_thr)\n\tif max_thr is not None:\n\t\tif sat_val < 0:\n\t\t\tsat_val = max(sat_val, -max_thr)\n\t\telse:\n\t\t\tsat_val = min(sat_val, max_thr)\n\t\t\t\n\treturn sat_val",
"def _threshold_scalar(self, bound, threshold, val):\n if bound == \"lb\":\n if val > threshold:\n return val\n else:\n return np.nan\n elif bound == \"ub\":\n if val < threshold:\n return val\n else:\n return np.nan\n else:\n raise RuntimeError(\"scalar_bounds dictionary \"\n + \" formatted incorrectly. See\"\n + \" the default for reference\")",
"def brighten(val, minval):\n return minval + (255 - minval) * val // 255",
"def setThreshold(self, epsilon):\n self.epsilon = epsilon",
"def add_constant(self):\n self.constant = Constant(value=self.oseries.mean(), name='constant')\n self.parameters = self.get_init_parameters()\n self.nparam += self.constant.nparam",
"def filter_low_values(values, threshold=1.e-11):\n return np.array([v if v > threshold else 0 for v in values])",
"def hard_thresh(signal, threshold):\n res = signal*(np.abs(signal)>=threshold)\n return res",
"def threshold_gte(self, threshold_gte):\n\n self._threshold_gte = threshold_gte",
"def adaptiveThreshold(\n src, maxValue, adaptiveMethod, thresholdType, blockSize, C, dst=...\n) -> dst:\n ...",
"def threshold(self, value: Union[float, Tuple[float, float, float, float]]): # noqa\n\n def limit(voltage: float):\n \"\"\"Applies hard cap to voltage values.\"\"\"\n return min(2.047, max(-1.024, voltage))\n\n # Broadcast single values into a 4-tuple\n avalue = np.asarray(value, dtype=float)\n if avalue.ndim == 0:\n avalue = np.resize(avalue, 4)\n\n # Check for length of tuple\n if avalue.size != 4:\n raise ValueError(\"Only arrays of size 4 is allowed.\")\n\n # Convert voltages into DAC values\n value_dac = tuple(\n TimestampTDC2._threshold_volt2dac(limit(float(v))) for v in avalue\n )\n\n # Set threshold voltages\n # Result from tuple comprehension is Tuple[Any, ...], which yields type mismatch\n # Alternative to 'ignore' is to cast type:\n # cast(Tuple[int, int, int, int], value_dac)\n self._threshold_dacs = value_dac # type: ignore\n return",
"def lighten(self, threshold, replace=255):\n it = np.nditer(self.array, flags=['multi_index'], op_flags=['writeonly'])\n while not it.finished:\n if it[0] >= threshold:\n it[0] = replace\n it.iternext()",
"def HardThresholding(data, thresh):\r\n thresh_data = np.copy(data)\r\n thresh_data[thresh_data < thresh] = 0.\r\n return thresh_data",
"def setSaturatedThreshold(self, lower, upper) -> None:\n ...",
"def _soft_threshold(x, threshold, positive=False):\n if positive:\n u = np.clip(x, 0, None)\n else:\n u = np.abs(x)\n u = u - threshold\n u[u < 0] = 0\n return u * np.sign(x)",
"def adjust_pixel(x):\n if x <= 200:\n x = max(0, x - 32)\n return min(x, 128)\n else:\n return 255",
"def fix_constants(self, fuzz=1e-5, prec=1):\n @np.vectorize\n def is_const(x):\n return abs(x - round(x, prec)) < fuzz\n\n def set_const(M, Mconst):\n M_mask = is_const(M)\n Mconst[M_mask] = True\n Mconst[~M_mask] = False\n\n set_const(self.A, self.Aconst)\n set_const(self.B, self.Bconst)\n if self.C.size != 0:\n set_const(self.C, self.Cconst)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Implements autocontrast from PIL using torch ops.
|
def auto_contrast(image, cutoff, grayscale=True):
w, h = image.shape[-2:]
if grayscale:
reference = VF.rgb_to_grayscale(image)
else:
reference = image
hist = uint8_histc(reference)
hist = hist.cumsum(-1)
hist = hist / hist[...,-1:]
if cutoff:
lo = (hist <= cutoff).sum(-1)
hi = 256.0 - (hist >= 1 - cutoff).sum(-1)
else:
lo = (hist == 0).sum(-1)
hi = 256.0 - (hist == 1).sum(-1)
lo = lo[:,:,None,None]
hi = hi[:,:,None,None]
scale = 255.0 / (hi - lo)
offset = - lo * scale
scale = scale.expand(-1,-1,w,h)
offset = offset.expand(-1,-1,w,h)
scaled = image * scale + offset
scaled = torch.clamp(scaled, 0.0, 255.0)
return image.masked_scatter(hi > lo, scaled)
|
[
"def adjust_contrast(image):\n\n # 0.5 <= alpha <= 2.0\n # These values found empirically\n alpha = 0.5 + 1.5 * random.random()\n image = cv2.convertScaleAbs(image, alpha=alpha, beta=0)\n\n return image",
"def predict_single():\n path = 'outputs/gray/img-8-epoch-29.jpg'\n img = Image.open(path)\n img = img.resize((224,224))\n img_original = np.array(img)\n\n gray = rgb2gray(img_original)\n x = TF.to_tensor(gray).float()\n x.unsqueeze_(0)\n model = ColorizationUpsampling()\n model.load_state_dict(torch.load('checkpoints/model-epoch-22-losses-0.002910.pth',\n map_location=torch.device('cpu')))\n\n output = model(x)\n\n output = output.detach()\n color_image = torch.cat((x[0], output[0]), 0).numpy()\n color_image = color_image.transpose((1, 2, 0)) # rescale for matplotlib\n color_image[:, :, 0:1] = color_image[:, :, 0:1] * 100\n color_image[:, :, 1:3] = color_image[:, :, 1:3] * 255 - 128\n color_image = lab2rgb(color_image.astype(np.float16))\n\n color_image_bgr = color_image.astype(np.float32)\n color_image_bgr = cv2.cvtColor(color_image_bgr, cv2.COLOR_RGB2BGR)\n color_image_bgr = cv2.resize(color_image_bgr, (380, 240))\n\n normalized_array = (color_image_bgr - np.min(color_image_bgr)) / (\n np.max(color_image_bgr) - np.min(color_image_bgr)) # this set the range from 0 till 1\n color_image_bgr = (normalized_array * 255).astype(np.uint8)\n gray = cv2.resize(gray, (380, 240))\n gray = np.stack((gray,) * 3, axis=-1)\n\n gray = (gray - np.min(gray)) / (\n np.max(gray) - np.min(gray)) # this set the range from 0 till 1\n gray = (gray * 255).astype(np.uint8)\n vis = np.concatenate((gray, color_image_bgr), axis=1)\n\n frame_normed = np.array(vis, np.uint8)\n\n cv2.imwrite(path[:-4]+\"out.jpg\", frame_normed)\n cv2.imshow(\"out\", frame_normed)\n cv2.waitKey(0)\n cv2.destroyAllWindows()",
"def contrast_jitter(var, images):\n alpha = 1.0 + np.random.uniform(-var, var)\n\n img_gray = grayscale(images)\n #img_gray[:] = torch.mean(img_gray, dim=(1, 2, 3), keepdim=True)\n img_gray[:] = np.mean(img_gray, axis=(1, 2, 3),dtype=np.float32, keepdims=True)\n images = blend(images, img_gray, alpha)\n return images",
"def contrast(cfg):\n # Set up environment.\n du.init_distributed_training(cfg)\n if cfg.RNG_SEED != -1:\n random.seed(cfg.RNG_SEED)\n np.random.seed(cfg.RNG_SEED)\n torch.manual_seed(cfg.RNG_SEED)\n torch.cuda.manual_seed(cfg.RNG_SEED)\n torch.cuda.manual_seed_all(cfg.RNG_SEED)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n else:\n torch.backends.cudnn.benchmark = True\n\n # Setup logging format.\n timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')\n logging.setup_logging(os.path.join(cfg.LOG_DIR, f\"log-{timestamp}.txt\"))\n\n # Print config.\n logger.info(\"Contrastive Task with config:\")\n logger.info(pprint.pformat(cfg))\n\n # Audio-visual model for pretraining.\n model = build_model(cfg)\n optimizer = optim.construct_optimizer(model, cfg)\n\n # Create the audio-visual pretrain loader.\n pretrain_loader = loader.construct_loader(cfg, 'pretrain')\n\n num_batches_per_epoch = len(pretrain_loader)\n # Priority : MAX_EPOCH > NUM_STEPS\n # Priority : WARMUP_STEPS > WARMUP_EPOCHS > WARMUP_PROPORTION\n if cfg.SOLVER.MAX_EPOCH != -1:\n num_optimizer_epochs = cfg.SOLVER.MAX_EPOCH\n num_optimizer_steps = (\n num_optimizer_epochs * num_batches_per_epoch\n )\n if cfg.SOLVER.WARMUP_STEPS != -1:\n num_warmup_steps = cfg.SOLVER.WARMUP_STEPS\n num_warmup_epochs = num_warmup_steps / num_batches_per_epoch\n elif cfg.SOLVER.WARMUP_EPOCHS != -1:\n num_warmup_epochs = cfg.SOLVER.WARMUP_EPOCHS\n num_warmup_steps = (\n num_warmup_epochs * num_batches_per_epoch\n )\n else:\n num_warmup_steps = (\n num_optimizer_steps * cfg.SOLVER.WARMUP_PROPORTION\n )\n num_warmup_epochs = num_warmup_steps / num_batches_per_epoch\n num_epochs = cfg.SOLVER.MAX_EPOCH\n num_steps = num_epochs * num_batches_per_epoch\n else:\n num_optimizer_steps = cfg.SOLVER.NUM_STEPS\n num_optimizer_epochs = num_optimizer_steps / num_batches_per_epoch\n if cfg.SOLVER.WARMUP_STEPS != -1:\n num_warmup_steps = cfg.SOLVER.WARMUP_STEPS\n num_warmup_epochs = num_warmup_steps / num_batches_per_epoch\n elif cfg.SOLVER.WARMUP_EPOCHS != -1:\n num_warmup_epochs = cfg.SOLVER.WARMUP_EPOCHS\n num_warmup_steps = (\n num_warmup_epochs * num_batches_per_epoch\n )\n else:\n num_warmup_steps = (\n num_optimizer_steps * cfg.SOLVER.WARMUP_PROPORTION\n )\n num_warmup_epochs = num_warmup_steps / num_batches_per_epoch\n num_steps = cfg.SOLVER.NUM_STEPS\n num_epochs = math.ceil(num_steps / num_batches_per_epoch)\n\n start_epoch = 0\n global_step = 0\n\n if cfg.PRETRAIN.PREEMPTIBLE:\n pretrain_checkpoint_file_path = os.path.join(\n cfg.SAVE_DIR,\n \"epoch_latest.pyth\",\n )\n else:\n pretrain_checkpoint_file_path = cfg.PRETRAIN.CHECKPOINT_FILE_PATH\n\n if os.path.isfile(pretrain_checkpoint_file_path) and 'epoch' in pretrain_checkpoint_file_path:\n logger.info(\n \"=> loading checkpoint '{}'\".format(\n pretrain_checkpoint_file_path\n )\n )\n # Load the checkpoint on CPU to avoid GPU mem spike.\n checkpoint = torch.load(\n pretrain_checkpoint_file_path, map_location='cpu'\n )\n cu.load_checkpoint(\n model,\n checkpoint['state_dict'],\n cfg.NUM_GPUS > 1,\n )\n start_epoch = checkpoint['epoch']\n optimizer.load_state_dict(checkpoint['optimizer'])\n global_step = checkpoint['epoch'] * len(pretrain_loader)\n logger.info(\n \"=> loaded checkpoint '{}'\".format(\n pretrain_checkpoint_file_path,\n )\n )\n\n writer = None\n if du.is_master_proc():\n writer = SummaryWriter(cfg.LOG_DIR)\n\n # Create meters\n pretrain_meter = ContrastMeter(\n writer,\n len(pretrain_loader),\n num_epochs,\n num_steps,\n cfg,\n )\n\n # Perform the 
pretraining loop.\n logger.info(\"Start epoch: {}\".format(start_epoch+1))\n\n for cur_epoch in range(start_epoch, num_epochs):\n # Shuffle the dataset.\n loader.shuffle_dataset(pretrain_loader, cur_epoch)\n # Pretrain for one epoch.\n global_step = contrast_epoch(\n pretrain_loader,\n model,\n optimizer,\n pretrain_meter,\n cur_epoch,\n global_step,\n num_steps,\n num_optimizer_steps,\n num_warmup_steps,\n cfg,\n )\n\n sd = \\\n model.module.state_dict() if cfg.NUM_GPUS > 1 else \\\n model.state_dict()\n\n ckpt = {\n 'epoch': cur_epoch + 1,\n 'state_dict': sd,\n 'optimizer': optimizer.state_dict(),\n }\n\n if cfg.PRETRAIN.PREEMPTIBLE and du.get_rank() == 0:\n cu.save_checkpoint(\n ckpt, filename=os.path.join(cfg.SAVE_DIR, \"epoch_latest.pyth\")\n )\n\n if (cur_epoch + 1) % cfg.PRETRAIN.SAVE_EVERY_EPOCH == 0 and du.get_rank() == 0:\n cu.save_checkpoint(\n ckpt,\n filename=os.path.join(cfg.SAVE_DIR, f\"epoch{cur_epoch+1}.pyth\")\n )\n\n if global_step == num_steps:\n break",
"def adjust_contrast(input_image_path,\n output_image_path,\n factor=1.7):\n image = Image.open(input_image_path)\n enhancer_object = ImageEnhance.Contrast(image)\n out = enhancer_object.enhance(factor)\n out.save(output_image_path)",
"def increase_contrast(img, percent):\r\n return np.copy(img).astype(float)*(1.0+percent/100.0)\r\n pass",
"def adjustContrast(self):\n if self.image_label.pixmap() != None:\n self.contrast_adjusted = True",
"def applyNormalisation(image):\n #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n #image[:,:,3] = clahe.apply(image[:,:,3])\n return image / 255.",
"def adjust_contrast(img, contrast_factor):\n check_type(img)\n assert img.shape[-1] == 3, \"Image should have 3 channels, RGB\"\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n mean = int(gray.mean() + 0.5)\n degenerate = np.ones_like(img) * mean\n return _blend(degenerate, img, contrast_factor)",
"def contrast_tag(self):\r\n con=self.readinfo('Contrast')\r\n process=self.processed\r\n \r\n if process:\r\n trast=None\r\n else:\r\n if con==None:\r\n trast=None\r\n elif 'GADOVIST' in con:\r\n trast=1\r\n else:\r\n trast=0\r\n self.contrast=trast\r\n return",
"def inc_contrast(self,*args ,mask = None, plot = True):\n\n array, band_names = self._call_channels(*args)\n if type(mask) == np.ndarray:\n mask = mask\n elif type(self._mask) == np.ndarray:\n mask = self._mask\n\n img_contrast = inc_contrast(array, mask = mask, plot=plot)#imd.inc_contrast(self._img[:,:,bands], mask = mask, plot=plot)\n return img_contrast",
"def convert(self):\r\n\t\tself.image.convert_alpha()",
"def predict_own(image: Any) -> Any:\n model = load_own()\n original_dims = (image.shape[1], image.shape[0])\n L_orig = cv.split(cv.cvtColor(image, cv.COLOR_RGB2LAB))[0]\n input = prepare_image(image)\n prediction = predict_image(model, input)\n prediction = (255 * prediction).astype(\"uint8\")\n pred_resized = cv.resize(prediction, original_dims)\n _, a_pred, b_pred = cv.split(cv.cvtColor(pred_resized, cv.COLOR_RGB2LAB))\n final_lab = cv.merge((L_orig, a_pred, b_pred))\n final_rgb = cv.cvtColor(final_lab, cv.COLOR_LAB2RGB)\n return final_rgb",
"def fancyConvert(image):",
"def main():\n original_img = image.Image('pres_casey.gif')\n red_image = red_filter(original_img)\n win = image.ImageWin(original_img.getWidth(), original_img.getHeight())\n red_image.draw(win)\n\n grayscale_img = grayscale(original_img)\n grayscale_img.draw(win)\n\n cycle_colors_img = cycle_colors(original_img)\n cycle_colors_img.draw(win)\n\n negative_img = negative(original_img)\n negative_img.draw(win)\n\n brightness_img = brightness(original_img, 90)\n brightness_img.draw(win)\n\n increase_contrast_img = increase_contrast(original_img)\n increase_contrast_img.draw(win)\n\n vertical_flip_image = vertical_flip(original_img)\n vertical_flip_image.draw(win)\n\n posterize_image = posterize(original_img)\n posterize_image.draw(win)\n\n scroll_image = scroll(original_img, 10)\n scroll_image.draw(win)\n\n horizontal_mirror_image = horizontal_mirror(original_img)\n horizontal_mirror_image.draw(win)\n\n obamafy_image = obamafy(original_img)\n obamafy_image.draw(win)",
"def applyImageProcessing(self):\n if self.contrast_adjusted == True or self.brightness_adjusted == True:\n contrast = self.contrast_spinbox.value()\n brightness = self.brightness_spinbox.value()\n self.cv_image = cv2.convertScaleAbs(\n self.cv_image, self.processed_cv_image, contrast, brightness)\n if self.image_smoothing_checked == True:\n kernel = np.ones((5, 5), np.float32) / 25\n self.cv_image = cv2.filter2D(self.cv_image, -1, kernel)\n if self.edge_detection_checked == True:\n self.cv_image = cv2.Canny(self.cv_image, 100, 200)\n self.convertCVToQImage(self.cv_image)\n\n # Repaint the updated image on the label # ? Why this need update?\n self.image_label.repaint()",
"def infer_image(cfg, name, model, content_img, style_img, logger, output_dir, ch, cw, save_orig=False, alpha=1.0, mask_img=None, style_interp_weights=[]):\n if mask_img:\n # 1 content img, N style img, 1 mask img\n content_img, style_img = content_img.to(cfg.DEVICE), [each.to(cfg.DEVICE) for each in style_img]\n res_img = method_factory[cfg.MODEL.NAME](\n logger=logger,\n model=model, \n content_img=content_img, \n style_img=style_img, \n alpha=alpha,\n mask_img=mask_img,\n ch=ch,\n cw=cw\n )\n # save images \n if save_orig:\n save_image(content_img, os.path.join(output_dir, '{}_content.jpg'.format(name)), nrow=1)\n for i, each_style in enumerate(style_img):\n save_image(each_style, os.path.join(output_dir, '{}_style_{}.jpg'.format(name, i)), nrow=1)\n save_image(mask_img, os.path.join(output_dir, '{}_mask.jpg'.format(name)), nrow=1)\n if torch.is_tensor(res_img):\n save_image(res_img, os.path.join(output_dir, '{}_generated.jpg'.format(name)), nrow=1)\n else:\n res_img.save(os.path.join(output_dir, '{}_generated.jpg'.format(name)))\n elif style_interp_weights:\n content_img, style_img = content_img.to(cfg.DEVICE), [each.to(cfg.DEVICE) for each in style_img]\n res_img = method_factory[cfg.MODEL.NAME](\n logger=logger,\n model=model,\n content_img=content_img,\n style_img=style_img,\n alpha=alpha,\n style_interp_weights=style_interp_weights,\n ch=ch,\n cw=cw\n )\n # save images\n if save_orig:\n save_image(content_img, os.path.join(output_dir, '{}_content.jpg'.format(name)), nrow=1)\n for i, each_style in enumerate(style_img):\n save_image(each_style, os.path.join(output_dir, '{}_style_{}.jpg'.format(name, i)), nrow=1)\n if torch.is_tensor(res_img):\n save_image(res_img, os.path.join(output_dir, '{}_generated.jpg'.format(name)), nrow=1)\n else:\n res_img.save(os.path.join(output_dir, '{}_generated.jpg'.format(name)))\n else:\n content_img, style_img = content_img.to(cfg.DEVICE), style_img.to(cfg.DEVICE)\n res_img = method_factory[cfg.MODEL.NAME](\n logger=logger,\n model=model, \n content_img=content_img, \n style_img=style_img, \n alpha=alpha, \n ch=ch, \n cw=cw\n )\n # save images\n if save_orig:\n save_image(content_img, os.path.join(output_dir, '{}_content.jpg'.format(name)), nrow=1)\n save_image(style_img, os.path.join(output_dir, '{}_style.jpg'.format(name)), nrow=1)\n if torch.is_tensor(res_img):\n save_image(res_img, os.path.join(output_dir, '{}_generated.jpg'.format(name)), nrow=1)\n else:\n res_img.save(os.path.join(output_dir, '{}_generated.jpg'.format(name)))\n \n return res_img",
"def rescaled_image():",
"def transform(image, clicked_idx, min_max):\n image.undraw()\n\n #for switch rgb to work properly the function needs to iterate through the pixels\n if clicked_idx == 1:\n image = switch_rgb_channels(image)\n\n #iterates through all pixels going col by col from left to right\n for i in range(image.getWidth()):\n for j in range(image.getHeight()):\n\n rgb = image.getPixel(i,j)\n\n #invert colors\n if clicked_idx == 0:\n\n rgb = invert_pixel_color(rgb)\n\n #for switch rgb to work properly the function needs to iterate through the pixels\n\n #contrast change\n elif clicked_idx == 2:\n for g in range(3):\n rgb[g] = normalize(rgb[g], min_max[g][0] + 25, min_max[g][1] - 25 )\n\n #turn list to color object, set pixel\n rgb = color_rgb(rgb[0], rgb[1], rgb[2])\n image.setPixel(i, j, rgb)\n\n return image"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Count the number of missing values in a vector
|
from pandas import isna  # assumed import: the isna used below is most likely pandas.isna

def count_missing(vec):
    # Flag missing entries (None/NaN), then count them by summing the boolean mask.
    null_vec = isna(vec)
    return sum(null_vec)
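# Hedged usage sketch (added for illustration, not part of the original record):
# assumes the isna above is pandas.isna, so both None and NaN count as missing.
if __name__ == "__main__":
    sample = [1.0, None, float("nan"), 4.0]
    print(count_missing(sample))  # expected output: 2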
|
[
"def null_count_alt(df):\n x = [test_df[col].isna().sum() for col in test_df.columns]\n y = 0\n for _ in x:\n y += _\n return y",
"def nonzero_count(my_list):\n counter = 0\n for value in my_list:\n if value != 0:\n counter += 1\n return counter",
"def n_non_zero(self) -> int:\n return len(self.non_zero_idx_pairs)",
"def num_empty(state):\n return sum([row.count(0) for row in state])",
"def _count_values_gt_zero(distribution):\n as_count = (\n methodcaller(\"N\")\n if isinstance(distribution, ConditionalFreqDist)\n else lambda count: count\n )\n # We explicitly check that values are > 0 to guard against negative counts.\n return sum(\n 1 for dist_or_count in distribution.values() if as_count(dist_or_count) > 0\n )",
"def num_empty(self) -> int:\n return np.count_nonzero(self.state == EMPTY)",
"def count_value(arr, axis=None, id_fcn = lambda x: ~np.isnan(x), **kwargs):\n return np.count_nonzero(id_fcn(arr),axis=axis,**kwargs)",
"def _missing_count(album):\n return (album.albumtotal or 0) - len(album.items())",
"def numNonZeroInRow(self, i):\n return len(self.A[i])",
"def count_unassigned():\n unassigned_terms = 0\n for k in Term.values.keys():\n if Term.values[k] is None: unassigned_terms += 1\n return unassigned_terms",
"def get_num_zeros(self):\n return self.get_num_values(0)",
"def getNumNaNs( self ):\n return self.numNaNs",
"def count_values(dic):\n values = dic.values()\n check = []\n count = 0\n for i in values:\n if i not in check:\n count += 1\n check.append(i)\n return count",
"def number_of_negatives(L):\n count = 0\n for l in L:\n if l < 0:\n count += 1\n \n return count",
"def countFloatEntries(col):\n floats = col[~np.isnan(col)]\n return len(floats)",
"def n_missing_abstract(data):\n n_missing = 0\n if data.abstract is None:\n return None, None\n if data.labels is None:\n n_missing_included = None\n else:\n n_missing_included = 0\n\n for i in range(len(data.abstract)):\n if len(data.abstract[i]) == 0:\n n_missing += 1\n if data.labels is not None and data.labels[i] == 1:\n n_missing_included += 1\n\n return n_missing, n_missing_included",
"def nanlen(array,axis):\n\n #find all Nan's\n temp = np.isnan(array)\n temp2 = temp.sum(axis=axis)\n\n #want to find opposite, so subtract from \"total\"\n total = np.shape(array)[axis]\n return total - temp2",
"def getNan(array):\n len = array.shape[1]\n res = np.zeros((array.shape[1], 1))\n for i in range(len):\n for j in range(array.shape[0]):\n if isNaN(array[j][i]):\n res[i][0] += 1\n\n return res",
"def find_missing_number(array):\n actual_sum = (len(array) + 1)*(len(array) + 2) / 2\n sum = 0\n for element in array:\n sum += element\n return actual_sum - sum"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Finds the starting or ending index of targetDate if it exists in the loaded data.
|
def find_date_index(self, targetDate: datetime.date, starting: bool = True) -> int:
    # Normalize a full datetime down to a plain date before comparing.
    if type(targetDate) == datetime:
        targetDate = targetDate.date()

    # Scan forward for the first match, or backward for the last match.
    if starting:
        iterator = list(enumerate(self.data))
    else:
        iterator = reversed(list(enumerate(self.data)))

    for index, data in iterator:
        if data['date_utc'].date() == targetDate:
            return index
    return -1
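# Hedged usage sketch (added for illustration, not part of the original record):
# self.data is assumed to be a list of dicts keyed by 'date_utc' datetimes, so a
# minimal stand-in object is enough to exercise the method above.
import datetime
from types import SimpleNamespace

holder = SimpleNamespace(data=[
    {'date_utc': datetime.datetime(2021, 1, 4, 12, 0)},
    {'date_utc': datetime.datetime(2021, 1, 5, 12, 0)},
    {'date_utc': datetime.datetime(2021, 1, 5, 18, 0)},
])
first = find_date_index(holder, datetime.date(2021, 1, 5))                 # -> 1
last = find_date_index(holder, datetime.date(2021, 1, 5), starting=False)  # -> 2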
|
[
"def _get_index_for_date(self, the_date):\n date_ordinal = the_date.toordinal()\n index = 2 * (date_ordinal - self.start_date_ordinal)\n return (int(index))",
"def get_index(start_date, end_date):\n delta_days = (end_date - start_date).days + 1\n index = [\n (start_date + timedelta(day)).strftime(JSON_DATE_FORMAT)\n for day in xrange(delta_days)\n ]\n return index",
"def find_start_end(data: np.ndarray):\n n_pages = data.shape[0]\n n_days = data.shape[1]\n start_idx = np.full(n_pages, -1, dtype=np.int32)\n end_idx = np.full(n_pages, -1, dtype=np.int32)\n for page in range(n_pages):\n # scan from start to the end\n for day in range(n_days):\n if not np.isnan(data[page, day]) and data[page, day] > 0:\n start_idx[page] = day\n break\n # reverse scan, from end to start\n for day in range(n_days - 1, -1, -1):\n if not np.isnan(data[page, day]) and data[page, day] > 0:\n end_idx[page] = day\n break\n # todo start_idx= [0 2 0 ... 0 0 0], end_idx= [804 804 804 ... 804 804 804]\n # 返回的是每个网页浏览量非0的 起止日期\n # print(f\"start_idx=start_idxz{start_idx}, end_idx={end_idx}\")\n return start_idx, end_idx",
"def get_end_index(self, endDate: datetime.date) -> int:\n if endDate:\n endDateIndex = self.find_date_index(endDate, starting=False)\n if endDateIndex == -1:\n raise IndexError(\"Date not found.\")\n elif endDateIndex < 1:\n raise IndexError(\"You need at least one data period.\")\n elif endDateIndex <= self.startDateIndex:\n raise IndexError(\"Ending date index cannot be less than or equal to start date index.\")\n else:\n return endDateIndex\n else:\n return len(self.data) - 1",
"def get_time_index(target_time, time_records, nearest=False):\n record_length = len(time_records)\n pos = np.searchsorted(time_records, target_time, side='right')\n\n if nearest and 0 < pos < record_length:\n diff = abs(time_records[pos] - target_time)\n otherdiff = abs(time_records[pos - 1] - target_time)\n if diff > otherdiff:\n pos -= 1\n\n return min(pos, record_length - 1)",
"def __dateindex(self, mjd):\n idx = (np.abs(self.lookahead_mjds - mjd)).argmin()\n return idx",
"def _index_date(date, dates):\n if isinstance(date, string_types):\n date = date_parser(date)\n try:\n if hasattr(dates, 'indexMap'): # 0.7.x\n return dates.indexMap[date]\n else:\n date = dates.get_loc(date)\n try: # pandas 0.8.0 returns a boolean array\n len(date)\n return np.where(date)[0].item()\n except TypeError: # expected behavior\n return date\n except KeyError as err:\n freq = _infer_freq(dates)\n if freq is None:\n #TODO: try to intelligently roll forward onto a date in the\n # index. Waiting to drop pandas 0.7.x support so this is\n # cleaner to do.\n raise ValueError(\"There is no frequency for these dates and \"\n \"date %s is not in dates index. Try giving a \"\n \"date that is in the dates index or use \"\n \"an integer\" % date)\n\n # we can start prediction at the end of endog\n if _idx_from_dates(dates[-1], date, freq) == 1:\n return len(dates)\n\n raise ValueError(\"date %s not in date index. Try giving a \"\n \"date that is in the dates index or use an integer\"\n % date)",
"def get_start_index(self, startDate: datetime.date) -> int:\n if startDate:\n startDateIndex = self.find_date_index(startDate)\n if startDateIndex == -1:\n raise IndexError(\"Date not found.\")\n else:\n return startDateIndex\n else:\n return 0",
"def get_calendardate_index(start: pd.datetime, end: pd.datetime):\n calendardate_index = []\n m_d_list = [[3,31],[6,30],[9, 30],[12, 31]]\n month_of_first_filing = start.month\n for i, year in enumerate(range(start.year, end.year + 1)):\n if i == 0:\n index_of_first_filing_in_m_d_list = [3,6,9,12].index(month_of_first_filing)\n for month_day in m_d_list[index_of_first_filing_in_m_d_list:]:\n calendardate_index.append(datetime(year=year, month=month_day[0], day=month_day[1]))\n continue\n for month_day in m_d_list:\n calendardate_index.append(datetime(year=year, month=month_day[0], day=month_day[1]))\n\n # Need to drop dates after end\n for j, date in enumerate(calendardate_index):\n if date > end:\n del calendardate_index[j]\n\n return calendardate_index",
"def find_nearest(data, target, align='start', max_time_diff=None):\n\n assert(align in ['start','end'])\n\n if data is None or data.shape[0] == 0:\n return None\n\n if isinstance(target, pd.Timestamp):\n target = target.to_pydatetime()\n\n if align == 'start':\n diff = data.index.to_pydatetime() - target\n else:\n diff = pd.to_datetime(data['end']) - target\n\n diff = np.abs(diff)\n min_diff = diff.min()\n \n if max_time_diff is not None and min_diff > max_time_diff:\n return None\n else:\n return diff.argmin()",
"def find_date_ndx(self, date):\n\n d = date.toordinal()\n pos = np.nonzero(self.time == d)\n if not np.all(np.isnan(pos)):\n return int(pos[0])\n else:\n return None",
"def match_start_date(self, start, end, match):\n pass",
"def find_date_idx(date, data):\n if type(date) == int:\n d_time = dt.datetime(date, 1, 1)\n elif type(date) == tuple:\n d_time = dt.datetime(date[0], date[1], date[2])\n else:\n raise ValueError('Date wrong format, please check input')\n times = data.variables['time']\n return nC.date2index(d_time, times, select='nearest')",
"def match_indexes(self, dts):\n return np.where(self.matches(dts))[0]",
"def first_and_last_position(self, arr, target):\n start = 0\n end = len(arr) - 1\n res = []\n while start <= end:\n mid = (start + end) // 2\n if arr[mid] == target:\n return [self.findLeft(arr, 0, mid, target), self.findRight(arr, mid, len(arr) - 1, target)]\n else:\n if target < arr[mid]:\n end = mid - 1\n else:\n start = mid + 1\n return [-1, -1]",
"def _get_index(self, source, target):\r\n return [source.index(u) for u in target]",
"def index(self, x, start = 0, end=None):",
"def _idx_from_dates(d1, d2, freq):\n from pandas import DatetimeIndex\n return len(DatetimeIndex(start=d1, end=d2,\n freq = _freq_to_pandas[freq])) - 1",
"def offset_range(index_key, start_time, end_time):\n start, end = 0, COLUMN_HEIGHT\n tbase = index_key.get_tbase()\n if tbase == base_time(start_time): start = offset_time(start_time)\n if tbase == base_time(end_time): end = offset_time(end_time)\n return start, end"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns index of start date based on startDate argument.
|
def get_start_index(self, startDate: datetime.date) -> int:
if startDate:
startDateIndex = self.find_date_index(startDate)
if startDateIndex == -1:
raise IndexError("Date not found.")
else:
return startDateIndex
else:
return 0
|
[
"def _get_index_for_date(self, the_date):\n date_ordinal = the_date.toordinal()\n index = 2 * (date_ordinal - self.start_date_ordinal)\n return (int(index))",
"def find_date_index(self, targetDate: datetime.date, starting: bool = True) -> int:\n if type(targetDate) == datetime:\n targetDate = targetDate.date()\n\n if starting:\n iterator = list(enumerate(self.data))\n else:\n iterator = reversed(list(enumerate(self.data)))\n\n for index, data in iterator:\n if data['date_utc'].date() == targetDate:\n return index\n return -1",
"def get_index(start_date, end_date):\n delta_days = (end_date - start_date).days + 1\n index = [\n (start_date + timedelta(day)).strftime(JSON_DATE_FORMAT)\n for day in xrange(delta_days)\n ]\n return index",
"def get_start_day(self) -> int:\n return self.start_date.day",
"def index(self, date) -> int:\n return self.__dates__.index(date)",
"def _index_date(date, dates):\n if isinstance(date, string_types):\n date = date_parser(date)\n try:\n if hasattr(dates, 'indexMap'): # 0.7.x\n return dates.indexMap[date]\n else:\n date = dates.get_loc(date)\n try: # pandas 0.8.0 returns a boolean array\n len(date)\n return np.where(date)[0].item()\n except TypeError: # expected behavior\n return date\n except KeyError as err:\n freq = _infer_freq(dates)\n if freq is None:\n #TODO: try to intelligently roll forward onto a date in the\n # index. Waiting to drop pandas 0.7.x support so this is\n # cleaner to do.\n raise ValueError(\"There is no frequency for these dates and \"\n \"date %s is not in dates index. Try giving a \"\n \"date that is in the dates index or use \"\n \"an integer\" % date)\n\n # we can start prediction at the end of endog\n if _idx_from_dates(dates[-1], date, freq) == 1:\n return len(dates)\n\n raise ValueError(\"date %s not in date index. Try giving a \"\n \"date that is in the dates index or use an integer\"\n % date)",
"def get_start_date(self):\n return date.fromordinal(self.start_date_ordinal)",
"def __dateindex(self, mjd):\n idx = (np.abs(self.lookahead_mjds - mjd)).argmin()\n return idx",
"def getStartStart(self):\n for ro in self.lstOfRows:\n if (ro.classification==\"start_codon\"):\n return(int(ro.start))",
"def get_start_date(self, kwargs):\n ex_date = pd.to_datetime(kwargs['execution_date'])\n midnight_date = pd.to_datetime(datetime.date(ex_date))\n start_date = midnight_date\n\n return start_date",
"def get_calendardate_index(start: pd.datetime, end: pd.datetime):\n calendardate_index = []\n m_d_list = [[3,31],[6,30],[9, 30],[12, 31]]\n month_of_first_filing = start.month\n for i, year in enumerate(range(start.year, end.year + 1)):\n if i == 0:\n index_of_first_filing_in_m_d_list = [3,6,9,12].index(month_of_first_filing)\n for month_day in m_d_list[index_of_first_filing_in_m_d_list:]:\n calendardate_index.append(datetime(year=year, month=month_day[0], day=month_day[1]))\n continue\n for month_day in m_d_list:\n calendardate_index.append(datetime(year=year, month=month_day[0], day=month_day[1]))\n\n # Need to drop dates after end\n for j, date in enumerate(calendardate_index):\n if date > end:\n del calendardate_index[j]\n\n return calendardate_index",
"def start_index(self):\n return self.stoi.get(self.start_symbol, -1)",
"def find_date_ndx(self, date):\n\n d = date.toordinal()\n pos = np.nonzero(self.time == d)\n if not np.all(np.isnan(pos)):\n return int(pos[0])\n else:\n return None",
"def find_valid_period_start_date(dates, date, period):\n\t\n\tperiod_start_date = date - period\n\tperiod_dates = dates[dates >= period_start_date]\n\tfirst_date = period_dates.iloc[0]\n\treturn first_date",
"def school_year_start_for_date(date = date.today()):\n return SchoolYear.school_year_boundary_for_date(True, date)",
"def current_start_index(self):\n return self._page.start_index()",
"def start_date(self):\n return self._moment.get(\"startDate\")",
"def load_DayStart_indexer(self):\n for ticker in self._mkts_tickers:\n origin_TimeIndex=self._price_df[ticker].index\n shifted_TimeIndex=origin_TimeIndex+pd.offsets.Hour(6)\n shifted_TimeIndex_df=shifted_TimeIndex.to_frame(index=False)\n grouped_by_trading_date=shifted_TimeIndex_df.groupby(shifted_TimeIndex.date).idxmin()\n res_list=[i for i in grouped_by_trading_date.iloc[:,0].values]\n self._DayStart_indexer[ticker]=res_list",
"def start_period(self, date, period):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns index of end date based on endDate argument.
|
def get_end_index(self, endDate: datetime.date) -> int:
if endDate:
endDateIndex = self.find_date_index(endDate, starting=False)
if endDateIndex == -1:
raise IndexError("Date not found.")
elif endDateIndex < 1:
raise IndexError("You need at least one data period.")
elif endDateIndex <= self.startDateIndex:
raise IndexError("Ending date index cannot be less than or equal to start date index.")
else:
return endDateIndex
else:
return len(self.data) - 1
|
[
"def get_index(start_date, end_date):\n delta_days = (end_date - start_date).days + 1\n index = [\n (start_date + timedelta(day)).strftime(JSON_DATE_FORMAT)\n for day in xrange(delta_days)\n ]\n return index",
"def get_end_day(self) -> int:\n return self.end_date.day",
"def get_calendardate_index(start: pd.datetime, end: pd.datetime):\n calendardate_index = []\n m_d_list = [[3,31],[6,30],[9, 30],[12, 31]]\n month_of_first_filing = start.month\n for i, year in enumerate(range(start.year, end.year + 1)):\n if i == 0:\n index_of_first_filing_in_m_d_list = [3,6,9,12].index(month_of_first_filing)\n for month_day in m_d_list[index_of_first_filing_in_m_d_list:]:\n calendardate_index.append(datetime(year=year, month=month_day[0], day=month_day[1]))\n continue\n for month_day in m_d_list:\n calendardate_index.append(datetime(year=year, month=month_day[0], day=month_day[1]))\n\n # Need to drop dates after end\n for j, date in enumerate(calendardate_index):\n if date > end:\n del calendardate_index[j]\n\n return calendardate_index",
"def find_date_index(self, targetDate: datetime.date, starting: bool = True) -> int:\n if type(targetDate) == datetime:\n targetDate = targetDate.date()\n\n if starting:\n iterator = list(enumerate(self.data))\n else:\n iterator = reversed(list(enumerate(self.data)))\n\n for index, data in iterator:\n if data['date_utc'].date() == targetDate:\n return index\n return -1",
"def current_end_index(self):\n return self._page.end_index()",
"def _get_index_for_date(self, the_date):\n date_ordinal = the_date.toordinal()\n index = 2 * (date_ordinal - self.start_date_ordinal)\n return (int(index))",
"def end_index(self):\n return self.stoi.get(self.end_symbol, -1)",
"def endIndex(self):\n return self.__index + len(self.__lines)",
"def GetEndIndex(self) -> \"unsigned long long\":\n return _ITKIOImageBaseBasePython.itkNumericSeriesFileNames_GetEndIndex(self)",
"def end_date(self):\n return self.end.date()",
"def _calc_end_index(self) -> Tuple[int, int]:\n end_row_index = self.start_row_index + self.df.shape[0] + 1\n end_column_index = self.start_column_index + self.df.shape[1]\n return (end_row_index, end_column_index)",
"def getEndPosition(self, i: int) -> int:\n ...",
"def get_end_date(self):\n if (self.end_date != date.max):\n return self.end_date\n else:\n return None",
"def end_date(self) -> Optional[datetime.date]:\n if not self.intervals:\n return None\n return self.end_datetime().date()",
"def get_end_year(self) -> int:\n return self.end_date.year",
"def end_point(self) -> int:\n return self._end_point",
"def load_DayEnd_indexer(self):\n for ticker in self._mkts_tickers:\n origin_TimeIndex=self._price_df[ticker].index\n shifted_TimeIndex=origin_TimeIndex+pd.offsets.Hour(6)\n shifted_TimeIndex_df=shifted_TimeIndex.to_frame(index=False)\n grouped_by_trading_date=shifted_TimeIndex_df.groupby(shifted_TimeIndex.date).idxmax()\n res_list=[i for i in grouped_by_trading_date.iloc[:,0].values]\n self._DayEnd_indexer[ticker]=res_list",
"def get_start_index(self, startDate: datetime.date) -> int:\n if startDate:\n startDateIndex = self.find_date_index(startDate)\n if startDateIndex == -1:\n raise IndexError(\"Date not found.\")\n else:\n return startDateIndex\n else:\n return 0",
"def end_date(self):\r\n return self._end_date"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the current backtester price and period based on index provided.
|
def set_indexed_current_price_and_period(self, index: int):
self.currentPeriod = self.data[index]
self.currentPrice = self.data[index]['open']
|
[
"def exit_backtest(self, index: int = None):\n if index is None:\n index = self.endDateIndex\n\n self.currentPeriod = self.data[index]\n self.currentPrice = self.currentPeriod['close']\n\n if self.inShortPosition:\n self.buy_short(\"Exited short position because backtest ended.\")\n elif self.inLongPosition:\n self.sell_long(\"Exited long position because backtest ended.\")",
"def backtest(self):\n # Cut off most recent history closing price since it is not complete and would effect the calculations\n #kline_array = self.client.get_historical_klines(symbol=pair, interval=Client.KLINE_INTERVAL_5MINUTE, start_str= '1' + ' month ago UTC')\n kline_array = self.client.get_historical_klines(symbol=self.pair, interval=self.asset_interval, start_str= self.time_look_back)\n self.closing_times = [dt.datetime.utcfromtimestamp(x[6]/1000) for x in kline_array][0:-1]\n self.closing_price_array = [float(x[4]) for x in kline_array][0:-1]\n self.checked_prices = []\n\n gain, loss = 0, 0\n for x in range(0, len(self.closing_price_array)-1):\n change = self.closing_price_array[x+1] - self.closing_price_array[x]\n self.checked_prices.append(self.closing_price_array[x+1])\n self.checked_times.append(self.closing_times[x+1])\n if change > 0:\n gain += change\n elif change < 0:\n loss += abs(change)\n\n #Get first rsi simple moving average\n if x == self.rsi_period:\n self.avg_gain = self.simple_moving_average(gain, self.rsi_period)\n self.avg_loss = self.simple_moving_average(loss, self.rsi_period)\n self.rsi = self.rsi_calc(self.avg_gain, self.avg_loss)\n self.rsi_array.append(self.rsi)\n gain, loss = 0, 0\n\n #Use wilders moving average to continue calculating rsi values\n elif x > self.rsi_period:\n self.avg_gain = self.wilders_moving_average(self.rsi_period, gain, self.avg_gain)\n self.avg_loss = self.wilders_moving_average(self.rsi_period, loss, self.avg_loss)\n self.rsi = self.rsi_calc(self.avg_gain, self.avg_loss)\n self.rsi_array.append(self.rsi)\n gain, loss = 0, 0\n\n # When there are enough rsi values begin to calculate stoch_rsi\n if len(self.rsi_array) >= self.stoch_period:\n k_fast = self.k_fast_stoch(self.rsi_array[len(self.rsi_array) - self.stoch_period:])\n self.k_fast_array['k_fast'].append(k_fast)\n self.k_fast_array['time'].append(self.closing_times[x])\n\n # When there are enough %K_FAST values begin to calculate %K_SLOW values = sma of n %K_FAST values\n if len(self.k_fast_array['k_fast']) >= self.k_slow_period:\n k_slow = self.simple_moving_average(self.k_fast_array['k_fast'][-1*self.k_slow_period:], self.k_slow_period)\n self.k_slow_array['k_slow'].append(k_slow)\n self.k_slow_array['time'].append(self.closing_times[x])\n\n # When there are enough %K_SLOW values begin to calculate %D_SLOW values = sma of n %K_SLOW values\n if len(self.k_slow_array['k_slow']) >= self.d_slow_period:\n d_slow = self.simple_moving_average(self.k_slow_array['k_slow'][-1*self.d_slow_period:], self.d_slow_period)\n self.d_slow_array['d_slow'].append(d_slow)\n self.d_slow_array['time'].append(self.closing_times[x])\n\n self.bollinger_bands(self.checked_prices, self.sma_period, self.deviation, self.checked_times[x])\n\n #Once all values start to be calculated we can determine whether to buy or sell until we hit the last\n self.buy_sell(current_time = self.checked_times[x])\n\n self.plot_orders() #Plot orders on graph",
"def value_factor_backtest_monthly(real_yields, price_data, zscore_lookback, z_score_smoothening, n_securities, long_short, sample_start, sample_end):\n #Calculate Z-Score of Real Yields\n ry_zscore = (real_yields - real_yields.rolling(260*zscore_lookback).mean())/real_yields.rolling(260*zscore_lookback).std(ddof=1)\n ry_zscore = ry_zscore.dropna().rolling(z_score_smoothening).mean()\n \n #Merge Z-Score & Total Return Indices Data\n data = ry_zscore.merge(price_data, on='DATE').dropna()\n \n #Convert Data Frequency to Rebalancing Frequency\n #data = data1.asfreq(\"\"+str(rebalancing_period)+\"D\")\n month1 = pd.Series(data.index.month)\n month2 = pd.Series(data.index.month).shift(-1)\n mask = (month1 != month2)\n data = data[mask.values]\n data = data[sample_start:sample_end]\n data.dropna(inplace=True)\n \n #Rename Columns for better understanding\n data.columns = ['IG 1-3 Yield', 'IG 3-5 Yield', 'IG 7-10 Yield', 'US HY Yield',\n 'Crossover Yield', 'EM High Yield', 'UST 1-3 Yield', 'UST Int Yield',\n 'UST 7-10 Yield', 'UST Long Yield', 'IG 1-3 YieldP', 'IG 3-5 YieldP', 'IG 7-10 YieldP', 'US HY YieldP',\n 'Crossover YieldP', 'EM High YieldP', 'UST 1-3 YieldP', 'UST Int YieldP',\n 'UST 7-10 YieldP', 'UST Long YieldP']\n \n #Calculate Backtest Returns based on Long/Short or Long/Only and the no. of securities\n rets = data['Crossover Yield'].copy()*0\n rets.name = 'Value Strategy'\n \n if long_short == 'No':\n for i in range(len(data)-1):\n if n_securities == 1:\n rets[i+1] = data[str(data.iloc[i,:10].idxmax())+'P'][i+1]/data[str(data.iloc[i,:10].idxmax())+'P'][i]-1\n elif n_securities == 2:\n rets[i+1] = (data[str(data.iloc[i,:10].idxmax())+'P'][i+1]/data[str(data.iloc[i,:10].idxmax())+'P'][i]-1 + data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'P'][i+1]/data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'P'][i]-1)/2\n elif n_securities == 3:\n rets[i+1] = (data[str(data.iloc[i,:10].idxmax())+'P'][i+1]/data[str(data.iloc[i,:10].idxmax())+'P'][i]-1 + data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'P'][i+1]/data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'P'][i]-1 + + data[str(data.iloc[i,:10].sort_values()[:8].idxmax())+'P'][i+1]/data[str(data.iloc[i,:10].sort_values()[:8].idxmax())+'P'][i]-1)/3\n \n if long_short == 'Yes':\n for i in range(len(data)-1):\n if n_securities == 1:\n rets[i+1] = data[str(data.iloc[i,:10].idxmax())+'P'][i+1]/data[str(data.iloc[i,:10].idxmax())+'P'][i]-1 - data[str(data.iloc[i,:10].sort_values()[0:].idxmin())+'P'][i+1]/data[str(data.iloc[i,:10].sort_values()[0:].idxmin())+'P'][i]-1\n elif n_securities == 2:\n rets[i+1] = (data[str(data.iloc[i,:10].idxmax())+'P'][i+1]/data[str(data.iloc[i,:10].idxmax())+'P'][i]-1 + data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'P'][i+1]/data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'P'][i]-1)/2 - (data[str(data.iloc[i,:10].sort_values()[0:].idxmin())+'P'][i+1]/data[str(data.iloc[i,:10].sort_values()[0:].idxmin())+'P'][i]-1 + data[str(data.iloc[i,:10].sort_values()[1:].idxmin())+'P'][i+1]/data[str(data.iloc[i,:10].sort_values()[1:].idxmin())+'P'][i]-1)/2\n elif n_securities == 3:\n rets[i+1] = (data[str(data.iloc[i,:10].idxmax())+'P'][i+1]/data[str(data.iloc[i,:10].idxmax())+'P'][i]-1 + data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'P'][i+1]/data[str(data.iloc[i,:10].sort_values()[:9].idxmax())+'P'][i]-1 + data[str(data.iloc[i,:10].sort_values()[:8].idxmax())+'P'][i+1]/data[str(data.iloc[i,:10].sort_values()[:8].idxmax())+'P'][i]-1)/3 - 
(data[str(data.iloc[i,:10].sort_values()[0:].idxmin())+'P'][i+1]/data[str(data.iloc[i,:10].sort_values()[0:].idxmin())+'P'][i]-1 + data[str(data.iloc[i,:10].sort_values()[1:].idxmin())+'P'][i+1]/data[str(data.iloc[i,:10].sort_values()[1:].idxmin())+'P'][i]-1 + data[str(data.iloc[i,:10].sort_values()[2:].idxmin())+'P'][i+1]/data[str(data.iloc[i,:10].sort_values()[2:].idxmin())+'P'][i]-1)/3\n \n \n #Merge Value Factor Returns Data with original data and other individual securities returns \n data = data.merge(rets, on='DATE')\n data.columns = ['IG 1-3 Yield', 'IG 3-5 Yield', 'IG 7-10 Yield', 'US HY Yield',\n 'Crossover Yield', 'EM High Yield', 'UST 1-3 Yield', 'UST Int Yield',\n 'UST 7-10 Yield', 'UST Long Yield', 'IG 1-3', 'IG 3-5', 'IG 7-10', 'US HY',\n 'Crossover', 'EM HY', 'UST 1-3', 'UST Int',\n 'UST 7-10', 'UST Long', 'Value Strategy']\n m_rets = data[['IG 1-3', 'IG 3-5', 'IG 7-10', 'US HY', 'Crossover', 'EM HY', 'UST 1-3', 'UST Int', 'UST 7-10', 'UST Long']].pct_change().dropna().merge(rets, on='DATE')\n \n #Add Equally Weighted Portfolio Returns for comparison as well\n m_rets['EW'] = m_rets[['IG 1-3', 'IG 3-5', 'IG 7-10', 'US HY', 'Crossover', 'EM HY', 'UST 1-3', 'UST Int', 'UST 7-10', 'UST Long']].mean(axis=1)\n \n return m_rets",
"def fi_vix_allocator(duration, high_yield, vix, lookback_period, vix_smooth_period, sample_start, sample_end):\n\n price_data = duration.merge(high_yield, on='Date')\n cols=list(price_data.columns)\n vix = vix\n vix.index.name='Date'\n vix.columns = ['VIX']\n data = vix\n #data = data[:sample_end]\n data.dropna(inplace=True)\n \n data = ((data.rolling(lookback_period*252).max()).merge(data['VIX'], on='Date')).dropna()\n data = (data['VIX_y']/data['VIX_x']).rolling(vix_smooth_period).mean().dropna()\n \n #data1 = pd.DataFrame(data).merge(price_data, on='DATE')\n data = pd.DataFrame(data).merge(price_data, on='Date')\n cols.insert(0, 'VIX')\n data.columns = cols \n \n month1 = pd.Series(vix.index.month)\n month2 = pd.Series(vix.index.month).shift(-1)\n\n \n mask = (month1 != month2)\n mask = mask.astype(int)\n mask.index = vix.index\n mask.name='Rebalance'\n \n indices_ret = price_data.pct_change().dropna()\n strat = price_data.join(data['VIX'], on='Date').join(mask, on='Date').dropna()\n #strat = strat[date(strat.index.year[1], strat.index.month[1], strat.index.day[:30].max()):]\n \n strat['Portfolio'] = strat['VIX']*0+100\n strat['HY Portfolio'] = strat['VIX']*0\n strat['Dur Portfolio'] = strat['VIX']*0\n \n for i in range (1,len(strat['Dur Portfolio'])):\n if strat['Rebalance'][i-1] == 1:\n strat['Dur Portfolio'][i] = strat['Portfolio'][i-1] * strat['VIX'][i] / strat[list(duration.columns)].mean(axis=1)[i-1]\n strat['HY Portfolio'][i] = strat['Portfolio'][i-1] * (1-strat['VIX'][i])/strat[list(high_yield.columns)].mean(axis=1)[i-1]\n strat['Portfolio'][i] = strat['Dur Portfolio'][i] * strat[list(duration.columns)].mean(axis=1)[i] + strat['HY Portfolio'][i] * strat[list(high_yield.columns)].mean(axis=1)[i] \n elif strat['Rebalance'][i-1] == 0:\n strat['Dur Portfolio'][i] = strat['Dur Portfolio'][i-1]\n strat['HY Portfolio'][i] = strat['HY Portfolio'][i-1]\n strat['Portfolio'][i] = strat['Dur Portfolio'][i] * strat[list(duration.columns)].mean(axis=1)[i] + strat['HY Portfolio'][i] * strat[list(high_yield.columns)].mean(axis=1)[i]\n \n return pd.DataFrame(strat['Portfolio'].pct_change()).merge(price_data.pct_change(), on='Date').dropna()[sample_start :sample_end]",
"def run_simple_backtest(symbol, rule_variant=['EWMAC', '2,8'], start_date=dt.date(2017, 1, 1), end_year=dt.date.today().year,\r\n starting_capital=1000000.0, volatility_target=0.25):\r\n auth_token = 'g1CWzGxxg2WxNVbV5n9y'\r\n\r\n # set scalar variables\r\n start_year = start_date.year\r\n instrument_weight = 1.0\r\n instrument_diversifier_multiplier = 1.0\r\n position_inertia = 0.1\r\n\r\n # determine which rule for which to run forecast\r\n forecast_inputs = get_forecast_inputs(symbol, start_year, end_year)\r\n if rule_variant[0] == 'CARRY':\r\n df = calc_carry_forecasts(forecast_inputs)\r\n elif rule_variant[0] == 'EWMAC':\r\n fast = int(rule_variant[1].split(',')[0])\r\n slow = int(rule_variant[1].split(',')[1])\r\n df = calc_ewmac_forecasts(forecast_inputs, fast, slow)\r\n elif rule_variant[0] == 'RSI':\r\n span = int(rule_variant[1])\r\n df = calc_rsi_forecasts(forecast_inputs, span)\r\n df['InstrumentForecast'] = df['ForecastCapped']\r\n\r\n # calculate instrument value vol\r\n futures_info = get_futures_info()\r\n df['BlockSize'] = futures_info.loc[futures_info['Symbol'] == symbol, 'BlockSize'].values[0]\r\n df['BlockValue'] = df['SettleRaw'] * df['BlockSize'] * 0.01\r\n df['InstrumentCurVol'] = df['BlockValue'] * df['PriceVolatilityPct'] * 100\r\n # incorporate historical fx rates\r\n fx_symbol = futures_info.loc[futures_info['Symbol'] == symbol, 'FX'].values[0]\r\n if fx_symbol != 'USD':\r\n fx_symbol = 'CURRFX/' + futures_info.loc[futures_info['Symbol'] == symbol, 'FX'].values[0]\r\n fx_rates = quandl.get(fx_symbol, authtoken=auth_token)\r\n # fx_rates.columns = ['Rate']\r\n df = df.merge(fx_rates, how='left', left_index=True, right_index=True)\r\n df = df.fillna(method='ffill') # fill in missing FX rates\r\n else:\r\n df['Rate'] = 1.0\r\n df['InstrumentValueVol'] = df['InstrumentCurVol'] / df['Rate']\r\n\r\n # begin backtest\r\n # cutoff first 90 trading days of data\r\n df = df.iloc[90:]\r\n # placeholders below\r\n df['PortfolioValue'] = starting_capital\r\n df['DailyCashTargetVol'] = starting_capital * volatility_target / (256 ** 0.5)\r\n df['VolatilityScalar'] = 0.0\r\n df['SubsystemPosition'] = 0.0\r\n df['SystemPosition'] = 0.0\r\n df['StartingPosition'] = 0.0\r\n df['EndingPosition'] = 0.0\r\n df['PositionChange'] = 0.0\r\n df['PositionCost'] = 0.0\r\n df['PositionValue'] = 0.0\r\n df['GainLossCum'] = 0.0\r\n\r\n # iterate through each date in df to retrieve ForecastCapped and InstrumentValueVol\r\n for i in list(range(0, len(df))):\r\n active_date = df.index[i]\r\n\r\n # update capital balance and volatility targets based on gain loss in backtest_df\r\n if i != 0: # skip first day\r\n df.loc[active_date, 'PositionCost'] += df['PositionCost'][prev_date]\r\n df.loc[active_date, 'PositionValue'] += df['PositionValue'][prev_date]\r\n df.loc[active_date, 'GainLossCum'] += df['GainLossCum'][prev_date]\r\n df.loc[active_date, 'PortfolioValue'] = starting_capital + df['GainLossCum'][active_date]\r\n df.loc[active_date, 'DailyCashTargetVol'] = df['PortfolioValue'][active_date] * \\\r\n volatility_target / (256 ** 0.5)\r\n df.loc[active_date, 'VolatilityScalar'] = df['DailyCashTargetVol'][active_date] / df['InstrumentValueVol'][active_date]\r\n df.loc[active_date, 'SubsystemPosition'] = df['InstrumentForecast'][active_date] / 10.0 * df['VolatilityScalar'][active_date]\r\n df.loc[active_date, 'SystemPosition'] = df['SubsystemPosition'][active_date] * instrument_weight * \\\r\n instrument_diversifier_multiplier\r\n if i != 0: # skip first day\r\n df.loc[active_date, 
'StartingPosition'] = df['EndingPosition'].loc[prev_date]\r\n\r\n # determine trade based on starting_position, ending_position and system_position\r\n # define variable to minimize space\r\n starting_position = df['StartingPosition'][active_date]\r\n ending_position = starting_position\r\n system_position = df['SystemPosition'][active_date]\r\n block_size = df['BlockSize'][active_date]\r\n block_price = df['SettleRaw'][active_date]\r\n fx_rate = df['Rate'][active_date]\r\n\r\n if starting_position == 0 or (np.abs((system_position - starting_position) / starting_position) >\r\n position_inertia):\r\n ending_position = np.round(system_position, 0)\r\n df.loc[active_date, 'EndingPosition'] = ending_position\r\n df.loc[active_date, 'PositionChange'] = ending_position - starting_position\r\n if i != 0: # skip first day; else set PositionCost equal to previous value\r\n df.loc[active_date, 'PositionCost'] = df['PositionCost'].loc[prev_date]\r\n df.loc[active_date, 'PositionCost'] += (ending_position - starting_position) * block_size * block_price\r\n # reset PositionCost when contracts roll\r\n if i != 0 and df.loc[active_date, 'Contract'] != df['Contract'][prev_date]:\r\n df.loc[active_date, 'PositionCost'] = ending_position * block_price * block_size - \\\r\n (df['GainLossCum'][prev_date] * fx_rate)\r\n df.loc[active_date, 'PositionValue'] = ending_position * block_size * block_price\r\n df.loc[active_date, 'GainLossCum'] = (df['PositionValue'][active_date] - df['PositionCost'][active_date]) / fx_rate\r\n prev_date = active_date\r\n\r\n # calculate backtest summary statistics\r\n df['PortfolioReturnDayPct'] = df['PortfolioValue'] / df['PortfolioValue'].shift(1) - 1.0\r\n rule = rule_variant[0]\r\n variant = rule_variant[1] if rule == 'EWMAC' else ''\r\n trading_days = df.shape[0]\r\n annualized_return = np.exp(np.nansum(np.log(1 + df['PortfolioReturnDayPct']))) ** (256.0 / trading_days) - 1.0\r\n annualized_volatility = np.std(df['PortfolioReturnDayPct']) * np.sqrt(256.0)\r\n sharpe_ratio = annualized_return / annualized_volatility\r\n blocks_traded = np.sum(np.abs(df['PositionChange']))\r\n avg_position = np.average(np.abs(df['EndingPosition']))\r\n annualized_turnover = blocks_traded / (2 * avg_position) * 256.0 / trading_days\r\n results_df = pd.DataFrame({'Symbol': symbol, 'Rule': rule, 'Variant': variant, 'AnnReturn': annualized_return,\r\n 'AnnVol': annualized_volatility, 'Sharpe': sharpe_ratio, 'Trades': blocks_traded,\r\n 'AvgPosition': avg_position, 'AnnTurnover': annualized_turnover,\r\n 'StartingCapital': starting_capital, 'TargetVolatility': volatility_target,\r\n 'TradingDays': trading_days}, index=[0])\r\n return results_df[['Symbol', 'Rule', 'Variant', 'AnnReturn', 'AnnVol', 'Sharpe', 'Trades', 'AvgPosition',\r\n 'AnnTurnover', 'TradingDays']], df",
"def __call__(self, index=None):\n if index == None:\n print(\"Select PreAmp gain from: \")\n print(self.gains)\n choice = input('> ')\n else:\n choice = index\n sdk.SetPreAmpGain(choice)\n self._gain = {\"index\": choice, \"value\": self.gains[choice]}",
"def calculate_beta(stock, ind, full_stock):\n # path = os.path.join(os.getcwd(), \"Data\")\n\n stock[\"% Return of Company\"] = (\n (full_stock[\"Close Price\"] / full_stock['Close Price'].shift(-1))-1)*100\n\n full_stock[\"% Return of Company\"] = (\n (full_stock[\"Close Price\"] / full_stock['Close Price'].shift(-1))-1)*100\n\n ind[\"Date\"] = pd.to_datetime(ind[\"Date\"])\n stock[\"Date\"] = pd.to_datetime(stock[\"Date\"])\n\n s = full_stock.Date.head(1).values[0]\n e = full_stock.Date.tail(1).values[0]\n ind = ind[ind.Date.between(e, s)]\n ind = ind.iloc[::-1]\n ind.rename(columns={'Close': 'Close Price of SP500',\n '% Return': '% Return of SP500'}, inplace=True)\n ind.drop(['Open', 'High', 'Low', '% YTD'], axis=1, inplace=True)\n ind[\"Date\"] = pd.to_datetime(ind[\"Date\"])\n inddf = ind.copy()\n stock = stock.set_index(\"Date\")\n inddf = inddf.set_index(\"Date\")\n full_stock = full_stock.set_index(\"Date\")\n for date, row in stock.iterrows():\n try:\n stock.loc[date, 'Close Price of SP500'] = inddf.loc[date,\n 'Close Price of SP500']\n stock.loc[date, '% Return of SP500'] = inddf.loc[date,\n '% Return of SP500']\n except:\n pass\n stock = stock.reset_index()\n full_stock = full_stock.reset_index()\n inddf = inddf.reset_index()\n sp500 = inddf[\"% Return of SP500\"]\n company = full_stock[\"% Return of Company\"]\n results = list()\n for i in range(stock.shape[0]):\n # cov = np.cov(company[i:],sp500[i:])[0][1]\n cov = np.ma.cov(np.ma.masked_invalid(\n np.array(company[i:], sp500[i:-1])), rowvar=False)\n var = np.nanvar(sp500[i:-1])\n res = var/cov\n results.append(res)\n stock[\"Beta\"] = results\n return stock",
"def set_next_price(bundle_item):\r\n prev_price = bundle_item",
"def set_priced_current_price_and_period(self, price):\n self.currentPeriod = {\n 'date_utc': None,\n 'open': price,\n 'close': price,\n 'high': price,\n 'low': price\n }\n self.currentPrice = price",
"def set_balance(new_balance):\n\n # get the current balance to calculate the revenue\n current_balance = Trades().get_current_balance()\n if current_balance:\n\n # get the revenue\n revenue = get_revenue(new_balance, current_balance['Balance'])\n\n # insert the new balance\n inserted_record = Trades().set_balance_record(new_balance, revenue)\n \n txt = \"revenue generated: \" + str(revenue)\n print_formatted_text(html_label(txt))\n else:\n # if no balance was found, this means it's the first record.\n revenue = 0.00\n inserted_record = Trades().set_balance_record(new_balance, revenue)\n \n txt = \"record inserted: \" + str(inserted_record)\n print_formatted_text(html_label(txt))",
"def set_value(self, index, new_value):\n assert index >= 0\n self._fill_ahead(index + 1)\n self.__ahead[index] = (self.__ahead[index][0], new_value)",
"def backtest_portfolio(self):\n raise NotImplementedError(\"backtest_portfolio() method needs to be\" \\\n \"implemented!\")",
"def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n\n self.__print('===buy===')\n self.__print(dataframe.tail(1))\n dataframe.loc[\n (\n ),\n 'buy'] = 1\n\n return dataframe",
"def reset_pricing_impv(self, chain_symbol: str) -> None:\n chain = self.portfolio.get_chain(chain_symbol)\n atm_index = chain.atm_index\n\n for index in chain.indexes:\n call = chain.calls[index]\n put = chain.puts[index]\n\n if index >= atm_index:\n otm = call\n else:\n otm = put\n\n call.pricing_impv = otm.mid_impv\n put.pricing_impv = otm.mid_impv\n\n self.update_pricing_impv(chain_symbol)",
"def test_gbce_all_share_index(self):\n\n # Create some mock Stocks and Trades for each.\n stocks = []\n stocks.append(Stock('TEA', 'common', 0, nan, 100))\n stocks.append(Stock('POP', 'common', 8, nan, 100))\n stocks.append(Stock('ALE', 'common', 23, nan, 60))\n stocks.append(Stock('GIN', 'preferred', 8, 0.02, 100))\n stocks.append(Stock('JOE', 'common', 13, nan, 250))\n self.assertEqual(len(stocks), 5)\n\n # Add some Trades.\n trades = [\n [(1, 10, 95, datetime.datetime.now()), # TEA\n (-1, 20, 90, datetime.datetime.now()),\n (1, 45, 120, datetime.datetime.now())],\n [(1, 90, 95, datetime.datetime.now()), # POP\n (1, 65, 90, datetime.datetime.now()),\n (-1, 200, 100, datetime.datetime.now())],\n [(1, 35, 50, datetime.datetime.now()), # ALE\n (-1, 50, 10, datetime.datetime.now())],\n [(1, 100, 1000, datetime.datetime.now() - # GIN\n datetime.timedelta(minutes=14))]]\n\n for stock_index, trade_list in enumerate(trades):\n for trade in trade_list:\n stocks[stock_index]._record_trade(*trade)\n\n # Check that the stock (ticker) price for each stock is correct.\n self.assertEqual(stocks[0].stock_price(),\n (10*95 + 20*90 + 45*120)/(10+20+45))\n self.assertEqual(stocks[1].stock_price(),\n (90*95 + 65*90 + 200*100)/(90+65+200))\n self.assertEqual(stocks[2].stock_price(),\n (35*50 + 50*10)/(35+50))\n self.assertEqual(stocks[3].stock_price(), 1000)\n self.assertEqual(stocks[4].stock_price(),\n stocks[4].par_value) # zero recorded trades\n\n # The geometric mean calculation should be correct.\n # We do this calculation in log space in Stock.gbce_all_share_index(),\n # so check against a calculation without the transformation here.\n stock_price = [(10*95 + 20*90 + 45*120)/(10+20+45),\n (90*95 + 65*90 + 200*100)/(90+65+200),\n (35*50 + 50 * 10)/(35+50),\n 1000, stocks[4].par_value]\n\n self.assertAlmostEqual(gbce_all_share_index(stocks),\n (stock_price[0] * stock_price[1] *\n stock_price[2] * stock_price[3] *\n stock_price[4]) ** (1./5))",
"def backtest_portfolio(self):\r\n\r\n # Set the portfolio object to have the same time period\r\n # as the positions DataFrame\r\n portfolio = pd.DataFrame(index=self.positions.index)\r\n pos_diff = self.positions.diff()\r\n\r\n # Work out the intraday profit of the difference\r\n # in open and closing prices and then determine\r\n # the daily profit by longing if an up day is predicted\r\n # and shorting if a down day is predicted\r\n portfolio['price_diff'] = self.bars['Close'] - self.bars['Open']\r\n portfolio['price_diff'][0:5] = 0.0\r\n portfolio['profit'] = self.positions[self.symbol] * portfolio['price_diff']\r\n\r\n # Generate the equity curve and percentage returns\r\n portfolio['total'] = self.initial_capital + portfolio['profit'].cumsum()\r\n portfolio['returns'] = portfolio['total'].pct_change()\r\n return portfolio",
"def _back_adjust(self, fields, first_day_of_next_contract_index, futures_chain, data_frame):\n # Define an index, which would point to the dates, when the data correction occurs. In most of the cases it\n # would be the list of expiry dates - 1. However, in case if any of these dates would point to a date, which\n # is not available in the data_frame (e.g. Saturday, Sunday etc.), we would adjust it to point to an older date.\n # We also need to ensure, that the shifted expiration date would not point to the previous contract, which may\n # occur if there would be no price in data_frame for a certain contract (asof in this case will look at the\n # previous contract).\n\n last_date_with_valid_price = [data_frame.index.asof(date - pd.Timedelta(days=1)) for date in\n first_day_of_next_contract_index]\n\n expiration_dates = list(\n zip(last_date_with_valid_price, [last_date_with_valid_price[0]] + list(first_day_of_next_contract_index))\n )\n\n expiration_dates = [max(x, y) for x, y in expiration_dates]\n # In case if the first date in the expiration_dates would not be a valid date, but a NaT instead, shift the\n # lists to the first valid date. In case if no valid expiration dates exist (e.g. there is only one price bar)\n # return the original data_frame\n valid_expiration_dates = (index for index, item in enumerate(expiration_dates) if not pd.isnull(item))\n try:\n index_of_first_valid_date = next(valid_expiration_dates)\n except StopIteration:\n return data_frame\n expiration_dates = expiration_dates[index_of_first_valid_date:]\n first_day_of_next_contract_index = first_day_of_next_contract_index[index_of_first_valid_date:]\n futures_chain = futures_chain[index_of_first_valid_date:]\n\n previous_contracts_close_prices = QFSeries(\n [self._get_last_available_price(future.data, time)\n for time, future in zip(first_day_of_next_contract_index, futures_chain)],\n index=expiration_dates\n )\n\n next_contracts_open_prices = QFSeries(\n [self._get_first_available_price(future.data, time)\n for time, future in zip(first_day_of_next_contract_index, futures_chain[1:])],\n index=expiration_dates)\n\n # We compute the delta values as the difference between the Open prices of the new contracts and Close prices\n # of the old contracts\n delta_values = next_contracts_open_prices - previous_contracts_close_prices\n differences = delta_values.reindex(data_frame.index).fillna(0)\n\n differences = differences.iloc[::-1].cumsum(axis=0).iloc[::-1]\n\n # Restrict the adjusted fields to Open, High, Low, Close prices\n fields = [f for f in fields if f in (PriceField.Open, PriceField.High, PriceField.Close, PriceField.Low)]\n\n for field in fields:\n data_frame[field] = data_frame[field].add(differences)\n\n return data_frame",
"def trade(pf,p,q,x,t):\n\n if len(pf) == 1:\n b = float(pf['Balance'])\n else:\n b = float(pf['Balance'].tail(1))\n\n # if t = 1, means buy \n if t > 0 :\n b = b - p*q\n print(f'bought {q} units of {x} at price {p}, remaining balance is {b}')\n else:\n b = b + p*q\n print(f'sold {q} units of {x} at price {p}, remaining balance is {b}')\n \n pf = pf.append({'Date':str(dt.datetime.today().date()),'Balance':b,'Price':p,'Qty':q,'Stock':x},ignore_index=True)\n print('appended to pf')\n return(pf)",
"def simulate_trading(self):\n out_filename = \"multiBacktestResults.csv\"\n resultsDir = settings.OUTPUT_RESULTS_DIR\n out_file = os.path.join(resultsDir, out_filename)\n out = open(out_file, \"w\")\n \n spl = len(self.strat_params_list)\n for i, sp in enumerate(self.strat_params_list):\n print(\"Strategy %s out of %s...\" % (i+1, spl))\n self._generate_trading_instances(sp)\n self._run_backtest()\n stats = self._output_performance()\n pprint.pprint(stats)\n \n tot_ret = float(stats[0][1].replace(\"%\",\"\"))\n cagr = float(stats[1][1].replace(\"%\",\"\"))\n sharpe = float(stats[2][1])\n max_dd = float(stats[3][1].replace(\"%\",\"\"))\n dd_dur = int(stats[4][1])\n \n out.write(\n \"%s,%s,%s,%s,%s,%s,%s\\n\" % (\n sp[\"short_window\"], sp[\"long_window\"],\n tot_ret, cagr, sharpe, max_dd, dd_dur))\n \n out.close()\n \n \n self._run_backtest()\n self._output_performance()\n print(\"Backtest complete.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Auxiliary function to set current period and price to price provided.
|
def set_priced_current_price_and_period(self, price):
self.currentPeriod = {
'date_utc': None,
'open': price,
'close': price,
'high': price,
'low': price
}
self.currentPrice = price
|
[
"def set_indexed_current_price_and_period(self, index: int):\n self.currentPeriod = self.data[index]\n self.currentPrice = self.data[index]['open']",
"def update_price(self, company: Company):\n pass",
"def do_setPrice(self, args):\n weekday = input(\"Enter weekday price: \")\n weekend = input(\"Enter weekend price: \")\n self._setPrice(weekday, weekend)",
"def update_price(origin_price: float, price: float):\n return (get_current_price() / origin_price) * price",
"def _set_base_price(self, price=None, region=\"us\"):\n\n if price is None or price == \"\" or price == self.R_NOT_RELATIVE:\n self._base_price = None\n\n elif price == self.R_RETAIL_PRICE:\n if region == \"uk\":\n self._base_price = self._si.original_price_uk\n else:\n self._base_price = self._si.original_price_us\n elif price == self.R_START_PRICE:\n self._base_price = self._get_price_from_date(self._base_date)\n elif price == self.R_END_PRICE:\n self._base_price = self._get_price_from_date(self._base_date)\n else:\n self.price = syt.int_zero(price)\n\n if self._base_price is None and not (self._report_options[\"Base Price\"] == self.R_NOT_RELATIVE or self._report_options[\"Base Price\"] == None):\n raise ValueError(\"No Price to evaluate from [Base Price = {} | Report Type = {}]\".format(price, self._report_options[\"Base Price\"]))",
"def set_price(edition_date):\n weekday_price = '£1.20'\n weekend_price = '£1.50'\n is_saturday = edition_date.isoweekday() == 6\n price = weekend_price if is_saturday else weekday_price\n set_frame_contents('Price', price)",
"def update_position_price(self):\r\n ticker_cur = self.ticker.prices[self.currency_pair]\r\n \r\n if self.position_type == 'long':\r\n self.cur_price = Decimal(str(ticker_cur['bid']))\r\n else:\r\n self.cur_price = Decimal(str(ticker_cur['ask']))\r\n \r\n self.profit_base = self.calculate_profit_base()\r\n self.profit_perc = self.calculate_profit_perc()",
"def setPrices(self, prices, decimals):\n if not isinstance(prices, list):\n raise TypeError, utils.mapping(_(\"Prices ($1) must be a list: $2\"),\n (str(prices), self.__code))\n for index in range(len(prices)):\n _price_date = prices[index]\n _price_date = self._validate_price_date(_price_date, decimals)\n prices[index] = _price_date\n self.__prices = prices",
"def update_range_price(self):\n self.update_range_price_button.click(self)",
"def _add_price(self):\n\n instrument = self._instrument\n date = self._price_date\n rate = self._price\n market = acm.FParty['internal']\n\n existing_price = None\n prices = acm.FPrice.Select('instrument = {0}'.format(instrument.Name()))\n for price in prices:\n if price.Market() == market and price.Day() == date:\n if not self._recalculate:\n raise ValueError('Rate already exists for this date.')\n else:\n existing_price = price\n break\n\n if existing_price:\n # If self._recalculate is False, an exception would be raised\n # That means we're recalculating.\n price = existing_price\n else:\n price = acm.FPrice()\n price.Instrument(instrument)\n price.Day(date)\n price.Market(market)\n price.Currency(acm.FInstrument['ZAR'])\n\n price.Ask(rate)\n price.Bid(rate)\n price.High(rate)\n price.Low(rate)\n price.Settle(rate)\n price.Last(rate)\n price.Commit()\n\n log('The price was updated in SACPI.')",
"def set_next_price(bundle_item):\r\n prev_price = bundle_item",
"def set_pricing(driver_type, driver_name, pricing):\r\n\r\n PRICING_DATA[driver_type][driver_name] = pricing",
"def update(self, target):\n change = (self.coeff * (target - self.price) +\n self.momentum * self.last_change)\n self.last_change = change\n \n limiter = self.buyer and min or max\n self.price = int(limiter(self.price + change, self.limit))",
"def setPriceDozen(self,price):\n self.priceDozen=float(price)",
"async def test_update_order_current_price_on_price_update(self):\n\n await state.on_pending_order_updated('1:ps-mpa-1', {\n 'id': '1',\n 'symbol': 'EURUSD',\n 'type': 'ORDER_TYPE_BUY_LIMIT',\n 'currentPrice': 9\n })\n await state.on_pending_order_updated('1:ps-mpa-1', {\n 'id': '2',\n 'symbol': 'AUDUSD',\n 'type': 'ORDER_TYPE_SELL_LIMIT',\n 'currentPrice': 9\n })\n await state.on_symbol_specifications_updated('1:ps-mpa-1', [{'symbol': 'EURUSD', 'tickSize': 0.01}], [])\n await state.on_symbol_prices_updated('1:ps-mpa-1', [{\n 'time': datetime.now(),\n 'symbol': 'EURUSD',\n 'profitTickValue': 0.5,\n 'lossTickValue': 0.5,\n 'bid': 10,\n 'ask': 11\n }])\n assert list(map(lambda o: o['currentPrice'], state.orders)) == [11, 9]",
"def _adjust_price(self):\n\n # Go through each topping and add the money amount for topping\n topping_additional_money = 0\n for topping in self._toppings:\n topping_additional_money += topping.getPrice()\n\n self._price = self._base_price + topping_additional_money",
"def set_period(self, end=None, period=None):\n end = end if end is not None else self.end\n period = period if period is not None else self.__period / 60\n self.__period = period * 60\n (self.start, self.end) = self.get_period(end=end)",
"def test_set_price(self):\n\n test_price = 100.0\n test_quantity = 1\n\n # Grab the first part\n p = Part.list(self.api)[0]\n\n # Grab all internal prices for the part\n ip = InternalPrice.list(self.api, part=p.pk)\n\n # Delete any existsing prices\n for price in ip:\n self.assertEqual(type(price), InternalPrice)\n price.delete()\n\n # Ensure that no part has an internal price\n ip = InternalPrice.list(self.api, part=p.pk)\n self.assertEqual(len(ip), 0)\n\n # Set the internal price\n p.setInternalPrice(test_quantity, test_price)\n\n # Ensure that the part has an internal price\n ip = InternalPrice.list(self.api, part=p.pk)\n self.assertEqual(len(ip), 1)\n \n # Grab the internal price\n ip = ip[0]\n\n self.assertEqual(ip.quantity, test_quantity)\n self.assertEqual(ip.part, p.pk)\n ip_price_clean = float(ip.price)\n self.assertEqual(ip_price_clean, test_price)",
"def set_price_changes(self):\n self.market_data['pricechange'] = self.market_data['adj_close'].diff(1)\n self.market_data['percentchange'] = (np.log(self.market_data['adj_close']) - np.log(self.market_data['adj_close'].shift(1))).fillna(0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the stop loss counter (and its initial value) equal to the counter provided.
|
def set_stop_loss_counter(self, counter: int):
self.stopLossCounter = self.initialStopLossCounter = counter
|
[
"def reset_smart_stop_loss(self):\n self.stopLossCounter = self.initialStopLossCounter",
"def stop(self, iterations):\n self.stop_count += iterations",
"def setStopSweep(self,stop):\r\n self.isSIUnit(stop)\r\n self.stopFreqSweep = stop",
"def set_stop_criterion(self,criterion):\n\t\tself._criterion = criterion",
"def stop_sign(self, stop_sign):\n self._stop_sign = stop_sign",
"def setStopCondition(self, val):\r\n self.__scl.acquire()\r\n self.__stopCondition = val\r\n self.__scl.release()",
"def set_previous_stop(self, stop):\n self.previous_stop = stop",
"def _stopping(self):\n \n self.__state = runlevel.STATE_STOPPING",
"def stop(self, stop: SourceLocation):\n if stop is None:\n raise ValueError(\"Invalid value for `stop`, must not be `None`\") # noqa: E501\n\n self._stop = stop",
"def logStop(self, stop):\n\n self.shutoff = stop",
"def set_next_stop(self, stop):\n self.next_stop = stop",
"def stop_sweep(self):\n raise NotImplementedError",
"def _set_scan_stop(self, value):\n ao_ch, _ = self._verify_scan_channels()\n if ao_ch is None: # if _verify_scan_channels() returns nothing that means channel is invalid or not found\n return\n value = self.analog_out(ao_ch, value, verify_only=True)\n self.properties['scan']['stop'] = value\n self._set_scan_step()",
"def should_stop(self, model, val_loss):\n if val_loss < self.best_val_loss:\n self.epochs_since_decrease = 0\n self.best_val_loss = val_loss\n self.best_weights = copy.deepcopy(model.state_dict())\n else:\n self.epochs_since_decrease += 1\n if self.epochs_since_decrease >= self.early_stopping_epochs:\n model.load_state_dict(self.best_weights)\n return True\n\n return False",
"def stop(self):\n self.set_speed(Vector(0., 0.))",
"def update_loss(self, loss):\n self.loss += loss\n self.num_loss_attempts += 1",
"def stop_poisoning(self):\n self.stop = True\n # self.stop_thread = threading.Thread(target=self.restore_network)",
"def stop(self):\n return _wavelet_swig.wvps_ff_sptr_stop(self)",
"def set_stop_wavelength(self,val): #documented\n if self.__is_int_or_float(val) and self.__is_between(val,600,1800):\n if val < self.get_start_wavelength():\n self.__verbose_output( \"error: stop wavelength can not be set to < start wavelength\",1)\n else:\n self.send_message(\"STO %.1f\"%(val)) \n else:\n self.__verbose_output( \"error: set_stop_wavelength() - invalid argument\",1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Resets the smart stop loss and sets it equal to the initial stop loss counter.
|
def reset_smart_stop_loss(self):
self.stopLossCounter = self.initialStopLossCounter
|
[
"def set_stop_loss_counter(self, counter: int):\n self.stopLossCounter = self.initialStopLossCounter = counter",
"def _reset(self):\n self.monitor_op = lambda a, b: (a - b) < -self.epsilon\n self.best_loss = 1e15\n self.cooldown_counter = 0\n self.wait = 0",
"def reset(self):\n self._epochs = 0\n self._iterations = 0\n self._must_stop = False",
"def Reset():\n global stopper\n stopper = False",
"def _stopping(self):\n \n self.__state = runlevel.STATE_STOPPING",
"def reset(self):\n self.losses = []\n self.batch_sizes = []",
"def reset_budget_counters(self):\n self.training_time_spent = 0.0\n self.total_epochs_spent = 0",
"def reset_loss_fn(self):\n\t\tself.loss_fn = self.original_loss_fn",
"def reset_tuning(self):\n return",
"def setStopSweep(self,stop):\r\n self.isSIUnit(stop)\r\n self.stopFreqSweep = stop",
"def set_previous_stop(self, stop):\n self.previous_stop = stop",
"def _reset(self):\n self.start_time = None\n self.backoff_time = None",
"def reset(self) -> None:\n self.time_counted = 0\n self.last_start_time = 0\n self.is_running = False",
"def reset(self):\n\n self.timestep = 0\n self.historyLayer.reset()",
"def reset(self):\n self.numb = self.starter",
"def reset(self):\n self.stop()\n self.program.reset()\n self.can_reset = False",
"def stop_poisoning(self):\n self.stop = True\n # self.stop_thread = threading.Thread(target=self.restore_network)",
"def reset(self, do_resets=None):\n pass",
"def reset_cycle(self):\n self.cycle = None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Ends a backtest by exiting any open position if needed.
|
def exit_backtest(self, index: int = None):
if index is None:
index = self.endDateIndex
self.currentPeriod = self.data[index]
self.currentPrice = self.currentPeriod['close']
if self.inShortPosition:
self.buy_short("Exited short position because backtest ended.")
elif self.inLongPosition:
self.sell_long("Exited long position because backtest ended.")
|
[
"def end_test(self):",
"def back(self):\n self.position -= 1\n if self.position > len(self.document.characters):\n raise CursorTopError",
"def ending(self):\n if self.view_index:\n self.view_index = self.end()",
"def end_while_true(self):\n seen_close = 0\n while ((self.program[self.pc] != '[' or seen_close > 0) and \\\n self.pc >= 0):\n self.pc -= 1\n if (self.program[self.pc] == ']'):\n seen_close += 1\n elif (self.program[self.pc] == '[' and seen_close > 0):\n seen_close -= 1\n\n # because runStep will increment the program counter after\n # this method finishes, it needs to be offset by 1 so the\n # loop test will occur properly\n self.pc -= 1",
"def end_to_end(context):\n run('nosetests -c config/noserc -a \"end_to_end\"')",
"def _endloop(self):\n if not self._band.get():\n self._loop_stack[0:1] = []\n else:\n self._pointer = self._loop_stack[0]",
"def test_end_turn_wrap_around(self):\n self.game.setCurrentPlayerIndex(len(self.game.getPlayers()) - 1)\n self.game.setMoveIndex(5)\n\n self.game.endTurn()\n\n self.assertEqual(self.game.getCurrentPlayerIndex(), 0)\n self.assertEqual(self.game.getMoveIndex(), 6)",
"def position_at_end(self, bblk):\n _core.LLVMPositionBuilderAtEnd(self.ptr, bblk.ptr)",
"def end_step_population(self, pop):\n pass",
"def end_ifeq(self):\n pass",
"def step_back(self):\n if len(self.history) > 0:\n self.round, self.game_pointer, self.round_counter, self.dealer, self.public_cards, self.players = self.history.pop()\n self.stage = Stage(self.round_counter)\n return True\n return False",
"def has_ending(neg_idx, guesses, pattern, extra=0):\n num = len(pattern) + extra\n at_position = neg_idx == num - 1\n return at_position and has_pattern(guesses[-num:], pattern)",
"def end_test(self, line):\n if self._testcase is None:\n raise Exception(\"Invalid current testcase\")\n if self._test is None:\n raise Exception(\"Invalid current test\")\n failed = \"[ FAILED ]\" in line\n\n # windows crash is a failure\n seh = False\n for line in self._output:\n if \"error: SEH exception\" in line:\n seh = True\n break\n outcome = PASSED\n if seh:\n outcome = CRASHED\n self._output = [\"SEH Exception\"] + self._output\n elif failed:\n outcome = FAILED\n\n self._tests[self._test] = (\n outcome,\n self._output[:-1], # cut the [ OK/FAILED ] line\n self._error[:],\n )\n\n if failed:\n self._fail_count += 1\n self.out(\"X\" if seh else \"F\", end=\"\", verbose=0)\n else:\n self._pass_count += 1\n self.out(\".\", end=\"\", verbose=0)\n self._test = None\n self._output = []\n self._error = []",
"def testBoundaryReverseGo(self):\n self.crawler.go_oldest()\n self.assertRaises(IndexError, self.crawler.go_next, 1)\n self.crawler.go_previous()\n self.assertRaises(IndexError, self.crawler.go_next, 2)",
"def EndTest(self):\n self.end_time = self._GetTimeString()\n self._SummaryTestToRecord()\n self._WriteToReport()",
"def endgame(self):\n # TODO Write something for an endgame screen\n pass",
"def backstep(self):\n\n self.timestep -= 1\n self.historyLayer.backstep()",
"def end_of_track(self, track=None):\n pass",
"def at_end(self):\n if self.__peek is not NO_VALUE:\n return False\n try:\n self.__peek=self.__iterator.next()\n except StopIteration as se:\n return True\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Simulates a long hold position if no strategies are provided.
|
def simulate_hold(self, testLength: int, divisor: int, thread=None):
for index in range(self.startDateIndex, self.endDateIndex, divisor):
if thread and not thread.running:
raise RuntimeError("Backtest was canceled.")
self.currentPeriod = self.data[index]
self.currentPrice = self.currentPeriod['open']
if not self.inLongPosition:
self.buy_long("Entered long to simulate a hold.")
thread.signals.activity.emit(thread.get_activity_dictionary(self.currentPeriod, index, testLength))
self.exit_backtest()
|
[
"def func(self):\r\n rand = random.random()\r\n if rand < 0.5:\r\n self.caller.msg(\"You nudge at the lid. It seems stuck.\")\r\n elif 0.5 <= rand < 0.7:\r\n self.caller.msg(\"You move the lid back and forth. It won't budge.\")\r\n else:\r\n self.caller.msg(\"You manage to get a nail under the lid.\")\r\n self.caller.execute_cmd(\"open lid\")",
"def arm_and_takeoff_nogps(aTargetAltitude):\r\n\r\n ##### CONSTANTS #####\r\n DEFAULT_TAKEOFF_THRUST = 0.7\r\n SMOOTH_TAKEOFF_THRUST = 0.6\r\n\r\n print \"Basic pre-arm checks\"\r\n # Don't let the user try to arm until autopilot is ready\r\n # If you need to disable the arming check, just comment it with your own responsibility.\r\n while not vehicle.is_armable:\r\n print \" Waiting for vehicle to initialise...\"\r\n time.sleep(1)\r\n\r\n\r\n print \"Arming motors\"\r\n # Copter should arm in GUIDED_NOGPS mode\r\n vehicle.mode = VehicleMode(\"GUIDED_NOGPS\")\r\n vehicle.armed = True\r\n\r\n while not vehicle.armed:\r\n print \" Waiting for arming...\"\r\n time.sleep(1)\r\n\r\n print \"Taking off!\"\r\n\r\n thrust = DEFAULT_TAKEOFF_THRUST\r\n while True:\r\n current_altitude = vehicle.location.global_relative_frame.alt\r\n print \" Altitude: \", current_altitude\r\n if current_altitude >= aTargetAltitude*0.95: # Trigger just below target alt.\r\n print \"Reached target altitude\"\r\n break\r\n elif current_altitude >= aTargetAltitude*0.6:\r\n thrust = SMOOTH_TAKEOFF_THRUST\r\n set_attitude(thrust = thrust)\r\n time.sleep(0.2)",
"def arm_and_takeoff_nogps(aTargetAltitude):\r\n\r\n ##### CONSTANTS #####\r\n DEFAULT_TAKEOFF_THRUST = 0.7\r\n SMOOTH_TAKEOFF_THRUST = 0.6\r\n\r\n print(\"Arming motors\")\r\n # Copter should arm in GUIDED_NOGPS mode\r\n vehicle.mode = VehicleMode(\"GUIDED_NOGPS\")\r\n vehicle.armed = True\r\n\r\n while not vehicle.armed:\r\n print(\" Waiting for arming...\")\r\n vehicle.armed = True\r\n time.sleep(1)\r\n\r\n print(\"Taking off!\")\r\n\r\n thrust = DEFAULT_TAKEOFF_THRUST\r\n while True:\r\n current_altitude = vehicle.location.global_relative_frame.alt\r\n print(\" Altitude: %f Desired: %f\" %\r\n (current_altitude, aTargetAltitude))\r\n if current_altitude >= aTargetAltitude*0.95: # Trigger just below target alt.\r\n print(\"Reached target altitude\")\r\n break\r\n elif current_altitude >= aTargetAltitude*0.6:\r\n thrust = SMOOTH_TAKEOFF_THRUST\r\n set_attitude(thrust = thrust)\r\n time.sleep(0.2)",
"def _choose_strategy_stay(self, options):\n option = self.get_latest_choice()\n self.memory.append(option)\n return option",
"def autospawnRandom(self, dt):\n if not self.paused:\n choice = random.randint(0, 1)\n if choice:\n self.spawnMob(\"Q\", free=True)\n else:\n self.spawnMob(\"E\", free=True)",
"def test_state_break_larger():\n sim = Sim()\n sys = VanDerPol()\n sys.add_break_greater(\"y\",1.0)\n sim.add_system(sys)\n sim.simulate(20,0.01)\n\n #If correct the simulation should break at time 0.79\n assert sys.res.time[-1] == 0.79",
"def hold(args):\n _change_status(args, HOLD)",
"def placing_ball():\n\tglobal object_held\n\tobject_held = 0",
"def test_player_runs_into_adversary(self):\n\n game_state, _ = self.create_basic_game_state()\n\n player1 = game_state.occupants[\"p1\"]\n adversary1 = game_state.occupants[\"a1\"]\n\n new_location = (8,12)\n\n game_state.update(\"p1\", new_location)\n\n self.assertTrue(player1.is_alive)\n\n game_state.update(\"a1\", new_location)\n\n self.assertTrue(player1.is_alive)",
"def manual_hold(self, actor, reason):\n\n try:\n if (self.cart['cart_status']['manual_hold'] == 0):\n raise CartInvalid(\"Cart may not be held.\")\n c = get_cursor()\n c.execute(\"\"\"\n update cart\n set manual_hold = %s\n where cart_id = %s\"\"\",\n (reason,\n self.cart['cart_id']))\n self.cart['manual_hold'] = reason\n if (reason):\n self.log(\"Manual Hold: \" + reason, actor)\n else:\n self.log(\"Manual Hold Released\", actor)\n return { 'manual_hold': reason, 'cart_logs': self.get_logs() }\n except CartInvalid as e:\n raise CartInvalid(e)\n except Exception as e:\n import traceback\n traceback.print_exc()\n print e.__class__.__name__ + \": \" + str(e)\n raise DbError(\"Internal error\")",
"def arm_and_takeoff(aTargetAltitude):\r\n\r\n print(\"Basic pre-arm checks\")\r\n # Don't let the user try to arm until autopilot is ready\r\n while not vehicle.is_armable:\r\n print(\" Waiting for vehicle to initialise...\")\r\n time.sleep(1)\r\n\r\n \r\n print(\"Arming motors\")\r\n # Copter should arm in GUIDED mode\r\n vehicle.mode = VehicleMode(\"GUIDED\")\r\n vehicle.armed = True\r\n\r\n while not vehicle.armed: \r\n print(\" Waiting for arming...\")\r\n time.sleep(1)\r\n\r\n print(\"Taking off!\")\r\n vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude\r\n\r\n # Wait until the vehicle reaches a safe height before processing the goto (otherwise the command \r\n # after Vehicle.simple_takeoff will execute immediately).\r\n while True:\r\n print(\" Altitude: \", vehicle.location.global_relative_frame.alt) \r\n if vehicle.location.global_relative_frame.alt>=aTargetAltitude*0.95: #Trigger just below target alt.\r\n print(\"Reached target altitude\")\r\n break\r\n time.sleep(1)",
"def test_block_on_lease_never_ready(self, fake_sleep):\n fake_lease = MagicMock()\n fake_lease.state = 'foo'\n\n with self.assertRaises(RuntimeError):\n virtual_machine._block_on_lease(fake_lease)",
"def test_not_refreshing_long_lock_invalidates_lock(self):\n time, thing, locker = self._create_lock_for_user1()\n\n start = time.now()\n\n while time.now() <= start + self.LONG_TIMEOUT:\n self.assertEqual(self.user1, locker.get_holder(thing),\n \"user1 holds the lock\")\n\n locker.refresh_lock(thing, self.user1, False)\n\n time.tick(self.SHORT_TIMEOUT)\n\n self.assertEqual(None, locker.get_holder(thing),\n \"The lock is now not held as the long lock has expired.\")",
"def starter_strategy(self, game_state):\n # First, place basic defenses\n self.build_starter_defences(game_state)\n\n if game_state.turn_number < 3:\n self.build_base_scramblers(game_state)\n else:\n self.build_self_destruct(game_state)\n self.spawn_self_destruct(game_state)\n self.upgrade(game_state)\n\n self.counter_spawn(game_state)\n # self.dumb_offense(game_state)\n self.place_offensive_units(game_state)\n self.place_encryptors(game_state)\n # Now let's analyze the enemy base to see where their defenses are concentrated.\n # If they have many units in the front we can build a line for our EMPs to attack them at long range.\n \"\"\"if self.detect_enemy_unit(game_state, unit_type=None, valid_x=None, valid_y=[14, 15]) > 10:\n self.emp_line_strategy(game_state)\n else:\n # They don't have many units in the front so lets figure out their least defended area and send Pings there.\n\n # Only spawn Ping's every other turn\n # Sending more at once is better since attacks can only hit a single ping at a time\n if game_state.turn_number % 2 == 1:\n # To simplify we will just check sending them from back left and right\n ping_spawn_location_options = [[13, 0], [14, 0]]\n best_location = self.least_damage_spawn_location(game_state, ping_spawn_location_options)\n game_state.attempt_spawn(PING, best_location, 1000)\n\n # Lastly, if we have spare cores, let's build some Encryptors to boost our Pings' health.\n # encryptor_locations = [[13, 2], [14, 2], [13, 3], [14, 3]]\n # game_state.attempt_spawn(ENCRYPTOR, encryptor_locations)\n \"\"\"",
"def reset(self, random_seed=None, vehicle_config=None, pos: np.ndarray = None, heading: float = 0.0):\n if random_seed is not None:\n self.seed(random_seed)\n self.sample_parameters()\n if vehicle_config is not None:\n self.update_config(vehicle_config)\n map = self.engine.current_map\n if pos is None:\n lane = map.road_network.get_lane(self.config[\"spawn_lane_index\"])\n pos = lane.position(self.config[\"spawn_longitude\"], self.config[\"spawn_lateral\"])\n heading = np.rad2deg(lane.heading_at(self.config[\"spawn_longitude\"]))\n self.spawn_place = pos\n heading = -np.deg2rad(heading) - np.pi / 2\n self.set_static(False)\n self.origin.setPos(panda_position(Vec3(*pos, self.HEIGHT / 2 + 1)))\n self.origin.setQuat(LQuaternionf(math.cos(heading / 2), 0, 0, math.sin(heading / 2)))\n self.update_map_info(map)\n self.body.clearForces()\n self.body.setLinearVelocity(Vec3(0, 0, 0))\n self.body.setAngularVelocity(Vec3(0, 0, 0))\n self.system.resetSuspension()\n self._apply_throttle_brake(0.0)\n # np.testing.assert_almost_equal(self.position, pos, decimal=4)\n\n # done info\n self._init_step_info()\n\n # other info\n self.throttle_brake = 0.0\n self.steering = 0\n self.last_current_action = deque([(0.0, 0.0), (0.0, 0.0)], maxlen=2)\n self.last_position = self.spawn_place\n self.last_heading_dir = self.heading\n\n self.update_dist_to_left_right()\n self.takeover = False\n self.energy_consumption = 0\n\n # overtake_stat\n self.front_vehicles = set()\n self.back_vehicles = set()\n\n assert self.navigation",
"def hold_piece(self):\r\n if self.pieces[2]:\r\n self.pieces[0], self.pieces[2] = self.pieces[2], self.pieces[0]\r\n else:\r\n self.pieces[:3] = [self.pieces[1], Piece(), self.pieces[0]]\r\n\r\n self.pieces[0].reset()\r\n self.pieces[2].hold()\r\n self.hold = False",
"def _move_lift(self, cmd):\n # action = self._vector.behavior.set_lift_height(height=cmd.data,\n # duration=0.2, in_parallel=True)\n action = self._vector.behavior.set_lift_height(height=cmd.data,\n duration=0.2)\n # action.wait_for_completed()",
"def set_random_short_ped_task(self):\n self.__spread_done()\n self.__stop_robot()\n d = {}\n d[\"start\"] = self.__set_random_robot_pos()\n\n #Finding valid position on map in small radius\n valid = False\n count = 0\n while not valid:\n x = d[\"start\"][0] + random.uniform(3, math.floor(count/10) + 5)*random.choice([-1, 1])\n y = d[\"start\"][1] + random.uniform(3, math.floor(count/10) + 5)*random.choice([-1, 1])\n valid = self.__is_pos_valid(x, y, self.__map)\n count+=1\n self.__publish_goal(x, y, 0)\n d[\"goal\"] = [x, y, 0]\n\n while not self.__is_new_path_available(d[\"goal\"], d[\"start\"]):\n self.__spread_done()\n self.__stop_robot()\n d = {}\n d[\"start\"] = self.__set_random_robot_pos()\n\n # Finding valid position on map in small radius\n valid = False\n count = 0\n while not valid:\n x = d[\"start\"][0] + random.uniform(3, math.floor(count / 10) + 5) * random.choice([-1, 1])\n y = d[\"start\"][1] + random.uniform(3, math.floor(count / 10) + 5) * random.choice([-1, 1])\n valid = self.__is_pos_valid(x, y, self.__map)\n count += 1\n self.__publish_goal(x, y, 0)\n d[\"goal\"] = [x, y, 0]\n\n\n\n self.__spawn_random_peds_on_path()\n\n d[\"peds\"] = self.__peds\n d[\"path\"] = self.__path\n self.__spread_new_task()\n return d",
"def test_uses_standard_board_if_not_added_lazy(self):\n lp = cs.LazyPlayer()\n lp.move()\n assert lp.position != 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Handles trailing prices based on the current price.
|
def handle_trailing_prices(self):
if self.longTrailingPrice is not None and self.currentPrice > self.longTrailingPrice:
self.longTrailingPrice = self.currentPrice
if self.shortTrailingPrice is not None and self.currentPrice < self.shortTrailingPrice:
self.shortTrailingPrice = self.currentPrice
|
[
"def _adjust_price(self):\n\n # Go through each topping and add the money amount for topping\n topping_additional_money = 0\n for topping in self._toppings:\n topping_additional_money += topping.getPrice()\n\n self._price = self._base_price + topping_additional_money",
"def final_price(self):\n return self.price - self.price * self.discount",
"def inverse_price(self) -> \"Decimal\":\n quantization = Decimal(10) ** -self.pair.quote.precision\n return Decimal(self.price ** Decimal(-1)).quantize(quantization)",
"def update_price(origin_price: float, price: float):\n return (get_current_price() / origin_price) * price",
"def unpad_price(self, price: str, base_id: int, quote_id: int) -> Decimal:\n return Decimal(price) * Decimal(f\"1e{self._digits[base_id] - self._digits[quote_id]}\")",
"def _parse_price_original(self, response, add_xpath=None):\n xpathes = '//*[@id=\"price\"]/.//*[contains(@class, \"a-text-strike\")]' \\\n '/text()'\n\n if add_xpath:\n xpathes += ' |' + add_xpath\n\n price_original = self._is_empty(\n response.xpath(xpathes).extract()\n )\n\n if price_original:\n price_original = self._is_empty(\n re.findall(\n FLOATING_POINT_RGEX,\n price_original\n ), 0.00\n )\n try:\n price_original = float(price_original)\n except ValueError:\n price_original = None\n\n return price_original",
"def proper_price(self, price):\r\n str_price = str(price)\r\n if ',' in str_price:\r\n str_price.replace(',', '.')\r\n if '.' not in str_price:\r\n currency_str_price = f'{str_price}.00'\r\n elif str_price[-2] == '.':\r\n currency_str_price = f'{str_price}0'\r\n else:\r\n currency_str_price = str_price\r\n return currency_str_price",
"def onPriceChange(priceTable, price, priceFix, operation):\n # filter operation\n if operation == 'delete':\n Logme()('Ignoring deleted price updates.', 'DEBUG')\n return\n try:\n market = acm.FParty[price.ptynbr.ptyid]\n # filter running without market param\n if not priceFix.market:\n Logme()('Ignoring price update, no market specified.', 'DEBUG')\n return\n elif priceFix.market.Oid() != market.Oid():\n Logme()('Ignoring price update, market \"%s\" does not match.' %\n (market.Name()), 'DEBUG')\n return\n instrument = acm.FInstrument[price.insaddr.insid]\n currency = acm.FInstrument[price.curr.insid]\n currencyPair = acm.FCurrencyPair.Select01(\n \"currency1='%s' and currency2='%s'\" % (instrument.Oid(),\n currency.Oid()), 'Currency pair not found.')\n if ((currencyPair not in priceFix.currencyPairs) and\n (instrument not in priceFix.instruments)):\n Logme()('Ignoring price update, instrument \"%s\" does not match.' %\n (instrument.Name()), 'DEBUG')\n return\n # Determine a good last price using last, or mid, or bid or ask.\n last = price.last\n if last == 0.0:\n if price.bid == 0.0:\n last = price.ask\n elif price.ask == 0.0:\n last = price.bid\n else:\n last = (price.bid + price.ask) / 2.0 # Both m/b nonzero.\n Logme()('Real-time price: \\n\\t %s' % (formatAelPrice(price)))\n priceFix.fixPrice(priceFix.fixingSource, instrument, currency,\n priceFix.universalCutoffTime, last)\n except:\n traceback.print_exc()\n priceFix.cleanup()\n raise",
"def calculate_prorated_price(self, line):\n start_date = date_helper.convert_to_date(line.subscription_start_date)\n end_date = date_helper.convert_to_date(line.subscription_end_date)\n \n #First case -> same month\n if start_date.month == end_date.month:\n last_day = date_helper.get_last_day_month(end_date)\n\n #Normal case : 1 to end of month\n if start_date.day == 1 :\n if end_date.day == last_day.day:\n return line.price_unit\n #TODO : pay less if cancelled < 1 month ?\n else:\n return line.price_unit\n else:\n #We should never be there\n return line.price_unit\n\n #Second case -> more than 1 month\n else:\n difference = (end_date - start_date).days\n #If its more than 1 month of difference, we modify the price\n if difference > 31:\n pro_rated_days = difference - 31\n pro_rated_price = line.price_unit / 31\n total = line.price_unit + round(pro_rated_price * pro_rated_days)\n return total\n else:\n return line.price_unit\n\n return line.price_unit",
"def add_prices(self):\n for i in range(self.parameters[\"number_of_products\"]):\n self.product_space.nodes[i][\"price\"] = \\\n self.product_space.nodes[i][\"delta\"] / max(\n self.product_space.nodes[i][\"firms\"], 1)",
"def calculate_final_price(self):\n final_price = self.price\n if self.discount.specify_discount_status() == 'Active':\n if self.discount.type == '$':\n final_price = self.price - self.discount.value\n elif self.discount.type == '%':\n if self.discount.value != 0:\n final_price = self.price - ((self.discount.value / 100) * self.price)\n else:\n pass\n return int(final_price)",
"def get_final_price(price, discount_percentage=10):\n return price-( price* discount_percentage / 100)",
"def set_next_price(bundle_item):\r\n prev_price = bundle_item",
"def sellPrice(self):\n return self.initial_btcprice * (1 + FEE + self.strategy)",
"def price_difference(self):\n return self.price - self.lineitem_price",
"def get_adjusted_prices(price):\r\n adj_sell_price = price * (1-slippage)\r\n adj_buy_price = price * (1+slippage)\r\n return adj_sell_price,adj_buy_price",
"def _value_discount(base_price):\n if base_price <= 1000.0:\n return .03\n elif base_price < 3000.0:\n return .05\n elif base_price < 10000.0:\n return .07\n elif base_price < 50000.0:\n return .1\n else:\n return .15",
"def apply_price_rule(price_region_item: PriceRegionItem):\n inventory_item = price_region_item.inventory_item\n price_rule = price_region_item.price_rule\n\n # Calculate and apply new prices for PriceRegionItem and record the\n # SellPriceChange.\n price_change = SellPriceChange(price_region_item)\n for level in range(PriceRegionItem.PRICE_LEVELS):\n # Get the price basis and multiplication factor for the given price\n # level in the PriceRule.\n basis = price_rule.price_basis(level)\n factor = price_rule.price_factor(level)\n\n # Select the base price for the price calculation depending on the\n # PriceBasis used in the price rule.\n base_price = None\n if basis == PriceBasis.REPLACEMENT_COST:\n base_price = inventory_item.replacement_cost\n elif basis == PriceBasis.RRP_EXCL_TAX:\n base_price = price_region_item.rrp_excl_tax\n elif basis == PriceBasis.RRP_INCL_TAX:\n base_price = price_region_item.rrp_incl_tax\n elif basis == PriceBasis.EXISTING_PRICE_0:\n base_price = price_region_item.price_0\n elif basis == PriceBasis.EXISTING_PRICE_1:\n base_price = price_region_item.price_1\n elif basis == PriceBasis.EXISTING_PRICE_2:\n base_price = price_region_item.price_2\n elif basis == PriceBasis.EXISTING_PRICE_3:\n base_price = price_region_item.price_3\n elif basis == PriceBasis.EXISTING_PRICE_4:\n base_price = price_region_item.price_4\n\n # Throw an exception if the base price doesn't exist.\n if base_price is None:\n raise Exception(\n f\"No base price for {inventory_item}, {price_region_item}\")\n\n # Calculate the rounded price.\n price_now = round_price(\n dec(base_price) * dec(factor),\n tax_exempt=(price_region_item.tax_code == TaxCode.EXEMPT))\n\n # Fetch the old price before applying the new price.\n price_was = price_region_item.price(level)\n price_region_item.set_price(level, price_now)\n\n # Record the price difference in the SellPriceChange.\n price_diff = price_now - price_was\n price_change.price_diffs.append(price_diff)\n\n # Return the price change if the price has actually changed, otherwise\n # discard it.\n if price_change.price_differs:\n return price_change\n return None",
"def update_prices(self):\n symbols = [a.get(\"symbol\") for a in self.portfolio]\n prices = self.get_last_prices(symbols)\n for asset in self.portfolio:\n asset[\"last_price\"] = prices.get(asset[\"symbol\"])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the stop loss for a short position.
|
def _get_short_stop_loss(self) -> Union[float, None]:
if self.lossStrategy == TRAILING:
return self.shortTrailingPrice * (1 + self.lossPercentageDecimal)
elif self.lossStrategy == STOP:
return self.sellShortPrice * (1 + self.lossPercentageDecimal)
elif self.lossStrategy is None:
return None
else:
raise ValueError("Invalid type of loss strategy provided.")
|
[
"def get_stop_loss(self) -> Union[float, None]:\n self.handle_trailing_prices()\n if self.inShortPosition:\n self.previousStopLoss = self._get_short_stop_loss()\n return self.previousStopLoss\n elif self.inLongPosition:\n self.previousStopLoss = self._get_long_stop_loss()\n return self.previousStopLoss\n else:\n return None",
"def _get_long_stop_loss(self) -> Union[float, None]:\n if self.lossStrategy == TRAILING:\n return self.longTrailingPrice * (1 - self.lossPercentageDecimal)\n elif self.lossStrategy == STOP:\n return self.buyLongPrice * (1 - self.lossPercentageDecimal)\n elif self.lossStrategy is None:\n return None\n else:\n raise ValueError(\"Invalid type of loss strategy provided.\")",
"def stop(self):\n return _raw_util.raw_divide_ff_sptr_stop(self)",
"def train_loss(self):\n return self._train_loss",
"def DownstreamStepFrameLossUnit(self):\r\n\t\treturn self._get_attribute('downstreamStepFrameLossUnit')",
"def DownstreamStopTestOnHighLoss(self):\r\n\t\treturn self._get_attribute('downstreamStopTestOnHighLoss')",
"def stop(self):\n return _raw_util.raw_pnc_frequency_modulator_fc_sptr_stop(self)",
"def stop(self):\n return _raw_util.raw_regenerate_peak2_sptr_stop(self)",
"def stop(self):\n return _raw_util.raw_regenerate_peak3_sptr_stop(self)",
"def get_stopwn(self):\r\n command = \":scan:stop?\\n\"\r\n self._log_write(command, mode=\"write\")\r\n self.ser.write(command)\r\n answer = self.ser.read(13)\r\n self._log_write(answer, mode=\"read\")\r\n rlvalue = float(answer[:-6])\r\n self.Stat = self.Stat._replace(stopwn=rlvalue)\r\n return rlvalue",
"def reset_smart_stop_loss(self):\n self.stopLossCounter = self.initialStopLossCounter",
"def UpstreamStopTestOnHighLoss(self):\r\n\t\treturn self._get_attribute('upstreamStopTestOnHighLoss')",
"def loss(min_offer, predicted):\n return MAX_GAIN-min_offer if predicted < min_offer else predicted - min_offer",
"def get_last_train_loss(self) -> float:\n loss = self.training_loss\n self.training_loss = 0\n return loss",
"def get_stop_probability(self) -> float:",
"def stop(self):\n return _wavelet_swig.squash_ff_sptr_stop(self)",
"def stop(self):\n return len(self.index) if self.original else self._stop",
"def get_stop_wavelength(self): #documented\n return self.__query_float(\"STO?\")",
"def DownstreamBinaryFrameLossUnit(self):\r\n\t\treturn self._get_attribute('downstreamBinaryFrameLossUnit')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the stop loss for a long position.
|
def _get_long_stop_loss(self) -> Union[float, None]:
if self.lossStrategy == TRAILING:
return self.longTrailingPrice * (1 - self.lossPercentageDecimal)
elif self.lossStrategy == STOP:
return self.buyLongPrice * (1 - self.lossPercentageDecimal)
elif self.lossStrategy is None:
return None
else:
raise ValueError("Invalid type of loss strategy provided.")
|
[
"def get_stop_loss(self) -> Union[float, None]:\n self.handle_trailing_prices()\n if self.inShortPosition:\n self.previousStopLoss = self._get_short_stop_loss()\n return self.previousStopLoss\n elif self.inLongPosition:\n self.previousStopLoss = self._get_long_stop_loss()\n return self.previousStopLoss\n else:\n return None",
"def _get_short_stop_loss(self) -> Union[float, None]:\n if self.lossStrategy == TRAILING:\n return self.shortTrailingPrice * (1 + self.lossPercentageDecimal)\n elif self.lossStrategy == STOP:\n return self.sellShortPrice * (1 + self.lossPercentageDecimal)\n elif self.lossStrategy is None:\n return None\n else:\n raise ValueError(\"Invalid type of loss strategy provided.\")",
"def DownstreamStepFrameLossUnit(self):\r\n\t\treturn self._get_attribute('downstreamStepFrameLossUnit')",
"def stop(self):\n return _raw_util.raw_divide_ff_sptr_stop(self)",
"def get_stopwn(self):\r\n command = \":scan:stop?\\n\"\r\n self._log_write(command, mode=\"write\")\r\n self.ser.write(command)\r\n answer = self.ser.read(13)\r\n self._log_write(answer, mode=\"read\")\r\n rlvalue = float(answer[:-6])\r\n self.Stat = self.Stat._replace(stopwn=rlvalue)\r\n return rlvalue",
"def train_loss(self):\n return self._train_loss",
"def op_execute_position_stop(self, end):\n\n op_positions = copy.deepcopy(self.trader.op_positions)\n for _, positions in op_positions.items():\n for _, pos in positions.items():\n\n start = pos['op_open_time'] + timedelta(seconds=1)\n ohlcv = self.trader.ohlcvs[self.ex][pos['market']][self.trader.config['indicator_tf']][start:end]\n\n if len(ohlcv) > 0:\n\n if pos['stop_loss']:\n stop_loss = ()\n target_low = pos['op_open_price'] * (1 - pos['stop_loss']) # for buy\n target_high = pos['op_open_price'] * (1 + pos['stop_loss']) # for sell\n\n # Check buy stop loss\n if pos['side'] == 'buy' \\\n and ohlcv.low.min() <= target_low:\n bar = ohlcv[ohlcv.low <= target_low].iloc[0]\n stop_loss = (bar.name, target_low)\n\n # Check sell stop loss\n elif pos['side'] == 'sell' \\\n and ohlcv.high.max() >= target_high:\n bar = ohlcv[ohlcv.high >= target_high].iloc[0]\n stop_loss = (bar.name, target_high)\n\n if stop_loss:\n pos['op_close_time'] = stop_loss[0]\n pos['op_close_price'] = stop_loss[1]\n self.append_op(self.trader.op_close_position(pos, pos['op_close_time']))\n\n if self._config['mode'] == 'debug':\n logger.debug(f\"Stop {pos['side']} loss @ {pos['op_close_price']:.3f} ({pos['op_close_time']})\")\n\n continue\n\n if pos['stop_profit']: # If stop_loss is not applied, check stop profit\n stop_profit = ()\n\n if pos['side'] == 'buy':\n diff_low = pos['op_open_price'] * pos['stop_profit']\n\n for dt, oh in ohlcv.iterrows():\n cur_high = ohlcv[:dt + timedelta(seconds=1)].high.max()\n target_low = cur_high - diff_low\n\n if target_low > pos['op_open_price'] and oh.low < target_low:\n # if stop_profit is not set\n # if the close datetime (dt) is earlier, use that dt\n if not stop_profit \\\n or (stop_profit and stop_profit[0] > dt):\n stop_profit = (dt, target_low)\n\n elif pos['side'] == 'sell':\n diff_high = pos['op_open_price'] * pos['stop_profit']\n\n for dt, oh in ohlcv.iterrows():\n cur_low = ohlcv[:dt + timedelta(seconds=1)].low.min()\n target_high = cur_low + diff_high\n\n if target_high < pos['op_open_price'] and oh.high > target_high:\n # if stop_profit is not set\n # if the close datetime (dt) is earlier, use that dt\n if not stop_profit \\\n or (stop_profit and stop_profit[0] > dt):\n stop_profit = (dt, target_high)\n\n if stop_profit:\n pos['op_close_time'] = stop_profit[0]\n pos['op_close_price'] = stop_profit[1]\n self.append_op(self.trader.op_close_position(pos, pos['op_close_time']))\n\n if self._config['mode'] == 'debug':\n logger.debug(f\"Stop {pos['side']} profit @ {pos['op_close_price']:.3f} ({pos['op_close_time']})\")",
"def get_last_train_loss(self) -> float:\n loss = self.training_loss\n self.training_loss = 0\n return loss",
"def DownstreamStopTestOnHighLoss(self):\r\n\t\treturn self._get_attribute('downstreamStopTestOnHighLoss')",
"def stop(self):\n return _raw_util.raw_regenerate_peak2_sptr_stop(self)",
"def stop(self):\n return _raw_util.raw_pnc_frequency_modulator_fc_sptr_stop(self)",
"def stop(self) -> SourceLocation:\n return self._stop",
"def stop(self):\n return len(self.index) if self.original else self._stop",
"def stop(self):\n return _raw_util.raw_peak_detector_fb_sptr_stop(self)",
"def reset_smart_stop_loss(self):\n self.stopLossCounter = self.initialStopLossCounter",
"def DownstreamBinaryFrameLossUnit(self):\r\n\t\treturn self._get_attribute('downstreamBinaryFrameLossUnit')",
"def stop(self):\n return _raw_util.raw_regenerate_peak3_sptr_stop(self)",
"def stop(self):\n return _frame_detection_swig.deinterleaver_bb_sptr_stop(self)",
"def get_stop_probability(self) -> float:"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the stop loss value.
|
def get_stop_loss(self) -> Union[float, None]:
self.handle_trailing_prices()
if self.inShortPosition:
self.previousStopLoss = self._get_short_stop_loss()
return self.previousStopLoss
elif self.inLongPosition:
self.previousStopLoss = self._get_long_stop_loss()
return self.previousStopLoss
else:
return None
|
[
"def train_loss(self):\n return self._train_loss",
"def get_stop_probability(self) -> float:",
"def get_last_train_loss(self) -> float:\n loss = self.training_loss\n self.training_loss = 0\n return loss",
"def _get_long_stop_loss(self) -> Union[float, None]:\n if self.lossStrategy == TRAILING:\n return self.longTrailingPrice * (1 - self.lossPercentageDecimal)\n elif self.lossStrategy == STOP:\n return self.buyLongPrice * (1 - self.lossPercentageDecimal)\n elif self.lossStrategy is None:\n return None\n else:\n raise ValueError(\"Invalid type of loss strategy provided.\")",
"def stop_prob(self) -> np.ndarray:\n return self._stop_prob",
"def stop(self):\n return _raw_util.raw_divide_ff_sptr_stop(self)",
"def get_stopwn(self):\r\n command = \":scan:stop?\\n\"\r\n self._log_write(command, mode=\"write\")\r\n self.ser.write(command)\r\n answer = self.ser.read(13)\r\n self._log_write(answer, mode=\"read\")\r\n rlvalue = float(answer[:-6])\r\n self.Stat = self.Stat._replace(stopwn=rlvalue)\r\n return rlvalue",
"def validation_loss(self):\n return self._validation_loss",
"def _get_short_stop_loss(self) -> Union[float, None]:\n if self.lossStrategy == TRAILING:\n return self.shortTrailingPrice * (1 + self.lossPercentageDecimal)\n elif self.lossStrategy == STOP:\n return self.sellShortPrice * (1 + self.lossPercentageDecimal)\n elif self.lossStrategy is None:\n return None\n else:\n raise ValueError(\"Invalid type of loss strategy provided.\")",
"def DownstreamStepFrameLossUnit(self):\r\n\t\treturn self._get_attribute('downstreamStepFrameLossUnit')",
"def loss(self, result, config=None):\r\n return result.get('loss', None)",
"def loss(self):\n return np.mean(self.scores['loss'])",
"def stop(self):\n return _raw_util.raw_pnc_frequency_modulator_fc_sptr_stop(self)",
"def loss(self, prediction: Prediction):\n return - (1e8 * (optimality(prediction.conditions())) * self.failure(prediction))",
"def loss(self) -> float:\n\n rews = self.transform_rewards()\n value_loss = self.value_loss(rews)\n return torch.cat(value_loss).sum()",
"def stopping_op(self):\n with tf.GradientTape() as g:\n dz_dv = g.gradient(self.loss(), self.log_pitches)\n norms = tf.nn.l2_loss(dz_dv)\n return norms >= self.convergence_threshold",
"def reset_smart_stop_loss(self):\n self.stopLossCounter = self.initialStopLossCounter",
"def best_loss(self):\n return self._best_loss",
"def stop(self):\n return _wavelet_swig.wvps_ff_sptr_stop(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the net balance at the current price of the coin being traded. It factors in the current balance, the amount shorted, and the amount owned.
|
def get_net(self) -> float:
return self.coin * self.currentPrice - self.coinOwed * self.currentPrice + self.balance
|
[
"def getClientBalance(self, client, bot_config):\n currency = str(bot_config['currency'])\n pair = currency[len(currency)-4:len(currency)]\n if(pair == 'USDT'):\n balance = client.get_asset_balance(asset='USDT')\n else:\n balance = client.get_asset_balance(asset='BTC')\n return balance['free']",
"def balance(self):\n return Amount(self._balance, \"usd\")",
"def net_debt(self):\n return (self.short_term_debt + self.long_term_debt\n + self.other_lt_liabilities * self.other_lt_liability_debt_multiplier # noqa: W503\n - self.cash - self.short_term_investments) # noqa: W503",
"def get_coin_balance(self, coin):\r\n totals = self.get_all_balances()\r\n if coin in totals.keys():\r\n if self.debug == 1:\r\n print coin\r\n\r\n return float(totals[coin])\r\n else:\r\n return 'Bad Coin'",
"def net_total_money(self):\n return self._net_total_money",
"def get_balance(self):\n current_balance = 0\n\n for item in self.ledger:\n current_balance += item[\"amount\"]\n\n return current_balance",
"def net(income, tax): # Net income is the gross income minus the total tax\n\t\n\tnet = income - tax\n\treturn net",
"async def networth(self, ctx):\n worths = {}\n for stock, owner, quantity in self.db.get_all_holdings():\n if owner not in worths:\n worths[owner] = self.db.get_balance(owner)\n worths[owner] += quantity * self.stock_value(self.db.get_rating(stock))\n top = [sorted(worths.values(), reverse=True), sorted(worths, key=worths.get, reverse=True)]\n title = \"Net Worth Leaderboards\"\n headers = [\"#\", \"User\", \"Worth\"]\n count = 0\n data = []\n for i in range(len(top[0])):\n worth, owner = top[0][i], top[1][i]\n count += 1\n member = utils.get(ctx.guild.members, id=int(owner))\n data.append([str(count), member.name if member else self.db.get_handle(int(owner)), \"$%.2f\" % worth])\n await paginator.Paginator(data, headers, title).paginate(ctx, self.client)",
"def get_worth(self, current_price):\n\t\treturn self.cash + current_price * self.asset",
"def getBalance(self):\n\n balance = 0\n for item in self.ledger:\n balance += item[\"amount\"]\n\n return balance",
"def get_usdt_balance(client):\n return float(client.get_asset_balance(asset='USDT')[\"free\"])",
"def get_buy_amount(self):\r\n return self.balance / 3",
"def _get_val(self):\n return self.stock_owned.dot(self.stock_price) + self.cash_in_hand",
"def get_net_to_client_amount(self):\n net_to_client_mask = self._fuzzy_match_series(self.closeout_df['item'], 'net to client', errors=3)\n net_to_client_amount = self.closeout_df[net_to_client_mask]['amount'].values[0]\n return net_to_client_amount",
"def getbalance(url):\n return Channel.get(url).our.nValue",
"def prepay_balance(self):\n self.response = requests.get(self.path(\"prepay_balance\"), params={\"api_key\": self.api_key})\n return self.response.json().get(\"balance\")",
"def prepaid_balance(self):\n return self._prepaid_balance",
"def get_balance(self):\n if self.available:\n return self.total_amount\n else:\n raise ValueError('This bank account is closed')",
"def deposit(self, amount):\n bal = self.get_balance()\n bal += amount\n return bal"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the trend based on the strategies provided.
|
def get_trend(self) -> Union[int, None]:
trends = [strategy.trend for strategy in self.strategies.values()]
if len(trends) == 0:
return None
if all(trend == BEARISH for trend in trends):
return BEARISH
elif all(trend == BULLISH for trend in trends):
return BULLISH
else:
return None
|
[
"def _calculate_trends(\n history: list[OrderedDict], measurements_to_use: int\n) -> dict[str, Any]:\n if measurements_to_use == -1:\n index_range = np.arange(0, len(history))\n else:\n index_range = np.arange(0, measurements_to_use)\n\n measured_attributes = set().union(*(d.keys() for d in history))\n metrics_to_trend = measured_attributes.intersection(list(METRICS_TO_TREND))\n\n trends = {}\n for attribute in metrics_to_trend:\n values = [\n float(value)\n for measurement in history\n for attr, value in measurement.items()\n if attr == attribute\n ]\n\n if measurements_to_use != -1:\n values = values[-measurements_to_use:]\n\n index_array = np.array(values)\n linear_fit = np.polyfit(\n index_range,\n index_array,\n 1,\n )\n slope = round(linear_fit[0], 2)\n\n metric = _get_normalized_metric_name(attribute)\n\n if slope > 0:\n trends[metric] = TREND_INCREASING\n elif slope < 0:\n trends[metric] = TREND_DECREASING\n else:\n trends[metric] = TREND_FLAT\n\n return trends",
"def generate_trend(trend_params: configparser.ConfigParser) -> Tuple[np.ndarray, np.ndarray]:\n function = trend_params[FUNC]\n data_points = int(trend_params[DATA_PTS])\n x = np.linspace(0, 1, data_points)\n y = eval(function)\n return x, np.array(y)",
"def to_trend(avg):\n past = self._past_temp[0]\n directions = list()\n for temp in self._past_temp:\n directions.append(cmp(past, temp))\n # Perturb if stable weather.\n if sum(abs(t) for t in directions) == 0:\n directions = (-1, 1)\n return front(past, random.choice(directions))",
"def get_strategies(reward_estimates):\n strategies = [\n EpsilonExplorer(0),\n EpsilonExplorer(0.01),\n EpsilonExplorer(0.1),\n DescendingEpsilonExplorer(0.1, 0.999),\n BoltzmannExplorer(1),\n BoltzmannExplorer(0.5),\n BoltzmannExplorer(2),\n ] \n for strat in strategies:\n strat.initialize(reward_estimates)\n return strategies",
"def get_trending_tracks(args, strategy):\n db = get_db_read_replica()\n with db.scoped_session() as session:\n current_user_id, genre, time = (\n args.get(\"current_user_id\"),\n args.get(\"genre\"),\n args.get(\"time\", \"week\"),\n )\n time_range = \"week\" if time not in [\"week\", \"month\", \"year\"] else time\n key = make_trending_cache_key(time_range, genre, strategy.version)\n\n # Will try to hit cached trending from task, falling back\n # to generating it here if necessary and storing it with no TTL\n (tracks, track_ids) = use_redis_cache(\n key,\n None,\n make_generate_unpopulated_trending(session, genre, time_range, strategy),\n )\n\n # populate track metadata\n tracks = populate_track_metadata(session, track_ids, tracks, current_user_id)\n tracks_map = {track[\"track_id\"]: track for track in tracks}\n\n # Re-sort the populated tracks b/c it loses sort order in sql query\n sorted_tracks = [tracks_map[track_id] for track_id in track_ids]\n\n if args.get(\"with_users\", False):\n user_id_list = get_users_ids(sorted_tracks)\n users = get_users_by_id(session, user_id_list, current_user_id)\n for track in sorted_tracks:\n user = users[track[\"owner_id\"]]\n if user:\n track[\"user\"] = user\n return sorted_tracks",
"def fit_trend_and_level(self, df):\n # extract the timeseries and begin forming the decomposition data frame\n decomposition_df = df.copy()\n\n # establish the \"grain\" (which cycle we're in) and the \"cycle\" (which point in the seasonal cycle)\n if self.seasonality == 'weekly':\n decomposition_df['GRAIN'] = decomposition_df.index//7\n decomposition_df['ROLLING_GRAIN_MEAN'] = decomposition_df[self.endog].rolling(7, min_periods=0).mean().tolist()\n decomposition_df['CYCLE'] = decomposition_df[self.date_header].dt.weekday\n else:\n print(\"Seasonal profile not set to 'weekly', unable to fit seasona profiling\")\n\n # extract the training timeseries specifically\n training_data = decomposition_df['ROLLING_GRAIN_MEAN']\n\n # set initial level and trend\n level = self.initial_level\n trend = self.initial_trend\n projected = [self.initial_level]\n\n # apply double exponential smoothing to decompose level and trend\n for ind in range(1, len(training_data)):\n # predict time step\n projection = level+trend\n # update level\n level_new = (1-self.level_smoothing)*(training_data[ind])+self.level_smoothing*(level+trend)\n # update trend\n trend_new = (1-self.trend_smoothing)*trend+self.trend_smoothing*(level_new-level)\n # append to projected\n projected.append(projection)\n\n # set to re-iterate\n trend = trend_new\n level = level_new\n\n # apply fit to the fit_df\n decomposition_df['LEVEL_TREND_DECOMPOSITION'] = projected\n\n # get the observed seasonality using the filtered values\n decomposition_df['OBSERVED_SEASONALITY'] = decomposition_df[self.endog]/decomposition_df['LEVEL_TREND_DECOMPOSITION']\n\n return decomposition_df, trend, level",
"def expected_results():\n return [\n {\n 'strategy': BuffedCoinStrategy,\n 'values': [\n 1318.21, 1250.13, 1318.79, 1355.47, 1560.75, 1694.85, 1918.27,\n 1866.54, 1888.66, 2039.06, 1967.42, 2184.11, 2326.3, 2461.91,\n 2589.18, 2544.36, 2420.49, 2778.22, 2958.32, 3313.64, 3686.43,\n 3704.98, 4091.39, 4395.39, 4085.4, 4770.42, 3487.72, 3384.36,\n 3546.08, 3664.02, 3820.51, 3976.37\n ],\n },\n {\n 'strategy': BuyHoldStrategy,\n 'values': [\n 1318.21, 1250.13, 1318.79, 1355.47, 1560.75, 1706.55, 1953.71,\n 2004.34, 1936.11, 2145.46, 1971.15, 2230.17, 2384.13, 2429.57,\n 2455.09, 2397.81, 2403.63, 2797.57, 2929.94, 3300.03, 3823.09,\n 3898.91, 4190.82, 4435.93, 3901.56, 4713.82, 3341.65, 3222.06,\n 3393.65, 3539.53, 3789.87, 3801.63,\n ],\n },\n {\n 'strategy': PeakRiderStrategy,\n 'values': [\n 1318.21, 1250.13, 1318.79, 1355.47, 1560.75, 1706.55, 1920.65,\n 1889.18, 1906.54, 2071.08, 1947.65, 2156.81, 2296.88, 2381.47,\n 2439.71, 2317.35, 2315.89, 2593.93, 2707.41, 2988.51, 3172.41,\n 3208.15, 3549.13, 3715.67, 3672.46, 4213.29, 3301.56, 3016.65,\n 3196.71, 3241.07, 3325.59, 3354.02,\n ],\n },\n ]",
"def findRates_synottip(odds):\r\n home = 0\r\n away = 0\r\n draw = 0\r\n odds1 = odds[0].text\r\n odds1 = float(odds1.replace(\",\", \".\"))\r\n odds2 = odds[1].text\r\n odds2 = float(odds2.replace(\",\", \".\"))\r\n if len(odds) == 2:\r\n home = odds1\r\n away = odds2\r\n if len(odds) == 3:\r\n home = odds1\r\n draw = odds2\r\n odds3 = odds[2].text\r\n odds3 = float(odds3.replace(\",\", \".\"))\r\n away = odds3\r\n return home, draw, away",
"def objective(weight_strategies):\r\n regularization = np.mean([(sum(weight_strategy)-1)**2 for weight_strategy in weight_strategies])\r\n weight_strategies = np.array(weight_strategies).T\r\n weight_strategies = dict(zip(portfolio.get_strategy_ids(), weight_strategies))\r\n weights = portfolio.get_weight_regime_format()\r\n for strategy_id in portfolio.strategies:\r\n weights[strategy_id] = regime.apply(lambda x: weight_strategies[strategy_id][x])\r\n portfolio.weights(weights)\r\n portfolio.fit()\r\n monthly_return = portfolio.extract_monthly_return().values\r\n return portfolio.sharpe_ratio() - regularization,",
"def get_strategy(name, stock_ratio=0.7):\n\n\tif name==\"Bogleheads\":\n\t\tstrat = {\n\t\t'VTI' : stock_ratio*.35,\n\t\t'SCHB' : stock_ratio*.35, \n\t\t'VXUS' : stock_ratio*.3,\n\t\t'BND' : (1-stock_ratio)*1.0,\n\t\t\t\t }\n\telif name==\"Betterment2016\":\n\t\tstrat = {\n\t\t'VTI' : stock_ratio*0.087,\n\t\t'SCHB' : stock_ratio*0.087, \n\t\t'VTV' : stock_ratio*0.178,\n\t\t'VOE' : stock_ratio*0.05,\n\t\t'VBR' : stock_ratio*0.05,\n\t\t'VEA' : stock_ratio*0.1025,\n\t\t'SCHF' : stock_ratio*0.1025,\n\t\t'VWO' : stock_ratio*0.03275,\n\t\t'IEMG' : stock_ratio*0.03275,\n\t\t'VXUS' : stock_ratio*0.2705,\n\t\t'BND' : (1-stock_ratio)*1.0,\n\t\t\t\t }\n\telif name==\"Betterment2018\":\n\t\tstrat = {\n\t\t'VTI' : stock_ratio*0.1765,\n\t\t'SCHB' : stock_ratio*0.1765, \n\t\t'VTV' : stock_ratio*0.094,\n\t\t'VOE' : stock_ratio*0.077,\n\t\t'VBR' : stock_ratio*0.065,\n\t\t'VEA' : stock_ratio*0.1295,\n\t\t'VWO' : stock_ratio*0.076,\n\t\t'VXUS' : stock_ratio*0.205,\n\t\t'BND' : (1-stock_ratio)*1.0,\n\t\t\t\t }\n\telse:\n\t\tstrat = dict()\n\t\twarnings.warn(\"No matching strategy found.\")\n\n\t## Normalize to exactly one\n\tif np.isclose(sum(list(strat.values())), 1, .01):\n\t\tnorm_val = 1.0/sum(strat.values())\n\t\tstrat = {key: val*norm_val for key, val in strat.items() }\n\n\treturn strat",
"def ts_trendline(ax,df,obsvar,modvar,start_date,end_date,sepvar='',sepvals=([]),lname='',sepunits='',\n ocols=('blue','darkviolet','teal','green','deepskyblue'),\n mcols=('fuchsia','firebrick','orange','darkgoldenrod','maroon'),labels='',error=False):\n #cleaning data and defining sub-functions\n def sin_model(t,A,B,C,D,E,F,G):\n x=(A*np.sin((2*np.pi*t)/365)+B*np.cos((2*np.pi*t)/365)+C*np.sin((4*np.pi*t)/365)+D*np.cos((4*np.pi*t)/365)+E*np.sin((6*np.pi*t)/365)+A*np.cos((6*np.pi*t)/365)+G)\n return x\n if len(lname)==0:\n lname=sepvar\n ps=list()\n df=df.dropna(axis=0,subset=[obsvar,modvar,'dtUTC'])\n #creating trendlines without separating values\n if len(sepvals)==0:\n obs0=et._deframe(df.loc[(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),[obsvar]])\n mod0=et._deframe(df.loc[(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),[modvar]])\n time0=et._deframe(df.loc[(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),['dtUTC']])\n yd0=et._deframe(df.loc[(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),['YD']])\n if len(yd0) < 7:\n print('The number of data points is less than the number of variables in the model. no trendline will be plotted')\n # creating seperate trendlines for the observed and model variables\n else:\n if error == False:\n opar, opar_cov = opt.curve_fit(sin_model, yd0, obs0)\n mpar, mpar_cov = opt.curve_fit(sin_model, yd0, mod0)\n p0,=ax.plot(time0, sin_model(yd0, *opar), color=ocols[0], label=f'Observed {lname}',alpha=0.7, linestyle='dashed')\n ps.append(p0)\n p0,=ax.plot(time0, sin_model(yd0, *mpar), color=mcols[0], label=f'Modeled {lname}',alpha=0.7, linestyle='dashed')\n ps.append(p0)\n # creating a trendline of the error in the\n elif error == True:\n epar, epar_cov = opt.curve_fit(sin_model, yd0, mod0-obs0)\n p0,=ax.plot(time0, sin_model(yd0, *epar), color=ocols[0], label=f'Error {lname}',alpha=0.7, linestyle='dashed')\n ps.append(p0)\n else:\n obs0=et._deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[sepvar]==df[sepvar])&(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),[obsvar]])\n mod0=et._deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[sepvar]==df[sepvar])&(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),[modvar]])\n time0=et._deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[sepvar]==df[sepvar])&(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),['dtUTC']])\n yd0=et._deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[sepvar]==df[sepvar])&(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),['YD']])\n sep0=et._deframe(df.loc[(df[obsvar]==df[obsvar])&(df[modvar]==df[modvar])&(df[sepvar]==df[sepvar])&(df['dtUTC'] >= start_date)&(df['dtUTC']<= end_date),[sepvar]])\n sepvals=np.sort(sepvals)\n # less than min case:\n ii=0\n iii=sep0<sepvals[ii]\n if np.sum(iii)>0:\n #ll=u'{} < {} {}'.format(lname,sepvals[ii],sepunits).strip()\n if len(labels)>0:\n ll=labels[0]\n else:\n ll=u'{} $<$ {} {}'.format(lname,sepvals[ii],sepunits).strip() \n if len(yd0[iii]) < 7:\n print(\"The number of data points below min was less that the number of variables in the model. 
Therefore, data points below min have not been plotted \")\n else:\n if error == False:\n opar, opar_cov = opt.curve_fit(sin_model, yd0[iii], obs0[iii])\n mpar, mpar_cov = opt.curve_fit(sin_model, yd0[iii], mod0[iii])\n p0,=ax.plot(time0[iii], sin_model(yd0[iii], *opar), color=ocols[ii], label=f'Observed {ll}',alpha=0.7, linestyle='dashed')\n ps.append(p0)\n p0,=ax.plot(time0[iii], sin_model(yd0[iii], *mpar), color=mcols[ii], label=f'Modeled{ll}',alpha=0.7, linestyle='dashed')\n ps.append(p0)\n elif error == True:\n epar, epar_cov = opt.curve_fit(sin_model, yd0[iii], mod0[iii]-obs0[iii])\n p0,=ax.plot(time0[iii], sin_model(yd0[iii], *epar), color=ocols[ii], label=f'Error {lname}',alpha=0.7, linestyle='dashed')\n ps.append(p0)\n # between min and max:\n for ii in range(1,len(sepvals)):\n iii=np.logical_and(sep0<sepvals[ii],sep0>=sepvals[ii-1])\n if np.sum(iii)>0:\n #ll=u'{} {} \\u2264 {} < {} {}'.format(sepvals[ii-1],sepunits,lname,sepvals[ii],sepunits).strip()\n if len(labels)>0:\n ll=labels[ii]\n else:\n ll=u'{} {} $\\leq$ {} $<$ {} {}'.format(sepvals[ii-1],sepunits,lname,sepvals[ii],sepunits).strip()\n if len(yd0[iii]) < 7:\n print(\"The number of data points inbetween min and max was less that the number of variables in the model. Therefore, data points inbetween min and max have not been plotted \") \n else:\n if error == False:\n opar, opar_cov = opt.curve_fit(sin_model, yd0[iii], obs0[iii])\n mpar, mpar_cov = opt.curve_fit(sin_model, yd0[iii], mod0[iii])\n p0,=ax.plot(time0[iii], sin_model(yd0[iii], *opar), color=ocols[ii], label=f'Observed {ll}',alpha=0.7, linestyle='dashed')\n ps.append(p0)\n p0,=ax.plot(time0[iii], sin_model(yd0[iii], *mpar), color=mcols[ii], label=f'Modeled {ll}',alpha=0.7, linestyle='dashed')\n ps.append(p0)\n elif error == True:\n epar, epar_cov = opt.curve_fit(sin_model, yd0[iii], mod0[iii]-obs0[iii])\n p0,=ax.plot(time0[iii], sin_model(yd0[iii], *epar), color=ocols[ii], label=f'Error {lname}',alpha=0.7, linestyle='dashed')\n ps.append(p0)\n # greater than max:\n iii=sep0>=sepvals[ii]\n if np.sum(iii)>0:\n #ll=u'{} \\u2265 {} {}'.format(lname,sepvals[ii],sepunits).strip()\n if len(labels)>0:\n ll=labels[ii+1]\n else:\n ll=u'{} $\\geq$ {} {}'.format(lname,sepvals[ii],sepunits).strip()\n if len(yd0[iii]) < 7:\n print(\"The number of data points above max was less that the number of variables in the model. Therefore, data points above max have not been plotted \")\n else:\n if error == False:\n opar, opar_cov = opt.curve_fit(sin_model, yd0[iii], obs0[iii])\n mpar, mpar_cov = opt.curve_fit(sin_model, yd0[iii], mod0[iii])\n p0,=ax.plot(time0[iii], sin_model(yd0[iii], *opar), color=ocols[ii+1], label=f'Observed {ll}',alpha=0.7, linestyle='dashed')\n ps.append(p0)\n p0,=ax.plot(time0[iii], sin_model(yd0[iii], *mpar), color=mcols[ii+1], label=f'Modeled {ll}',alpha=0.7, linestyle='dashed')\n ps.append(p0)\n elif error == True:\n epar, epar_cov = opt.curve_fit(sin_model, yd0[iii], mod0[iii]-obs0[iii])\n p0,=ax.plot(time0[iii], sin_model(yd0[iii], *epar), color=ocols[ii+1], label=f'Error {lname}',alpha=0.7, linestyle='dashed')\n ps.append(p0)\n yearsFmt = mdates.DateFormatter('%d %b %y')\n ax.xaxis.set_major_formatter(yearsFmt)\n return ps",
"def trending(words,startYr,endYr):\n startLst=[]\n endLst=[]\n WordTrendLst=[]\n lst=[]\n for i in words:\n for j in words[i]:\n if j.year==startYr:\n if j.count>=1000:\n startLst.append(i)\n for i in startLst:\n for j in words[i]:\n if j.year==endYr:\n if j.count>=1000:\n endLst.append(i)\n for i in endLst:\n for j in words[i]:\n if j.year==startYr:\n trendValue=j.count\n if j.year==endYr:\n trendValue=j.count/trendValue\n WordTrendLst.append(WordTrend(i,trendValue))\n trendValue=0\n return(sorted(WordTrendLst,key=lambda WordTrend:WordTrend.trend))[::-1]",
"def eval_strategy_range(make_strategy, lower_bound, upper_bound):\n best_value, best_win_rate = 0, 0\n value = lower_bound\n while value <= upper_bound:\n strategy = make_strategy(value)\n win_rate = compare_strategies(strategy)\n print('Win rate against the baseline using', value, 'value:', win_rate)\n if win_rate > best_win_rate:\n best_win_rate, best_value = win_rate, value\n value += 1\n return best_value",
"def trendline(res, stab):\n\n if res.size != stab.size:\n print 'Failed in trendline.'\n sys.exit()\n\n delta = res.size*np.sum(res*res) - np.sum(res)*np.sum(res)\n intercept = (np.sum(res*res)*np.sum(stab) - np.sum(res)*np.sum(res*stab)) / delta\n slope = (res.size*np.sum(res*stab) - np.sum(res)*np.sum(stab)) / delta\n return np.array([slope, intercept])",
"def SuperTrend(df, period, multiplier, ohlc=['Open', 'High', 'Low', 'Close']):\n\n ATR(df, period, ohlc=ohlc)\n atr = 'ATR_' + str(period)\n st = 'ST_' + str(period) + '_' + str(multiplier)\n stx = 'STX_' + str(period) + '_' + str(multiplier)\n \n \"\"\"\n SuperTrend Algorithm :\n \n BASIC UPPERBAND = (HIGH + LOW) / 2 + Multiplier * ATR\n BASIC LOWERBAND = (HIGH + LOW) / 2 - Multiplier * ATR\n \n FINAL UPPERBAND = IF( (Current BASICUPPERBAND < Previous FINAL UPPERBAND) or (Previous Close > Previous FINAL UPPERBAND))\n THEN (Current BASIC UPPERBAND) ELSE Previous FINALUPPERBAND)\n FINAL LOWERBAND = IF( (Current BASIC LOWERBAND > Previous FINAL LOWERBAND) or (Previous Close < Previous FINAL LOWERBAND)) \n THEN (Current BASIC LOWERBAND) ELSE Previous FINAL LOWERBAND)\n \n SUPERTREND = IF((Previous SUPERTREND = Previous FINAL UPPERBAND) and (Current Close <= Current FINAL UPPERBAND)) THEN\n Current FINAL UPPERBAND\n ELSE\n IF((Previous SUPERTREND = Previous FINAL UPPERBAND) and (Current Close > Current FINAL UPPERBAND)) THEN\n Current FINAL LOWERBAND\n ELSE\n IF((Previous SUPERTREND = Previous FINAL LOWERBAND) and (Current Close >= Current FINAL LOWERBAND)) THEN\n Current FINAL LOWERBAND\n ELSE\n IF((Previous SUPERTREND = Previous FINAL LOWERBAND) and (Current Close < Current FINAL LOWERBAND)) THEN\n Current FINAL UPPERBAND\n \"\"\"\n \n # Compute basic upper and lower bands\n df['basic_ub'] = (df[ohlc[1]] + df[ohlc[2]]) / 2 + multiplier * df[atr]\n df['basic_lb'] = (df[ohlc[1]] + df[ohlc[2]]) / 2 - multiplier * df[atr]\n\n # Compute final upper and lower bands\n df['final_ub'] = 0.00\n df['final_lb'] = 0.00\n for i in range(period, len(df)):\n df['final_ub'].iat[i] = df['basic_ub'].iat[i] if df['basic_ub'].iat[i] < df['final_ub'].iat[i - 1] or df[ohlc[3]].iat[i - 1] > df['final_ub'].iat[i - 1] else df['final_ub'].iat[i - 1]\n df['final_lb'].iat[i] = df['basic_lb'].iat[i] if df['basic_lb'].iat[i] > df['final_lb'].iat[i - 1] or df[ohlc[3]].iat[i - 1] < df['final_lb'].iat[i - 1] else df['final_lb'].iat[i - 1]\n \n # Set the Supertrend value\n df[st] = 0.00\n for i in range(period, len(df)):\n df[st].iat[i] = df['final_ub'].iat[i] if df[st].iat[i - 1] == df['final_ub'].iat[i - 1] and df[ohlc[3]].iat[i] <= df['final_ub'].iat[i] else \\\n df['final_lb'].iat[i] if df[st].iat[i - 1] == df['final_ub'].iat[i - 1] and df[ohlc[3]].iat[i] > df['final_ub'].iat[i] else \\\n df['final_lb'].iat[i] if df[st].iat[i - 1] == df['final_lb'].iat[i - 1] and df[ohlc[3]].iat[i] >= df['final_lb'].iat[i] else \\\n df['final_ub'].iat[i] if df[st].iat[i - 1] == df['final_lb'].iat[i - 1] and df[ohlc[3]].iat[i] < df['final_lb'].iat[i] else 0.00 \n \n # Mark the trend direction up/down\n df[stx] = np.where((df[st] > 0.00), np.where((df[ohlc[3]] < df[st]), 'down', 'up'), np.NaN)\n\n # Remove basic and final bands from the columns\n df.drop(['basic_ub', 'basic_lb', 'final_ub', 'final_lb'], inplace=True, axis=1)\n \n df.fillna(0, inplace=True)\n\n print(df[stx])\n return df",
"def trend(obj, dim=None, type='linear'):\n\tcoord = obj[dim]\n\tif _utils.is_datetime(coord.data):\n\t\t# Use the 1e-9 to scale nanoseconds to seconds (by default, xarray use\n\t\t# datetime in nanoseconds\n\t\tt = pd.to_numeric(coord) * 1e-9\n\telse:\n\t\tt = coord \t\n\tif type is 'constant':\n\t\tobj_trend = obj.mean(dim=dim)\n\t\t_, obj_trend = xr.broadcast(obj, obj_trend)\n\telif type is 'linear':\n\t\tslope, offset = linreg(obj, dim=dim)\n\t\tobj_trend = t * slope + offset\n\telif type is 'quadratic':\n\t\traise NotImplementedError\n\telse:\n\t\traise ValueError('This type of trend is not supported')\n\treturn obj_trend",
"def get_event_rates(self, t, y):\n rates = [r(t,y) for r in self.birth_rate_functions]\n rates += [r(t,y) for r in self.linear_rate_functions]\n if self.correct_for_dynamical_population_size:\n population_size = self.y0.sum()\n else:\n population_size = self.initial_population_size\n rates += [ r(t,self.y0)/population_size for r in self.quadratic_rate_functions ]\n rates = np.array(rates)\n\n return rates",
"def _generate_strategies(self):\n\n size = self._config.get(\"number_strategies\")\n return [Strategy(self._config) for _ in range(size)]",
"def make_generate_unpopulated_trending(session, genre, time_range, strategy):\n\n def wrapped():\n return generate_unpopulated_trending(session, genre, time_range, strategy)\n\n return wrapped"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prints out strategies provided in configuration.
|
def print_strategies(self):
for strategyName, strategy in self.strategies.items():
print(f'\t{get_label_string(strategyName)}: {strategy.get_params()}')
|
[
"def show_strategy_menu(self):\n self.show_content(\"Please select a strategy for your opponent \"\n \"from the numbered list.\")\n self.show_content(\"\\t1 - The Rock: always plays 'rock'\")\n self.show_content(\"\\t2 - You'll never guess!\")\n self.show_content(\"\\t3 - Monkey see, monkey do: \"\n \"repeats every move you play!\")\n self.show_content(\"\\t4 - The Cylist: cycles through \"\n \"the moves in ordered manner.\")\n self.show_line(None)",
"def get_strategies() -> List[Dict[str, Any]]:\n check_module(\"lpot\")\n from lpot.strategy import STRATEGIES\n\n help_dict = load_help_lpot_params(\"strategies\")\n strategies = []\n for strategy in STRATEGIES.keys():\n help_msg = help_dict.get(f\"__help__{strategy}\", \"\")\n strategies.append({\"name\": strategy, \"help\": help_msg})\n return strategies",
"def print_config(config):\n print('#'*50)\n [print(f'# {key}: {value}') for key, value in config.items()]\n print('#'*50)",
"def print_config(self):\n # Print the general parameters\n print \"\\n[Configuration Parameters]\"\n for key, value in self.config_dict.iteritems():\n if isinstance(value, bool):\n print \" {0} : {1}\".format(key,value)\n elif isinstance(value, dict):\n print \" {0}\".format(key)\n for embedded_key, embedded_value in value.iteritems():\n print \" {0} : {1}\".format(embedded_key,embedded_value)\n elif isinstance(value, list):\n print \" {0}\".format(key)\n for embedded_value in value:\n print \" {0}\".format(embedded_value)\n # Print the supported Actions\n print \"\\n[Supported Actions]\"\n for action_name in sorted(self.supported_actions):\n print \" {0}\".format(action_name)\n # Print the supported Objects\n print \"\\n[Supported Objects]\"\n for object_type in sorted(self.supported_objects):\n supported_fields = self.supported_objects[object_type]\n print \" {0}\".format(object_type)\n required = supported_fields[\"required\"]\n mutually_exclusive_required = supported_fields[\"mutually_exclusive\"]\n optional = supported_fields[\"optional\"]\n if required:\n print \" Required Fields\"\n for field in sorted(required):\n print \" {0}\".format(field)\n if mutually_exclusive_required:\n print \" Mutually Exclusive (Required) Fields\"\n for field in sorted(mutually_exclusive_required):\n print \" {0}\".format(field)\n if optional:\n print \" Optional Fields\"\n for field in sorted(optional):\n print \" {0}\".format(field)",
"def non_test_mode_print(*args):\n if not test_mode:\n print(args)",
"def strategies(self) -> pulumi.Output[Optional[Sequence['outputs.OceanLaunchSpecStrategy']]]:\n return pulumi.get(self, \"strategies\")",
"def print_challenges(challenges_data):",
"def _print_resolvers(resolvers, host, port):\n\n print_lines = []\n mounted_endpoints = {\n \"endpoint\": \"POST\",\n \"playground\": \"GET\",\n }\n\n for name, method in mounted_endpoints.items():\n output = f\"Mounting GraphQL {name} at http://{host}:{port}/graphql [{method}]\"\n\n print_lines.append(output)\n LOG.info(output)\n\n for resolver in resolvers:\n output = f\"Resolving {resolver.object_type}.{resolver.field_name} using Lambda {resolver.function_name}\"\n\n print_lines.append(output)\n LOG.info(output)\n\n return print_lines",
"def print_configs(\n self\n ):\n\n if self._config_dict is None:\n return()\n\n logger.info(\"Interferometric Configurations\")\n for this_config in self._config_dict['interf_config'].keys():\n logger.info(\"... \"+this_config)\n this_arrays = self._config_dict['interf_config'][this_config]['array_tags']\n this_other_config = self._config_dict['interf_config'][this_config]['feather_config']\n scales_for_clean = self._config_dict['interf_config'][this_config]['clean_scales_arcsec']\n logger.info(\"... ... includes arrays \"+str(this_arrays))\n logger.info(\"... ... maps to feather config \"+str(this_other_config))\n logger.info(\"... ... clean these scales in arcsec \"+str(scales_for_clean))\n\n if 'feather_config' in self._config_dict:\n logger.info(\"Feather Configurations\")\n for this_config in self._config_dict['feather_config'].keys():\n logger.info(\"... \"+this_config)\n this_other_config = self._config_dict['feather_config'][this_config]['interf_config']\n logger.info(\"... ... maps to interferometer config \"+str(this_other_config))\n\n return()",
"def _display_config_file(self):\n print(f'\\n{ProctorConfig.config_file}:')\n with open(ProctorConfig.config_file) as f:\n print(f.read())",
"def execute(self): #This gets replaced by another vercion if another strategy is provided\r\n print(\"{} is used!!\".format(self.name))",
"def print_decision_tree(self, decision_tree):\n print(decision_tree)",
"def print_puzzle(self):\n self.initial_puzzle.print_puzzle()",
"def list_output_formats():\n format_string = \"{:<20}{}\"\n print(format_string.format(\"Format\", \"Description\"))\n print('-' * 80)\n for plugin in get_output_plugins():\n print(format_string.format(plugin.format_name, plugin.format_description))",
"def print_statistics(self):\n pass",
"def print_options(options):\n\n for key in options:\n print (key, ':', options[key])\n print ()",
"def print_results(self):\n for c in self._winners:\n print \"Elected %s\"%(c._name)",
"def view_conf() -> None:\n print(Config.get_conf())",
"def runners_print(runners_list):\n print(\"The runners are: \\n \")\n for runner in runners_list: \n print(\"- {}in box {}.\\n\".format(runner.name, runner.number))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prints out backtest results.
|
def print_backtest_results(self, stdout=None):
previous_stdout = sys.stdout
if stdout is not None: # Temporarily redirects output to stdout provided.
sys.stdout = stdout
print("\nBacktest results:")
print(f'\tSymbol: {"Unknown/Imported Data" if self.symbol is None else self.symbol}')
print(f'\tElapsed: {round(self.endTime - self.startTime, 2)} seconds')
print(f'\tStart Period: {self.data[self.startDateIndex]["date_utc"]}')
print(f"\tEnd Period: {self.currentPeriod['date_utc']}")
print(f'\tStarting balance: ${round(self.startingBalance, self.precision)}')
print(f'\tNet: ${round(self.get_net(), self.precision)}')
print(f'\tCommissions paid: ${round(self.commissionsPaid, self.precision)}')
print(f'\tTrades made: {len(self.trades)}')
net = self.get_net()
difference = round(net - self.startingBalance, self.precision)
if difference > 0:
print(f'\tProfit: ${difference}')
print(f'\tProfit Percentage: {round(net / self.startingBalance * 100 - 100, 2)}%')
elif difference < 0:
print(f'\tLoss: ${-difference}')
print(f'\tLoss Percentage: {round(100 - net / self.startingBalance * 100, 2)}%')
else:
print("\tNo profit or loss incurred.")
# print(f'Balance: ${round(self.balance, 2)}')
# print(f'Coin owed: {round(self.coinOwed, 2)}')
# print(f'Coin owned: {round(self.coin, 2)}')
# print(f'Trend: {self.trend}')
sys.stdout = previous_stdout # revert stdout back to normal
|
[
"def print_results(self):\n self._write_term('\\nPassed {} of {} tests, {} failed.\\n'.format(self.passes,self.numTests,self.fails))",
"def print_test_results(self):\n for n, (name, result) in enumerate(zip(self._test_names, self._test_results)):\n print('Test {} ({}): {}'.format(n+1, name, result))\n print('{} test failure(s)'.format(self.fail_cnt))",
"def print_results(results):",
"def print_last_results(session):\n file_path = f\"perf_{session.python}_sponge_log.xml\"\n get_junitxml_results(file_path)",
"def backtest_with_runner():\n\n parser = argparse.ArgumentParser(\n description=(\n 'backtest an algorithm and publish '\n 'the trading history'))\n parser.add_argument(\n '-t',\n help=('ticker symbol'),\n required=False,\n dest='ticker')\n parser.add_argument(\n '-k',\n help=('s3_key'),\n required=False,\n dest='s3_key')\n parser.add_argument(\n '-b',\n help=('s3_bucket'),\n required=False,\n dest='s3_bucket')\n parser.add_argument(\n '-s',\n help=('start date format YYYY-MM-DD'),\n required=False,\n dest='start_date')\n parser.add_argument(\n '-c',\n help=('algo config file'),\n required=False,\n dest='algo_config')\n parser.add_argument(\n '-l',\n help=(\n 'run a backtest with the latest '\n 'pricing data'),\n required=False,\n dest='latest',\n action='store_true')\n parser.add_argument(\n '-d',\n help='debug',\n required=False,\n dest='debug',\n action='store_true')\n args = parser.parse_args()\n\n ticker = 'SPY'\n s3_bucket = (\n 'algohistory')\n s3_key = (\n 'trade_history_{ticker}')\n start_date = (\n '2019-01-01')\n algo_config = (\n '/opt/sa/cfg/default_algo.json')\n latest = False\n show_plot = True\n debug = False\n\n if args.ticker:\n ticker = args.ticker.upper()\n if args.s3_key:\n s3_key = args.s3_key\n if args.s3_bucket:\n s3_bucket = args.s3_bucket\n if args.start_date:\n start_date = args.start_date\n if args.algo_config:\n algo_config = args.algo_config\n if args.latest:\n latest = True\n start_date = ae_utils.get_last_close_str()\n if args.debug:\n debug = True\n\n history_loc = (\n f's3://{s3_bucket}/{s3_key}')\n\n log.info(\n f'building {ticker} trade history '\n f'start_date={start_date} '\n f'config={algo_config} '\n f'history_loc={history_loc}')\n\n runner = algo_runner.AlgoRunner(\n ticker=ticker,\n start_date=start_date,\n history_loc=history_loc,\n algo_config=algo_config,\n verbose_algo=debug,\n verbose_processor=False,\n verbose_indicators=False)\n\n trading_history_df = None\n if latest:\n trading_history_df = runner.latest()\n log.info(\n f'{ticker} latest:')\n print(trading_history_df[['minute', 'close']].tail(5))\n log.info(\n 'Other available columns to plot:')\n print(trading_history_df.columns.values)\n if show_plot:\n plot.plot_trading_history(\n title=(\n f'{ticker} at '\n f'${trading_history_df[\"close\"].iloc[-1]} '\n f'at: '\n f'{trading_history_df[\"minute\"].iloc[-1]}'),\n df=trading_history_df,\n red='high',\n blue='close')\n else:\n runner.start()\n\n sys.exit(0)",
"def printTestResult(self):\n splitter = \"=================================================================================================\"\n print(\"\\n\" + splitter)\n print(\"%-3s%-60s%11s\" % ('ID', 'Testcase Name', 'Test Result'))\n for i in range(len(self)):\n print(\"%-3d%-60s%11s\" % (i + 1, self[i].name, self[i].result))\n print(splitter + \"\\n\")",
"def print_verbose_report(tests):\n for t in tests:\n if t.result in ('*', 'f'):\n print( \"// === %s (result: %s) ===\" % (t.name, t.result) )\n print( \"// --- input ---\" )\n print( t.input )\n print( \"// --- expected output ---\" )\n print( t.output )\n print( \"// --- obtained output ---\" )\n print( t.stdoutdata )\n if t.result not in ('.', '*', 'f'):\n print( \"// === %s (result: %s) ===\" % (t.name, t.result) )\n print( \"// --- obtained stderr ---\" )\n print( t.stderrdata )",
"def generate_report(self):\n output_path = get_run_artifact_path(self.fips, \"backtest_result\")\n pdf = matplotlib.backends.backend_pdf.PdfPages(output_path)\n self.plot_backtest_results(self.backtest_results, pdf)\n self.plot_historical_predictions(self.historical_predictions, self.observations, pdf)\n pdf.close()",
"def print_results(self):\n\n tot_blocks = reduce(lambda x, y: x + y, map(lambda x: x[\"block_number\"], self.struct.partial_payoff.values()))\n tot_payoff = reduce(lambda x, y: x + y, map(lambda x: x[\"payoff\"], self.struct.partial_payoff.values()))\n\n print(\"==========\")\n for miner in self.miners:\n print(\"Miner: {}\".format(miner.name))\n print(\"Hash Power: {} ({:.2f}%)\".format(self.h[miner.name], self.h[miner.name] * 100 / self.tot_h))\n print(\"Block Number: {} ({:.2f}%)\".format(self.struct.partial_payoff[miner.name][\"block_number\"], self.struct.partial_payoff[miner.name][\"block_number\"] * 100 / tot_blocks if tot_blocks else 0))\n print(\"Payoff: {:.2f} ({:.2f}%)\".format(self.struct.partial_payoff[miner.name][\"payoff\"], self.struct.partial_payoff[miner.name][\"payoff\"] * 100 / tot_payoff if tot_payoff else 0))\n print(\"==========\")",
"def print_results(self, outputs=True, net=False):\n # Print outputs\n if (outputs):\n results = self.pso.best.outputs\n df = self.ideal.copy()\n df['results'] = results\n print(df.head(10))\n # Print the net\n if (net):\n self.pso.best.network.print_net()",
"def print_results(self):\n avg_rew = sum(self.rew_list)/self.num_ep\n print (\"Score over time: \" + str(avg_rew))\n print (\"Final Q-Table: \")\n print (self.Q)",
"def show_summary():\n for group_name, results in test_results:\n num_total = len(results)\n num_passed = sum(1 for x in results if x[0])\n num_failed = num_total - num_passed\n print(\"[STAT] Results for '%s' : %d%% [%d passed, %d failed] / %d total\" %\n (\n group_name,\n num_passed / num_total * 100,\n num_passed,\n num_failed,\n num_total\n )\n )",
"def generate_flaky_summary(cls):\n if TestWrapper.TEST_ERROR_TRACEBACKS:\n print(\"\\n===Flaky Test Report===\\n\")\n for traceback in TestWrapper.TEST_ERROR_TRACEBACKS:\n print(traceback)\n print(\"===End Flaky Test Report===\")",
"def list():\n print \"All tests: %s\\n\" % \" \".join(tests.keys())\n for name, test in tests.iteritems():\n print \"%s | %s \" % (name, test.description)",
"def printResults(resultList):\n # for FAR\n best = findBestErrorRate(resultList)\n\n # for MR\n # best = findBestErrorRateM(resultList)\n print \"best found feature subset / model parameters for \" + str(config[\"folds\"]) + \"-folded CV with \" + str(\n len(gammaVal)) + \" gamma values and \" + str(len(nuVal)) + \" nu values:\"\n print \"gamma : %s\" % str(best[1][0])\n print \"nu : %s\" % str(best[1][1])\n print \"feature subset : %s\" % str(best[1][2])\n print \"grid search results : %s%% false alarm rate, %s%% miss rate\" % (\n str(best[0][0] * 100), str(best[0][1] * 100))\n print \"------------------------------------------------------------\"",
"def _DisplayResults(self):\n print\n print '=' * 78\n print 'DIAGNOSTIC RESULTS'.center(78)\n print '=' * 78\n\n if 'latency' in self.results:\n print\n print '-' * 78\n print 'Latency'.center(78)\n print '-' * 78\n print ('Operation Size Trials Mean (ms) Std Dev (ms) '\n 'Median (ms) 90th % (ms)')\n print ('========= ========= ====== ========= ============ '\n '=========== ===========')\n for key in sorted(self.results['latency']):\n trials = sorted(self.results['latency'][key])\n op, numbytes = key.split('_')\n numbytes = int(numbytes)\n if op == 'METADATA':\n print 'Metadata'.rjust(9), '',\n print MakeHumanReadable(numbytes).rjust(9), '',\n self._DisplayStats(trials)\n if op == 'DOWNLOAD':\n print 'Download'.rjust(9), '',\n print MakeHumanReadable(numbytes).rjust(9), '',\n self._DisplayStats(trials)\n if op == 'UPLOAD':\n print 'Upload'.rjust(9), '',\n print MakeHumanReadable(numbytes).rjust(9), '',\n self._DisplayStats(trials)\n if op == 'DELETE':\n print 'Delete'.rjust(9), '',\n print MakeHumanReadable(numbytes).rjust(9), '',\n self._DisplayStats(trials)\n\n if 'write_throughput' in self.results:\n print\n print '-' * 78\n print 'Write Throughput'.center(78)\n print '-' * 78\n write_thru = self.results['write_throughput']\n print 'Copied a %s file %d times for a total transfer size of %s.' % (\n MakeHumanReadable(write_thru['file_size']),\n write_thru['num_copies'],\n MakeHumanReadable(write_thru['total_bytes_copied']))\n print 'Write throughput: %s/s.' % (\n MakeBitsHumanReadable(write_thru['bytes_per_second'] * 8))\n\n if 'read_throughput' in self.results:\n print\n print '-' * 78\n print 'Read Throughput'.center(78)\n print '-' * 78\n read_thru = self.results['read_throughput']\n print 'Copied a %s file %d times for a total transfer size of %s.' % (\n MakeHumanReadable(read_thru['file_size']),\n read_thru['num_times'],\n MakeHumanReadable(read_thru['total_bytes_copied']))\n print 'Read throughput: %s/s.' 
% (\n MakeBitsHumanReadable(read_thru['bytes_per_second'] * 8))\n\n if 'listing' in self.results:\n print\n print '-' * 78\n print 'Listing'.center(78)\n print '-' * 78\n\n listing = self.results['listing']\n insert = listing['insert']\n delete = listing['delete']\n print 'After inserting %s objects:' % listing['num_files']\n print (' Total time for objects to appear: %.2g seconds' %\n insert['time_took'])\n print ' Number of listing calls made: %s' % insert['num_listing_calls']\n print (' Individual listing call latencies: [%s]' %\n ', '.join('%.2gs' % lat for lat in insert['list_latencies']))\n print (' Files reflected after each call: [%s]' %\n ', '.join(map(str, insert['files_seen_after_listing'])))\n\n print 'After deleting %s objects:' % listing['num_files']\n print (' Total time for objects to appear: %.2g seconds' %\n delete['time_took'])\n print ' Number of listing calls made: %s' % delete['num_listing_calls']\n print (' Individual listing call latencies: [%s]' %\n ', '.join('%.2gs' % lat for lat in delete['list_latencies']))\n print (' Files reflected after each call: [%s]' %\n ', '.join(map(str, delete['files_seen_after_listing'])))\n\n if 'sysinfo' in self.results:\n print\n print '-' * 78\n print 'System Information'.center(78)\n print '-' * 78\n info = self.results['sysinfo']\n print 'IP Address: \\n %s' % info['ip_address']\n print 'Temporary Directory: \\n %s' % info['tempdir']\n print 'Bucket URI: \\n %s' % self.results['bucket_uri']\n print 'gsutil Version: \\n %s' % self.results.get('gsutil_version',\n 'Unknown')\n print 'boto Version: \\n %s' % self.results.get('boto_version', 'Unknown')\n\n if 'gmt_timestamp' in info:\n ts_string = info['gmt_timestamp']\n timetuple = None\n try:\n # Convert RFC 2822 string to Linux timestamp.\n timetuple = time.strptime(ts_string, '%a, %d %b %Y %H:%M:%S +0000')\n except ValueError:\n pass\n\n if timetuple:\n # Converts the GMT time tuple to local Linux timestamp.\n localtime = calendar.timegm(timetuple)\n localdt = datetime.datetime.fromtimestamp(localtime)\n print 'Measurement time: \\n %s' % localdt.strftime(\n '%Y-%m-%d %I:%M:%S %p %Z')\n\n print 'Google Server: \\n %s' % info['googserv_route']\n print ('Google Server IP Addresses: \\n %s' %\n ('\\n '.join(info['googserv_ips'])))\n print ('Google Server Hostnames: \\n %s' %\n ('\\n '.join(info['googserv_hostnames'])))\n print 'Google DNS thinks your IP is: \\n %s' % info['dns_o-o_ip']\n print 'CPU Count: \\n %s' % info['cpu_count']\n print 'CPU Load Average: \\n %s' % info['load_avg']\n try:\n print ('Total Memory: \\n %s' %\n MakeHumanReadable(info['meminfo']['mem_total']))\n # Free memory is really MemFree + Buffers + Cached.\n print 'Free Memory: \\n %s' % MakeHumanReadable(\n info['meminfo']['mem_free'] +\n info['meminfo']['mem_buffers'] +\n info['meminfo']['mem_cached'])\n except TypeError:\n pass\n\n if 'netstat_end' in info and 'netstat_start' in info:\n netstat_after = info['netstat_end']\n netstat_before = info['netstat_start']\n for tcp_type in ('sent', 'received', 'retransmit'):\n try:\n delta = (netstat_after['tcp_%s' % tcp_type] -\n netstat_before['tcp_%s' % tcp_type])\n print 'TCP segments %s during test:\\n %d' % (tcp_type, delta)\n except TypeError:\n pass\n else:\n print ('TCP segment counts not available because \"netstat\" was not '\n 'found during test runs')\n\n if 'disk_counters_end' in info and 'disk_counters_start' in info:\n print 'Disk Counter Deltas:\\n',\n disk_after = info['disk_counters_end']\n disk_before = info['disk_counters_start']\n print '', 
'disk'.rjust(6),\n for colname in ['reads', 'writes', 'rbytes', 'wbytes', 'rtime',\n 'wtime']:\n print colname.rjust(8),\n print\n for diskname in sorted(disk_after):\n before = disk_before[diskname]\n after = disk_after[diskname]\n (reads1, writes1, rbytes1, wbytes1, rtime1, wtime1) = before\n (reads2, writes2, rbytes2, wbytes2, rtime2, wtime2) = after\n print '', diskname.rjust(6),\n deltas = [reads2-reads1, writes2-writes1, rbytes2-rbytes1,\n wbytes2-wbytes1, rtime2-rtime1, wtime2-wtime1]\n for delta in deltas:\n print str(delta).rjust(8),\n print\n\n if 'tcp_proc_values' in info:\n print 'TCP /proc values:\\n',\n for item in info['tcp_proc_values'].iteritems():\n print ' %s = %s' % item\n\n if 'boto_https_enabled' in info:\n print 'Boto HTTPS Enabled: \\n %s' % info['boto_https_enabled']\n\n if 'using_proxy' in info:\n print 'Requests routed through proxy: \\n %s' % info['using_proxy']\n\n if 'google_host_dns_latency' in info:\n print ('Latency of the DNS lookup for Google Storage server (ms): '\n '\\n %.1f' % (info['google_host_dns_latency'] * 1000.0))\n\n if 'google_host_connect_latencies' in info:\n print 'Latencies connecting to Google Storage server IPs (ms):'\n for ip, latency in info['google_host_connect_latencies'].iteritems():\n print ' %s = %.1f' % (ip, latency * 1000.0)\n\n if 'proxy_dns_latency' in info:\n print ('Latency of the DNS lookup for the configured proxy (ms): '\n '\\n %.1f' % (info['proxy_dns_latency'] * 1000.0))\n\n if 'proxy_host_connect_latency' in info:\n print ('Latency connecting to the configured proxy (ms): \\n %.1f' %\n (info['proxy_host_connect_latency'] * 1000.0))\n\n if 'request_errors' in self.results and 'total_requests' in self.results:\n print\n print '-' * 78\n print 'In-Process HTTP Statistics'.center(78)\n print '-' * 78\n total = int(self.results['total_requests'])\n numerrors = int(self.results['request_errors'])\n numbreaks = int(self.results['connection_breaks'])\n availability = (((total - numerrors) / float(total)) * 100\n if total > 0 else 100)\n print 'Total HTTP requests made: %d' % total\n print 'HTTP 5xx errors: %d' % numerrors\n print 'HTTP connections broken: %d' % numbreaks\n print 'Availability: %.7g%%' % availability\n if 'error_responses_by_code' in self.results:\n sorted_codes = sorted(\n self.results['error_responses_by_code'].iteritems())\n if sorted_codes:\n print 'Error responses by code:'\n print '\\n'.join(' %s: %s' % c for c in sorted_codes)\n\n if self.output_file:\n with open(self.output_file, 'w') as f:\n json.dump(self.results, f, indent=2)\n print\n print \"Output file written to '%s'.\" % self.output_file\n\n print",
"def print_report():\n print_days_percent_errors()\n print \"\"\n print_popular_authors()\n print \"\"\n print_popular_articles()\n print \"\"",
"def main(resultsdb_url, frontend_url, timeparam):\n api = resultsdb_api.ResultsDBapi(resultsdb_url)\n\n results = []\n page = 0\n r = api.get_results(since=timeparam, page=page)\n while len(r[\"data\"]) != 0:\n results.extend(r[\"data\"])\n page += 1\n r = api.get_results(since=timeparam, page=page)\n\n passed = 0\n passed_types = {}\n failed = 0\n failed_types = {}\n failed_links = {}\n together = {}\n for result in results:\n test_case = result[\"testcase\"][\"name\"]\n if result[\"outcome\"] in OKAYISH:\n passed += 1\n passed_types[test_case] = passed_types.get(test_case, 0) + 1\n else:\n failed += 1\n failed_types[test_case] = failed_types.get(test_case, 0) + 1\n test_url = urljoin(frontend_url, \"results/%d\" % result[\"id\"])\n if test_case not in failed_links:\n failed_links[test_case] = [test_url]\n else:\n failed_links[test_case].append(test_url)\n together[test_case] = together.get(test_case, 0) + 1\n\n output = \"libtaskotron results\\n====================\\n\"\n output += \"Generated on: \" + socket.gethostname() + \"\\n\"\n [from_time, to_time] = timeparam.split(\",\")\n output += \"From: \" + from_time + \"\\n\"\n output += \"To: \" + to_time + \"\\n\\n\"\n output += \"Passed: %d\\nFailed: %d\\n\\n\" % (passed, failed)\n output += \"Passed checks:\\n--------------\\n\"\n for check in passed_types.keys():\n output += \"%s: %d\\n\" % (check, passed_types[check])\n output += \"\\n\"\n output += \"Failed checks:\\n--------------\\n\"\n for check in failed_types.keys():\n output += \"%s: %d\\n\" % (check, failed_types[check])\n output += \"\\n\"\n output += \"Links to failed checks:\\n-----------------------\\n\"\n for i, check in enumerate(failed_links.keys()):\n if i != 0:\n output += \"\\n\\n\"\n output += check + \":\\n\"\n output += \"\\n\".join(failed_links[check])\n return output",
"def print_result(self, allocations, non_executables):\n\n print \"\\nAllocations\"\n for i, a in enumerate(allocations):\n print \"Machine %i (%ds):\" % (i, a[self._TOT_DUR])\n if a[self._TEST_SET]:\n for t in a[self._TEST_SET]:\n print \"%s (%ss);\" % (t.title, t.duration),\n print\n else:\n print \"<Empty>\"\n if non_executables:\n print \"Non-Executable Tests:\"\n for t in non_executables:\n print t"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prints out all the trades conducted so far.
|
def print_trades(self, stdout=sys.__stdout__):
previous_stdout = sys.stdout
if stdout is not None: # Temporarily redirects output to stdout provided.
sys.stdout = stdout
print("\nTrades made:")
for trade in self.trades:
print(f'\t{trade["date"].strftime("%Y-%m-%d %H:%M")}: (${trade["net"]}) {trade["action"]}')
sys.stdout = previous_stdout # revert stdout back to normal
|
[
"def print_all(self):\n print(\"Models:\", self._models)\n print(\"Scores:\", self._scores)\n print(\"MSE:\", self._mse)\n print()",
"def print_backtest_results(self, stdout=None):\n previous_stdout = sys.stdout\n if stdout is not None: # Temporarily redirects output to stdout provided.\n sys.stdout = stdout\n\n print(\"\\nBacktest results:\")\n print(f'\\tSymbol: {\"Unknown/Imported Data\" if self.symbol is None else self.symbol}')\n print(f'\\tElapsed: {round(self.endTime - self.startTime, 2)} seconds')\n print(f'\\tStart Period: {self.data[self.startDateIndex][\"date_utc\"]}')\n print(f\"\\tEnd Period: {self.currentPeriod['date_utc']}\")\n print(f'\\tStarting balance: ${round(self.startingBalance, self.precision)}')\n print(f'\\tNet: ${round(self.get_net(), self.precision)}')\n print(f'\\tCommissions paid: ${round(self.commissionsPaid, self.precision)}')\n print(f'\\tTrades made: {len(self.trades)}')\n net = self.get_net()\n difference = round(net - self.startingBalance, self.precision)\n if difference > 0:\n print(f'\\tProfit: ${difference}')\n print(f'\\tProfit Percentage: {round(net / self.startingBalance * 100 - 100, 2)}%')\n elif difference < 0:\n print(f'\\tLoss: ${-difference}')\n print(f'\\tLoss Percentage: {round(100 - net / self.startingBalance * 100, 2)}%')\n else:\n print(\"\\tNo profit or loss incurred.\")\n # print(f'Balance: ${round(self.balance, 2)}')\n # print(f'Coin owed: {round(self.coinOwed, 2)}')\n # print(f'Coin owned: {round(self.coin, 2)}')\n # print(f'Trend: {self.trend}')\n\n sys.stdout = previous_stdout # revert stdout back to normal",
"def print_results(self):\n for c in self._winners:\n print \"Elected %s\"%(c._name)",
"def print_results(self):\n\n tot_blocks = reduce(lambda x, y: x + y, map(lambda x: x[\"block_number\"], self.struct.partial_payoff.values()))\n tot_payoff = reduce(lambda x, y: x + y, map(lambda x: x[\"payoff\"], self.struct.partial_payoff.values()))\n\n print(\"==========\")\n for miner in self.miners:\n print(\"Miner: {}\".format(miner.name))\n print(\"Hash Power: {} ({:.2f}%)\".format(self.h[miner.name], self.h[miner.name] * 100 / self.tot_h))\n print(\"Block Number: {} ({:.2f}%)\".format(self.struct.partial_payoff[miner.name][\"block_number\"], self.struct.partial_payoff[miner.name][\"block_number\"] * 100 / tot_blocks if tot_blocks else 0))\n print(\"Payoff: {:.2f} ({:.2f}%)\".format(self.struct.partial_payoff[miner.name][\"payoff\"], self.struct.partial_payoff[miner.name][\"payoff\"] * 100 / tot_payoff if tot_payoff else 0))\n print(\"==========\")",
"def prints(self):\n print('Trip\\n\\tstart date: {}\\n\\tfinal date: {}\\n\\tgasoline: {}'.\n format(time.strftime(\"%Y.%m.%d %H:%M\",\n time.localtime(self.start_date)),\n time.strftime(\"%Y.%m.%d %H:%M\",\n time.localtime(self.end_date)),\n self.fuel))",
"def print_timeline(self):\n tweets = self.get_tweets_from_timeline()\n tweets = MakingActions.get_text_from_list(tweets)\n for items in tweets:\n print items",
"def print_report():\n print_days_percent_errors()\n print \"\"\n print_popular_authors()\n print \"\"\n print_popular_articles()\n print \"\"",
"def show_report(report):\n print()\n for line in report:\n print(line)\n print()",
"def print_results(self):\n avg_rew = sum(self.rew_list)/self.num_ep\n print (\"Score over time: \" + str(avg_rew))\n print (\"Final Q-Table: \")\n print (self.Q)",
"def page__trades(app, ctx):\r\n page = Template(ctx)\r\n page.add(H1(\"Trades\"))\r\n\r\n page.add(DIV(A(\"Download as CSV\", href=umap(\"@@TradesCSV\")), CLASS='right-link'))\r\n\r\n style = ctx.session.get('style', 'full')\r\n ledger = ctx.ledger\r\n\r\n for bt in ledger.booked_trades:\r\n\r\n legs_table = TABLE(\r\n THEAD(\r\n TR(TH(\"Date\"), TH(\"Units\"), TH(\"Price\"),\r\n TH(\"Amount\"), TH(\"Exchange Rate\"), TH(\"Report Amount (target CCY)\"))),\r\n CLASS=\"trades\")\r\n\r\n for leg in bt.legs:\r\n legs_table.add(\r\n TR(TD(str(leg.post.actual_date)),\r\n TD(hwallet(Wallet(bt.comm_book, leg.amount_book))),\r\n TD(hwallet(Wallet(leg.comm_price, leg.price))),\r\n TD(hwallet(Wallet(leg.comm_price, leg.amount_price))),\r\n TD(hwallet(Wallet('%s/%s' % (leg.comm_price, bt.comm_target or '-'), leg.xrate))),\r\n TD(hwallet(Wallet(bt.comm_target or leg.comm_price, leg.amount_target))),\r\n ))\r\n\r\n post_book = bt.post_book\r\n\r\n # Note: negate the final amounts as they were applied to flow values\r\n # (income/expense).\r\n legs_table.add(\r\n TR(TD('Gain(+) / Loss(-)'),\r\n TD(),\r\n TD(),\r\n TD(hwallet(-bt.post_book.amount_orig)),\r\n TD(),\r\n TD(hwallet(-bt.post_book.amount)),\r\n ))\r\n\r\n postings = [x.post for x in bt.legs]\r\n overrides = dict((x.post, Wallet(bt.comm_book, x.amount_book))\r\n for x in bt.legs)\r\n table = render_postings_table(postings, style,\r\n amount_overrides=overrides)\r\n title = '%s - %s %s ; %s' % (\r\n bt.close_date(),\r\n bt.comm_book,\r\n 'in %s' % bt.comm_target if bt.comm_target else '',\r\n bt.account.fullname)\r\n page.add(DIV(H2(title), legs_table, P(\"Corresponding transactions:\"), table,\r\n CLASS='btrade'))\r\n\r\n return page.render(app)",
"def view(self):\n # 1. Print all the records and report the balance.\n print(\"Date\"+\" \"*10+\"Categories\"+\" \"*10+\"Description\"+\" \"*10+\"Amount\")\n print(\"=================================\")\n for i in self._records:\n print(f\"{i.date:<20}{i.category:<20s} {i.description:^20s} {str(i.amount):>20s}\")\n print(\"=================================\")\n print('Now you have %d dollars.'%self._initial_money)",
"def print_results(self):\n self._write_term('\\nPassed {} of {} tests, {} failed.\\n'.format(self.passes,self.numTests,self.fails))",
"def printAll(self):\n StudentList.printAll(self)",
"def show_performed_experiments(self):\n for experiment in self.completed_tasks:\n print(experiment)",
"def printInfo(self ):\n table1 = PrettyTable(field_names=[\"Tickers\",\"Weights\"],header= True)\n for i in range(0,len(self.portSet.tickers)):\n table1.add_row([self.portSet.tickers[i],self.w[i]])\n print(table1)\n\n table2 = PrettyTable(field_names= [\"Portfolio Return\",\"Portfolio Risk\"],header=True)\n table2.add_row([self.getPortReturns(),self.getPortRisk()])\n print(table2)",
"def print(self):\n for card in self.deck:\n print(card)",
"def display(self):\r\n if self.formatter and self.verbose > 0:\r\n res = self.results\r\n if res:\r\n print >> self.out, \"\"\r\n for a in self.formatter(res, map(lambda x: x[0], self.headers)):\r\n print >> self.out, a\r\n print >> self.out, \"\"",
"def all_trips(self):\n print \"Total {0} non-iterating trips\".format(len(self.trips))\n for trip in self.trips:\n print trip",
"def print_round_overview(self):\n print('Round overview')\n for r in range(self.num_rounds):\n round_number = r + 1\n print('Auction round', round_number)\n print('Bids', self.bids[r])\n print('Winner: Bidder', self.winners[r])\n print('Payment:', self.payments[r])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a default backtest result file name.
|
def get_default_result_file_name(self):
backtestResultsFolder = 'Backtest Results'
symbol = 'Imported' if not self.symbol else self.symbol
dateString = datetime.now().strftime("%Y-%m-%d_%H-%M")
resultFile = f'{symbol}_backtest_results_{"_".join(self.interval.lower().split())}-{dateString}.txt'
os.chdir('../')
if not os.path.exists(backtestResultsFolder):
os.mkdir(backtestResultsFolder)
os.chdir(backtestResultsFolder)
counter = 0
previousFile = resultFile
while os.path.exists(resultFile):
resultFile = f'({counter}){previousFile}'
counter += 1
return resultFile
|
[
"def default_test_results_location():\n return os.path.join(repo_root(), \"test-results\")",
"def get_default_filename(cls) -> str:\n return cls.__open('default_filename')",
"def getDefaultBackupFile(self):\n def getName(index=0):\n \"\"\"\n Try to create an unique backup file name.\n\n @return: tar file name\n @rtype: string\n \"\"\"\n return 'zenbackup_%s%s.tgz' % (date.today().strftime('%Y%m%d'),\n (index and '_%s' % index) or '')\n backupDir = zenPath('backups')\n if not os.path.exists(backupDir):\n os.mkdir(backupDir, 0750)\n for i in range(MAX_UNIQUE_NAME_ATTEMPTS):\n name = os.path.join(backupDir, getName(i))\n if not os.path.exists(name):\n break\n else:\n self.log.critical(\n 'Cannot determine an unique file name to use in the backup '\n 'directory (%s). Use --outfile to specify location for the '\n 'backup file.\\n', backupDir)\n sys.exit(-1)\n return name",
"def result_file(self):\n return \"\"\"--result-file=file\"\"\"",
"def get_full_filepath(test_filename):\n file_path = os.path.dirname(os.path.abspath(__file__))\n return_filepath = os.path.abspath(file_path + \"/responses/\" + test_filename)\n return return_filepath",
"def test_case_name(expected_output_path):\n basename = os.path.basename(expected_output_path)\n basename = basename.replace('-out-', '-')\n basename = basename.replace('-in-', '-')\n basename = basename.replace('.txt', '')\n return basename",
"def name_file(self, output_filename):\n return self.output_path / output_filename",
"def default_save_as_fname(input_fname):\n parts = input_fname.split('.')\n if len(parts) == 1:\n return parts[0] + \"_hrv\"\n\n return '.'.join(parts[:-1]) + '_hrv'",
"def TemporaryDwfFileName(self) -> str:",
"def get_output_basename(self):\n cumf_base_name = self.options[\"full_task_name\"]\n cumf_base_name = re.sub(r\"[() ]\", r\"_\", cumf_base_name)\n if cumf_base_name.endswith(\"_\"):\n cumf_base_name = cumf_base_name[:-1]\n return \"ana.\" + cumf_base_name",
"def get_result_path():\n return os.getcwd() + '/' + _result_folder",
"def GetVtsHostTestScriptFileName(self):\n test_script_name = self._test_module_name + 'Test.py'\n return os.path.join(\n self.GetHalTestCasePath(ignore_profiling=True), test_script_name)",
"def _get_output_file_name(self):\n datetime_suffix = datetime.now().strftime('%Y%m%d_%H%M%S')\n\n # Only select the non-empty strings from the file name parts\n output_file_name = '_'.join([a for a in\n [self.output_file_name_prefix, self.output_file_name,\n self.output_file_name_suffix, datetime_suffix] if a\n ])\n\n return f\"{output_file_name}{self._get_output_file_extension()}\"",
"def get_test_baseline(file_name):\n return os.path.abspath(\n os.path.join(\n os.path.abspath(__file__),\n u'..',\n u'..',\n u'osqlcli',\n u'jsonrpc',\n u'contracts',\n u'tests',\n u'baselines',\n file_name))",
"def generate_file_name():\n import datetime\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n filename = \"game saved at {}\".format(now)\n return filename",
"def getDefaultOutputPath(self):\n return self.session.request('bootcdbuilder/defaults')",
"def test_name(self) -> None:\n return self._test_name",
"def get_current_test_name():\n # PYTEST_CURRENT_TEST value will be of syntax \"FILE_NAME::FUNC_NAME (STAGE)\"\n full_name = os.getenv(\"PYTEST_CURRENT_TEST\", \"\").split(\" \")[0]\n return full_name.split(\"::\")[-1]",
"def get_default_path():\n\n # Try to read from app.yaml; if we can't find it, return the GAEUnit\n # default '/test'\n try:\n gae_config_file = open('/Users/jon/Projects/stremorshort/app.yaml', 'r')\n except IOError as e: pass\n else:\n loaded = yaml.load(gae_config_file.read())\n gae_config_file.close()\n\n for handler in loaded['handlers']:\n if 'script' in handler and handler['script'].startswith('gaeunit'):\n return re.sub(r'[^\\w/]', '', handler['url'])\n\n return '/test'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Writes backtest results to resultFile provided. If none is provided, it'll write to a default file name.
|
def write_results(self, resultFile=None) -> str:
currentPath = os.getcwd()
if not resultFile:
resultFile = self.get_default_result_file_name()
with open(resultFile, 'w') as f:
self.print_configuration_parameters(f)
self.print_backtest_results(f)
if self.outputTrades:
self.print_trades(f)
filePath = os.path.join(os.getcwd(), resultFile)
os.chdir(currentPath)
return filePath
|
[
"def write_results(filename):",
"def save_results(self):\n\n # Save the results\n self.output_results = '{}_results.dat'.format(self.method)\n with open(self.output_results, 'w') as f:\n f.write(str(self.result))\n print('Results file saved to {}'.format(self.output_results))",
"def write_results(results,file):\n np.savetxt(file,results)",
"def write_result(result: Sequence[eval_lib.EvaluationResult]) -> None:\n if FLAGS.name_override:\n file_name = FLAGS.name_override\n elif FLAGS.checkpoint_idx is not None:\n file_name = f'{FLAGS.agent}_{FLAGS.checkpoint_idx}'\n else:\n file_name = FLAGS.agent\n\n if FLAGS.num_shards > 1:\n file_name = f'{file_name}_{FLAGS.shard_idx}'\n file_name = f'{file_name}.json'\n\n dir_path = os.path.join(FLAGS.output_dir, FLAGS.suite)\n file_path = os.path.join(dir_path, file_name)\n\n indent = 2 if FLAGS.pretty_json else None\n\n\n os.makedirs(dir_path, exist_ok=True)\n with open(file_path, 'w') as f:\n json.dump(result, f, cls=eval_lib.EvalResultEncoder, indent=indent)",
"def write_tx_live_results_to_file(results_path, file_name, results):\n filec = os.path.join(results_path, file_name + \".tx\")\n with open(filec, \"a\") as result_file:\n result_file.write('{0},{1},{9},{2},{3},{4},{5},{6},{7},{8}\\n'\n .format(time.time(), results['StreamId'],\n results['FrameCount'], results['FrameRate'],\n results['ExpectedRxFrameCount'],\n results['OctetCount'], results['OctetRate'],\n results['BitCount'], results['BitRate'],\n results['BlockId']))",
"def get_default_result_file_name(self):\n backtestResultsFolder = 'Backtest Results'\n symbol = 'Imported' if not self.symbol else self.symbol\n dateString = datetime.now().strftime(\"%Y-%m-%d_%H-%M\")\n resultFile = f'{symbol}_backtest_results_{\"_\".join(self.interval.lower().split())}-{dateString}.txt'\n os.chdir('../')\n\n if not os.path.exists(backtestResultsFolder):\n os.mkdir(backtestResultsFolder)\n os.chdir(backtestResultsFolder)\n\n counter = 0\n previousFile = resultFile\n\n while os.path.exists(resultFile):\n resultFile = f'({counter}){previousFile}'\n counter += 1\n\n return resultFile",
"def write_to_file(path=None):\n if path is None:\n path = outpath\n suite.set_status()\n testoutput = ResultFromXml(suite, errors)\n testoutput.serialize_output(path)",
"def Write_results_out(ObjFunVal, file='results.out', workdir='.'):\n file = os.path.join(workdir, file)\n with open(file, 'w') as f:\n f.write(str(ObjFunVal))\n return",
"def save_results(self, name):\n\n # ----\n # Create savedir if needed\n try:\n os.mkdir(self.savedir)\n except OSError:\n pass\n \n print(\"Writing results to disk.\")\n savepath = os.path.join(self.savedir, name+\".hdf5\")\n \n write_hdf(self.results, savepath)",
"def parse_results_and_write_files(working_dir: WorkingDir, results_file: Path, fallback_time: datetime):\n results = parse_results(working_dir._working_dir / results_file)\n bots = load_all_bots_versioned(working_dir)\n for result in results:\n\n blue_time = fallback_time\n if result.blue in bots:\n blue_time = bots[result.blue].updated_date\n\n orange_time = fallback_time\n if result.orange in bots:\n orange_time = bots[result.orange].updated_date\n\n result_path = working_dir.get_version_specific_match_result_from_times(\n result.blue, blue_time, result.orange, orange_time)\n\n print(f'Writing result to {result_path}')\n result.write(result_path)",
"def benchmark_to_file(self, filename):\n benchmark = self.get_results()\n # sort all result lists by score in descending order\n for query in benchmark:\n benchmark[query].sort(key=lambda x: x[1], reverse=True)\n with open(filename, 'w') as f:\n for query, result_list in benchmark.items():\n query_row = '%d\\t' % int(query)\n for result, score in result_list:\n query_row += '%d=%d ' % (result, score)\n query_row += '\\n'\n f.write(query_row)",
"def write_rx_live_results_to_file(results_path, file_name, results):\n filec = os.path.join(results_path, file_name + \".rx\")\n with open(filec, \"a\") as result_file:\n result_file.write('{0},{3},{1},{2},{4},{5},{6},{7},{8},{9},{10},{11}\\n'\n .format(time.time(), results['DroppedFrameCount'],\n results['SeqRunLength'], results['RxPort'],\n results['AvgLatency'],\n results['DroppedFrameRate'],\n results['FrameCount'], results['FrameRate'],\n results['MaxLatency'], results['MinLatency'],\n results['OctetCount'], results['OctetRate']))",
"def __update_result_file(self):\n try:\n test_failure_reason = \"\"\n for key in self.result_dict:\n tcreason = self.result_dict[key]\n tc_id = self.tc_id + \"_\" + key\n if tcreason:\n tcstatus = \"FAIL\"\n message = \"Test Case ID: %s\" % tc_id + \"\\nTest Case\"\\\n \" Status: %s\" % tcstatus + \"\\nFailure \"\\\n \"Reason: %s\" % tcreason\n decorated_msg = self.common.get_decorated_message(\n message, \"-\", 70)\n LOG_OBJ.info(decorated_msg)\n print decorated_msg\n if tcreason not in test_failure_reason:\n test_failure_reason += tcreason\n else:\n tcstatus = \"PASS\"\n message = \"Test Case ID: %s\" % tc_id + \"\\nTest Case\"\\\n \" Status: %s\" % tcstatus\n decorated_msg = self.common.get_decorated_message(\n message, \"-\", 70)\n LOG_OBJ.info(decorated_msg)\n\n tcstatus = 'FAIL' if test_failure_reason else \"PASS\"\n # During stress testing don't update result file.\n if \"main\" not in threading.currentThread().getName().lower():\n StressTestHelper().stress_test_result_update(\n self.tc_id, tcstatus, test_failure_reason)\n return\n self.common.test_result_update(\n self.tc_id, tcstatus, test_failure_reason)\n except Exception as err:\n LOG_OBJ.exception(err)\n return \"Exception occurred while updating test result\"\\\n \" in result file.\"",
"def test_results_file(\n self, setup_teardown, file_regression: FileRegressionFixture\n ):\n for case in self.CASES.keys():\n prefix = f'{case}__results'\n outputs = setup_teardown\n file_regression.check(outputs[prefix], basename=prefix)",
"def result_file(self):\n return \"\"\"--result-file=file\"\"\"",
"def write_evaluation_to_file( outputfile, formal_stats, assembly_name ):\n\t\n\tprint \"writing results to file ... please wait!\"\n\twith open( outputfile, 'w' ) as out:\n\t\tout.write( 'assembly name: ' + assembly_name + '\\n\\n' )\n\t\t\n\t\tout.write( 'number of contigs:\\t' + str( formal_stats['number_of_contigs'] ) + '\\n' )\n\t\tout.write( 'average contig length:\\t' + str( formal_stats['mean_contig_length'] ) + '\\n' )\n\t\tout.write( 'minimal contig length:\\t' + str( formal_stats['minimal_contig_length'] ) + '\\n' )\n\t\tout.write( 'maximal contig length:\\t' + str( formal_stats['maximal_contig_length'] ) + '\\n\\n' )\n\t\t\n\t\tout.write( 'total number of bases:\\t' + str( formal_stats['total_number_of_bases'] ) + '\\n' )\n\t\tout.write( 'total number of bases without Ns:\\t' + str( formal_stats['number_of_bases_without_N'] ) + '\\n' )\n\t\tout.write( 'GC content:\\t' + str( formal_stats['gc_content'] ) + '\\n\\n' )\n\t\t\n\t\tout.write( 'N25:\\t' + str( formal_stats['N25'] ) + '\\n' )\n\t\tout.write( 'N50:\\t' + str( formal_stats['N50'] ) + '\\n' )\n\t\tout.write( 'N75:\\t' + str( formal_stats['N75'] ) + '\\n' )\n\t\tout.write( 'N90:\\t' + str( formal_stats['N90'] ) + '\\n\\n' )\n\t\t\n\tprint \"all results written to file.\"",
"def write_results_to_file(self, pretty=False):\n with open(self.results_file, 'w') as f:\n if pretty:\n f.write(json.dumps(self.results, f, sort_keys=True, indent=4, separators=(',', ': ')))\n else:\n f.write(json.dumps(self.results, f))",
"def __save_results(self):\n if not self.__files:\n COLOR_MANAGER.print_error(\"Looks like there is nothing to save to the files, try again.\", \"\\n\\t\", \"\\n\")\n return\n COLOR_MANAGER.print_success(f\"Saving Results to Output Files in {self.__folder} folder...\", \"\\n\\t\", \"\\n\")\n for file in self.__files.keys():\n path = os.path.join(self.__folder, file + \".txt\")\n with open(path, \"w\") as f:\n f.write(self.__files[file])",
"def write_scores(correlation, resultsfile, runresfile):\r\n\r\n filename = os.path.basename(resultsfile)\r\n\r\n with open(runresfile, 'a') as outfile:\r\n outfile.write(str(filename))\r\n outfile.write('\\t')\r\n outfile.write(str(correlation))\r\n outfile.write('\\n')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Top k most frequently occurring words in the given files
|
def top_k_freq_words(self,file_names,top_k,seperator="#|#",return_word_only=True):
c = Counter()
for file_name in file_names:
print ("Reading file ",file_name)
with codecs.open(file_name, 'r',encoding='utf8') as fp:
for each_line in fp:
each_line = each_line.strip()
each_line = each_line.replace(seperator, " ")
each_line = each_line.split()
c.update(each_line)
most_common_words = c.most_common(top_k)
if return_word_only:
list_of_words = [x[0] for x in most_common_words]
return list_of_words
else:
return most_common_words
|
[
"def kTopWords(k,list):\r\n # Check at this point if the list is empty. If so, then the the program is exited.\r\n if list == []:\r\n print(\"Unable to continue:\\n1. Writing.txt is empty or\\n2. There is no word remaining after preprocessing.\")\r\n sys.exit()\r\n\r\n # Make sure the list parameter given starts with the first word in the list, otherwise\r\n # pop the first item in the list as it may be holding the value of total number of words\r\n # that were in the writing.\r\n try:\r\n list[0][1]\r\n except TypeError:\r\n list.pop(0)\r\n except IndexError:\r\n list.pop(0)\r\n\r\n # Also check if the input is an integer, otherwise prompt the user to re-input the k value.\r\n while type(k) is not int:\r\n try:\r\n k = int(input(\"How many top-most frequent words do I display: \"))\r\n except ValueError:\r\n pass\r\n except TypeError:\r\n pass\r\n\r\n # If the user asks for 0 top most frequent words, it would be nothing so the function is\r\n # exited.\r\n if k==0:\r\n sys.exit()\r\n # If the value of k is greater or equal to than the length of the list, then all the words\r\n # and its frequencies will be displayed.\r\n if k>=len(list):\r\n # Goes to the TopK function to get the top k words.\r\n # Time complexity of the function TopK is O(k).\r\n k=len(list)\r\n outputList=TopK(k,list)\r\n\r\n # outputList contains k elements only, so the loop below would have a complexity\r\n # of O(k).\r\n\r\n\r\n # Else if the value of k is less than the length of the list, the top k words\r\n # will be printed.\r\n else:\r\n # Goes to the TopK function to get the top k words.\r\n # Time complexity of the function TopK is O(k).\r\n outputList=TopK(k, list)\r\n # outputList contains k elements only, so the loop below would have a complexity\r\n # of O(k).\r\n\r\n\r\n\r\n list=outputList\r\n return list",
"def FrequentWordsFile(filename):\n with open(filename) as file:\n text = file.readline()\n k = int(file.readline())\n result = FrequentWords(text, k)\n with open('output.txt', 'w+') as fh:\n fh.write(result)\n print('Result written to file output.txt')",
"def wcount(lines, topn=10):\n dcount = defaultdict(int)\n for line in lines.splitlines():\n lst = [i.strip(string.punctuation) for i in line.split()]\n for word in lst:\n word = word.lower()\n dcount[word] += 1\n sor = sorted(dcount.items(), key=lambda t: t[1], reverse=True)\n if len(sor) >= topn:\n top = sor[:topn]\n else:\n top = sor\n for u in top:\n print(\"{}\\t{}\".format(*u))",
"def three_most_common_words(path):\n\n '''\n regex pattern details:\n \n (?:(?<=\\s)|(?<=^)) : Positive Lookbehind for space character or beginning of string\n ([a-zA-Z]+ : Match 1 or more alphabetic characters\n [-]? : Match 0 or 1 hyphens\n [a-zA-Z]*) - Match 0 or more alphabetic characters\n (?=\\s) - Positive Lookahead for space character\n '''\n word_pattern = re.compile(\"(?:(?<=\\s)|(?<=^))([a-zA-Z]+[-]?[a-zA-Z]*)(?=\\s)\")\n word_occurrences = {}\n\n try:\n with open(path) as file:\n for line in file:\n # find matching words and convert to lowercase\n words = [word.lower() for word in word_pattern.findall(line)]\n\n # increment word count for each word\n for word in words:\n if word in word_occurrences:\n word_occurrences[word] += 1\n else:\n word_occurrences[word] = 1\n\n # sort dictionary values and take top three\n three_tuples = sorted(word_occurrences.items(), key=operator.itemgetter(1), reverse=True)[:3]\n three_words = [i[0] for i in three_tuples]\n\n except FileNotFoundError:\n print(path + \": No such file or directory\")\n sys.exit(1)\n\n return three_words",
"def print_top_s(filename):\n word_count = words_count(filename)\n\n # Each item is a (word, count) tuple.\n # Sort them so the big counts are first using key=get_count() to extract count.\n items = sorted(word_count.items(), key= lambda w : w[1], reverse=True)\n\n # Print the first 20\n for item in items[:20]:\n print (item[0], item[1])",
"def top_10_words(hist, num = 10):\n t = most_common(hist)\n for freq, word in t[:num]:\n print(word,\"\\t\", freq)",
"def top_ngrams(tokenized_words, n=2, top=10):\n\tall_ngrams = []\n\tfor each in tokenized_words:\n\t\tall_ngrams += ngrams(each, n)\n\treturn FreqDist(all_ngrams).most_common(top)",
"def get_rare_words(corpus_file):\n word_counts = defaultdict(int)\n rare_words = []\n\n for l in corpus_file:\n line = l.strip()\n if line:\n linew = line.split(' ')\n if (linew[0]) in word_counts:\n word_counts[(linew[0])] += 1\n else:\n word_counts[(linew[0])] = 1\n \n for key in word_counts:\n if word_counts[key] < 5:\n rare_words.append(key)\n #print(rare_words)\n #print(len(rare_words))\n return rare_words",
"def frequent_words(text, k):\n\n l = len(text)\n d = {}\n\n for i in range(l - k + 1):\n kmer = text[i:i + k]\n if kmer in d:\n d[kmer] += 1\n else:\n d[kmer] = 1\n\n max_frequency = max(d.values())\n return [kmer for kmer, freq in d.items() if freq == max_frequency]",
"def most_frequent(s):\n words=[]\n words=s.split(\" \")\n words=sorted(words)\n word_count={}\n counts=[]\n for word in words:\n counts.append(words.count(word))\n m=counts.index(max(counts))\n return (words[m])\n \n # USING OrderedDict\n '''\n for word in words:\n word_count[word]=words.count(word)\n max_count=max(word_count.values())\n for word in OrderedDict(sorted(word_count.items(), key=lambda t:t[0])):\n if word_count[word]==ma\n x_count:\n return (\"Using OrderedDict:\", word)\n '''\n \n \n \n # HINT: Use the built-in split() function to transform the string s into an\n # array\n \n # HINT: Sort the new array by using the built-in sorted() function or\n # .sort() list method\n \n # HINT: Iterate through the array and count each occurance of every word\n # using the .count() list method\n \n # HINT: Find the number of times the most common word appears using max()\n \n # HINT: Locate the index of the most frequently seen word\n \n # HINT: Return the most frequent word. Remember that if there is a tie,\n # return the first (tied) word in alphabetical order.",
"def most_frequent_words(text, n, lowercase=False):\r\n # YOUR CODE HERE\r\n\r\n from collections import Counter\r\n\r\n if lowercase:\r\n words = [word.strip().lower() for word in text.split()]\r\n else:\r\n words = [word.strip() for word in text.split()]\r\n\r\n word_count = Counter(words)\r\n # most_freq = list(word_count.most_common(n))\r\n\r\n most_freq_list = []\r\n for i,j in word_count.most_common(n):\r\n most_freq_list.append(i)\r\n\r\n return most_freq_list\r\n\r\n pass",
"def get_top_verbs_in_path(path, top_size=10):\n all_words = get_all_words_in_path(path)\n verbs = flat([get_verbs_from_name(word) for word in all_words])\n return collections.Counter(verbs).most_common(top_size)",
"def wcount(lines, topn=10):\n\n for i in lines:\n if i>='A' and i<=\"Z\":\n continue\n elif i>=\"a\" and i<='z':\n continue\n elif i==' ':\n continue\n else:\n lines=lines.replace(i,' ')\n n1=lines.split(' ')\n for i in range(n1.count('')):\n n1.remove('')\n n2=set()\n for i in n1:\n n2.add((i,n1.count(i)))\n \n n3=[]\n for (i,o) in n2:\n n3.append(o)\n for i in range(topn):\n x=max(n3)\n n3.remove(x)\n y=max(n3)\n n4={}\n for (i,o) in n2:\n if o>y:\n n4[o]=i\n n5=list((n4.keys()))\n n5.sort(reverse=True)\n for i in n5:\n print (n4[i], '\\t', i)\n pass",
"def word_count(filename):\n with open(filename) as fh:\n text = fh.read().lower()\n wordList = re.compile('\\w+').findall(text) \n counter=collections.Counter(wordList)\n return sorted(counter.items())",
"def most_common_words(df, sentence, cl, label, **kwargs):\n\n df_ = df[df[cl]==label]\n df_ = df_[sentence].tolist()\n docx = ' '.join(str(x) for x in df_)\n docx = docx.split()\n word_counter = Counter(docx)\n\n top = 10\n\n for key, value in kwargs.items():\n if key == 'top':\n top = value\n\n for word, count in word_counter.most_common(top):\n print(word, ': ', count)",
"def most_words(self, n):\n big_tags = [[x[2], len(x[2].split(' '))] for x in self.data]\n big_tags = sorted(big_tags, key=lambda x: -int(x[1]))[:n]\n return collections.OrderedDict(big_tags)",
"def get_top_k_words(words, stopwords, k):\n # create dict of unique words to count them down\n un_words = {}\n for x in words:\n if x not in stopwords:\n if not un_words.__contains__(x):\n un_words[x] = 1\n else:\n un_words[x] += 1\n un_words = sorted(un_words.items(), key=operator.itemgetter(1), reverse=True)\n #top_k_words = []\n #for x in un_words[0:k]: # convert list into an array of words only\n # top_k_words.append(x[1])\n return un_words[0:k]",
"def get_top_n_words(corpus, n=None):\r\n vec = CountVectorizer().fit(corpus)\r\n bag_of_words = vec.transform(corpus)\r\n sum_words = bag_of_words.sum(axis=0)\r\n words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]\r\n words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)\r\n return words_freq[:n]",
"def getTopKWordsByLikelihood(self, K):\n pseudocounts = np.copy(self.n_vts)\n normalizer = np.sum(pseudocounts, (1, 2))\n pseudocounts /= normalizer[:, np.newaxis, np.newaxis]\n for t in range(self.numTopics):\n for s in range(self.numSentiments):\n topWordIndices = pseudocounts[:, t, s].argsort()[-1:-(K + 1):-1]\n vocab = self.vectorizer.get_feature_names()\n print(t, s, [vocab[i] for i in topWordIndices])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given the top k words, return the word2vec vectors for that file. top_k_words = [word1, word2, ...]
|
def top_k_word2vec(self,word2vec_file_name,top_k_words,word2vec_dimension,new_file_name):
#word2vec = pd.read_csv("../../temp_results/a.txt",sep=' ', header=None, skiprows=range(1))
model = models.KeyedVectors.load_word2vec_format(word2vec_file_name, binary=False)
filtered_vectors = model[top_k_words]
word2vec_frame = pd.DataFrame({'name':top_k_words})
for i in range(word2vec_dimension):
word2vec_frame[i] = filtered_vectors[:,i]
word2vec_frame.to_csv(new_file_name,sep=" ",encoding='utf-8',index=False)
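# Usage sketch, kept as comments so the snippet stays valid Python. All names here
# are assumptions, not from the source: `extractor` is an instance of the containing
# class and "vectors.txt" holds 300-dimensional word2vec vectors in text format.
#   top_words = ["data", "model", "learning"]
#   extractor.top_k_word2vec("vectors.txt", top_words, 300, "top_vectors.txt")
# The output CSV then has a 'name' column plus one column per vector dimension.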
|
[
"def get_top_k_words(words, stopwords, k):\n # create dict of unique words to count them down\n un_words = {}\n for x in words:\n if x not in stopwords:\n if not un_words.__contains__(x):\n un_words[x] = 1\n else:\n un_words[x] += 1\n un_words = sorted(un_words.items(), key=operator.itemgetter(1), reverse=True)\n #top_k_words = []\n #for x in un_words[0:k]: # convert list into an array of words only\n # top_k_words.append(x[1])\n return un_words[0:k]",
"def getTopKWords(self, K):\n pseudocounts = np.copy(self.n_vts)\n normalizer = np.sum(pseudocounts, (0))\n pseudocounts /= normalizer[np.newaxis, :, :]\n for t in range(self.numTopics):\n for s in range(self.numSentiments):\n topWordIndices = pseudocounts[:, t, s].argsort()[-1:-(K + 1):-1]\n vocab = self.vectorizer.get_feature_names()\n print(t, s, [vocab[i] for i in topWordIndices])",
"def top_k_freq_words(self,file_names,top_k,seperator=\"#|#\",return_word_only=True):\n c = Counter()\n for file_name in file_names:\n print (\"Reading file \",file_name)\n with codecs.open(file_name, 'r',encoding='utf8') as fp:\n for each_line in fp:\n each_line = each_line.strip()\n each_line = each_line.replace(seperator, \" \")\n each_line = each_line.split()\n c.update(each_line)\n most_common_words = c.most_common(top_k)\n if return_word_only:\n list_of_words = [x[0] for x in most_common_words]\n return list_of_words\n else: \n return most_common_words",
"def most_similar(word, k):\n # convert the word to a vector\n word_index = vocab.index(word)\n word = vecs[word_index]\n\n dist_vec = np.zeros(vocab_size)\n\n for i, v in enumerate(vecs):\n if i == word_index: # skip the calculation of distance between the word to itself\n continue\n dist_vec[i] = dist(word, v)\n\n # extract most similar words according to the calculated results\n ind = np.argpartition(dist_vec, -k)[-k:]\n return zip([vocab[i] for i in ind], [dist_vec[i] for i in ind])",
"def getWordSuggestionsV2(word, fileName, n, topN):\n #find words that are within +/- 1 in length with respect to the given word.\n wordlist=getWordsOfSimLength(word,fileName,1)\n\n \n #find words that begin with the same n letters as the given word\n \n wordstart=getWordsWithSameStart(word, wordlist, n)\n \n \n \n #find words that end with the same n letters as the given word.\n wordend=getWordsWithSameEnd(word, wordlist, n)\n \n \n #makes a list that contains the words that are in all the above lists\n winners=set(wordend).intersection(set(wordstart))\n \n #order the list based on the word similarity measure\n similarityDictionary=getSimilarityDict(word,list(winners))\n #returns a list of the topN words\n \n \n return getBestWords(similarityDictionary, topN)",
"def getTopKWordsByLikelihood(self, K):\n pseudocounts = np.copy(self.n_vts)\n normalizer = np.sum(pseudocounts, (1, 2))\n pseudocounts /= normalizer[:, np.newaxis, np.newaxis]\n for t in range(self.numTopics):\n for s in range(self.numSentiments):\n topWordIndices = pseudocounts[:, t, s].argsort()[-1:-(K + 1):-1]\n vocab = self.vectorizer.get_feature_names()\n print(t, s, [vocab[i] for i in topWordIndices])",
"def fetch_top_k_words(k):\n\treturn redis_wcloud_cli.zrange(WORD_CLOUD_SET,0,k,desc=True,withscores=True)",
"def kTopWords(k,list):\r\n # Check at this point if the list is empty. If so, then the the program is exited.\r\n if list == []:\r\n print(\"Unable to continue:\\n1. Writing.txt is empty or\\n2. There is no word remaining after preprocessing.\")\r\n sys.exit()\r\n\r\n # Make sure the list parameter given starts with the first word in the list, otherwise\r\n # pop the first item in the list as it may be holding the value of total number of words\r\n # that were in the writing.\r\n try:\r\n list[0][1]\r\n except TypeError:\r\n list.pop(0)\r\n except IndexError:\r\n list.pop(0)\r\n\r\n # Also check if the input is an integer, otherwise prompt the user to re-input the k value.\r\n while type(k) is not int:\r\n try:\r\n k = int(input(\"How many top-most frequent words do I display: \"))\r\n except ValueError:\r\n pass\r\n except TypeError:\r\n pass\r\n\r\n # If the user asks for 0 top most frequent words, it would be nothing so the function is\r\n # exited.\r\n if k==0:\r\n sys.exit()\r\n # If the value of k is greater or equal to than the length of the list, then all the words\r\n # and its frequencies will be displayed.\r\n if k>=len(list):\r\n # Goes to the TopK function to get the top k words.\r\n # Time complexity of the function TopK is O(k).\r\n k=len(list)\r\n outputList=TopK(k,list)\r\n\r\n # outputList contains k elements only, so the loop below would have a complexity\r\n # of O(k).\r\n\r\n\r\n # Else if the value of k is less than the length of the list, the top k words\r\n # will be printed.\r\n else:\r\n # Goes to the TopK function to get the top k words.\r\n # Time complexity of the function TopK is O(k).\r\n outputList=TopK(k, list)\r\n # outputList contains k elements only, so the loop below would have a complexity\r\n # of O(k).\r\n\r\n\r\n\r\n list=outputList\r\n return list",
"def get_top_n_words(n):\n top_n_words = rdd_review_data\\\n .map(lambda x: x[\"text\"])\\\n .flatMap(lambda line: line.lower().split(' ')) \\\n .filter(lambda x: x not in stop_words)\\\n .map(lambda x: (trim(x), 1))\\\n .reduceByKey(lambda a, b: a + b)\\\n .sortBy(lambda x: -x[1])\\\n .keys()\\\n .take(n)\n\n results[\"E\"] = top_n_words",
"def most_similar(word, k):\n\n word_embedding_dict = ut1.WORD_EMBEDDINGS_DICT\n u = word_embedding_dict[word]\n words_distances = []\n for one_word in word_embedding_dict:\n calc = cosine_distance(u, word_embedding_dict[one_word])\n words_distances.append([one_word, calc])\n\n words_distances = sorted(words_distances, key=get_distance)\n top_k = sorted(words_distances, key=get_distance,reverse=True)[1:k+1]\n top_k = [item[0] for item in top_k]\n return top_k",
"def most_similar_word(index2word, word2index, wv, w, k=10):\n index = word2index.get(w, -1)\n\n if index < 0:\n return []\n\n # Apply matrix-vector dot product to get the distance of w from all the other vectors\n distance = np.dot(wv, wv[index, :])\n\n max_indices = (-distance).argsort()[:k + 1]\n words = [(index2word[i], distance[i]) for i in max_indices if i != index]\n\n return words",
"def get_top_N_words_per_kmeans_cluster(cluster_centers, vocab, n_words=10):\n topN_tfidf_list = []\n topN_words_list = []\n for cluster in cluster_centers:\n sorted_ind = np.argsort(cluster)[::-1][:n_words]\n topN_words_list.append(vocab[sorted_ind])\n topN_tfidf_list.append(cluster[sorted_ind])\n\n return topN_words_list",
"def top_words_by_idf(document, stats, n):\n words = list(analyze(document))\n idfs = get_idf_dict(words, stats)\n words.sort(key=lambda w: -idfs[w])\n return words[0:n]",
"def extract_topn_from_vector(feature_names, sorted_items, topn=10):\n \n #use only topn items from vector\n sorted_items = sorted_items[:topn]\n \n score_vals = []\n feature_vals = []\n \n # word index and corresponding tf-idf score\n for idx, score in sorted_items:\n \n #keep track of feature name and its corresponding score\n score_vals.append(round(score, 3))\n feature_vals.append(feature_names[idx])\n \n \n results= {}\n for idx in range(len(feature_vals)):\n results[feature_vals[idx]]=score_vals[idx]\n \n return results, feature_vals",
"def get_top_n_words(corpus, n=None):\r\n vec = CountVectorizer().fit(corpus)\r\n bag_of_words = vec.transform(corpus)\r\n sum_words = bag_of_words.sum(axis=0)\r\n words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]\r\n words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)\r\n return words_freq[:n]",
"def get_top_verbs_in_path(path, top_size=10):\n all_words = get_all_words_in_path(path)\n verbs = flat([get_verbs_from_name(word) for word in all_words])\n return collections.Counter(verbs).most_common(top_size)",
"def print_top_words(model, feature_names, n_top_words):\n for topic_idx, topic in enumerate(model.components_):\n print(\"Topic #%d:\" % topic_idx)\n print(\" \".join([feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]]))\n print()",
"def getTopWords(self):\n\n # First, preprocess the article text\n text = self.article_text\n text = self.preprocessor.changeToLower(text)\n text = self.preprocessor.replaceNewline(text, ' ')\n text = self.preprocessor.removeStopWords(text)\n text = self.preprocessor.stripAccents(text)\n text = self.preprocessor.removeSpecialChars(text)\n words = self.preprocessor.tokenizeWords(text)\n preprocessed_text = self.preprocessor.useOriginalWords(words)\n\n # Then, vectorize, and get the top 20 words (word frequency)\n vectorizer = CountVectorizer(ngram_range=(1,2))\n vectors = vectorizer.fit_transform([preprocessed_text])\n feature_names = vectorizer.get_feature_names()\n dense = vectors.todense()\n denselist = dense.tolist()\n df = pd.DataFrame(denselist, columns=feature_names)\n top_words = df.iloc[[0]].sum(axis=0).sort_values(ascending=False)\n return top_words[0:20]",
"def search_words(\n self,\n keyword: str,\n top_k: int = 1,\n similarity_metric: SimilarityMetric = SimilarityMetric.LEVENSHTEIN,\n similarity_threshold: float = 0.6,\n ) -> List[Word]:\n\n top_n_words = []\n for page in self.pages:\n top_n_words.extend(\n page._search_words_with_similarity(\n keyword=keyword,\n top_k=top_k,\n similarity_metric=similarity_metric,\n similarity_threshold=similarity_threshold,\n )\n )\n\n top_n_words = sorted(top_n_words, key=lambda x: x[0], reverse=True)[:top_k]\n top_n_words = EntityList([ent[1] for ent in top_n_words])\n\n return top_n_words"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Finds the item object based on the string name
|
def get_item(name):
for item in globals().values():
if isinstance(item, MarketItem) and item.name == name:
return item
raise Exception("Invaid item '{}'".format(name))
|
[
"def get_item_by_name(self, partialname):\n for item in self.items:\n itemobj=globalitemloader.get_item(item)\n if partialname.lower() in itemobj.name.lower():\n return itemobj\n return None",
"def get_item(self, item_name):\n if len(self.items) > 0: # if there is at least one item in that location\n for element in self.items:\n if element.get_name() == item_name:\n return element\n return False\n else:\n return False",
"def getSpecificItem(itemName):\r\n return session.query(Item).filter_by(name=itemName).one()",
"def itemFromDisplayName(name, model_item):\n assert isinstance(model_item, QtGui.QStandardItemModel)\n assert isinstance(name, str)\n\n # Iterate over model looking for named items\n item = list([i for i in [model_item.item(index)\n for index in range(model_item.rowCount())]\n if str(i.text()) == name])\n return item[0] if len(item)>0 else None",
"def get_item(self, identifier):",
"def item(self, title):\n for item in self.items():\n if item.title.lower() == title.lower():\n return item\n raise NotFound(f'Item with title \"{title}\" not found in the playlist')",
"def GetObjectWithName(name):\r\n nodeList = getAllNodes()\r\n for n in nodeList:\r\n if name in n.Name:\r\n return n\r\n return None",
"def find(self, _name):\n for c in self.__items:\n if c.name == _name:\n return c\n raise RepositoryException(\"Found no contacts with name : \" + _name)",
"def _search_id_by_name(self):\n candidate_id = None\n for item in items:\n if item[\"name\"]==self.name:\n candidate_id = item[\"id\"]\n if candidate_id==None:\n raise InvalidItemIDError\n return candidate_id",
"def get_by_name(self, obj_type, obj_name, is_public=None):\n if obj_type not in self.valid_objects:\n self.logger.debug('=> %s is not a valid object type', obj_type)\n return\n try:\n return getattr(self.client, '%ss' % obj_type).find(\n name=obj_name,\n is_public=is_public)\n except novaclient.exceptions.NotFound:\n self.logger.debug('=> %s with name %s not found', obj_type, obj_name)",
"def get_object(self, name):\n return self._objects[name]",
"def _find_item(element, tag, name):\n names = [ e.text for e in element.iter(tag)]\n if name in names:\n return True\n return False",
"def find(self, name):\n if not isinstance(name, basestring):\n raise TypeError(\"name can only be an instance of type basestring\")\n return_data = self._call(\"find\",\n in_p=[name])\n return_data = IExtPack(return_data)\n return return_data",
"def get(scene, name):\n for fbx_object in get_all(scene):\n if fbx_object.GetName() == name:\n return fbx_object\n\n return None",
"def beets_get_item(self, path):\n query = library.MatchQuery('path', path)\n item = self.lib.items(query).get()\n if item:\n return item\n else:\n log.info(u'mpdstats: item not found: {0}'.format(\n displayable_path(path)\n ))",
"def test_find_material_from_name():\n mat_1 = Material(id=1, D_0=None, E_D=None, name=\"mat1\")\n mat_2 = Material(id=2, D_0=None, E_D=None, name=\"mat2\")\n my_Mats = Materials([mat_1, mat_2])\n assert my_Mats.find_material_from_name(\"mat1\") == mat_1\n assert my_Mats.find_material_from_name(\"mat2\") == mat_2",
"def get_item(menu, item):\n for i in menu['items']:\n if i['id'] == item:\n return i",
"def find_by_name(self, name):\n try:\n for k in self.magnets.keys():\n if self.magnets[k]['name'] == name:\n return self.magnets[k]\n # not found\n except KeyError:\n pass\n return {}",
"def find_drink(self, order_name):\r\n for item in self.menu:\r\n if item.code == order_name:\r\n return item\r\n print(\"Sorry that item is not available.\")\r\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Raise any number to the power of three
|
def my_power_three(number):
return number ** 3
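# Worked examples: my_power_three(4) == 64, my_power_three(-2) == -8, my_power_three(0.5) == 0.125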
|
[
"def times_three(num):\n\n return num*3",
"def f7():\n x_pow3 = mul_functions(identity(), mul_functions(identity(), identity()))\n return inverse(x_pow3)",
"def pow_4_of(number):\n pow2 = pow_2_of(number)\n return pow2 * pow2",
"def triple(number):\n return number * 3",
"def cube(num):\n return num**3",
"def power(number, exp=2):\n return number ** exp",
"def find_next_multiple_of_power_two(number, initial=3):\n msb = number.bit_length()\n return 3 if number <= 1 else initial << msb - 2 << (1 & number >> msb - 2)",
"def power(a,n):\n\tresult = 1\n\tfor i in range(abs(n)):\n\t\tresult *= a\n\n\treturn result if n >= 0 else 1 / result",
"def ack_3(n):\n return 2**(n+3)-3",
"def next_power(value, base=2):\n exponent = math.ceil(math.log(value, base))\n return base**exponent",
"def pow2(n): \n return 1<<n",
"def __pow__(self, r):\n return generic_power(self, r)",
"def cube(number):\n cube = number ** 3\n return cube",
"def next_pow(x, power=2):\n return pow(power, np.ceil(np.log(x) / np.log(power)))",
"def cube(arg):\n return round(arg**3, 3)",
"def cube(self, num):\n result = float (num) ** 3\n return result",
"def __pow__(self, n):\n return LSeriesProduct(self._factorization**ZZ(n))",
"def polynomiale(a:int, b:int, c:int, d:int, x: float) -> float:\n return a * pow(x, 3) + b * pow(x, 2) + c * x + d",
"def polynomiale_carre(a:int, b:int, c:int, x: float) -> float:\n return a * pow(x, 4) + b * pow(x, 2) + c",
"def power1(x, n):\n str = num_binary(n)\n power1, result = 1, 1\n for i in str[::-1]:\n power1 = power1*x\n result = i=='1' and result*power1 or result\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read the mirrors from the mirrorlist file.
|
def read(self):
p = compile('Server = {url}\n')
with open(self.path) as f:
for line in f:
r = p.parse(line)
if r:
self.mirrors.append(r.named['url'])
return self.mirrors
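# Sketch of the `parse` pattern in action, as comments (the mirror URL is made up):
#   line = "Server = https://mirror.example.org/$repo/os/$arch\n"
#   compile('Server = {url}\n').parse(line).named['url']
#   # -> 'https://mirror.example.org/$repo/os/$arch'
# Lines that do not match the pattern (comments, blank lines) return None and are skipped.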
|
[
"def get_list_of_mirrors(file_type, file_path, mirrors_dict):\n\n # Checking if all the arguments have appropriate format.\n formats.RELPATH_SCHEMA.check_match(file_path)\n formats.MIRRORDICT_SCHEMA.check_match(mirrors_dict)\n sslib_formats.NAME_SCHEMA.check_match(file_type)\n\n # Verify 'file_type' is supported.\n if file_type not in _SUPPORTED_FILE_TYPES:\n raise sslib_exceptions.Error('Invalid file_type argument.'\n ' Supported file types: ' + repr(_SUPPORTED_FILE_TYPES))\n path_key = 'metadata_path' if file_type == 'meta' else 'targets_path'\n\n list_of_mirrors = []\n for junk, mirror_info in mirrors_dict.items():\n # Does mirror serve this file type at all?\n path = mirror_info.get(path_key)\n if path is None:\n continue\n\n # for targets, ensure directory confinement\n if path_key == 'targets_path':\n full_filepath = os.path.join(path, file_path)\n confined_target_dirs = mirror_info.get('confined_target_dirs')\n # confined_target_dirs is optional and can used to confine the client to\n # certain paths on a repository mirror when fetching target files.\n if confined_target_dirs and not file_in_confined_directories(full_filepath,\n confined_target_dirs):\n continue\n\n # parse.quote(string) replaces special characters in string using the %xx\n # escape. This is done to avoid parsing issues of the URL on the server\n # side. Do *NOT* pass URLs with Unicode characters without first encoding\n # the URL as UTF-8. We need a long-term solution with #61.\n # http://bugs.python.org/issue1712522\n file_path = parse.quote(file_path)\n url = os.path.join(mirror_info['url_prefix'], path, file_path)\n\n # The above os.path.join() result as well as input file_path may be\n # invalid on windows (might contain both separator types), see #1077.\n # Make sure the URL doesn't contain backward slashes on Windows.\n list_of_mirrors.append(url.replace('\\\\', '/'))\n\n return list_of_mirrors",
"def get_lists_from_file(file_name: str) -> List:\n list_url_file = pathlib.Path.home().joinpath('bookshelf', file_name)\n if not list_url_file.is_file():\n return []\n # open file and load contents\n with list_url_file.open(mode='r') as f:\n list_urls = json.load(f)\n return list_urls",
"def load(self, file_name=\"data/proxies.db\", proxy_list=None, proxy_type=None):\n\n if not proxy_list:\n self.proxy_list = []\n\n with open(file_name, 'r') as f:\n for line in f.readline():\n (ip, port, proxy_type) = line.split()\n proxy = Proxy(ip, port, proxy_type)\n self.proxy_list.append(proxy)\n\n return proxy_list",
"def readProxies(self):\n proxies = []\n for line in self.__proxiesFile:\n line = line.rstrip(\"\\n\")\n proxies.append({\n 'http://': 'http://'+line,\n 'https://': 'http://'+line\n })\n self.__close(self.__proxiesFile)\n return proxies",
"def readListParallel(filename):\n p = parallel.ParallelTask()\n comm, size, rank, master = parallel.info()\n entries = None\n if master:\n with open(filename,'r') as fid:\n lines = fid.readlines()\n entries = [x.strip() for x in lines]\n entries = [x for x in entries if x[0] != '#']\n entries = p.shareData(entries)\n return entries",
"def rlist(self):\n\n if not self.connected_to:\n print('You aren\\'t connected to anyone!')\n print('Connect first with \\'connect <hostname> <port>\\'')\n return\n\n filenames = self.connection.list()\n\n print('Remote filepaths:')\n for filename in filenames:\n print(f'\\t{filename}')",
"def load_rss_list(list_path):\n list_ = []\n with open(list_path, 'r') as f:\n for line in f:\n rss_name, rss = line.split('|')\n rss_name = rss_name.strip()\n rss = rss.strip()\n list_.append((rss_name, rss))\n return list_",
"def masterListRead():\n dir = os.environ[\"XUVTOP\"]\n fname = os.path.join(dir,'masterlist','masterlist.ions')\n input = open(fname,'r')\n s1 = input.readlines()\n input.close()\n masterlist = []\n for i in range(0,len(s1)):\n s1a = s1[i][:-1]\n s2 = s1a.split(';')\n masterlist.append(s2[0].strip())\n return masterlist",
"def ncnr_load(filelist=None, check_timestamps=True):\n # NB: used mainly to set metadata for processing, so keep it minimal\n # TODO: make a metadata loader that does not send all data to browser\n # NB: Fileinfo is a structure with\n # { path: \"location/on/server\", mtime: timestamp }\n from .load import url_load_list\n\n datasets = []\n for data in url_load_list(filelist, check_timestamps=check_timestamps):\n datasets.append(data)\n return datasets",
"def get_mirrors(self):\r\n return [mirror if isinstance(mirror, type) else mirror\r\n for mirror in self._mirrors]",
"def _read_lst_file(config: MutableMapping[str, Any]):\n cur_file = ReadMeta(\n filename=config[\"outputs\"][\"data_filename\"],\n input_start=config[\"inputs\"][\"start\"],\n input_stop1=config[\"inputs\"][\"stop1\"],\n input_stop2=config[\"inputs\"][\"stop2\"],\n input_stop3=config[\"inputs\"][\"stop3\"],\n input_stop4=config[\"inputs\"][\"stop4\"],\n input_stop5=config[\"inputs\"][\"stop5\"],\n binwidth=config[\"advanced\"][\"binwidth\"],\n use_sweeps=config[\"advanced\"][\"sweeps_as_lines\"],\n mirror_phase=config[\"advanced\"][\"phase\"],\n )\n cur_file.run()\n raw_data_obj = ReadData(\n filename=config[\"outputs\"][\"data_filename\"],\n start_of_data_pos=cur_file.start_of_data_pos,\n timepatch=cur_file.timepatch,\n is_binary=cur_file.is_binary,\n debug=config[\"advanced\"][\"debug\"],\n )\n raw_data = raw_data_obj.read_lst()\n if cur_file.is_binary:\n relevant_columns, dict_of_data = binary_parsing(cur_file, raw_data, config)\n else:\n relevant_columns, dict_of_data = ascii_parsing(cur_file, raw_data, config)\n lst_metadata = cur_file.lst_metadata\n fill_frac = (\n config[\"advanced\"][\"fill_frac\"]\n if cur_file.fill_fraction == -1.0\n else cur_file.fill_fraction\n )\n return relevant_columns, dict_of_data, lst_metadata, fill_frac",
"def make_metadata(mirror_list):\n result = {'_type' : 'Mirrors'}\n # TODO: set the expires time another way.\n result['ts'] = format_time(time.time())\n result['expires'] = format_time(time.time() + 3600 * 24 * 365)\n result['mirrors'] = mirror_list\n\n # Is 'result' a Mirrors metadata file?\n # Raises tuf.FormatError if not.\n MIRRORLIST_SCHEMA.check_match(result)\n\n return result",
"def getDataFiles(list_filename):\n return [line.rstrip() for line in open(list_filename)]",
"def __load_proxies_from_file(self):\n proxies_from_file = set()\n try:\n with open(\"http_handler/proxy_data.csv\", 'r', encoding='utf-8') as fd:\n for line in fd:\n line = line.split(' ')\n proxies_from_file.add(line[:-1][0])\n except BaseException as e:\n logs.save_log(\"Exception: failed to load proxies at __load_proxies_from_file method, Error: {}\".format(e))\n print(str(e))\n return\n finally:\n self._address_pool |= proxies_from_file\n self.__update_cycle()",
"def get_ftp_forecast_list(file_match, ftp_host, ftp_login,\n ftp_passwd, ftp_directory):\n ftp_client = PyFTPclient(host=ftp_host,\n login=ftp_login,\n passwd=ftp_passwd,\n directory=ftp_directory)\n ftp_client.connect()\n file_list = ftp_client.ftp.nlst(file_match)\n ftp_client.ftp.quit()\n return file_list",
"def _load_nordpool_links() -> List[str]:\n data = yaml.safe_load(pkgutil.get_data(__name__, \"assets/nordpool_files.yml\"))\n return data.get('files', [])",
"def pull_dns_list():\n try:\n os.remove(NAMESLIST_FILENAME)\n except OSError:\n log(\"Did not find \" + NAMESLIST_PATH + \" to remove.\")\n\n # this assumes the ssh_config user file is setup with an oc1 entry\n command = \"scp oc1:\" + \\\n NAMESLIST_PATH + \" .\"\n\n rc = subprocess.call(command, shell=True)\n if rc != 0:\n raise Exception(\"Something went wrong scping the names file.\")\n else:\n print \"Got file\", NAMESLIST_FILENAME",
"def list(self):\n if self.error:\n sys.stderr.write(self.error)\n return 4\n\n if self.conf.root:\n self._retrieve_single_dir_list(self.conf.root)\n else:\n self._retrieve_file_list()\n\n self._normalize_links()\n sys.stdout.write(''.join([str(entry) for entry in self._entries]))\n return 0",
"def check_mirror(self):\n\t\ttry:\n\t\t\tself.logger.info('Indexing mirror directory')\n\t\t\tself.mirror_validation_paths.append(self.config.mirror_dir)\n\t\t\tfor path in self.path.iterate_path(self.config.mirror_dir):\t\t\t\t\n\t\t\t\tself.logger.debug('Index path: %s'%(path))\n\t\t\t\t#check if path is file\n\t\t\t\tif os.path.isfile(path):\n\t\t\t\t\tself.logger.debug('Path is FILE, path: %s'%(path))\n\t\t\t\t\t#check if file exist in watched directory\n\t\t\t\t\tif self.path.check_exist(path.replace(self.config.mirror_dir,self.config.watch_dir)):\t\t\t\t\t\n\t\t\t\t\t\tif not self.path.cmp_paths(path,path.replace(self.config.mirror_dir,self.config.watch_dir)):\n\t\t\t\t\t\t\t#are diffrent\n\t\t\t\t\t\t\tif not self.path.del_path(path):\n\t\t\t\t\t\t\t\tself.logger.error('Index mirror directory error')\n\t\t\t\t\t\t\t\treturn False\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t#files are the same skip to next path\n\t\t\t\t\t\t\tself.logger.debug('Mirror path: %s are equel with original path: %s, add mirror path to list validation'%(path,path.replace(self.config.mirror_dir,self.config.watch_dir)))\n\t\t\t\t\t\t\tself.mirror_validation_paths.append(path)\n\t\t\t\t\telse:\n\t\t\t\t\t\t#file not exist in watched directory\n\t\t\t\t\t\t#delete the file\n\t\t\t\t\t\tif not self.path.del_path(path):\n\t\t\t\t\t\t\tself.logger.error('Index mirror directory error')\n\t\t\t\t\t\t\treturn False\n\t\t\t\t#check if path is dir\n\t\t\t\telif os.path.isdir(path):\n\t\t\t\t\tself.logger.debug('Path is DIRECTORY, path: %s'%(path))\n\t\t\t\t\tif not self.path.check_exist(path.replace(self.config.mirror_dir,self.config.watch_dir)):\n\t\t\t\t\t\tif not self.path.del_path(path):\n\t\t\t\t\t\t\tself.logger.error('Index mirror directory error')\n\t\t\t\t\t\t\treturn False\n\t\t\t\t\telse:\n\t\t\t\t\t\t#directory exist skip to next path\n\t\t\t\t\t\tself.mirror_validation_paths.append(path)\n\t\t\t\telse:\n\t\t\t\t\t#not recognize path\n\t\t\t\t\tself.logger.warning('Not recognize path: %s'%(path))\n\t\t\t\t\tself.logger.error('Index mirror directory error') \n\t\t\t\t\treturn False\n\t\t\tself.logger.info('Index mirror directory ok')\n\t\t\treturn True\n\t\texcept Exception, e:\n\t\t\tself.logger.error('Index.check_mirror, error: %s'%(str(e)),exc_info=True)\n\t\t\treturn False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Copy the provided mirrorlist into the chroot and refresh pacman databases.
|
def copy(self, path='/etc/pacman.d/mirrorlist'):
copy2(path, self.path)
self.chroot.refresh()
self.read()
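# Assumptions, not confirmed by the source: `copy2` is shutil.copy2, and
# `self.chroot.refresh()` re-syncs the pacman databases inside the chroot
# (roughly `pacman -Sy`). Usage sketch:
#   mirrorlist.copy()                          # mirror the host's mirrorlist
#   mirrorlist.copy('/tmp/custom-mirrorlist')  # or any other mirrorlist file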
|
[
"def _sync_databases(self):\n host, port = self._src_mc.primary\n self._logger.info('[%s] sync databases from %s:%d' % (self._current_process_name, host, port))\n exclude_dbnames = ['admin', 'local']\n for dbname in self._src_mc.database_names():\n if dbname not in exclude_dbnames:\n if self._filter and not self._filter.valid_database(dbname):\n continue\n self._sync_database(dbname)\n self._logger.info('[%s] all databases done' % self._current_process_name)",
"def copy(self, uri_list, move=False): \n\n def copy():\n if move:\n action = filer.rpc.Move\n else:\n action = filer.rpc.Copy\n action(\n From={'File': [rox.get_local_path(uri) for uri in uri_list]},\n To={'File': self.get_path()}\n )\n\n if not self.is_mounted:\n self._mount(on_mount=copy)\n else:\n copy(mount)",
"def migrate_list(mlist, dry_run, verbose):\n set_dlists(mlist, verbose)\n set_essay(mlist, verbose)\n set_fullname(mlist, verbose)\n if not dry_run:\n mlist.Lock()\n mlist.Save()\n mlist.Unlock()",
"def sync_local_pacman_db():\n logger.info(\"Synchronizing package databases in {}\".format(PACMAN_DB_DIR))\n\n if not os.path.exists(PACMAN_DB_DIR):\n os.makedirs(PACMAN_DB_DIR)\n\n # This command comes from \"checkupdates\" script from pacman package\n cmd = ['fakeroot', 'pacman', '-Sy', '--dbpath', PACMAN_DB_DIR, '--logfile', '/dev/null']\n p = subprocess.Popen(cmd)\n retval = p.wait()\n if retval:\n logger.error(\"pacman exited with code {}\".format(retval))\n return False\n return True",
"def copy_db(src_val, dest_val, db_list, options):\n verbose = options.get(\"verbose\", False)\n quiet = options.get(\"quiet\", False)\n do_drop = options.get(\"do_drop\", False)\n skip_views = options.get(\"skip_views\", False)\n skip_procs = options.get(\"skip_procs\", False)\n skip_funcs = options.get(\"skip_funcs\", False)\n skip_events = options.get(\"skip_events\", False)\n skip_grants = options.get(\"skip_grants\", False)\n skip_data = options.get(\"skip_data\", False)\n skip_triggers = options.get(\"skip_triggers\", False)\n skip_tables = options.get(\"skip_tables\", False)\n skip_gtid = options.get(\"skip_gtid\", False)\n locking = options.get(\"locking\", \"snapshot\")\n\n conn_options = {\n 'quiet': quiet,\n 'version': \"5.1.30\",\n }\n servers = connect_servers(src_val, dest_val, conn_options)\n cloning = (src_val == dest_val) or dest_val is None\n\n source = servers[0]\n if cloning:\n destination = servers[0]\n else:\n destination = servers[1]\n\n src_gtid = source.supports_gtid() == 'ON'\n dest_gtid = destination.supports_gtid() == 'ON'if destination else False\n\n # Get list of all databases from source if --all is specified.\n # Ignore system databases.\n if options.get(\"all\", False):\n # The --all option is valid only if not cloning.\n if not cloning:\n if not quiet:\n print \"# Including all databases.\"\n rows = source.get_all_databases()\n for row in rows:\n db_list.append((row[0], None)) # Keep same name\n else:\n raise UtilError(\"Cannot copy all databases on the same server.\")\n elif not skip_gtid and src_gtid:\n # Check to see if this is a full copy (complete backup)\n all_dbs = source.exec_query(\"SHOW DATABASES\")\n dbs = [db[0] for db in db_list]\n for db in all_dbs:\n if db[0].upper() in [\"MYSQL\", \"INFORMATION_SCHEMA\",\n \"PERFORMANCE_SCHEMA\"]:\n continue\n if not db[0] in dbs:\n print _GTID_BACKUP_WARNING\n break\n\n # Do error checking and preliminary work:\n # - Check user permissions on source and destination for all databases\n # - Check to see if executing on same server but same db name (error)\n # - Build list of tables to lock for copying data (if no skipping data)\n # - Check storage engine compatibility\n for db_name in db_list:\n source_db = Database(source, db_name[0])\n if destination is None:\n destination = source\n if db_name[1] is None:\n db = db_name[0]\n else:\n db = db_name[1]\n dest_db = Database(destination, db)\n\n # Make a dictionary of the options\n access_options = {\n 'skip_views': skip_views,\n 'skip_procs': skip_procs,\n 'skip_funcs': skip_funcs,\n 'skip_grants': skip_grants,\n 'skip_events': skip_events,\n 'skip_triggers': skip_triggers,\n }\n\n source_db.check_read_access(src_val[\"user\"], src_val[\"host\"],\n access_options)\n\n # Make a dictionary containing the list of objects from source db\n source_objects = {\n \"views\": source_db.get_db_objects(\"VIEW\", columns=\"full\"),\n \"procs\": source_db.get_db_objects(\"PROCEDURE\", columns=\"full\"),\n \"funcs\": source_db.get_db_objects(\"FUNCTION\", columns=\"full\"),\n \"events\": source_db.get_db_objects(\"EVENT\", columns=\"full\"),\n \"triggers\": source_db.get_db_objects(\"TRIGGER\", columns=\"full\"),\n }\n\n dest_db.check_write_access(dest_val['user'], dest_val['host'],\n access_options, source_objects, do_drop)\n\n # Error is source db and destination db are the same and we're cloning\n if destination == source and db_name[0] == db_name[1]:\n raise UtilError(\"Destination database name is same as \"\n \"source - source = %s, destination = %s\" %\n (db_name[0], 
db_name[1]))\n\n # Error is source database does not exist\n if not source_db.exists():\n raise UtilError(\"Source database does not exist - %s\" % db_name[0])\n\n # Check storage engines\n check_engine_options(destination,\n options.get(\"new_engine\", None),\n options.get(\"def_engine\", None),\n False, options.get(\"quiet\", False))\n\n # Get replication commands if rpl_mode specified.\n # if --rpl specified, dump replication initial commands\n rpl_info = None\n\n # Turn off foreign keys if they were on at the start\n destination.disable_foreign_key_checks(True)\n\n # Get GTID commands\n if not skip_gtid:\n gtid_info = get_gtid_commands(source)\n if src_gtid and not dest_gtid:\n print _NON_GTID_WARNING % (\"destination\", \"source\", \"to\")\n elif not src_gtid and dest_gtid:\n print _NON_GTID_WARNING % (\"source\", \"destination\", \"from\")\n else:\n gtid_info = None\n if src_gtid and not cloning:\n print _GTID_WARNING\n\n # If cloning, turn off gtid generation\n if gtid_info and cloning:\n gtid_info = None\n # if GTIDs enabled, write the GTID commands\n if gtid_info and dest_gtid:\n # Check GTID version for complete feature support\n destination.check_gtid_version()\n # Check the gtid_purged value too\n destination.check_gtid_executed()\n for cmd in gtid_info[0]:\n print \"# GTID operation:\", cmd\n destination.exec_query(cmd, {'fetch': False, 'commit': False})\n\n if options.get(\"rpl_mode\", None):\n new_opts = options.copy()\n new_opts['multiline'] = False\n new_opts['strict'] = True\n rpl_info = get_change_master_command(src_val, new_opts)\n destination.exec_query(\"STOP SLAVE\", {'fetch': False, 'commit': False})\n\n # Copy (create) objects.\n # We need to delay trigger and events to after data is loaded\n new_opts = options.copy()\n new_opts['skip_triggers'] = True\n new_opts['skip_events'] = True\n\n # Get the table locks unless we are cloning with lock-all\n if not (cloning and locking == 'lock-all'):\n my_lock = get_copy_lock(source, db_list, options, True)\n\n _copy_objects(source, destination, db_list, new_opts)\n\n # If we are cloning, take the write locks prior to copying data\n if cloning and locking == 'lock-all':\n my_lock = get_copy_lock(source, db_list, options, True, cloning)\n\n # Copy tables data\n if not skip_data and not skip_tables:\n\n # Copy tables\n for db_name in db_list:\n\n # Get a Database class instance\n db = Database(source, db_name[0], options)\n\n # Perform the copy\n # Note: No longer use threads, use multiprocessing instead.\n db.init()\n db.copy_data(db_name[1], options, destination, connections=1,\n src_con_val=src_val, dest_con_val=dest_val)\n\n # if cloning with lock-all unlock here to avoid system table lock conflicts\n if cloning and locking == 'lock-all':\n my_lock.unlock()\n\n # Create triggers for all databases\n if not skip_triggers:\n new_opts = options.copy()\n new_opts['skip_tables'] = True\n new_opts['skip_views'] = True\n new_opts['skip_procs'] = True\n new_opts['skip_funcs'] = True\n new_opts['skip_events'] = True\n new_opts['skip_grants'] = True\n new_opts['skip_create'] = True\n _copy_objects(source, destination, db_list, new_opts, False, False)\n\n # Create events for all databases\n if not skip_events:\n new_opts = options.copy()\n new_opts['skip_tables'] = True\n new_opts['skip_views'] = True\n new_opts['skip_procs'] = True\n new_opts['skip_funcs'] = True\n new_opts['skip_triggers'] = True\n new_opts['skip_grants'] = True\n new_opts['skip_create'] = True\n _copy_objects(source, destination, db_list, new_opts, False, 
False)\n\n if not (cloning and locking == 'lock-all'):\n my_lock.unlock()\n\n # if GTIDs enabled, write the GTID-related commands\n if gtid_info and dest_gtid:\n print \"# GTID operation:\", gtid_info[1]\n destination.exec_query(gtid_info[1])\n\n if options.get(\"rpl_mode\", None):\n for cmd in rpl_info[_RPL_COMMANDS]:\n if cmd[0] == '#' and not quiet:\n print cmd\n else:\n if verbose:\n print cmd\n destination.exec_query(cmd)\n destination.exec_query(\"START SLAVE;\")\n\n # Turn on foreign keys if they were on at the start\n destination.disable_foreign_key_checks(False)\n\n if not quiet:\n print \"#...done.\"\n return True",
"def rsync(self, args):\n \n if self.transport == 'ssh': # DETER\n # First upload the files\n print \"Uploading files to DETER\"\n subprocess.call('cd ../scripts ; ./sync-deter.py', shell=True) \n \n # Copy the files to /tmp/\n threads = []\n \n nfs_dir = self.prefix[:self.prefix.rfind('/src')]\n local_dir = self.install_dir[:self.install_dir.rfind('curveball/src')]\n topdir = self.install_dir[:self.install_dir.rfind('/src')]\n \n for machine in self.servers:\n self.run(machine, 'sudo mkdir -p %s ; whoami | xargs -I {} sudo chown {} %s' % (topdir, topdir)) \n t = Thread(target=self.run, args=(machine, 'rsync -avz %s %s' % (nfs_dir, local_dir), True))\n t.start()\n threads.append((t, machine))\n\n print \"Waiting for %d machines to finish copying to local directories\" % len(threads)\n \n for (t, machine) in threads:\n print \"Waiting for %s to finish copying\" % machine\n t.join()",
"def sync(reconcile=False):\n\n\tg.db = mysqladm.core.db_connect()\n\n\t## Load servers\n\tservers = mysqladm.servers.get_servers()\n\t\n\tfor server in servers:\n\t\tprint('Checking server \"' + server['hostname'] + '\"')\n\t\ttry:\n\t\t\tjson_response = mysqladm.core.msg_node(server,'list')\n\n\t\t\tif 'status' not in json_response:\n\t\t\t\tprint('Error: Invalid response from server', file=sys.stderr)\n\t\t\t\tcontinue\n\n\t\t\tif json_response['status'] != 0:\n\t\t\t\tif 'error' in json_response:\n\t\t\t\t\tprint('Error from agent: ' + json_response['error'], file=sys.stderr)\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tprint('Error from agent, code: ' + json_response['code'], file=sys.stderr)\n\t\t\t\t\tcontinue\n\n\t\texcept requests.exceptions.RequestException as e:\n\t\t\tprint('Error contacting agent: ' + str(e), file=sys.stderr)\n\t\t\tcontinue\n\t\t\n\t\tdatabases = mysqladm.servers.get_server_databases(server['id'])\n\t\tdblist = []\n\t\tfor db in databases:\n\t\t\tdblist.append(db['name'])\n\n\t\t## Check to see if any databases have been created without us knowing about it\n\t\tif 'list' in json_response:\n\t\t\tfor instance in json_response['list']:\n\t\t\t\tif instance not in dblist:\t\t\t\t\t\n\t\t\t\t\tif reconcile:\n\t\t\t\t\t\tmysqladm.databases.insert_database_record(server['id'], instance, 'N/A', 'N/A')\n\t\t\t\t\t\tprint(\"\\t\" + 'Found unknown database \"' + instance + '\" on server. Added database to records as it exists on the server.')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"\\t\" + 'Found unknown database \"' + instance + '\" on server')\n\t\t\t\t\t\n\t\t## Check to see if any databases appear to have been deleted without us knowing about it\n\t\tfor db in databases:\n\t\t\tif db['name'] not in json_response['list']:\n\t\t\t\tif reconcile:\n\t\t\t\t\tmysqladm.databases.delete_database_record(db['id'])\n\t\t\t\t\tprint(\"\\t\" + 'Could not find database \"' + db['name'] + '\" on server. Deleted database from records as it no longer exists.')\n\t\t\t\telse:\n\t\t\t\t\tprint(\"\\t\" + 'Could not find database \"' + db['name'] + '\" on server')",
"def pull_dns_list():\n try:\n os.remove(NAMESLIST_FILENAME)\n except OSError:\n log(\"Did not find \" + NAMESLIST_PATH + \" to remove.\")\n\n # this assumes the ssh_config user file is setup with an oc1 entry\n command = \"scp oc1:\" + \\\n NAMESLIST_PATH + \" .\"\n\n rc = subprocess.call(command, shell=True)\n if rc != 0:\n raise Exception(\"Something went wrong scping the names file.\")\n else:\n print \"Got file\", NAMESLIST_FILENAME",
"def nfvi_kube_rootca_host_update_list(self, new_list):\n self._nfvi_kube_rootca_host_update_list = new_list",
"def set_hosts(serverlist_file):\n# env.hosts = open('/root/fabric/serverlist', 'r').readlines()\n# env.hosts = open('/root/fabric/servershortlist', 'r').readlines()\n env.hosts = open((serverlist_file), 'r').readlines()",
"def insert_snap(json_list):\n user = utils.get_user()\n node_type = \"Host\"\n for data in json_list:\n # Handle nerds data\n try:\n d = data['host']['snap_metadata']\n except KeyError:\n d = data\n\n name = d['hostname'].lower()\n logger.info('{} loaded'.format(name))\n\n meta_type = 'Logical' if d.get('virtual') else 'Physical'\n\n # find host first hostname, then ip? else create\n node_handle = utils.get_unique_node_handle_by_name(name, node_type, meta_type, ALLOWED_NODE_TYPE_SET)\n # Check it is a host\n if not node_handle or node_handle.node_type.type not in ALLOWED_NODE_TYPE_SET:\n logger.info('{} is not a {} skipping.'.format(name, ALLOWED_NODE_TYPE_SET))\n continue\n\n # Update host\n node = node_handle.get_node()\n\n # change from logical to physical if needed?\n if node_handle.node_meta_type == 'Logical' and not d.get('virtual'):\n logger.warning('Converting {} from logical to physical'.format(name))\n helpers.logical_to_physical(user, node.handle_id)\n\n helpers.update_noclook_auto_manage(node)\n\n managed_by = 'Puppet' if d.get('managed') else 'Manual'\n responsible = d.get('responsible', 'SEI')\n\n properties = {\n 'os': d['os'],\n 'managed_by': managed_by,\n 'responsible_group': responsible,\n 'description': d.get('description')\n }\n\n if d.get('network'):\n ipv4 = [n['ip'].split('/')[0] for n in d.get('network', []) if 'ip' in n]\n ipv4_service = [n['service_ip'].split('/')[0] for n in d.get('network', []) if 'service_ip' in n]\n ipv6 = [n['ipv6'].split('/')[0] for n in d.get('network', []) if 'ipv6' in n]\n ipv6_service = [n['service_ipv6'].split('/')[0] for n in d.get('network', []) if 'service_ipv6' in n]\n properties['ip_addresses'] = ipv4 + ipv4_service + ipv6 + ipv6_service\n\n if d.get('managed'):\n # Dont default to False\n properties['syslog'] = True\n\n if d.get('service_tag'):\n properties['service_tag'] = d.get('service_tag')\n\n helpers.dict_update_node(user, node.handle_id, properties, properties.keys())\n logger.info('{} has been imported'.format(name))",
"def update_all(self):\n\n\t\ttry:\n\t\t\tclientListPipe = subprocess.Popen(['/usr/sbin/puppetca','-la'], stdout=subprocess.PIPE)\n\t\t\tclientList = clientListPipe.communicate()[0]\n\t\t\tpattern = re.compile(r'(\\S+)\\.%s' % self.re_domain)\n\t\t\tmatcher = re.findall(pattern,clientList)\n\n\t\t\tfor result in matcher:\n\t\t\t\tif re.match(r'^DNS:',result):\n\t\t\t\t\tpass\n\n\t\t\t\telse:\n\n\t\t\t\t\thost = '%s' % result\n\t\t\t\t\tself.__host = host\n\t\t\t\t\tenv.host_string = host\n\t\t\t\t\tself.update_host()\n\n\t\texcept Exception, e:\n\t\t\tprint 'error :', e",
"def set(self, mirror, write=True):\n self.mirrors = [mirror]\n if write:\n self.write()",
"def move_all_cards(self, destination_list):\n\n self.client.fetch_json(\n '/lists/' + self.id + '/moveAllCards',\n http_method='POST',\n post_args = {\n \"idBoard\": destination_list.board.id,\n \"idList\": destination_list.id,\n })",
"def apply_zone_change(self, cmd_list):\n stdout, stderr = None, None\n LOG.debug(\"Executing command via ssh: %s\", cmd_list)\n stdout, stderr = self._run_ssh(cmd_list, True, 1)\n # no output expected, so output means there is an error\n if stdout:\n msg = _(\"Error while running zoning CLI: (command=%(cmd)s \"\n \"error=%(err)s).\") % {'cmd': cmd_list, 'err': stdout}\n LOG.error(msg)\n self._cfg_trans_abort()\n raise b_exception.BrocadeZoningCliException(reason=msg)",
"def copyFiles(self):\n \n #./update/swap\n self._logger.info('adb push swap')\n self.getShell().push('./update/swap','/cache/recovery/')\n\n self.getShell().cmd('chmod 755 /cache/recovery/swap')\n self._logger.info('chmod 755 /cache/recovery/swap')\n\n #./update/parted\n self.getShell().push('./update/parted','/cache/recovery/')\n self._logger.info('adb push parted')\n\n self.getShell().cmd('chmod 755 /cache/recovery/parted')\n self._logger.info('chmod 755 /cache/recovery/parted')\n \n #perform restore\n self._logger.info('run swap script')\n recovery = UpdateManagerRecovery(self.getDeviceId)\n recovery.startUpdate()\n self._logger.info('running...')\n recovery.join()\n\n self.getShell().reboot('device')",
"def mirror(also_migrate=None, force=False, wait=False, date_from=None):\n halt_if_debug_mode(force=force)\n task = migrate_from_mirror(\n also_migrate=also_migrate, disable_external_push=True, date_from=date_from\n )\n if wait:\n wait_for_all_tasks(task)",
"def _copy_objects(source, destination, db_list, options,\n show_message=True, do_create=True):\n # Copy objects\n for db_name in db_list:\n\n if show_message:\n # Display copy message\n if not options.get('quiet', False):\n msg = \"# Copying database %s \" % db_name[0]\n if db_name[1]:\n msg += \"renamed as %s\" % (db_name[1])\n print msg\n\n # Get a Database class instance\n db = Database(source, db_name[0], options)\n\n # Perform the copy\n db.init()\n db.copy_objects(db_name[1], options, destination,\n options.get(\"threads\", False), do_create)",
"def mininetFlushArpCache(cli, host_list):\n for h in host_list:\n cli.runCmd(\"%s ip neigh flush all\" % h)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the given mirror as the chroot's only mirror and refresh pacman databases.
|
def set(self, mirror, write=True):
self.mirrors = [mirror]
if write:
self.write()
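# Assumption: self.write() persists self.mirrors to the mirrorlist file and triggers
# the chroot refresh mentioned in the query. Usage sketch with a made-up URL:
#   mirrorlist.set('https://mirror.example.org/$repo/os/$arch')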
|
[
"def has_mirror(self, mirror):\n\n if not isinstance(mirror, RepositoryURI):\n mirror = misc.url_affix_trailing_slash(mirror)\n return mirror in self.mirrors",
"def remove_mirror(self, mirror):\n\n if not isinstance(mirror, RepositoryURI):\n mirror = misc.url_affix_trailing_slash(mirror)\n for i, m in enumerate(self.mirrors):\n if mirror == m.uri:\n # Immediate return as the index into the array\n # changes with each removal.\n del self.mirrors[i]\n return\n raise api_errors.UnknownRepositoryMirror(mirror)",
"def copy(self, path='/etc/pacman.d/mirrorlist'):\n copy2(path, self.path)\n self.chroot.refresh()\n self.read()",
"def add_mirror(self, mirror, priority=None, ssl_cert=None,\n ssl_key=None):\n\n def dup_check(mirror):\n if self.has_mirror(mirror):\n raise api_errors.DuplicateRepositoryMirror(\n mirror)\n\n self.__add_uri(\"mirrors\", mirror, dup_check=dup_check,\n priority=priority, ssl_cert=ssl_cert, ssl_key=ssl_key)",
"def set_mirror_volume_mounts(self, mirror_volume_mounts=True):\n\n self.mirror_volume_mounts = mirror_volume_mounts\n return self",
"def mirror(mirror='pip', py=3):\n subprocess.call(\"pip\" + str(py) +\" install pip -U\", shell=True)\n cmd = \"pip\" + str(py) +\" config set global.index-url \" + (pypi[mirror] if mirror in pypi else mirror)\n s = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]\n return s",
"def update_mirror(self, mirror, priority=None, ssl_cert=None,\n ssl_key=None):\n\n if not isinstance(mirror, RepositoryURI):\n mirror = RepositoryURI(mirror, priority=priority,\n ssl_cert=ssl_cert, ssl_key=ssl_key)\n\n target = self.get_mirror(mirror)\n target.priority = mirror.priority\n target.ssl_cert = mirror.ssl_cert\n target.ssl_key = mirror.ssl_key\n self.mirrors.sort(key=URI_SORT_POLICIES[self.__sort_policy])",
"def add(self, mirror, write=True):\n self.mirrors.append(mirror)\n if write:\n self.write()",
"def increment_mirror(self, gr, action={}):\n badmirror = gr.mirrors[gr._next]\n\n self._lock.acquire()\n try:\n ind = self.mirrors.index(badmirror)\n except ValueError:\n pass\n else:\n if action.get('remove_master', 0):\n del self.mirrors[ind]\n elif self._next == ind and action.get('increment_master', 1):\n self._next += 1\n if self._next >= len(self.mirrors): self._next = 0\n self._lock.release()\n \n if action.get('remove', 1):\n del gr.mirrors[gr._next]\n elif action.get('increment', 1):\n gr._next += 1\n if gr._next >= len(gr.mirrors): gr._next = 0\n\n if DEBUG:\n grm = [m['mirror'] for m in gr.mirrors]\n DEBUG.info('GR mirrors: [%s] %i', ' '.join(grm), gr._next)\n selfm = [m['mirror'] for m in self.mirrors]\n DEBUG.info('MAIN mirrors: [%s] %i', ' '.join(selfm), self._next)",
"def setRootDatadir(self, root):\n for arg, value in self.args.items():\n if value:\n self.args[arg] = value.replace('$DATA', root)\n for arg, value in self.drendargs.items():\n if value:\n self.drendargs[arg] = value.replace('$DATA', root)\n if self.datadir:\n self.datadir = self.datadir.replace('$DATA', root)",
"def _raise_bad_mirror_error(self, path):\n raise util.UserError(f\"Directory '{path}' already exists, but is not a mirror \"\n \"of the project repository. Please remove it and try again. \"\n \"If the benchmark suite is in the project repository, you can \"\n \"also adjust the configuration to use the current \"\n \"repository (e.g. \\\"repo\\\": \\\".\\\") instead of a remote URL \"\n \"as the source.\")",
"def replaceRootpw(self, pwd):\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n # if starts with $ then assume encrypted\n isCrypted = re.match(r\"\\$\", pwd)\n if not isCrypted:\n pwd = self.cryptedPwd(pwd)\n isCrypted = True\n commandSection = self.sectionByName(\"command\")\n # change to known root pwd\n commandSection.string = re.sub(r\"(?m)^([ \\t]*rootpw[ \\t]+).*$\",\n r\"\\g<1>\" + (\"--iscrypted \" if isCrypted else \"\") + pwd,\n commandSection.string)\n return self",
"def remove_global_zernike_mirror(self):\n self.zernike_mirror = None",
"def mirror(also_migrate=None, force=False, wait=False, date_from=None):\n halt_if_debug_mode(force=force)\n task = migrate_from_mirror(\n also_migrate=also_migrate, disable_external_push=True, date_from=date_from\n )\n if wait:\n wait_for_all_tasks(task)",
"def do_mirror(self, cmd):\n\n # remove unwanted spaces to avoid invalid command error\n tmparg = self.clean_cmd(cmd)\n cmds = tmparg.split(';')\n if len(cmds) < 2:\n print(\"Required an ID and ';' before the command.\")\n elif str.isdigit(cmds[0]):\n if self._is_sec_registered('mirror', int(cmds[0])):\n self.secondaries['mirror'][int(cmds[0])].run(cmds[1])\n else:\n print('Invalid command: {}'.format(tmparg))",
"def mirror_dir(\n self,\n remote_dir: str = None,\n local_root: str = 'mirror',\n blacklist_dir_names: Iterable[str] = ('python', 'build_python', 'backend', 'logs'),\n whitelist_file_ext: Iterable[str] = ('.txt', '.json', '.spec', '.sim', '.pkl'),\n ):\n remote_dir = remote_dir or self.remote_home_dir\n\n logger.info(f'Mirroring remote dir {remote_dir}')\n\n with utils.BlockTimer() as timer:\n self.walk_remote_path(\n self.remote_home_dir,\n func_on_files = functools.partial(self.mirror_file, local_root = local_root),\n func_on_dirs = lambda d, _: utils.ensure_parents_exist(d),\n blacklist_dir_names = tuple(blacklist_dir_names),\n whitelist_file_ext = tuple(whitelist_file_ext),\n )\n\n logger.info(f'Mirroring complete. {timer}')",
"def volume_mirror(self, volume, mirror_disks=None, force=None, victim_volume=None):\n return self.request( \"volume-mirror\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'mirror_disks': [ mirror_disks, 'mirror-disks', [ DiskInfo, 'None' ], True ],\n 'force': [ force, 'force', [ bool, 'None' ], False ],\n 'victim_volume': [ victim_volume, 'victim-volume', [ basestring, 'None' ], False ],\n }, {\n 'bad-disks': [ DiskInfo, True ],\n } )",
"def deterfresh_manager(root, db_name):\n\n if root:\n if db_name in [\"predicted_missions\", \"missions\"]:\n mission_manager.refresh_memebers()\n elif db_name == \"emotions\":\n emotion_manager.refresh_members()\n else:\n mission_manager.refresh_members()",
"def check_mirror(self):\n\t\ttry:\n\t\t\tself.logger.info('Indexing mirror directory')\n\t\t\tself.mirror_validation_paths.append(self.config.mirror_dir)\n\t\t\tfor path in self.path.iterate_path(self.config.mirror_dir):\t\t\t\t\n\t\t\t\tself.logger.debug('Index path: %s'%(path))\n\t\t\t\t#check if path is file\n\t\t\t\tif os.path.isfile(path):\n\t\t\t\t\tself.logger.debug('Path is FILE, path: %s'%(path))\n\t\t\t\t\t#check if file exist in watched directory\n\t\t\t\t\tif self.path.check_exist(path.replace(self.config.mirror_dir,self.config.watch_dir)):\t\t\t\t\t\n\t\t\t\t\t\tif not self.path.cmp_paths(path,path.replace(self.config.mirror_dir,self.config.watch_dir)):\n\t\t\t\t\t\t\t#are diffrent\n\t\t\t\t\t\t\tif not self.path.del_path(path):\n\t\t\t\t\t\t\t\tself.logger.error('Index mirror directory error')\n\t\t\t\t\t\t\t\treturn False\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t#files are the same skip to next path\n\t\t\t\t\t\t\tself.logger.debug('Mirror path: %s are equel with original path: %s, add mirror path to list validation'%(path,path.replace(self.config.mirror_dir,self.config.watch_dir)))\n\t\t\t\t\t\t\tself.mirror_validation_paths.append(path)\n\t\t\t\t\telse:\n\t\t\t\t\t\t#file not exist in watched directory\n\t\t\t\t\t\t#delete the file\n\t\t\t\t\t\tif not self.path.del_path(path):\n\t\t\t\t\t\t\tself.logger.error('Index mirror directory error')\n\t\t\t\t\t\t\treturn False\n\t\t\t\t#check if path is dir\n\t\t\t\telif os.path.isdir(path):\n\t\t\t\t\tself.logger.debug('Path is DIRECTORY, path: %s'%(path))\n\t\t\t\t\tif not self.path.check_exist(path.replace(self.config.mirror_dir,self.config.watch_dir)):\n\t\t\t\t\t\tif not self.path.del_path(path):\n\t\t\t\t\t\t\tself.logger.error('Index mirror directory error')\n\t\t\t\t\t\t\treturn False\n\t\t\t\t\telse:\n\t\t\t\t\t\t#directory exist skip to next path\n\t\t\t\t\t\tself.mirror_validation_paths.append(path)\n\t\t\t\telse:\n\t\t\t\t\t#not recognize path\n\t\t\t\t\tself.logger.warning('Not recognize path: %s'%(path))\n\t\t\t\t\tself.logger.error('Index mirror directory error') \n\t\t\t\t\treturn False\n\t\t\tself.logger.info('Index mirror directory ok')\n\t\t\treturn True\n\t\texcept Exception, e:\n\t\t\tself.logger.error('Index.check_mirror, error: %s'%(str(e)),exc_info=True)\n\t\t\treturn False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Append the given mirror to the chroot's mirrorlist and refresh pacman databases.
|
def add(self, mirror, write=True):
self.mirrors.append(mirror)
if write:
self.write()
|
[
"def add_mirror(self, mirror, priority=None, ssl_cert=None,\n ssl_key=None):\n\n def dup_check(mirror):\n if self.has_mirror(mirror):\n raise api_errors.DuplicateRepositoryMirror(\n mirror)\n\n self.__add_uri(\"mirrors\", mirror, dup_check=dup_check,\n priority=priority, ssl_cert=ssl_cert, ssl_key=ssl_key)",
"def set(self, mirror, write=True):\n self.mirrors = [mirror]\n if write:\n self.write()",
"def copy(self, path='/etc/pacman.d/mirrorlist'):\n copy2(path, self.path)\n self.chroot.refresh()\n self.read()",
"def update_mirror(self, mirror, priority=None, ssl_cert=None,\n ssl_key=None):\n\n if not isinstance(mirror, RepositoryURI):\n mirror = RepositoryURI(mirror, priority=priority,\n ssl_cert=ssl_cert, ssl_key=ssl_key)\n\n target = self.get_mirror(mirror)\n target.priority = mirror.priority\n target.ssl_cert = mirror.ssl_cert\n target.ssl_key = mirror.ssl_key\n self.mirrors.sort(key=URI_SORT_POLICIES[self.__sort_policy])",
"def has_mirror(self, mirror):\n\n if not isinstance(mirror, RepositoryURI):\n mirror = misc.url_affix_trailing_slash(mirror)\n return mirror in self.mirrors",
"def append_all_remotes(self):\n for remote in self.remotes:\n self.append_remote(remote)",
"def remove_mirror(self, mirror):\n\n if not isinstance(mirror, RepositoryURI):\n mirror = misc.url_affix_trailing_slash(mirror)\n for i, m in enumerate(self.mirrors):\n if mirror == m.uri:\n # Immediate return as the index into the array\n # changes with each removal.\n del self.mirrors[i]\n return\n raise api_errors.UnknownRepositoryMirror(mirror)",
"def increment_mirror(self, gr, action={}):\n badmirror = gr.mirrors[gr._next]\n\n self._lock.acquire()\n try:\n ind = self.mirrors.index(badmirror)\n except ValueError:\n pass\n else:\n if action.get('remove_master', 0):\n del self.mirrors[ind]\n elif self._next == ind and action.get('increment_master', 1):\n self._next += 1\n if self._next >= len(self.mirrors): self._next = 0\n self._lock.release()\n \n if action.get('remove', 1):\n del gr.mirrors[gr._next]\n elif action.get('increment', 1):\n gr._next += 1\n if gr._next >= len(gr.mirrors): gr._next = 0\n\n if DEBUG:\n grm = [m['mirror'] for m in gr.mirrors]\n DEBUG.info('GR mirrors: [%s] %i', ' '.join(grm), gr._next)\n selfm = [m['mirror'] for m in self.mirrors]\n DEBUG.info('MAIN mirrors: [%s] %i', ' '.join(selfm), self._next)",
"def set_mirror_volume_mounts(self, mirror_volume_mounts=True):\n\n self.mirror_volume_mounts = mirror_volume_mounts\n return self",
"async def sync_dns(self) -> None:\n # Update hosts\n add_host_coros: list[Awaitable[None]] = []\n for addon in self.installed:\n try:\n if not await addon.instance.is_running():\n continue\n except DockerError as err:\n _LOGGER.warning(\"Add-on %s is corrupt: %s\", addon.slug, err)\n self.sys_resolution.create_issue(\n IssueType.CORRUPT_DOCKER,\n ContextType.ADDON,\n reference=addon.slug,\n suggestions=[SuggestionType.EXECUTE_REPAIR],\n )\n capture_exception(err)\n else:\n add_host_coros.append(\n self.sys_plugins.dns.add_host(\n ipv4=addon.ip_address, names=[addon.hostname], write=False\n )\n )\n\n await asyncio.gather(*add_host_coros)\n\n # Write hosts files\n with suppress(CoreDNSError):\n await self.sys_plugins.dns.write_hosts()",
"def import_packages(packnames, mirror):\n\n utils = importr(\"utils\")\n\n if mirror == \"CRAN\":\n utils.chooseCRANmirror(ind=1) # select the first mirror in the list\n\n\n elif mirror == \"Bioconductor\":\n utils.chooseBioCmirror(ind=1) # select the first mirror in the list\n\n else:\n raise \"ERROR: can't find mirror to load packages from\"\n\n\n p = utils.install_packages(StrVector(packnames))\n\n return p",
"def mirror(mirror='pip', py=3):\n subprocess.call(\"pip\" + str(py) +\" install pip -U\", shell=True)\n cmd = \"pip\" + str(py) +\" config set global.index-url \" + (pypi[mirror] if mirror in pypi else mirror)\n s = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]\n return s",
"def push_url_from_mirror_url(mirror_url):\n scheme = url_util.parse(mirror_url, scheme=\"<missing>\").scheme\n if scheme == \"<missing>\":\n raise ValueError('\"{0}\" is not a valid URL'.format(mirror_url))\n mirror = spack.mirror.MirrorCollection().lookup(mirror_url)\n return url_util.format(mirror.push_url)",
"def sync(reconcile=False):\n\n\tg.db = mysqladm.core.db_connect()\n\n\t## Load servers\n\tservers = mysqladm.servers.get_servers()\n\t\n\tfor server in servers:\n\t\tprint('Checking server \"' + server['hostname'] + '\"')\n\t\ttry:\n\t\t\tjson_response = mysqladm.core.msg_node(server,'list')\n\n\t\t\tif 'status' not in json_response:\n\t\t\t\tprint('Error: Invalid response from server', file=sys.stderr)\n\t\t\t\tcontinue\n\n\t\t\tif json_response['status'] != 0:\n\t\t\t\tif 'error' in json_response:\n\t\t\t\t\tprint('Error from agent: ' + json_response['error'], file=sys.stderr)\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tprint('Error from agent, code: ' + json_response['code'], file=sys.stderr)\n\t\t\t\t\tcontinue\n\n\t\texcept requests.exceptions.RequestException as e:\n\t\t\tprint('Error contacting agent: ' + str(e), file=sys.stderr)\n\t\t\tcontinue\n\t\t\n\t\tdatabases = mysqladm.servers.get_server_databases(server['id'])\n\t\tdblist = []\n\t\tfor db in databases:\n\t\t\tdblist.append(db['name'])\n\n\t\t## Check to see if any databases have been created without us knowing about it\n\t\tif 'list' in json_response:\n\t\t\tfor instance in json_response['list']:\n\t\t\t\tif instance not in dblist:\t\t\t\t\t\n\t\t\t\t\tif reconcile:\n\t\t\t\t\t\tmysqladm.databases.insert_database_record(server['id'], instance, 'N/A', 'N/A')\n\t\t\t\t\t\tprint(\"\\t\" + 'Found unknown database \"' + instance + '\" on server. Added database to records as it exists on the server.')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"\\t\" + 'Found unknown database \"' + instance + '\" on server')\n\t\t\t\t\t\n\t\t## Check to see if any databases appear to have been deleted without us knowing about it\n\t\tfor db in databases:\n\t\t\tif db['name'] not in json_response['list']:\n\t\t\t\tif reconcile:\n\t\t\t\t\tmysqladm.databases.delete_database_record(db['id'])\n\t\t\t\t\tprint(\"\\t\" + 'Could not find database \"' + db['name'] + '\" on server. Deleted database from records as it no longer exists.')\n\t\t\t\telse:\n\t\t\t\t\tprint(\"\\t\" + 'Could not find database \"' + db['name'] + '\" on server')",
"def add_mirror(self, v: UVec) -> None:\n n = Vec3(v).normalize()\n self.add(f\"mirror(v = [{n.x:g}, {n.y:g}, {n.z:g}])\")",
"def mirror_deb(self, repo_name, alias=None):\n print 'Mirror creation start'\n if alias is None:\n alias = repo_name\n cmds = []\n\n # create mirrors\n for conf in self.DISRS['deb']:\n cmds.append([\n 'aptly', 'mirror', 'create', \n '-with-sources=true', '%s_%s' % (alias, conf['dist']),\n '%s%s/%s/%s' % (self.api_url, self.api_user, repo_name, conf['os']),\n conf['dist']\n ])\n # update mirrors\n cmds.extend(self.aptly_update(repo_name, alias))\n\n # Create snapshots and publish\n for conf in self.DISRS['deb']:\n name = '%s_%s' % (alias, conf['dist'])\n cmds.append([\n \"aptly\", \"snapshot\", \"create\",\n name, \"from\", \"mirror\", name\n ])\n cmds.append([\n \"aptly\", \"publish\", \"snapshot\",\n \"-force-overwrite=true\", '-gpg-key=%s' % GPG_KEY,\n \"-architectures=amd64,i386,source\",\n name, name\n ])\n for cmd in cmds:\n self.run(cmd)\n print 'Mirror creation done'",
"async def mirror(folder):\n rm_files, rm_sub_folders = await rm.get_contents(folder.rm.id_)\n\n await process_files(folder, rm_files)\n await process_sub_folders(folder, rm_sub_folders)\n\n folder.save()",
"def nfvi_kube_rootca_host_update_list(self, new_list):\n self._nfvi_kube_rootca_host_update_list = new_list",
"def mirror_dir(\n self,\n remote_dir: str = None,\n local_root: str = 'mirror',\n blacklist_dir_names: Iterable[str] = ('python', 'build_python', 'backend', 'logs'),\n whitelist_file_ext: Iterable[str] = ('.txt', '.json', '.spec', '.sim', '.pkl'),\n ):\n remote_dir = remote_dir or self.remote_home_dir\n\n logger.info(f'Mirroring remote dir {remote_dir}')\n\n with utils.BlockTimer() as timer:\n self.walk_remote_path(\n self.remote_home_dir,\n func_on_files = functools.partial(self.mirror_file, local_root = local_root),\n func_on_dirs = lambda d, _: utils.ensure_parents_exist(d),\n blacklist_dir_names = tuple(blacklist_dir_names),\n whitelist_file_ext = tuple(whitelist_file_ext),\n )\n\n logger.info(f'Mirroring complete. {timer}')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
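For reference, a standalone sketch of what "append a mirror and refresh pacman databases" in the row above amounts to outside the wrapper class; the chroot root, mirrorlist path, and mirror URL here are assumptions chosen purely for illustration.

import subprocess

# Hypothetical chroot location and a hypothetical mirror entry.
chroot_root = '/var/lib/archbuild/custom-x86_64/root'
mirrorlist = chroot_root + '/etc/pacman.d/mirrorlist'
mirror = 'Server = https://mirror.example.org/archlinux/$repo/os/$arch'

# Append the new entry to the chroot's mirrorlist.
with open(mirrorlist, 'a') as f:
    f.write(mirror + '\n')

# Refresh the package databases inside the chroot so the new mirror is picked up.
subprocess.run(['arch-nspawn', chroot_root, 'pacman', '-Sy'], check=True)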
Set the mirror to the Arch Linux Archive repository of the given date.
|
def set_date(self, date, write=True):
date_url = f'{date:/%Y/%m/%d}'
mirror = Mirrorlist.archive_url + date_url + '/$repo/os/$arch'
self.set(mirror, write)
return mirror
|
[
"def mirror_deb(self, repo_name, alias=None):\n print 'Mirror creation start'\n if alias is None:\n alias = repo_name\n cmds = []\n\n # create mirrors\n for conf in self.DISRS['deb']:\n cmds.append([\n 'aptly', 'mirror', 'create', \n '-with-sources=true', '%s_%s' % (alias, conf['dist']),\n '%s%s/%s/%s' % (self.api_url, self.api_user, repo_name, conf['os']),\n conf['dist']\n ])\n # update mirrors\n cmds.extend(self.aptly_update(repo_name, alias))\n\n # Create snapshots and publish\n for conf in self.DISRS['deb']:\n name = '%s_%s' % (alias, conf['dist'])\n cmds.append([\n \"aptly\", \"snapshot\", \"create\",\n name, \"from\", \"mirror\", name\n ])\n cmds.append([\n \"aptly\", \"publish\", \"snapshot\",\n \"-force-overwrite=true\", '-gpg-key=%s' % GPG_KEY,\n \"-architectures=amd64,i386,source\",\n name, name\n ])\n for cmd in cmds:\n self.run(cmd)\n print 'Mirror creation done'",
"def set(self, mirror, write=True):\n self.mirrors = [mirror]\n if write:\n self.write()",
"def mirror_rpm(self, repo_name, alias=None):\n print 'Mirror creation start'\n if alias is None:\n alias = repo_name\n cmds = []\n # create clean repository\n for conf in self.DISRS['rpm']:\n name = '%s_%s%s' % (alias, conf['os'], conf['dist'])\n path = RPM_PATH % name\n src_path = SRPM_PATH % name\n cmds.append(['mkdir', '-p', '%sPackages/' % path])\n cmds.append(['mkdir', '-p', '%sPackages/' % src_path])\n # download packages and run repo index\n cmds.extend(self.manage_rpms(repo_name, alias, update=False))\n for cmd in cmds:\n self.run(cmd)\n print 'Mirror creation done'",
"def update_mirror(self, mirror, priority=None, ssl_cert=None,\n ssl_key=None):\n\n if not isinstance(mirror, RepositoryURI):\n mirror = RepositoryURI(mirror, priority=priority,\n ssl_cert=ssl_cert, ssl_key=ssl_key)\n\n target = self.get_mirror(mirror)\n target.priority = mirror.priority\n target.ssl_cert = mirror.ssl_cert\n target.ssl_key = mirror.ssl_key\n self.mirrors.sort(key=URI_SORT_POLICIES[self.__sort_policy])",
"def BhavCopy_for_date(self, date_string):\n date_obj = datetime.datetime.strptime(date_string, \"%Y-%m-%d\").date()\n records = Bhavcopy_bse_downloader(date=date_obj)\n r = redis_connection()\n if len(records)>0:\n for record in records:\n r.hmset(record[\"SC_NAME\"], record)\n else:\n r.flushall()\n r.set(\"date\", date_string)\n raise cherrypy.HTTPRedirect(\"/\")",
"def add_mirror(self, mirror, priority=None, ssl_cert=None,\n ssl_key=None):\n\n def dup_check(mirror):\n if self.has_mirror(mirror):\n raise api_errors.DuplicateRepositoryMirror(\n mirror)\n\n self.__add_uri(\"mirrors\", mirror, dup_check=dup_check,\n priority=priority, ssl_cert=ssl_cert, ssl_key=ssl_key)",
"def mirror(mirror='pip', py=3):\n subprocess.call(\"pip\" + str(py) +\" install pip -U\", shell=True)\n cmd = \"pip\" + str(py) +\" config set global.index-url \" + (pypi[mirror] if mirror in pypi else mirror)\n s = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]\n return s",
"def copy(self, path='/etc/pacman.d/mirrorlist'):\n copy2(path, self.path)\n self.chroot.refresh()\n self.read()",
"def install():\n\n # Add archzfs repository\n config_file = '/etc/pacman.conf'\n append(config_file, '[demz-repo-core]', use_sudo=True)\n append(config_file, 'Server = http://demizerone.com/$repo/$arch', use_sudo=True)\n\n # Add key\n sudo('pacman-key -r 0EE7A126')\n sudo('pacman-key --lsign-key 0EE7A126')\n\n # Update the package database\n arch.update_index()\n\n # Install package\n require.arch.package('archzfs')\n\n # Synchronize user\n dotfiles.sync('fabrecipes/zfs/user/', '$HOME/')\n dotfiles.sync('fabrecipes/zfs/sys/', '/', use_sudo='true')",
"def get_root_url_for_date(date):\n year = date[:4]\n mm = date[5:7]\n hostname = get_host_name()\n if date <= '2016-10-01':\n aosdir = 'AOS'\n elif date <= '2017-01-27':\n aosdir = 'AOS64'\n else:\n aosdir = 'APE1'\n return \"%s/index.php?dir=%s/CONTAINER/%s/\" % (hostname, aosdir, date)",
"def add(self, mirror, write=True):\n self.mirrors.append(mirror)\n if write:\n self.write()",
"def mirror(also_migrate=None, force=False, wait=False, date_from=None):\n halt_if_debug_mode(force=force)\n task = migrate_from_mirror(\n also_migrate=also_migrate, disable_external_push=True, date_from=date_from\n )\n if wait:\n wait_for_all_tasks(task)",
"def update(self, arch=None):\n if arch is not None:\n self.set_arch(arch)\n self.set_panorex(self.compute_panorex())",
"def set_repo(self, repo):\n self.repo = repo\n self.name = repo.split('/')[-1]",
"def from_distro(self, from_distro):\n\n self._from_distro = from_distro",
"def install_date(self, install_date: str):\n\n self._install_date = install_date",
"def update_backup_archive(backup_name, archive_dir):\n path_to_archive = os.path.join(archive_dir, backup_name)\n tmpdir = tempfile.mkdtemp(dir=archive_dir)\n try:\n subprocess.check_call(\n ['gunzip', path_to_archive + '.tgz'],\n stdout=DEVNULL, stderr=DEVNULL)\n # 70-persistent-net.rules with the correct MACs will be\n # generated on the linux boot on the cloned side. Remove\n # the stale file from original side.\n remove_from_archive(path_to_archive + '.tar',\n 'etc/udev/rules.d/70-persistent-net.rules')\n # Extract only a subset of directories which have files to be\n # updated for oam-ip and MAC addresses. After updating the files\n # these directories are added back to the archive.\n subprocess.check_call(\n ['tar', '-x',\n '--directory=' + tmpdir,\n '-f', path_to_archive + '.tar',\n 'etc', 'postgres', 'config',\n 'hieradata'],\n stdout=DEVNULL, stderr=DEVNULL)\n update_oamip_in_archive(tmpdir)\n update_mac_in_archive(tmpdir)\n update_disk_serial_id_in_archive(tmpdir)\n update_sysuuid_in_archive(tmpdir)\n subprocess.check_call(\n ['tar', '--update',\n '--directory=' + tmpdir,\n '-f', path_to_archive + '.tar',\n 'etc', 'postgres', 'config',\n 'hieradata'],\n stdout=DEVNULL, stderr=DEVNULL)\n subprocess.check_call(['gzip', path_to_archive + '.tar'])\n shutil.move(path_to_archive + '.tar.gz', path_to_archive + '.tgz')\n\n except Exception as e:\n LOG.error(\"Update of backup archive {} failed {}\".format(\n path_to_archive, str(e)))\n raise CloneFail(\"Failed to update backup archive\")\n\n finally:\n if not DEBUG:\n shutil.rmtree(tmpdir, ignore_errors=True)",
"def _download( self ):\n self._system.download_file(\"https://github.com/mastbaum/avalanche/tarball/\" + self._tar_name)",
"def addArchiveVersion(self, archiveVersion: cern.lsa.domain.settings.ArchiveVersion) -> None:\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
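As a quick illustration of the mirror string built by set_date in the row above, the snippet below composes a dated Arch Linux Archive URL; the base URL is an assumption standing in for Mirrorlist.archive_url, which is not shown here.

from datetime import date

ARCHIVE_URL = 'https://archive.archlinux.org/repos'  # assumed value of Mirrorlist.archive_url
snapshot = date(2021, 6, 1)
mirror = f'{ARCHIVE_URL}{snapshot:/%Y/%m/%d}/$repo/os/$arch'
# -> 'https://archive.archlinux.org/repos/2021/06/01/$repo/os/$arch'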
Make the chroot using mkarchroot.
|
def make(self):
if not self.working_dir.exists():
self.working_dir.mkdir(parents=True)
cmd = ['mkarchroot', str(self.root), 'base-devel', 'devtools']
cmdlog.run(cmd)
|
[
"def create_chroot_arch(path: str, packages: str = \"base\"):\n print(\"Creating arch at {0}\".format(path))\n os.makedirs(path, exist_ok=False)\n ctr_create(\"docker.io/library/archlinux:latest\", path, \"sed -i 's/^#ParallelDownloads/ParallelDownloads/g' /etc/pacman.conf && pacman -Sy --noconfirm --needed arch-install-scripts && pacstrap -c /chrootfld {0}\".format(packages))",
"def unsafechroot():\n arch_chroot_into(distro_name, btrfs_dev_uuid)",
"def task__iso_mkdir_root() -> types.TaskDict:\n return helper.Mkdir(directory=constants.ISO_ROOT, task_dep=[\"_build_root\"]).task",
"def do_chroot(self, op_path):\n print(\"do_chroot(self, %s)\" % op_path)\n\n # do we need to verify the path, as in a file\n # referenced by the path\n # copy the file, and insure execute permission\n _target = os.path.join(self._rootpath, os.path.basename(op_path))\n shutil.copyfile(op_path, _target)\n os.chmod(_target, (stat.S_IXUSR | stat.S_IRUSR\n | stat.S_IXGRP | stat.S_IRGRP\n | stat.S_IXOTH | stat.S_IROTH))\n\n # build up the fakeroot/fakechroot wrapper for the command\n # assumes the command is in the root directory, and\n # thus executes it there.\n cmd = [FAKECHROOT]\n cmd += [FAKEROOT,\n '-i', self._fakeroot_state.name,\n '-s', self._fakeroot_state.name]\n cmd += ['/usr/sbin/chroot', self._rootpath]\n cmd += [os.path.sep + os.path.basename(op_path)]\n\n if verbosity > 0:\n print(\"do_chroot\")\n print(cmd)\n\n # execute the command in the fakechroot environment\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as ex:\n if verbosity > 0:\n print(ex)\n raise OpxrootfsError(\"Can't run script\")\n\n # collect status for return/display\n # need to remove the command executed from the sysroot\n os.remove(os.path.join(self._rootpath, os.path.basename(op_path)))",
"def make_fsroot(root_dir, app):\n newroot_norm = fs.norm_safe(root_dir)\n\n emptydirs = [\n '/bin',\n '/dev',\n '/etc',\n '/home',\n '/lib',\n '/lib64',\n '/opt',\n '/proc',\n '/root',\n '/run',\n '/sbin',\n '/sys',\n '/tmp',\n '/usr',\n '/var/cache',\n '/var/empty',\n '/var/lib',\n '/var/lock',\n '/var/log',\n '/var/opt',\n '/var/spool',\n '/var/tmp',\n '/var/spool/keytabs',\n '/var/spool/tickets',\n '/var/spool/tokens',\n # for SSS\n '/var/lib/sss',\n ]\n\n stickydirs = [\n '/opt',\n '/run',\n '/tmp',\n '/var/cache',\n '/var/lib',\n '/var/lock',\n '/var/log',\n '/var/opt',\n '/var/tmp',\n '/var/spool/keytabs',\n '/var/spool/tickets',\n '/var/spool/tokens',\n ]\n\n # these folders are shared with underlying host and other containers,\n mounts = [\n '/bin',\n '/etc', # TODO: Add /etc/opt\n '/lib',\n '/lib64',\n '/root',\n '/sbin',\n '/usr',\n # for SSS\n '/var/lib/sss',\n # TODO: Remove below once PAM UDS is implemented\n '/var/tmp/treadmill/env',\n '/var/tmp/treadmill/spool',\n ]\n\n # Add everything under /opt\n mounts += glob.glob('/opt/*')\n\n for directory in emptydirs:\n fs.mkdir_safe(newroot_norm + directory)\n\n for directory in stickydirs:\n os.chmod(newroot_norm + directory, 0o777 | stat.S_ISVTX)\n\n # /var/empty must be owned by root and not group or world-writable.\n os.chmod(os.path.join(newroot_norm, 'var/empty'), 0o711)\n\n fs_linux.mount_bind(\n newroot_norm, os.path.join(os.sep, 'sys'),\n source='/sys',\n recursive=True, read_only=False\n )\n # TODO: For security, /dev/ should be minimal and separated to each\n # container.\n fs_linux.mount_bind(\n newroot_norm, os.path.join(os.sep, 'dev'),\n source='/dev',\n recursive=True, read_only=False\n )\n # Per FHS3 /var/run should be a symlink to /run which should be tmpfs\n fs.symlink_safe(\n os.path.join(newroot_norm, 'var', 'run'),\n '/run'\n )\n # We create an unbounded tmpfs mount so that runtime data can be written to\n # it, counting against the memory limit of the container.\n fs_linux.mount_tmpfs(newroot_norm, '/run')\n\n # Make shared directories/files readonly to container\n for mount in mounts:\n if os.path.exists(mount):\n fs_linux.mount_bind(\n newroot_norm, mount,\n recursive=True, read_only=True\n )\n\n if app.docker:\n _mount_docker_tmpfs(newroot_norm)",
"def InitializeNewRoot(self):\n self.logger.debug('InitializeNewRoot')\n\n # create tmpfs\n process_utils.Spawn(['mount', '-n', '-t', 'tmpfs',\n '-o', 'size=' + self.size,\n '-o', 'mode=755',\n 'tmpfs', self.new_root],\n check_call=True)\n\n self.logger.debug('create tmpfs layout')\n tmpfs_layout_dirs = [os.path.join(self.new_root, subdir)\n for subdir in ['bin', 'dev', 'etc', 'lib', 'log',\n 'mnt/stateful_partition', 'proc',\n 'root', 'sys', 'tmp', 'var']]\n process_utils.Spawn(['mkdir', '-p'] + tmpfs_layout_dirs, check_call=True)\n # use symbolic links to make /usr/local/bin, /bin/, /usr/bin same as /sbin\n process_utils.Spawn(['ln', '-s', '.', os.path.join(self.new_root, 'usr')],\n check_call=True)\n process_utils.Spawn(['ln', '-s', '.', os.path.join(self.new_root, 'local')],\n check_call=True)\n process_utils.Spawn(['ln', '-s', 'bin',\n os.path.join(self.new_root, 'sbin')],\n check_call=True)\n process_utils.Spawn(['ln', '-s', '/run',\n os.path.join(self.new_root, 'var', 'run')],\n check_call=True)\n process_utils.Spawn(['ln', '-s', '/run/lock',\n os.path.join(self.new_root, 'var', 'lock')],\n check_call=True)\n\n self.logger.debug('copy necessary files and dirs')\n files_dirs = self.file_dir_list + [\n sysconfig.get_python_lib(standard_lib=True),\n sysconfig.get_python_inc()]\n files_dirs = list(filter(os.path.exists, files_dirs))\n process_utils.Spawn(('tar -h -c %s | '\n 'tar -C %s -x --skip-old-files' %\n (' '.join(files_dirs), self.new_root)),\n shell=True, call=True, log=True)\n\n self.logger.debug('copy necessary binaries')\n bin_deps = self.binary_list + ['python3', 'busybox']\n bin_deps += self._GetLoadedLibrary()\n\n bin_paths = [(k, process_utils.SpawnOutput(['which', k]).strip())\n for k in bin_deps]\n self.logger.warning('following binaries are not found: %s',\n [k for (k, v) in bin_paths if not v])\n # remove binaries that are not found\n bin_paths = {k: v for (k, v) in bin_paths if v}\n # copy binaries and their dependencies\n process_utils.Spawn(\n ('tar -ch $(lddtree -l %s 2>/dev/null | sort -u) | '\n 'tar -C %s -x --skip-old-files' %\n (' '.join(bin_paths.values()), self.new_root)),\n check_call=True, shell=True, log=True)\n\n # install busybox for common utilities\n process_utils.Spawn(\n [os.path.join(self.new_root, 'bin', 'busybox'), '--install',\n os.path.join(self.new_root, 'bin')], check_call=True, log=True)\n\n # create /etc/issue\n open(os.path.join(self.new_root, 'etc', 'issue'), 'w').write(self.etc_issue)\n\n self.logger.debug('rebind mount points')\n rebind_dirs = ['dev', 'proc', 'sys', 'run']\n for node in rebind_dirs:\n src_dir = os.path.join('/', node)\n dst_dir = os.path.join(self.new_root, node)\n if not os.path.exists(dst_dir):\n os.makedirs(dst_dir)\n process_utils.Spawn(['mount', '--rbind', src_dir, dst_dir],\n check_call=True)\n process_utils.Spawn(['cp', '-fd', '/etc/mtab',\n os.path.join(self.new_root, 'etc', 'mtab')],\n check_call=True)",
"def run_chrooted(self, call_args, *args, **kwargs):\n assert self.chroot_prepared\n run((\"chroot\", self.mountpoint) + call_args, *args, **kwargs)",
"def test_create():\n with patch(\"os.makedirs\") as makedirs:\n with patch(\"salt.modules.chroot.exist\") as exist:\n exist.return_value = True\n assert chroot.create(\"/chroot\")\n makedirs.assert_not_called()\n\n with patch(\"os.makedirs\") as makedirs:\n with patch(\"salt.modules.chroot.exist\") as exist:\n exist.return_value = False\n assert chroot.create(\"/chroot\")\n makedirs.assert_called()",
"def task__build_root() -> types.TaskDict:\n return targets.Mkdir(directory=config.BUILD_ROOT).task",
"def grub2_install(target, root):\n with util.RunInChroot(target) as in_chroot:\n in_chroot(['grub2-install', '--recheck', root])",
"def create_chroot_ubuntu(path: str, packages: str = \"systemd-container\"):\n print(\"Creating ubuntu at {0}\".format(path))\n os.makedirs(path, exist_ok=False)\n ctr_create(\"docker.io/library/ubuntu:rolling\", path, \"apt-get update && apt-get install -y debootstrap && debootstrap --include={1} --components=main,universe,restricted,multiverse --arch amd64 {0} /chrootfld\".format(ubuntu_version, packages))",
"def build(self, no_clean, verbose):\n\n os.chdir(self._root_dir)\n\n for spec in self._specifications:\n print(\"Building sysroot from {}\".format(spec))\n\n # The name of the sysroot image file to be built.\n test_name = os.path.basename(spec).split('.')[0]\n sysroot = os.path.join(self._root_dir, 'sysroot',\n '{}-{}'.format(self._target, test_name))\n\n # Build the command line.\n args = ['pyqtdeploy-sysroot']\n\n if no_clean:\n args.append('--no-clean')\n\n if verbose:\n args.append('--verbose')\n\n args.extend(['--source-dir', os.path.join(self._root_dir, 'src')])\n args.extend(['--target', self._target])\n args.extend(['--sysroot', sysroot])\n args.append(spec)\n\n if verbose:\n print(\"Running: '{}'\".format(' '.join(args)))\n\n try:\n subprocess.check_call(args)\n except subprocess.CalledProcessError:\n print(\"Build of sysroot from {} failed\".format(spec))\n break\n\n print(\"Build of sysroot from {} successful\".format(spec))",
"def create_chroot_fedora(path: str, packages: str = \"systemd passwd dnf fedora-release vim-minimal\"):\n print(\"Creating fedora at {0}\".format(path))\n os.makedirs(path, exist_ok=False)\n ctr_create(\"registry.fedoraproject.org/fedora\", path, \"dnf -y --releasever={0} --installroot=/chrootfld --disablerepo='*' --enablerepo=fedora --enablerepo=updates install {1}\".format(fedora_version, packages))",
"def zephyr_build(ec_root: Path, board: str, image: str) -> Optional[Path]:\n\n target = Path(\n f\"build/zephyr/{board}/build-{image.lower()}/compile_commands.json\"\n )\n cmd = [\"zmake\", \"configure\", board]\n\n print(\" \".join(cmd))\n status = subprocess.run(cmd, check=False, cwd=ec_root)\n\n if status.returncode != 0:\n return None\n\n # Replace /mnt/host/source with path of chromiumos outside chroot\n default_chromiumos_path_outside_chroot = os.path.join(\n Path.home(), \"chromiumos\"\n )\n chromiumos_path_outside_chroot = os.environ.get(\n \"EXTERNAL_TRUNK_PATH\", default_chromiumos_path_outside_chroot\n )\n chromiumos_path_inside_chroot = \"/mnt/host/source\"\n\n print(\n f\"Replacing '{chromiumos_path_inside_chroot}' with \"\n + f\"'{chromiumos_path_outside_chroot}' in file {target}\"\n )\n\n target.write_text(\n target.read_text().replace(\n chromiumos_path_inside_chroot, chromiumos_path_outside_chroot\n )\n )\n\n return target",
"def test_chroot(self):\n chroot_directory = None\n with minijail.MinijailChroot() as chroot:\n chroot_directory = chroot.directory\n self.assertListEqual(\n sorted(os.listdir(chroot_directory)),\n ['dev', 'lib', 'lib32', 'lib64', 'proc', 'tmp', 'usr'])\n\n self.assertEqual(\n chroot.get_binding(chroot.tmp_directory),\n minijail.ChrootBinding(chroot.tmp_directory, '/tmp', True))\n\n for directory in ['/lib', '/lib32', '/lib64', '/usr/lib', '/usr/lib32']:\n self.assertEqual(\n chroot.get_binding(directory),\n minijail.ChrootBinding(directory, directory, False))\n\n self.assertIsNone(chroot.get_binding('/usr'))\n\n self.assertFalse(os.path.exists(chroot_directory))",
"def ChrootCommand(RootPath: str, command: str):\n try:\n ChrootMountPaths(RootPath)\n ChrootRunCommand(RootPath, command)\n finally:\n ChrootUnmountPaths(RootPath)",
"def install():\n #requires root\n os.system(\"cp -r bin/* /usr/bin/\")",
"def makepkg(self, pkgbuild, deps=[]):\n if not self.exists():\n self.make()\n pkgbuild.update()\n cmd = ['makechrootpkg', '-cr', str(self.working_dir)]\n for d in deps:\n cmd += ['-I', d]\n cmd += ['--', '-s']\n with cwd(pkgbuild.builddir):\n return cmdlog.run(cmd)",
"def _install(mpt):\n _check_resolv(mpt)\n boot_, tmppath = prep_bootstrap(mpt) or salt.syspaths.BOOTSTRAP\n # Exec the chroot command\n cmd = \"if type salt-minion; then exit 0; \"\n cmd += \"else sh {} -c /tmp; fi\".format(os.path.join(tmppath, \"bootstrap-salt.sh\"))\n return not __salt__[\"cmd.run_chroot\"](mpt, cmd, python_shell=True)[\"retcode\"]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Run pacman with the given flags in the chroot.
|
def pacman(self, flags):
cmdlog.run(['arch-nspawn', str(self.root), 'pacman', flags])
|
[
"def pacman(args, confirm=False):\n write_stdin(['sudo', 'pacman', *args], confirm and [] or repeat('y\\n'))",
"def run_chrooted(self, call_args, *args, **kwargs):\n assert self.chroot_prepared\n run((\"chroot\", self.mountpoint) + call_args, *args, **kwargs)",
"def run_foam():\n os.chdir('cfd')\n run_cmd('./Allclean')\n run_cmd('./Allrun')\n os.chdir('../')",
"def installAgentSuse():\n zypperCommand = [\"zypper\", \"install\", \"-y\", \"ambari-agent\"]\n return execOsCommand(zypperCommand)",
"def install():\n\n # Add archzfs repository\n config_file = '/etc/pacman.conf'\n append(config_file, '[demz-repo-core]', use_sudo=True)\n append(config_file, 'Server = http://demizerone.com/$repo/$arch', use_sudo=True)\n\n # Add key\n sudo('pacman-key -r 0EE7A126')\n sudo('pacman-key --lsign-key 0EE7A126')\n\n # Update the package database\n arch.update_index()\n\n # Install package\n require.arch.package('archzfs')\n\n # Synchronize user\n dotfiles.sync('fabrecipes/zfs/user/', '$HOME/')\n dotfiles.sync('fabrecipes/zfs/sys/', '/', use_sudo='true')",
"def ctr_create(ctrimage: str, path: str, cmd: str):\n full_cmd = [\"podman\", \"run\", \"--rm\", \"--privileged\", \"--pull=always\", \"-it\", \"--volume={0}:/chrootfld\".format(path), \"--name=chrootsetup\", ctrimage, \"bash\", \"-c\", cmd]\n print(\"Running {0}\".format(full_cmd))\n subprocess.run(full_cmd, check=True)",
"def run_amtool(args):\n # TODO(jelmer): Support setting the current user, e.g. for silence ownership.\n ret = subprocess.run(\n [\"/usr/bin/amtool\"] + args, shell=False, text=True,\n stdout=subprocess.PIPE)\n return ret.stdout",
"def do_configure():\n if flag_do_fetch:\n fetch_in_volume()\n dochdir(ssdroot)\n targdir = flag_subvol\n if flag_snapshot:\n targdir = flag_snapshot\n do_configure_binutils(targdir)\n do_setup_cmake(targdir)",
"def run_amtool(args):\n # TODO(jelmer): Support setting the current user, e.g. for silence\n # ownership.\n ret = subprocess.run(\n [\"amtool\"] + args, shell=False, universal_newlines=True,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n return ret.stdout",
"def main() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument('path', action=AccessibleDir)\n parser.add_argument(\"--ignore-shorties\", type=non_negative_int_validator)\n parser.add_argument(\"--exit-code\", action='store_true')\n args = parser.parse_args()\n\n run_dir = os.getcwd()\n sys.stdout.write(f'Running Koles from: {run_dir}')\n\n try:\n sys.exit(run_koles(args, run_dir=run_dir))\n except KeyboardInterrupt:\n pass",
"def add(port, repo=False):\n if env.flags[\"chroot\"]:\n args = (\"pkg_add\", \"-C\", env.flags[\"chroot\"])\n else:\n args = (\"pkg_add\",)\n if repo:\n args += (\"-r\", port.attr[\"pkgname\"])\n else:\n args += (port.attr[\"pkgfile\"],)\n return args",
"def run(args):\n _set_development_path()\n from mabot import run\n run(args)",
"def main():\n # Create the basic parser and add all globally common options.\n parser = argparse.ArgumentParser(\n description=_(\"\"\"\\\n The GNU Mailman mailing list management system\n Copyright 1998-2017 by the Free Software Foundation, Inc.\n http://www.list.org\n \"\"\"),\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\n '-v', '--version',\n action='version', version=MAILMAN_VERSION_FULL,\n help=_('Print this version string and exit'))\n parser.add_argument(\n '-C', '--config',\n help=_(\"\"\"\\\n Configuration file to use. If not given, the environment variable\n MAILMAN_CONFIG_FILE is consulted and used if set. If neither are\n given, a default configuration file is loaded.\"\"\"))\n # Look at all modules in the mailman.bin package and if they are prepared\n # to add a subcommand, let them do so. I'm still undecided as to whether\n # this should be pluggable or not. If so, then we'll probably have to\n # partially parse the arguments now, then initialize the system, then find\n # the plugins. Punt on this for now.\n subparser = parser.add_subparsers(title='Commands')\n subcommands = []\n for command_class in find_components('mailman.commands', ICLISubCommand):\n command = command_class()\n verifyObject(ICLISubCommand, command)\n subcommands.append(command)\n subcommands.sort(key=cmp_to_key(_help_sorter))\n for command in subcommands:\n command_parser = subparser.add_parser(\n command.name, help=_(command.__doc__))\n command.add(parser, command_parser)\n command_parser.set_defaults(func=command.process)\n args = parser.parse_args()\n if len(args.__dict__) <= 1:\n # No arguments or subcommands were given.\n parser.print_help()\n parser.exit()\n # Initialize the system. Honor the -C flag if given.\n config_path = (None if args.config is None\n else os.path.abspath(os.path.expanduser(args.config)))\n initialize(config_path)\n # Perform the subcommand option.\n with transaction():\n args.func(args)",
"def main():\n\n func_map = {\n 'init': handle_init,\n 'destroy': handle_destroy,\n 'addFile': handle_add_file,\n 'listFile': handle_list_file,\n 'showFile': handle_show_file,\n 'dropFile': handle_drop_file,\n 'addPeer': handle_add_peer,\n 'listPeer': handle_list_peer,\n 'dropPeer': handle_drop_peer,\n }\n argv = sys.argv\n\n default_dir = os.path.join(os.getenv('HOME'), '.magicsack')\n sub_cmd_at = 1\n if len(argv) >= 4:\n if argv[1] == '-m' or argv[1] == '--magic_path':\n sub_cmd_at = 3\n\n # DEBUG\n print(\"SUB CMD AT %d\" % sub_cmd_at)\n # END\n\n desc = 'command-line interface to the Magic Sack'\n parser = ArgumentParser(description=desc)\n parser.add_argument('-m', '--magic_path', default=default_dir, type=str,\n help='select folder/directory (default=%s)' % default_dir)\n parser.add_argument('command', # required\n help='subCommand to be executed')\n global_ns = parser.parse_args(sys.argv[1: sub_cmd_at + 1])\n\n global_ns.app_name = 'magicSack %s' % __version__\n global_ns.default_dir = default_dir\n\n sub_argv = sys.argv[sub_cmd_at + 1:]\n\n # DEBUG\n print(\"sub_argv:\")\n for ndx, txt in enumerate(sub_argv):\n print(\" %d %s\" % (ndx, txt))\n # END\n\n # fixups\n magic_path = global_ns.magic_path\n global_ns.rng = SystemRNG()\n\n # DEBUG\n print(\"magic_path: %s\" % magic_path)\n # END\n\n if not magic_path:\n print(\"magic_path must be specified\")\n sys.exit(1)\n\n pass_phrase = getpass.getpass('passphrase: ')\n if not pass_phrase or pass_phrase == '':\n print('you must supply a passphrase')\n sys.exit(1)\n global_ns.pass_phrase = pass_phrase\n command = global_ns.command\n if command == 'init':\n handle_init(global_ns, sub_argv, pass_phrase)\n else:\n if not verify_pass_phrase(global_ns, pass_phrase):\n print('that was not the right passphrase!')\n sys.exit(1)\n func_map[command](global_ns, sub_argv)",
"def _run_pkgmgr_cmd(vm, shell, pkgmgr, cmd):\n\n shell_cmd = ' '.join([pkgmgr, cmd])\n\n log.debug('Running %s on %s', shell_cmd, vm.uid)\n\n if get_run_as_elevated(vm.uid):\n username, password = get_credentials(vm.uid)\n\n return shell.run_as_elevated(shell_cmd, password)\n\n return shell.run(shell_cmd)",
"def control(self, args):\n yield self.local['docker'][\n 'exec',\n args.name,\n 'supervisorctl',\n '-c',\n '/etc/supervisor/supervisord.conf',\n args.mcmd,\n 'webapp',\n ]",
"def amarok():\n r1 = env.run(SUDO_INSTALL + \"amarok\")\n return r1",
"def _require_pacapt(self):\n if self._require_pacapt_already_run:\n return # optimization hack: let's only run once per process\n self.report(\"checking remote side for pacapt \"\n \"(an OS-agnostic package manager)\")\n with api.quiet():\n remote_missing_pacapt = api.run('ls /usr/bin/pacapt').failed\n if remote_missing_pacapt:\n self.report(\n ydata.FAIL + \" pacapt does not exist, installing it now\")\n local_pacapt_path = os.path.join(\n os.path.dirname(ydata.__file__), 'pacapt')\n self.put(local_pacapt_path, '/usr/bin', use_sudo=True)\n else:\n self.report(ydata.SUCCESS + \" pacapt is already present\")\n api.sudo('chmod o+x /usr/bin/pacapt')\n self._require_pacapt_already_run = True",
"def test_forwarder_cli(start_forwarder):\n sys.argv = sys.argv[:1]\n sys.argv += [\"forwarder\"]\n main()\n sys.argv = sys.argv[:1]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
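A minimal standalone equivalent of the pacman() wrapper in the row above, assuming only the devtools arch-nspawn binary is available. Note that flags travels as a single argv element, so this suits one combined token such as '-Syuu' and would need splitting for multi-word invocations.

import subprocess

def pacman_in_chroot(root: str, flags: str) -> None:
    # e.g. pacman_in_chroot('/var/lib/archbuild/custom-x86_64/root', '-Syu')
    subprocess.run(['arch-nspawn', root, 'pacman', flags], check=True)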
Update the chroot with `pacman -Syu`.
|
def update(self):
if not self.exists():
self.make()
else:
self.pacman('-Syuu')
|
[
"def scp_sysroot(ctx, user, host):\n _scp_dir(user, host, \"llvm-sysroot\")",
"def update():\n update_code()\n update_env()\n symlink()\n set_current()\n permissions()",
"def unsafechroot():\n arch_chroot_into(distro_name, btrfs_dev_uuid)",
"def update_fetch(self):\n Popen([\"mount\", \"-t\", \"devfs\", \"devfs\",\n \"{}/releases/{}/root/dev\".format(self.iocroot,\n self.release)]).communicate()\n copy(\"/etc/resolv.conf\",\n \"{}/releases/{}/root/etc/resolv.conf\".format(self.iocroot,\n self.release))\n\n # TODO: Check for STABLE/PRERELEASE/CURRENT/BETA if we support those.\n # TODO: Fancier.\n self.lgr.info(\"\\n* Updating {} to the latest patch level... \".format(\n self.release))\n\n os.environ[\"UNAME_r\"] = self.release\n os.environ[\"PAGER\"] = \"/bin/cat\"\n new_root = \"{}/releases/{}/root\".format(self.iocroot, self.release)\n if os.path.isfile(\"{}/etc/freebsd-update.conf\".format(new_root)):\n # 10.1-RELEASE and under have a interactive check\n if float(self.release.partition(\"-\")[0][:5]) <= 10.1:\n with NamedTemporaryFile(delete=False) as tmp_conf:\n conf = \"{}/usr/sbin/freebsd-update\".format(new_root)\n with open(conf) as update_conf:\n for line in update_conf:\n tmp_conf.write(re.sub(\"\\[ ! -t 0 \\]\", \"false\",\n line))\n\n os.chmod(tmp_conf.name, 0o755)\n Popen([tmp_conf.name, \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"fetch\"], stdout=PIPE, stderr=PIPE).communicate()\n os.remove(tmp_conf.name)\n else:\n Popen([\"freebsd-update\", \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"fetch\"], stdout=PIPE, stderr=PIPE).communicate()\n\n Popen([\"freebsd-update\", \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"install\"], stdout=PIPE, stderr=PIPE).communicate()\n\n try:\n # Why this sometimes doesn't exist, we may never know.\n os.remove(\"{}/releases/{}/root/etc/resolv.conf\".format(\n self.iocroot, self.release))\n except OSError:\n pass\n\n Popen([\"umount\", \"{}/releases/{}/root/dev\".format(\n self.iocroot, self.release)]).communicate()",
"def __upgrade_install__(self, name):\n install = su.Popen([name, \"-b\", self.path, \"-d\",\n f\"{self.path}/var/db/freebsd-update/\",\n \"-f\",\n f\"{self.path}/etc/freebsd-update.conf\",\n \"-r\",\n self.new_release, \"install\"], stderr=su.PIPE)\n install.communicate()\n\n return install.returncode",
"def ChrootCommand(RootPath: str, command: str):\n try:\n ChrootMountPaths(RootPath)\n ChrootRunCommand(RootPath, command)\n finally:\n ChrootUnmountPaths(RootPath)",
"def grub2_install(target, root):\n with util.RunInChroot(target) as in_chroot:\n in_chroot(['grub2-install', '--recheck', root])",
"def yaourt_packages_to_update():\n yaourt_proc = subprocess.Popen([\"/bin/yaourt -Qu\"], stdout=subprocess.PIPE, shell=True)\n (yaourt_out, yaourt_err) = yaourt_proc.communicate()\n if yaourt_proc.returncode == 0:\n pkgs_to_update = yaourt_out.decode(\"utf-8\").split(\"\\n\")\n pkgs_to_update.pop()\n return pkgs_to_update\n raise PackageManagerDoesNotExists",
"def _install(mpt):\n _check_resolv(mpt)\n boot_, tmppath = prep_bootstrap(mpt) or salt.syspaths.BOOTSTRAP\n # Exec the chroot command\n cmd = \"if type salt-minion; then exit 0; \"\n cmd += \"else sh {} -c /tmp; fi\".format(os.path.join(tmppath, \"bootstrap-salt.sh\"))\n return not __salt__[\"cmd.run_chroot\"](mpt, cmd, python_shell=True)[\"retcode\"]",
"def replaceRootpw(self, pwd):\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n # if starts with $ then assume encrypted\n isCrypted = re.match(r\"\\$\", pwd)\n if not isCrypted:\n pwd = self.cryptedPwd(pwd)\n isCrypted = True\n commandSection = self.sectionByName(\"command\")\n # change to known root pwd\n commandSection.string = re.sub(r\"(?m)^([ \\t]*rootpw[ \\t]+).*$\",\n r\"\\g<1>\" + (\"--iscrypted \" if isCrypted else \"\") + pwd,\n commandSection.string)\n return self",
"def start_root_update(self):",
"def update_uwsgitool():\n url = 'https://github.com/kyan001/PyMyApps/raw/master/UwsgiTool/uwsgiTool.py'\n if cct.update_file(__file__, url):\n cct.run_cmd('{py} \"{f}\"'.format(py=cct.get_py_cmd(), f=__file__))\n cit.bye(0)",
"def update_db(ctx):\r\n with ctx.lcd(settings.SRC_DIR):\r\n ctx.local('python2.6 ./vendor/src/schematic/schematic migrations')",
"def install_ycsb():\n logging.info('Install YCSB.')\n for n in [el for s in topology for el in s['client']]:\n# execo.Process('scp /home/bconfais/YCSB_ipfs_random_4nodes.tar.gz '+str(n['ip'])+':/tmp/YCSB_ipfs_random.tar.gz').run().stdout\n# execo.Process('scp /home/bconfais/YCSB_ipfs_random_4nodes_dummyfile.tar.gz '+str(n['ip'])+':/tmp/YCSB_ipfs_random.tar.gz').run().stdout\n execo.Process('scp /home/bconfais/YCSB_ipfs_random_4nodes_cache.tar.gz '+str(n['ip'])+':/tmp/YCSB_ipfs_random.tar.gz').run().stdout\n commands = [\n 'apt-get update; apt-get --yes --force-yes install openjdk-8-jre-headless',\n 'cd /tmp; tar zxvf YCSB_ipfs_random.tar.gz',\n# 'cd /tmp; mv YCSB_ipfs_random_4nodes YCSB',\n# 'cd /tmp; mv YCSB_ipfs_random_4nodes_dummyfile YCSB',\n 'cd /tmp; mv YCSB_ipfs_random_4nodes_cache YCSB',\n ]\n exec_commands(commands, [el for s in topology for el in s['client']])",
"def update(args) -> int:\n _update_cache()\n print(\"Listing upgradable packages...\")\n _list_upgradable\n print(\"Upgrading packages...\")\n _upgrade(args)\n # install(args)\n print(\"Cleaning up...\")\n run(\"sudo apt autoremove -y\", check=True)\n run(\"sudo apt purge -y\", check=True)\n return 0",
"def install_updates():\n installer_path = os.path.abspath(init_path + \"/.HoundSploit/houndsploit/\")\n if platform.system() == \"Darwin\":\n installer_path = os.path.abspath(installer_path + \"/install_db_darwin.sh\")\n os.system(installer_path)\n elif platform.system() == \"Linux\":\n installer_path = os.path.abspath(installer_path + \"/install_db_linux.sh\")\n os.system(installer_path)\n elif platform.system() == \"Windows\":\n installer_path = os.path.abspath(installer_path + \"/install_db_windows.ps1\")\n os.system(\"powershell.exe -ExecutionPolicy Bypass -File \" + installer_path)\n else:\n printf(\"ERROR: System not supported\")",
"def update_root(self, new_root_id):",
"def target_update(target,deps,cmd):\n\n if target_outdated(target,deps):\n xsys(cmd)",
"def update_args_with_rootdir(args: List[str]) -> List[str]:\n args.extend(['--rootdir', str(sympy_dir())])\n return args"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Build a package in the chroot using makechrootpkg.
|
def makepkg(self, pkgbuild, deps=[]):
if not self.exists():
self.make()
pkgbuild.update()
cmd = ['makechrootpkg', '-cr', str(self.working_dir)]
for d in deps:
cmd += ['-I', d]
cmd += ['--', '-s']
with cwd(pkgbuild.builddir):
return cmdlog.run(cmd)
|
[
"def package(ctx):\n ctx.run(\"rm -rf dist\")\n ctx.run(\"python setup.py sdist\")",
"def create_chroot_arch(path: str, packages: str = \"base\"):\n print(\"Creating arch at {0}\".format(path))\n os.makedirs(path, exist_ok=False)\n ctr_create(\"docker.io/library/archlinux:latest\", path, \"sed -i 's/^#ParallelDownloads/ParallelDownloads/g' /etc/pacman.conf && pacman -Sy --noconfirm --needed arch-install-scripts && pacstrap -c /chrootfld {0}\".format(packages))",
"def build():\n shell(\"python setup.py sdist\")",
"def make(self):\n if not self.working_dir.exists():\n self.working_dir.mkdir(parents=True)\n cmd = ['mkarchroot', str(self.root), 'base-devel', 'devtools']\n cmdlog.run(cmd)",
"def compile_and_install_package(package_path, package_name):\n # [NOTE] use relative dir\n #with cd(os.path.abspath(os.path.dirname(package_path))):\n with cd(os.path.dirname(package_path)):\n run(\"tar -xzvf %s\" % os.path.basename(package_path))\n with cd(package_name):\n run(\"./configure\")\n run(\"make -j 8\")\n sudo(\"make install\", pty=False)\n sudo(\"ldconfig\")",
"def task__build_root() -> types.TaskDict:\n return targets.Mkdir(directory=config.BUILD_ROOT).task",
"def _build(self, rebuild=0, iter=1):\n if iter == 1:\n if rebuild:\n log.info('%s: Rebuilding...', self.name)\n else:\n if self.load() and self.verify():\n log.info('%s: Already built', self.name)\n return self.runtime_packages\n else:\n self.reset()\n\n log.info('%s: Building... [pass %d]', self.name, iter)\n r, stdout, stderr = self.chroot.makepkg(self.pkgbuild,\n self.build_depends)\n\n if r == 0:\n self.packages |= set(self.pkgbuild.packagelist)\n if self.verify():\n self.save()\n return self.runtime_packages\n elif iter == 1:\n for line in stdout.splitlines():\n f = line.split(\": \")\n if f[0] == 'error' and f[1] == 'target not found':\n dep, _ = parse_restriction(f[2])\n type = self.pkgbuild.dependency_type(dep)\n log.info('%s: Missing %s: %s', self.name, type, dep)\n rs = self.pkgbuild.dependency_restrictions(dep)\n b = Builder(dep, self.pacman_conf, self.makepkg_conf,\n self.builddir, self.chrootdir, self.localdir,\n restrictions=rs)\n b._build(rebuild if rebuild > Builder.Rebuild.Package else \\\n False)\n if type == 'depends':\n self.depends |= set(b.packages)\n if type == 'makedepends':\n self.makedepends |= set(b.packages)\n if self.build_depends:\n return self._build(rebuild, iter + 1)\n\n return set()",
"def build_package(version):\n args, extra_args = parse_version_extra_args(version.extra_args)\n\n log.debug(\"Building version {0} with debianize.\".format(version))\n with version.checkout_tag:\n deb_dir = getcwd()\n # use a package build class which has all kinds of hooks.\n builder = PackageBuilder(version, args, extra_args, deb_dir)\n builder.build_package_and_dependencies()\n return builder.exit_code\n\n return 0",
"def zephyr_build(ec_root: Path, board: str, image: str) -> Optional[Path]:\n\n target = Path(\n f\"build/zephyr/{board}/build-{image.lower()}/compile_commands.json\"\n )\n cmd = [\"zmake\", \"configure\", board]\n\n print(\" \".join(cmd))\n status = subprocess.run(cmd, check=False, cwd=ec_root)\n\n if status.returncode != 0:\n return None\n\n # Replace /mnt/host/source with path of chromiumos outside chroot\n default_chromiumos_path_outside_chroot = os.path.join(\n Path.home(), \"chromiumos\"\n )\n chromiumos_path_outside_chroot = os.environ.get(\n \"EXTERNAL_TRUNK_PATH\", default_chromiumos_path_outside_chroot\n )\n chromiumos_path_inside_chroot = \"/mnt/host/source\"\n\n print(\n f\"Replacing '{chromiumos_path_inside_chroot}' with \"\n + f\"'{chromiumos_path_outside_chroot}' in file {target}\"\n )\n\n target.write_text(\n target.read_text().replace(\n chromiumos_path_inside_chroot, chromiumos_path_outside_chroot\n )\n )\n\n return target",
"def build_package(version):\n log.debug(\"Building puppet module version {0} with fpm.\".format(version))\n with version.checkout_tag:\n full_name = basename(getcwd())\n short_name = full_name.replace('puppet-', '')\n create_package(full_name, short_name, str(version), version.extra_args)",
"def install_packages(module, pkgs, dir, user, virtual):\n num_installed = 0\n\n if platform.machine().startswith('arm') or platform.machine().startswith('aarch64'):\n makepkg_args = '-Acsrf'\n else:\n makepkg_args = '-csrf'\n cmd = 'sudo -u %s PKGEXT=\".pkg.tar\" makepkg %s --noconfirm --needed --noprogressbar' % (user, makepkg_args)\n if module.params['skip_pgp']:\n cmd += ' --skippgpcheck'\n for pkg in pkgs:\n # If the package is already installed, skip the install.\n if package_installed(module, pkg):\n continue\n \n # Change into the package directory.\n # Check if the package is a virtual package\n if virtual:\n os.chdir(os.path.join(dir, virtual))\n else:\n os.chdir(os.path.join(dir, pkg))\n \n # Attempt to build the directory.\n rc, stdout, stderr = module.run_command(cmd, check_rc=False)\n if rc != 0:\n module.fail_json(msg='failed to build package %s, because: %s' % (pkg,stderr))\n\n # If the package was succesfully built, install it.\n rc, stdout, stderr = module.run_command('pacman -U --noconfirm *.pkg.tar*', check_rc=False, use_unsafe_shell=True)\n if rc != 0:\n module.fail_json(msg='failed to install package %s, because: %s' % (pkg,stderr))\n else:\n num_installed += 1\n\n # Exit with the number of packages succesfully installed.\n if num_installed > 0:\n module.exit_json(changed=True, msg='installed %s package(s)' % num_installed)\n else:\n module.exit_json(changed=False, msg='all packages were already installed')",
"def create_chroot_ubuntu(path: str, packages: str = \"systemd-container\"):\n print(\"Creating ubuntu at {0}\".format(path))\n os.makedirs(path, exist_ok=False)\n ctr_create(\"docker.io/library/ubuntu:rolling\", path, \"apt-get update && apt-get install -y debootstrap && debootstrap --include={1} --components=main,universe,restricted,multiverse --arch amd64 {0} /chrootfld\".format(ubuntu_version, packages))",
"def build(self, no_clean, verbose):\n\n os.chdir(self._root_dir)\n\n for spec in self._specifications:\n print(\"Building sysroot from {}\".format(spec))\n\n # The name of the sysroot image file to be built.\n test_name = os.path.basename(spec).split('.')[0]\n sysroot = os.path.join(self._root_dir, 'sysroot',\n '{}-{}'.format(self._target, test_name))\n\n # Build the command line.\n args = ['pyqtdeploy-sysroot']\n\n if no_clean:\n args.append('--no-clean')\n\n if verbose:\n args.append('--verbose')\n\n args.extend(['--source-dir', os.path.join(self._root_dir, 'src')])\n args.extend(['--target', self._target])\n args.extend(['--sysroot', sysroot])\n args.append(spec)\n\n if verbose:\n print(\"Running: '{}'\".format(' '.join(args)))\n\n try:\n subprocess.check_call(args)\n except subprocess.CalledProcessError:\n print(\"Build of sysroot from {} failed\".format(spec))\n break\n\n print(\"Build of sysroot from {} successful\".format(spec))",
"def build_python():\n\n run('python setup.py build')",
"def do_build():\n dochdir(ssdroot)\n if flag_snapshot:\n dochdir(flag_snapshot)\n else:\n dochdir(flag_subvol)\n if flag_binutils_build:\n dochdir(\"binutils-build\")\n nworkers = multiprocessing.cpu_count()\n doscmd(\"make -j%d\" % nworkers)\n doscmd(\"make -j%d all-gold\" % nworkers)\n dochdir(\"..\")\n else:\n u.verbose(0, \"... binutils build stubbed out\")\n if flag_run_ninja:\n dochdir(\"build.opt\")\n docmd(\"ninja\")\n dochdir(\"..\")\n else:\n u.verbose(0, \"... ninja build stubbed out\")",
"def git_build_package(gitLabel, pkgName, depot=\"dep\"):\n gitURL = r\"http://dpdk.org/git/dpdk\"\n gitPrefix = r\"dpdk/\"\n if os.path.exists(\"%s/%s\" % (depot, gitPrefix)) is True:\n ret = os.system(\"cd %s/%s && git pull --force\" % (depot, gitPrefix))\n else:\n print \"git clone %s %s/%s\" % (gitURL, depot, gitPrefix)\n ret = os.system(\"git clone %s %s/%s\" % (gitURL, depot, gitPrefix))\n if ret is not 0:\n raise EnvironmentError\n\n print \"git archive --format=tar.gz --prefix=%s %s -o %s\" % (gitPrefix, gitLabel, pkgName)\n ret = os.system(\"cd %s/%s && git archive --format=tar.gz --prefix=%s/ %s -o ../%s\"\n % (depot, gitPrefix, gitPrefix, gitLabel, pkgName))\n if ret is not 0:\n raise EnvironmentError",
"def _build_package(self, args, package_type):\n ctx = self.ctx\n dist = self._dist\n bs = Bootstrap.get_bootstrap(args.bootstrap, ctx)\n ctx.prepare_bootstrap(bs)\n self._fix_args(args)\n env = self._prepare_release_env(args)\n\n with current_directory(dist.dist_dir):\n self.hook(\"before_apk_build\")\n os.environ[\"ANDROID_API\"] = str(self.ctx.android_api)\n build = load_source('build', join(dist.dist_dir, 'build.py'))\n build_args = build.parse_args_and_make_package(\n args.unknown_args\n )\n\n self.hook(\"after_apk_build\")\n self.hook(\"before_apk_assemble\")\n build_tools_versions = os.listdir(join(ctx.sdk_dir,\n 'build-tools'))\n\n def sort_key(version_text):\n try:\n # Historically, Android build release candidates have had\n # spaces in the version number.\n return Version(version_text.replace(\" \", \"\"))\n except InvalidVersion:\n # Put badly named versions at worst position.\n return Version(\"0\")\n\n build_tools_versions.sort(key=sort_key)\n build_tools_version = build_tools_versions[-1]\n info(('Detected highest available build tools '\n 'version to be {}').format(build_tools_version))\n\n if Version(build_tools_version.replace(\" \", \"\")) < Version('25.0'):\n raise BuildInterruptingException(\n 'build_tools >= 25 is required, but %s is installed' % build_tools_version)\n if not exists(\"gradlew\"):\n raise BuildInterruptingException(\"gradlew file is missing\")\n\n env[\"ANDROID_NDK_HOME\"] = self.ctx.ndk_dir\n env[\"ANDROID_HOME\"] = self.ctx.sdk_dir\n\n gradlew = sh.Command('./gradlew')\n\n if exists('/usr/bin/dos2unix'):\n # .../dists/bdisttest_python3/gradlew\n # .../build/bootstrap_builds/sdl2-python3/gradlew\n # if docker on windows, gradle contains CRLF\n output = shprint(\n sh.Command('dos2unix'), gradlew._path.decode('utf8'),\n _tail=20, _critical=True, _env=env\n )\n if args.build_mode == \"debug\":\n if package_type == \"aab\":\n raise BuildInterruptingException(\n \"aab is meant only for distribution and is not available in debug mode. \"\n \"Instead, you can use apk while building for debugging purposes.\"\n )\n gradle_task = \"assembleDebug\"\n elif args.build_mode == \"release\":\n if package_type in [\"apk\", \"aar\"]:\n gradle_task = \"assembleRelease\"\n elif package_type == \"aab\":\n gradle_task = \"bundleRelease\"\n else:\n raise BuildInterruptingException(\n \"Unknown build mode {} for apk()\".format(args.build_mode))\n\n # WARNING: We should make sure to clean the build directory before building.\n # See PR: kivy/python-for-android#2705\n output = shprint(gradlew, \"clean\", gradle_task, _tail=20,\n _critical=True, _env=env)\n return output, build_args",
"def build(ctx):\n ctx.run(BUILD_COMMAND)",
"def compile_and_install_software():\n src_path = '.'\n\n # install the software (into the virtualenv bin dir if present)\n subprocess.check_call('./install-glpk.sh', cwd=src_path, shell=True)\n\n #src_path = './src/mippy/interface'\n\n # install the software (into the virtualenv bin dir if present)\n #subprocess.check_call('make', cwd=src_path, shell=True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
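Taken together, the chroot rows above wrap the standard Arch devtools flow (mkarchroot, then arch-nspawn pacman, then makechrootpkg). A minimal end-to-end sketch of that flow follows; the chroot and PKGBUILD paths are assumptions for illustration only.

import subprocess
from pathlib import Path

working_dir = Path('/var/lib/archbuild/custom-x86_64')  # assumed chroot location
root = working_dir / 'root'

if not root.exists():
    # First use: create the build chroot.
    working_dir.mkdir(parents=True, exist_ok=True)
    subprocess.run(['mkarchroot', str(root), 'base-devel', 'devtools'], check=True)
else:
    # Subsequent uses: bring the chroot up to date.
    subprocess.run(['arch-nspawn', str(root), 'pacman', '-Syu'], check=True)

# Build from a directory containing a PKGBUILD; '-cr' cleans the working copy and
# points at the chroot, and everything after '--' is handed to makepkg.
subprocess.run(['makechrootpkg', '-cr', str(working_dir), '--', '-s'],
               cwd='/home/builder/abs/mypkg', check=True)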
Extract news from user-input keywords.
|
def get_news_from_keywords(keywords):
keywords = rf.process_keywords(keywords)
news_data = get_all_news_entries()
news_entries = []
for title, description, link, date, named_entities, processed in news_data.values():
if all((k in named_entities) for k in keywords):
news_entries.append((title, description, link, date, named_entities, hash(processed)))
return news_entries
|
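A tiny self-contained illustration of the filtering rule in get_news_from_keywords above: an entry is kept only when every processed keyword appears among its named entities. The sample entries below are made up.

entries = {
    1: ('Mars lander touches down', 'desc', 'http://a.example', '2024-01-01', {'nasa', 'mars'}, 'text a'),
    2: ('Budget hearing recap', 'desc', 'http://b.example', '2024-01-02', {'congress'}, 'text b'),
}
keywords = {'nasa', 'mars'}
matches = [e for e in entries.values() if all(k in e[4] for k in keywords)]
# -> only the first entry survives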
[
"def keyword_articles():\r\n # To retrieve user's keyword, use \"keyword.get()\".\r\n # Retrieve the keyword news.\r\n keyword_news = api_call(\"http://newsapi.org/v2/top-headlines?language=en&q=\" + keyword.get() + \"&apiKey=c0cbc3a185e84d60bf612e355c9a2760\")\r\n\r\n # Retrieve the keyword news article titles.\r\n keyword_news_article_titles = retrieve_article_titles(keyword_news)\r\n\r\n # Clear the keyword_news_widget listbox from any previous uses.\r\n keyword_news_widget.delete(0, \"end\")\r\n\r\n keyword_news_widget.insert(1, \"Keyword is \" + str(keyword.get()) + \" and the number of relevant articles is \" + str(keyword_news[\"totalResults\"]) + \".\", \"\")\r\n\r\n # Populate the widget with keyword news article titles.\r\n insert_article_titles(keyword_news_article_titles, keyword_news_widget)\r\n\r\n # Clear the keyword entry field.\r\n keyword_entry_field.delete(0, \"end\")",
"def keywords_extraction(config):\n\n kws = {\n 'tfidf': kw_keyword_tfidf\n }\n\n # Prompts the user to select an action\n kw = interaction.ask_action(constants.msg_kw, set(kws.keys()))\n if kw == 's':\n return\n\n result_file = interaction.ask_file(constants.msg_results_cluster)\n kw.extract_keywords(config, result_file)",
"def test_extract_keywords():\n keywordsChief = KeywordsChief(\"test_data/keywords.yaml\")\n\n assert keywordsChief.extract_keywords([\"\"]) == {}\n assert keywordsChief.extract_keywords([\"unknown\"]) == {}\n assert keywordsChief.extract_keywords([\"python\"]) == {\"python\": 1}\n assert keywordsChief.extract_keywords([\"ml\"]) == {\"machine-learning\": 1}\n assert keywordsChief.extract_keywords([\"machine-learning\"]) == {\"machine-learning\": 1}\n assert keywordsChief.extract_keywords([\"python\", \"functional-programming\", \"unknown\"]) == \\\n {'python': 1, 'functional-programming': 1}\n assert keywordsChief.extract_keywords([\"python\", \"functional-programming\", \"ml\"]) == \\\n {'python': 1, 'functional-programming': 1, 'machine-learning': 1}",
"def extract_news(html):\n\n news_list = []\n titles = html.find_all(\"tr\", {\"id\": True, \"class\": \"athing\"})\n\n for title in titles:\n article_id = title.get(\"id\")\n article_url = title.find(\"a\", {\"class\": \"storylink\"})\n article_title = article_url.text\n\n if not \"https\" in article_url:\n article_url = \"https://news.ycombinator.com/\" + \\\n article_url.get(\"href\")\n\n score = html.find(\"span\", {\"id\": f\"score_{article_id}\"})\n score_parent_tag = score.parent\n\n score = score.text.split(\" \")[0]\n\n author = score_parent_tag.find(\"a\").text\n comments = score_parent_tag.find(\n \"a\", {\"href\": f\"item?id={article_id}\"}).text.split(\" \")[0]\n\n article = {\n\n 'author': author,\n 'comments': comments,\n 'points': score,\n 'title': article_title,\n 'url': article_url,\n 'article_id': article_id\n }\n news_list.append(article)\n\n return news_list",
"def keywords_annotation(cls):\n # get all news in annotated collection\n df = Database.get_all_non_keyword_annotated_news(Database.db_keywords_news)\n # for all news with keyword in title or text, mark as True\n for iter, row in df.iterrows():\n for category in Variables.keyword_categories:\n keywords = Variables.get_keywords_from_category(category)\n set_keywords = Utils.convert_text_to_set(' '.join(keywords))\n set_news_title = Utils.convert_text_to_set(row['news_site_title'])\n if len(set.intersection(set_keywords, set_news_title)) > 0:\n df.at[iter, category] = True\n continue\n set_news_text = Utils.convert_text_to_set(row['news_site_text'])\n if len(set.intersection(set_keywords, set_news_text)) > 0:\n df.at[iter, category] = True\n\n df.at[iter, 'search_keywords'] = True\n\n # save result back in database\n Database.update_keyword_annotated_news(Database.db_keywords_news, df)",
"def extract_stories(self, text):\n pass",
"def _parse_news(self, news_params):\n url = news_params[0]\n html = self._get_content(self.root_url + url)\n date = news_params[1]\n topic = news_params[2]\n title = news_params[3]\n paragraphs = html.find_all('p')\n text = '\\n'.join([p.get_text() for p in paragraphs])\n news_out = {'title': title, 'url': self.root_url + url, 'text': text,\n 'topic': topic, 'date': date, 'other': {}}\n return news_out",
"def apply_keywordProcessor(keywordProcessor, text, span_info=True):\r\n keywords_found = keywordProcessor.extract_keywords(text, span_info=span_info)\r\n return (keywords_found)",
"def search_items(keywords, meta_types=None):",
"def extract_news(self, news_data: dict) -> dict:\n pass",
"def ProcessPage(keyword, vBrowser, vNews_name, vNews_url, language):\n\n # output: pandas dataframe with title, publishing date, article text and url\n articles_page = pd.DataFrame(columns=['title', 'publish_date', 'text', 'url'])\n\n # 1) list results\n search_result_page_source = vBrowser.page_source\n\n # make url regex-usable\n url_any = vNews_url\n url_any = re.sub(re.escape('?s='+keyword), '', url_any)\n url_any = re.sub(re.escape('search?k='+keyword), '', url_any)\n url_any = re.sub(re.escape('search?q=' + keyword), '', url_any)\n url_any = re.sub('\\?m\\=[0-9]{6}', '', url_any)\n url_any = re.escape(url_any) + '(?=\\S*[-]*)([0-9a-zA-Z-\\/\\.\\-\\n]+)'\n regex = re.compile(url_any)\n # logger.info('searching for {}'.format(url_any))\n search_results = list(set([match[0] for match in\n regex.finditer(search_result_page_source)\n if keyword in match[0].lower()]))\n\n if vNews_name in ['NewVision']:\n regex = re.compile('\\/new\\_vision\\/news\\/(?=\\S*[-])([0-9a-zA-Z-\\/\\.\\-]+)')\n search_results = list(set([ match[0] for match in regex.finditer(search_result_page_source) if keyword in match[0].lower()]))\n search_results = ['https://www.newvision.co.ug' + search_result for search_result in search_results]\n\n if vNews_name == \"FloodList\":\n regex = re.compile('(http|ftp|https):\\/\\/([\\w_-]+(?:(?:\\.[\\w_-]+)+))([\\w.,@?^=%&:\\/~+#-]*[\\w@?^=%&\\/~+#-])')\n search_results = list(set([ match[0] for match in regex.finditer(search_result_page_source) if '.com/africa/' in match[0].lower()]))\n search_results = [url for url in search_results if \"/attachment/\" not in url]\n\n if len(search_results) > 0:\n logger.info(\"found {0} article(s):\".format(len(search_results)))\n for title in search_results:\n logger.info(\"url: {0}\".format(title))\n else:\n logger.info('no articles found')\n\n # 2) for each result, get article and save it\n for idx, search_result in enumerate(search_results):\n\n logger.info('processing {}'.format(search_result))\n # download article\n article = Article(search_result, keep_article_html=True)\n article.download()\n attempts, attempts_max = 0, 10\n while (article.download_state != 2) and (attempts < attempts_max):\n attempts += 1\n logger.warning(f\"download_state {article.download_state} \"\n f\", retrying {attempts}/{attempts_max}\")\n article = Article(search_result, keep_article_html=True)\n article.download()\n time.sleep(10)\n\n if article.download_state != 2:\n logger.warning('unable to download article: {}'.format(search_result))\n continue\n article.parse()\n\n article_html = str(article.html)\n\n # select articles with keyword\n regex = re.compile(keyword, re.IGNORECASE)\n\n if re.search(regex, article.html) is not None:\n\n logger.debug('{}'.format(article_html))\n\n # get date\n date = article.publish_date\n date_str = \"\"\n search_date = False\n\n if not pd.isnull(date):\n # keep date found only if older than today\n if pd.to_datetime(date).date() < pd.to_datetime(datetime.today()).date():\n date_str = date.strftime(DATE_FORMAT)\n else:\n search_date = True\n else:\n search_date = True\n\n if search_date:\n article_html = re.sub('\\s+', ' ', article_html)\n dates_found = []\n\n res_date = [re.compile('[a-zA-ZÀ-ÿ]\\w+\\s[0-9]+\\,\\s[0-9]{4}'),\n re.compile('[a-zA-ZÀ-ÿ]\\w+\\s[0-9]+\\s[0-9]{4}'),\n re.compile('[0-9]\\w+\\s[a-zA-ZÀ-ÿ]+\\,\\s[0-9]{4}'),\n re.compile('[0-9]\\w+\\s[a-zA-ZÀ-ÿ]+\\s[0-9]{4}'),\n re.compile('[0-9]+\\s[a-zA-ZÀ-ÿ]+\\,\\s[0-9]{4}'),\n re.compile('[0-9]+\\s[a-zA-ZÀ-ÿ]+\\s[0-9]{4}'),\n 
re.compile('[0-9]{2}\\/[0-9]{2}\\/[0-9]{4}'),\n re.compile('[0-9]{2}\\-[0-9]{2}\\-[0-9]{4}'),\n re.compile('[0-9]{2}\\.[0-9]{2}\\.[0-9]{4}')]\n for re_date in res_date:\n for match in re_date.finditer(article_html):\n if is_date(match.group(), language):\n dates_found.append((match.start(), match.group()))\n if len(dates_found) > 0:\n logger.info('{}'.format(dates_found))\n dates_found.sort(key=lambda tup: tup[0])\n for res in dates_found:\n try:\n res_date = dateparser.parse(res[1], languages=[language],\n settings={'DATE_ORDER': 'DMY'}).date()\n if (res_date < pd.to_datetime(datetime.today()).date()\n and res_date > pd.to_datetime('30/04/1993', format=\"%d/%m/%Y\").date()):\n date_str = res_date.strftime(DATE_FORMAT)\n break\n except:\n pass\n\n if date_str == \"\":\n logger.warning('Publication date not found or wrongly assigned, skipping article')\n continue\n else:\n logger.info('Publication date assigned: {}'.format(date_str))\n\n # Take newspaper name out of article title\n article.title = remove_newspaper_name_from_title(article.title, vNews_name)\n\n # if no text is present (e.g. only video), use title as text\n article_text = article.text\n if len(str(article.text)) == 0:\n article_text = article.title\n\n # add to dataframe\n logger.info('{0} : {1}'.format(article.title, date_str))\n articles_page.loc[idx] = [article.title, date_str, article_text, article.url]\n\n # 3) return dataframe\n if len(search_results) > 0:\n logger.info('{}'.format(articles_page.head()))\n return articles_page",
"def query(query):\n keywords = split_keywords(query)\n for keyword in keywords:\n title = Q(title__icontains=keyword)\n content = Q(content__icontains=keyword)\n articles = Article.objects.filter(title | content)\n\n return articles",
"def process_posts(title, text, num_k):\n res = \"\"\n User_text = []\n post_num = 0\n for i in range(len(title)):\n res = res + title[i].firstChild.data + text[i].firstChild.data+'\\n'\n tmp = tokenize_str(title[i].firstChild.data + text[i].firstChild.data)\n if len(tmp)>0:\n User_text.append(tmp)\n post_num += 1\n ''''''\n if post_num == num_k:\n break\n ''''''\n res.lower()\n res = re.sub('\\n', ' ', res)\n res = res.strip()\n res = res.split()\n\n # LIWC features\n feats = cal_LIWC_features(User_text,post_num)\n # emoji & antidepressants\n emoji_cnt = 0\n antidep_cnt = 0\n for word in res:\n if word==':)' or word==':(' or word=='):' or word=='(:':\n emoji_cnt += 1\n if word in antidepressants:\n antidep_cnt += 1\n feats[FEATURE_NUM-2] = emoji_cnt/post_num\n feats[FEATURE_NUM-1] = antidep_cnt\n\n res = ' '.join(res)\n return_str = \"\"\n words = nltk.word_tokenize(res)\n for word in words:\n if word.isalpha():\n return_str= return_str + word + ' '\n\n return return_str, post_num, feats",
"def extract_news(parser):\n news_list = []\n tbl_list = parser.table.findAll('table')\n posts = tbl_list[1].findAll(\"tr\", attrs={'class': 'athing'})\n postsinfo = tbl_list[1].findAll(\"tr\", attrs={'class': ''})\n for i in range(30):\n comment = postsinfo[i].findAll('a')[-1].text.split()\n url = posts[i].findAll('a', attrs={'class': 'storylink'})[0]['href']\n if url[:4] != \"http\":\n url = \"https://news.ycombinator.com/\" + url\n if len(comment) == 1:\n comment = 0\n else:\n comment = int(comment[0])\n post = {\n 'author': postsinfo[i].findAll('a', attrs={'class': 'hnuser'})[0].text,\n 'comments': comment,\n 'points': int(postsinfo[i].findAll('span', attrs={'class': 'score'})[0].text.split(' ')[0]),\n 'title': posts[i].findAll('a')[1].text,\n 'url': url\n }\n news_list.append(post)\n return news_list",
"def apply_keywordProcessor(keywordProcessor, text, span_info=True):\n keywords_found = keywordProcessor.extract_keywords(text, span_info=span_info)\n return(keywords_found)",
"def analyze_keyword_from_question(question):\n\n if len(question) > 38:\n question = pre_process_question(question)\n main_keywords = jieba.analyse.extract_tags(\n question,\n topK=20,\n withWeight=False,\n allowPOS=Noun_flags\n )\n # print (\"main_keywords\")\n # print (main_keywords)\n\n word = u'《' \n left_book_title = [m.start() for m in re.finditer(word, question)] \n word = u'》' \n right_book_title = [m.start() for m in re.finditer(word, question)] \n\n book_title = (min(len(left_book_title), len(right_book_title)))\n if book_title > 0:\n print (\"book_title\")\n print (left_book_title)\n print (right_book_title)\n main_keywords.insert(0, question[(left_book_title[0] + 1):right_book_title[0]])\n\n return \" \".join(main_keywords)\n else:\n return question",
"def get_summary(text):\n num_words = text.count(\" \")\n num_sentences = text.count(\".\")\n keywords = keyword_extraction(text, 5)\n# summary = summarize(text, max(1, num_sentences//10))[0]\n return keywords, None",
"def search_news(self, keyword, page_size=2, category=\"general\", sort_by=\"relevancy\"):\n\n news_data = self.news_api.get_everything(q=keyword, language='en', page_size=page_size, sort_by=sort_by)\n news_response = self.extract_news(news_data)\n\n return news_response",
"def extract_stories(self, text):\n parsed_stories = []\n\n soup = BeautifulSoup(text)\n stories = soup.find_all('div', {'class': 'news-body'})\n\n for story in stories:\n # build a dict with all the relevant attributes\n meneame_story = Story()\n\n # number of votes\n id_temp = story.find('div', {'class': 'votes'})\n if id_temp:\n meneame_story.votes = int(id_temp.a.string)\n else:\n meneame_story.votes = 0\n\n try:\n # extract the id\n id_regex = re.match(r'a-votes-(\\d*)', id_temp.a['id'])\n if id_regex:\n meneame_story.id = int(id_regex.group(1))\n except AttributeError:\n logging.error('Could not read id for new, skipping ...')\n continue\n\n if story.h2 is not None:\n meneame_story.title = story.h2.a.string\n meneame_story.url = story.h2.a['href']\n else:\n meneame_story.title = \"\"\n meneame_story.url = \"\"\n\n # number of clicks\n clicks = story.find('div', {'class': 'clics'})\n if clicks is not None:\n clicks_regex = re.match(r'\\s*(\\d+)\\s.*', clicks.string)\n if clicks_regex:\n meneame_story.clicks = int(clicks_regex.group(1))\n else:\n logging.error('Error reading clicks for story %s',\n meneame_story.id)\n meneame_story.clicks = 0\n else:\n meneame_story.clicks = 0\n\n # extract the user id\n user_a = story.find('a', {'class': 'tooltip'})\n try:\n user_regex = re.match(r'\\/user\\/(.*)', user_a['href'])\n if user_regex:\n meneame_story.author = user_regex.group(1)\n except (TypeError, ValueError):\n logging.error('Error reading user for story %s',\n meneame_story.id)\n meneame_story.user = \"\"\n\n # extract description\n try:\n meneame_story.description = story.contents[8]\n except IndexError:\n logging.error('Error reading description for story %s',\n meneame_story.id)\n meneame_story.description = \" \"\n\n parsed_stories.append(meneame_story)\n return parsed_stories"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a number, return true if it is prime
|
def es_primo(numero):
    # Numbers below 2 are not prime by definition.
    if numero < 2:
        return False
    # Check every candidate divisor other than 1 and the number itself.
    for i in range(2, numero - 1):
        # If the remainder is 0, the number is not prime.
        if numero % i == 0:
            return False
    return True
|
[
"def primo(numero):\n\tfor i in range(2, numero, 1):\n\t\tif numero%i==0:\n\t\t\treturn False\n\tif numero==1:\n\t\treturn False\n\treturn True",
"def is_natural(num):\n if(not (num % 3) or not (num % 5)):\n return num\n else:\n return 0",
"def est_premier(nombre):\n # si le nombre est inférieur à un, il ne peut pas être premier donc on retourne false\n if nombre <= 1:\n return False\n # si le nombre est 2 ou 3, on sait qu'il est premier donc on retourne true\n if nombre <= 3:\n return True\n # si le nombre est modulo 2 ou 3, on sait qu'il n'est pas premier puisqu'on a déjà exclu 2 et 3 précédement\n if nombre % 2 == 0 or nombre % 3 == 0:\n return False\n # on\n i = 5\n while i * i <= nombre:\n if nombre % i == 0 or nombre % (i + 2) == 0:\n return False\n i = i + 6\n return True",
"def isVampire(num: int) -> bool:\n if num > 0:\n num_str = str(num)\n if len(num_str) % 2 == 0:\n return False if not getFangs(num_str) else True\n return False",
"def isnumerique(self,chaine):\n i=0\n result = True\n while (i<len(chaine)):\n if chaine[i] not in \"0123456789\":\n result = False\n return result\n i= i+1\n return result",
"def is_natural(n):\r\n\treturn isinstance(n, int) and n > 0",
"def is_number(n):\n try:\n int(n)\n except ValueError:\n return False\n else:\n return True",
"def is_deficient_number(number: int) -> bool:\n return get_sum_of_divisors(number) < number",
"def is_prime(number):\n division = 2\n while number % division != 0:\n division += 1\n if division == number:\n return True\n return False",
"def is_prime_field(self):\n return False",
"def is_positive(number):\n if number > 0:\n return True\n return None",
"def checkNumber(self):\n\t\tnumberOfTeams = len(self.teams)\n\t\twhile numberOfTeams != 0 and (numberOfTeams % 2) == 0:\n\t\t\tnumberOfTeams /= 2\n\t\tif numberOfTeams != 1:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True",
"def isSuperprime(nr):\n while nr > 0:\n if not isPrime(nr):\n return False\n nr = nr // 10\n return True",
"def is_increasing_number(num):\n\tnum_list = [int(i) for i in str(num)]\n\tprev_num = num_list[0]\n\tfor n in num_list:\n\t\tif n < prev_num:\n\t\t\treturn False\n\t\tprev_num = n\n\treturn True",
"def is_narcissistic(num):\n c = digit_count(num)\n sum = 0\n save = num\n while num != 0:\n digit = num % 10\n num = num // 10\n sum += digit**c\n return sum == save",
"def isNumber(self) -> bool:\n if self.tokenLeft():\n return self.currentToken().type == \"num\"\n else:\n return False",
"def is_number(self):\n self.number = re.sub(r'[^\\d]', '', self.number)\n return self.number.isdigit()",
"def is_natural(self):\n if len(self._cards) == 2 and 8 <= self.value <= 9:\n return True\n return False",
"def is_decreasing_number(num):\n\tnum_list = [int(i) for i in str(num)]\n\tprev_num = num_list[0]\n\tfor n in num_list:\n\t\tif n > prev_num:\n\t\t\treturn False\n\t\tprev_num = n\n\treturn True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a list of the prime numbers and perfect numbers between 1 and the given number
|
def primos_y_perfectos(numero):
    # List of numbers between 1 and the given number:
    numeros = [i for i in range(1, numero)]
    # Lists where the found numbers are stored:
    primos = []
    perfectos = []
    for n in numeros:
        # Primes:
        if es_primo(n):
            primos.append(n)
        # Perfect numbers:
        if es_perfecto(n):
            perfectos.append(n)
    return primos, perfectos
|
[
"def ex7_PerfectNumber():\n N1 = int(input())\n N2 = int(input())\n\n def perfectNumber(N1, N2):\n result = []\n while N1 < N2:\n i = 1\n divisors = []\n while i < N1:\n if N1 % i == 0:\n divisors.append(i)\n i += 1\n if sum(divisors) == N1:\n result.append(N1)\n N1 += 1\n return result\n\n print(*perfectNumber(N1, N2))",
"def compute_primes(bound):\n \n answer = list(range(2, bound))\n\n for divisor in range(2, bound):\n # remove all multiple of divisor from answer\n for i in range(len(answer)):\n if answer[i] != 1:\n if answer[i] != divisor:\n if answer[i] % divisor == 0:\n answer[i] = 1\n \n return([num for num in answer if num != 1])",
"def prog4(a):\n n=[]\n for i in range(1,a+1):\n if a%i==0:\n n.append(i)\n print(n)",
"def factores_primos_de(n):\n\n\tfactor1 = pollard_rho_factor(n)\n\tfactor2 = n / factor1\n\tfactores = [factor1,factor2]\n\trespuesta = []\n\n\twhile factores != []:\n\t\tfactor = factores.pop()\n\t\tfactor_mas_chico = pollard_rho_factor(factor)\n\t\tif factor == factor_mas_chico:\n\t\t\trespuesta.append(factor)\n\t\telse:\n\t\t\tfactores.extend([factor_mas_chico, factor/factor_mas_chico])\n\n\treturn respuesta",
"def get_divisors(input_number: int) -> list:\n list_of_divisors = []\n x = range(1, int(input_number/2) + 1)\n for i in x:\n if input_number % i == 0:\n list_of_divisors.append(i)\n list_of_divisors.append(input_number)\n return list_of_divisors",
"def compute_primes(bound):\r\n \r\n answer = list(range(2, bound))\r\n for divisor in range(2, bound):\r\n for i in answer:\r\n if i % divisor == 0 and not i == divisor:\r\n answer.remove(i)\r\n\r\n return answer",
"def getPrimesLessThan(num):\n primes = [2]\n for i in range(2, num):\n for p in primes:\n if i % p == 0:\n break\n else:\n primes.append(i)\n return primes",
"def find_divisors(number) -> list:\n list_of_divisors = []\n for numb in range(1, number + 1):\n if number % numb == 0:\n list_of_divisors.append(numb)\n if len(list_of_divisors) != 2 and len(list_of_divisors) != 1:\n list_of_divisors.remove(1)\n list_of_divisors.remove(list_of_divisors[len(list_of_divisors) - 1])\n return list_of_divisors",
"def diviseurs(n):\n assert(type(n)==int and n>=0),\"un entier positif est attendu pour n\"\n \n div=[];\n i=1\n while(i<(n-1) and len(div)<10):\n i+=1\n if n % i == 0:\n div.append(i)\n\n return div",
"def factores_de(n):\n\tif es_primo(n):\n\t\treturn factores_primos_de(n)\n\n\tprimos = factores_primos_de(n)\n\t#http://stackoverflow.com/a/5898031/1603080 - Mirar los comentarios de esa respuesta\n\tgenerador = itertools.chain.from_iterable(itertools.combinations(primos, r) for r in range(len(primos) + 1))\n\tfactores_repetidos = [functools.reduce(operator.mul, item, 1) for item in generador]\n\t\n\treturn sorted(set(factores_repetidos))",
"def primes(number):\r\n \r\n # INITIALIZE\r\n primes = [2]\r\n \r\n # WORK THROUGH LIST\r\n for number in range(3, number):\r\n index = 0\r\n is_prime = True\r\n \r\n # CHECK DIVISIBILITY BY PRIME NUMBERS\r\n while index < len(primes) and primes[index] < sqrt(number) + 1:\r\n \r\n # DIVISIBLE BY OTHER PRIME -> NOT PRIME\r\n if number % primes[index] == 0:\r\n is_prime = False\r\n break\r\n \r\n index += 1\r\n\r\n # IF NOT DIVISIBLE BY OTHER PRIMES -> APPEND TO PRIMES \r\n if is_prime:\r\n primes.append(number)\r\n \r\n return primes",
"def divicion_complejos(numero1, numero2):\r\n\r\n a = ((numero1[0] * numero2[0]) - (numero1[1] * numero1[1]))/numero2[0]**2 - (numero2[1]**2*-1)\r\n b = ((numero1[0] * numero2[1]) + (numero1[1] * numero2[1]))/numero2[0]**2 - (numero2[1]**2*-1)\r\n\r\n c = [a, b]\r\n \r\n return c",
"def prime():\n array = []\n for i in range(2, 1000):\n if i % 2 != 0 and i % 3 != 0 and i % 5 != 0 and i % 7 != 0 and i % 11 != 0 or i == 2 or i == 3 or i == 5 or i == 7 or i == 11:\n array.append(i)\n return array",
"def comptertous(n: int) -> list:\n compteurs = [0] * 10\n while True:\n chiffre = n % 10\n compteurs[chiffre] += 1\n n //= 10\n if n == 0:\n break\n return compteurs",
"def proper_divisors(n):\n if n < 2:\n return []\n divisors = {1}\n for i in up_to_sqrt_of(n):\n if n % i == 0:\n divisors.add(i)\n divisors.add(n//i)\n return sorted(list(divisors))",
"def findDivisors(num1,num2):\r\n divisors=(1,)\r\n for i in range(2,((min(num1,num2))//2)+1):\r\n if num1%i==0 and num2%i==0:\r\n divisors+=(i,)\r\n if max(num1,num2)%min(num1,num2)==0:\r\n divisors+=(min(num1,num2),)\r\n return divisors",
"def _get_special_odd_composite_numbers() -> Iterable[int]:\n primes = {2}\n for odd_number in count(start=3, step=2):\n if is_prime(odd_number):\n primes.add(odd_number)\n else:\n can_be_written_as_sum = False\n for prime_number in primes:\n if math.sqrt((odd_number - prime_number) / 2.0).is_integer():\n can_be_written_as_sum = True\n break\n if not can_be_written_as_sum:\n yield odd_number",
"def primo(num):\r\n acumulador_suma = 0\r\n total_de_numeros_primos = 0\r\n while num > 1:\r\n count = 1\r\n count_nun_primos = 0\r\n\r\n while num >= count:\r\n if num % count == 0:\r\n count_nun_primos += 1\r\n count += 1\r\n else:\r\n count += 1\r\n if count_nun_primos > 2:\r\n break\r\n\r\n if count_nun_primos == 2:\r\n total_de_numeros_primos += 1\r\n acumulador_suma += num\r\n print (\"Num-Primo-{}: {}\".format(total_de_numeros_primos, num))\r\n\r\n num -= 1\r\n #print count\r\n #print count1\r\n\r\n print (\"Suma Total de numeros Primos: {}\".format(acumulador_suma))\r\n print (\"Numeros Primos Encomtrados {}.\".format(total_de_numeros_primos))",
"def get_divisors(num):\n yield 1\n\n for i in range(2, num / 2 + 1):\n if not num % i:\n yield i"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Recursively returns the summation from 1 to the given number
|
def sumatorio(numero):
    # Base case
    if numero == 1:
        return numero
    # Recursive case
    else:
        return sumatorio(numero - 1) + numero
|
[
"def firstnsum(n):\n\treturn sum(range(n+1))",
"def amicable(n):\r\n \"*** YOUR CODE HERE ***\"\r\n while True:\r\n n = n + 1\r\n m = sum_of_divisor(n) \r\n if m != n and sum_of_divisor(m) == n:\r\n break\r\n\r\n return n",
"def sum_to(n):\n running_sum = 0\n\n # needs to include n\n for i in range(n+1):\n running_sum += i\n\n return running_sum",
"def SimpleAdding(num):\n\n # code goes here\n return sum([x for x in range(num+1)])",
"def sum_range(num):\n if num == 0:\n return 0\n return num + sum_range(num - 1)",
"def sum(n):\n return summation_using_accumulate(n, lambda x: x)",
"def diagonal_sum(n):\r\n\tif n == 0:\r\n\t\treturn 1\r\n\telse:\r\n\t\treturn 4*((2*n+1)**2) - 12 * n + diagonal_sum(n-1)",
"def sum_series(n, o=0, p=1):\n if n <= 0:\n return o\n elif n < p + 1:\n return n\n else:\n return sum_series(n - 1, o, p) + sum_series(n - 2, o, p)",
"def circular_prime_answer(number, total):\n for i in range(1, number + 1):\n if circular_prime(i):\n total += i\n print(total)",
"def adding_one(num):\n return num + 1",
"def sum_list_recursively(num_list):\n\n if len(num_list) == 1:\n return num_list[0]\n else:\n return num_list[0] + sum_list_recursively(num_list[1:])",
"def ex4_SumOneToN():\n # print(str(sum([int(digit) for digit in str(input())])))\n print(str(sum([int(digit) for digit in range(0, int(input())+1)])))\n # N = int(input())\n # sumM = 0\n # for number in range(0, N+1):\n # sumM += number;\n # number += 1\n # print(sumM)",
"def seq_sum_recursive(seq,start,end):\r\n\tif start == end:\r\n\t\treturn seq[start]\r\n\telif (start+1) == end:\r\n\t\treturn seq[start]+seq[end]\r\n\telse:\r\n\t\treturn seq[start]+ seq[end] + seq_sum_recursive(seq,start+1,end-1)",
"def factorial(num):\n if num == 0:\n return 1\n return num * factorial(num - 1)",
"def spiral_sum(n):\n return spiral_sum(n-2) + 4 * ((n-2)**2) + 10 * (n - 1)",
"def circleOfNumbers(n, firstNumber):\n\n half = int(n/2)\n \n if firstNumber > half:\n return firstNumber - half # go backward \n \n elif firstNumber < half:\n return firstNumber + half # go forward\n \n else:\n return 0 # opposite of middle num is always 0",
"def series_sum():\n n = input(\"Please enter a non negative integer: \")\n\n if not n.isdigit():\n return None\n\n a = 1000\n for i in range(1, int(n)+1):\n a += 1/(int(i)**2)\n\n return a",
"def make_adder_inc(a):\n \"*** YOUR CODE HERE ***\"\n step = 0\n def adder(k):\n nonlocal step\n step += 1\n return a + k + step - 1\n return adder",
"def fabonacci(n):\r\n\tif n == 0:\r\n\t\treturn 0\r\n\telif n == 1:\r\n\t\treturn 1\r\n\telse:\r\n\t\treturn fabonacci(n-1) + fabonacci(n-2)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create the query for inserting values
|
def create_insert_query(self) -> None:
build = BuildInjectQuery()
self.query = build.build_query_insert(table=self.table, key_duplicate=True, list_values=self.list_values)
|
[
"def insert(self, sql):",
"def insert(self, table, fields): \n field_keys = ', '.join(fields.keys())\n _fields = '\\',\\''.join(fields.values())\n return self.query(\"INSERT INTO {} ({}) VALUES ({})\", (field_keys, _fields), table)",
"def _getInsertRowRequest(self, dictValues):\n request = 'INSERT INTO \"{}\" ('.format(self._tableId)\n\n for k in dictValues.iterkeys():\n request += ' \"{}\", '.format(k)\n request = request[:-2]\n\n request += \") VALUES (\"\n for k in dictValues.itervalues():\n request += ' \"{}\", '.format(k)\n request = request[:-2]\n request += \")\"\n\n return request",
"def generate_insert_query(table: str, columns: Tuple[str, ...]) -> str:\n query = \"INSERT INTO {table} ({columns}) VALUES ({placeholder})\".format(\n table=table,\n columns=\", \".join(columns),\n placeholder=\", \".join([\":\" + col for col in columns]),\n )\n return query",
"def insert(self,dict,name=None):\n if name is None:\n name = self.tableName\n\n columnNames = [sanitizeString(x) for x in dict.keys()]\n values = [sanitizeString(x) if isinstance(x,str) else x for x in dict.values()]\n\n columns = ', '.join(columnNames)\n placeholders = ', '.join(['?'] * len(dict))\n insertStr = 'INSERT INTO %s ' % name\n insertStr += '(%s) VALUES ( %s )' % (columns,placeholders) \n self.execute(insertStr,values)",
"def insert(self, table, **kw):\n return self.operation(table.insert().values(**kw))",
"def insert(self, tablename, seqname=None, _test=False, **values):\n\n def q(x):\n return \"(\" + x + \")\"\n\n if values:\n _keys = SQLQuery.join(values.keys(), ', ')\n _values = SQLQuery.join([sqlparam(v) for v in values.values()],\n ', ')\n sql_query = \"INSERT INTO %s \" % tablename + q(\n _keys) + ' VALUES ' + q(_values)\n else:\n sql_query = SQLQuery(self._get_insert_default_values_query(\n tablename))\n\n if _test: return sql_query\n\n db_cursor = self._db_cursor()\n if seqname is not False:\n sql_query = self._process_insert_query(sql_query, tablename,\n seqname)\n\n if isinstance(sql_query, tuple):\n # for some databases, a separate query has to be made to find \n # the id of the inserted row.\n q1, q2 = sql_query\n self._db_execute(db_cursor, q1)\n self._db_execute(db_cursor, q2)\n else:\n self._db_execute(db_cursor, sql_query)\n\n try:\n out = db_cursor.fetchone()[0]\n except Exception:\n out = None\n\n if not self.ctx.transactions:\n self.ctx.commit()\n return out",
"def multiple_insert(self, tablename, values, seqname=None, _test=False): \r\n if not values:\r\n return []\r\n \r\n if not self.supports_multiple_insert:\r\n out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]\r\n if seqname is False:\r\n return None\r\n else:\r\n return out\r\n \r\n keys = values[0].keys()\r\n #@@ make sure all keys are valid\r\n\r\n # make sure all rows have same keys.\r\n for v in values:\r\n if v.keys() != keys:\r\n raise ValueError, 'Bad data'\r\n\r\n sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))\r\n\r\n for i, row in enumerate(values):\r\n if i != 0:\r\n sql_query.append(\", \")\r\n SQLQuery.join([SQLParam(row[k]) for k in keys], sep=\", \", target=sql_query, prefix=\"(\", suffix=\")\")\r\n\r\n if _test: return sql_query\r\n\r\n db_cursor = self._db_cursor()\r\n if seqname is not False: \r\n sql_query = self._process_insert_query(sql_query, tablename, seqname)\r\n\r\n if isinstance(sql_query, tuple):\r\n # for some databases, a separate query has to be made to find \r\n # the id of the inserted row.\r\n q1, q2 = sql_query\r\n self._db_execute(db_cursor, q1)\r\n self._db_execute(db_cursor, q2)\r\n else:\r\n self._db_execute(db_cursor, sql_query)\r\n\r\n try: \r\n out = db_cursor.fetchone()[0]\r\n out = range(out-len(values)+1, out+1) \r\n except Exception: \r\n out = None\r\n\r\n if not self.ctx.transactions: \r\n self.ctx.commit()\r\n return out",
"def insert_many(self, table, **values):\n if len(values) < 1:\n # TODO: raise exception here instead of just returning\n return \n if len(values) == 1:\n self.insert(values[0])\n return\n placeholder = \",\".join([\"?\" for _ in values[0]])\n print(f\"INSERT INTO {table} VALUES {placeholder} {values}\")\n self.__cursor.executemany(f\"INSERT INTO {table} VALUES ({placeholder})\", values)\n self.__connection.commit()",
"def query(self, sql):",
"def add_values_from_dict(self, table, conv_dict):\n\n sql_insert = \"INSERT INTO {0} ({1}) VALUES ({2});\"\n\n fields_list = []\n values_list = []\n\n for tram in conv_dict:\n fields_list.append(tram)\n values_list.append(conv_dict[tram])\n\n fields = ', '.join(fields_list)\n values = ', '.join(values_list)\n\n executed_sql = sql_insert.format(table, fields, values)\n\n #print(executed_sql)\n self.cur.execute(executed_sql)\n\n self.conn.commit()",
"def get_add_sql(self, add_fields):\n # Insert into table.\n sql = \"INSERT INTO \" + str(self.test_table) + \" (\"\n # Get the keys set.\n key_list = list(add_fields.keys())\n # Variable for holding values.\n vals = \"\"\n # For each key, add the text 'key = value, ' so that each field is updated correctly.\n for i in range(len(key_list)):\n # Get the field being updated.\n field = key_list[i]\n val = add_fields[field]\n # Change how the value should look in the sql depending on it's type.\n if type(val) is str:\n value = self.connector.connect().escape(val)\n elif type(val) is int:\n value = str(val)\n elif type(val) is bool:\n value = str(val).upper()\n # Add the necessary text.\n sql += field\n vals += value\n # If this is not the last key, add a comma.\n if i < len(key_list) - 1:\n sql += \", \"\n vals += \", \"\n\n # Finish off the command.\n sql += \") VALUES (\" + vals + \")\"\n\n if self.verbose:\n print(\"Adding: \" + sql)\n\n return sql",
"def insert_statement(self, values):\n if not hasattr(self, 'auto_column_number'):\n self.auto_column_number = 1\n\n keys = self.table.get_insert_columns(join=False, create=True)\n if self.table.columns[0][1][0][3:] == 'auto':\n newrows = []\n for rows in values:\n insert_stmt = [self.auto_column_number] + rows\n newrows.append(insert_stmt)\n self.auto_column_number += 1\n else:\n newrows = values\n\n xml_lines = [\n '\\n<row>\\n{}</row>'.format(format_single_row(keys, line_data))\n for line_data in newrows\n ]\n return xml_lines",
"def _insert_rows_sql_insert(cls,\n table_context: \"TableContext\"\n ) -> None:\n if UploadType.SQL_INSERT not in cls.supported_uploads:\n raise Exception(f\"SQL INSERT not supported by `{cls.__name__}`\")\n with table_context.engine_context.engine.begin() as conn:\n conn.execute(table_context.table.insert(), *table_context.output_rows)",
"def insert_into(conn, table_name, columns, values):\n try:\n values_string = \"?\"\n if isinstance(columns, basestring):\n #sanitizing input to be an array if string\n columns = columns.split(',') \n for i in range(len(columns)-1):\n values_string = values_string + \",\" + values_string\n \n columns = ','.join(columns) #making sure columns is formatted correctly\n INSERT_STRING = (\n \"INSERT INTO {0} ( {1} ) \"\n \"VALUES({2})\"\n ).format(table_name, columns, values_string)\n c = conn.cursor()\n c.execute(INSERT_STRING, values)\n except Error as e:\n print e",
"def multiple_insert(self, tablename, values, seqname=None, _test=False):\n if not values:\n return []\n\n if not self.supports_multiple_insert:\n out = [self.insert(\n tablename, seqname=seqname,\n _test=_test, **v) for v in values]\n if seqname is False:\n return None\n else:\n return out\n\n keys = values[0].keys()\n #@@ make sure all keys are valid\n\n for v in values:\n if v.keys() != keys:\n raise ValueError, 'Not all rows have the same keys'\n\n sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' %\n (tablename, ', '.join(keys)))\n\n for i, row in enumerate(values):\n if i != 0:\n sql_query.append(\", \")\n SQLQuery.join([SQLParam(row[k]) for k in keys],\n sep=\", \",\n target=sql_query,\n prefix=\"(\",\n suffix=\")\")\n\n if _test: return sql_query\n\n db_cursor = self._db_cursor()\n if seqname is not False:\n sql_query = self._process_insert_query(sql_query, tablename,\n seqname)\n\n if isinstance(sql_query, tuple):\n # for some databases, a separate query has to be made to find \n # the id of the inserted row.\n q1, q2 = sql_query\n self._db_execute(db_cursor, q1)\n self._db_execute(db_cursor, q2)\n else:\n self._db_execute(db_cursor, sql_query)\n\n try:\n out = db_cursor.fetchone()[0]\n out = range(out - len(values) + 1, out + 1)\n except Exception:\n out = None\n\n if not self.ctx.transactions:\n self.ctx.commit()\n return out",
"def dbGenerateSaveQuery(self, env):",
"def __to_sql(self, types):\n\n if len(types) < 1:\n logging.error(\n \"Size of dictionary equals 0, \\\n no attributes to create SQL query\"\n )\n raise KeyError\n\n sql = f\"\"\"CREATE TABLE \\\"{self.table_name}\\\" (\"\"\"\n\n for col, t in types.items():\n sql += f\"\"\"\\n\\t\"{col}\" {t},\"\"\"\n\n sql = sql[:-1] # remove last comma\n sql += \"\\n);\"\n\n return sql",
"def __insert( self, criteria, con ):\n id = None\n table_name = ''\n\n keys = criteria.keys()\n if keys:\n table_name = criteria.getTableName(keys[0])\n else:\n raise ProofException.ProofImproperUseException( \\\n \"Database insert attempted without anything specified to insert.\" )\n\n db_map = self.__proof.getDatabaseMap(self.__db_name)\n table_map = db_map.getTable(table_name)\n column_maps = table_map.getColumns()\n key_info = table_map.getPrimaryKeyMethodInfo()\n key_gen = table_map.getIdGenerator()\n\n # create primary key\n pk = None\n for column_map in column_maps:\n if column_map.isPrimaryKey():\n pk = column_map\n break\n\n if pk and not criteria.has_key(pk.getFullyQualifiedName()):\n if not key_gen:\n raise ProofException.ProofNotFoundException( \\\n \"IDGenerator for table '%s' is None\" % (table_name) )\n\n if key_gen.isPriorToInsert():\n id = key_gen.getId(connection=con, key_info=key_info)\n criteria[pk.getFullyQualifiedName()] = id\n\n # perform the insert\n column_list = []\n value_list = []\n for column in column_maps:\n column_name = column.getFullyQualifiedName()\n if criteria.has_key(column_name):\n column_list.append(column_name)\n value_list.append(criteria[column_name])\n\n sql_expr = SQLExpression.SQLExpression()\n #self.log(\"doInsert: (%s) (%s)\"%(column_list, value_list), level=logging.INFO)\n (column_list, value_list) = sql_expr.buildInsertList(column_list, value_list)\n\n sql = \"INSERT INTO %s (%s) VALUES (%s)\" % ( table_name,\n string.join(column_list, \", \"),\n string.join(value_list, \", \") )\n\n self.log( \"%s.doInsert: %s\" % (self.__class__.__name__, sql) )\n\n self.__execute(sql, con)\n\n if pk and key_gen and key_gen.isPostInsert():\n id = key_gen.getId(connection=con, key_info=key_info)\n\n return id"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check the TS in terms of energy, normal mode displacement, and IRC. Populates the ``TS.ts_checks`` dictionary. Note that the 'freq' check is done in Scheduler.check_negative_freq() and not here.
|
def check_ts(reaction: 'ARCReaction',
verbose: bool = True,
job: Optional['JobAdapter'] = None,
checks: Optional[List[str]] = None,
rxn_zone_atom_indices: Optional[List[int]] = None,
):
checks = checks or list()
for entry in checks:
if entry not in ['energy', 'freq', 'IRC', 'rotors']:
raise ValueError(f"Requested checks could be 'energy', 'freq', 'IRC', or 'rotors', got:\n{checks}")
if 'energy' in checks or not reaction.ts_species.ts_checks['e_elect']:
check_ts_energy(reaction=reaction, verbose=verbose)
if 'freq' in checks or (not reaction.ts_species.ts_checks['normal_mode_displacement'] and job is not None):
check_normal_mode_displacement(reaction, job=job)
if 'rotors' in checks or (ts_passed_all_checks(species=reaction.ts_species, exemptions=['E0', 'warnings', 'IRC'])
and job is not None):
invalidate_rotors_with_both_pivots_in_a_reactive_zone(reaction, job,
rxn_zone_atom_indices=rxn_zone_atom_indices)
|
[
"def check_ts_energy(reaction: 'ARCReaction',\n verbose: bool = True,\n ) -> None:\n # Check whether E0 values are already known, e.g. from Arkane species YAML files\n check_rxn_e0(reaction=reaction)\n if reaction.ts_species.ts_checks['E0']:\n return\n\n r_e_elect = None if any([spc.e_elect is None for spc in reaction.r_species]) \\\n else sum(spc.e_elect * reaction.get_species_count(species=spc, well=0) for spc in reaction.r_species)\n p_e_elect = None if any([spc.e_elect is None for spc in reaction.p_species]) \\\n else sum(spc.e_elect * reaction.get_species_count(species=spc, well=1) for spc in reaction.p_species)\n ts_e_elect = reaction.ts_species.e_elect\n\n if verbose and all([val is not None for val in [r_e_elect, p_e_elect, ts_e_elect]]):\n min_e = extremum_list([r_e_elect, p_e_elect, ts_e_elect], return_min=True)\n r_text = f'{r_e_elect - min_e:.2f} kJ/mol' if r_e_elect is not None else 'None'\n ts_text = f'{ts_e_elect - min_e:.2f} kJ/mol' if ts_e_elect is not None else 'None'\n p_text = f'{p_e_elect - min_e:.2f} kJ/mol' if p_e_elect is not None else 'None'\n logger.info(\n f'\\nReaction {reaction.label} (TS {reaction.ts_label}, TSG {reaction.ts_species.chosen_ts}) '\n f'has the following path electronic energy:\\n'\n f'Reactants: {r_text}\\n'\n f'TS: {ts_text}\\n'\n f'Products: {p_text}')\n\n if all([val is not None for val in [r_e_elect, p_e_elect, ts_e_elect]]):\n # We have all params, we can make a quantitative decision.\n if ts_e_elect > r_e_elect + 1.0 and ts_e_elect > p_e_elect + 1.0:\n # TS is above both wells.\n reaction.ts_species.ts_checks['e_elect'] = True\n return\n # TS is not above both wells.\n if verbose:\n logger.error(f'TS of reaction {reaction.label} has a lower electronic energy value than expected.')\n reaction.ts_species.ts_checks['e_elect'] = False\n return\n # We don't have any params (some are ``None``)\n if verbose:\n logger.info('\\n')\n logger.warning(f\"Could not get electronic energy for all species in reaction {reaction.label}.\\n\")\n # We don't really know.\n reaction.ts_species.ts_checks['e_elect'] = None\n if 'Could not determine TS e_elect relative to the wells; ' not in reaction.ts_species.ts_checks['warnings']:\n reaction.ts_species.ts_checks['warnings'] += 'Could not determine TS e_elect relative to the wells; '",
"def detect_freqs(self):\r\n channel_avgs = []\r\n differences = []\r\n for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]):\r\n channel_avgs.append(sum(self.freq_channels[i])/len(self.freq_channels[i]))\r\n differences.append(((self.freq_channels[i][0]-channel_avgs[i])*100)//channel_avgs[i])\r\n for i in [\"beat\", \"low\", \"mid\", \"high\"]:\r\n if any(differences[j] >= self.min_percent_diff[i]\\\r\n and self.freq_channels[j][0] >= self.min_detect_amplitude[i]\\\r\n for j in range(*self.detection_ranges[i]))\\\r\n and (time.time() - self.prev_freq_detects[i] > 0.1)\\\r\n and len(self.freq_channels[0]) == self.freq_channel_history:\r\n self.prev_freq_detects[i] = time.time()\r\n self.current_freq_detects[i] = True\r\n #print(i)\r\n else:\r\n self.current_freq_detects[i] = False",
"def ts_passed_all_checks(species: 'ARCSpecies',\n exemptions: Optional[List[str]] = None,\n verbose: bool = False,\n ) -> bool:\n exemptions = exemptions or list()\n for check, value in species.ts_checks.items():\n if check not in exemptions and not value and not (check == 'e_elect' and species.ts_checks['E0']):\n if verbose:\n logger.warning(f'TS {species.label} did not pass the all checks, status is:\\n{species.ts_checks}')\n return False\n return True",
"def EventAnalysis(self):\n\n # Require a good data quality flag\n if self.DAQ.ND280OffFlag > 0:\n return\n\n if self.trigger and self.trigger == \"FGD\":\n if (not self.BasicHeader.FGDCosmicEvent) or self.BasicHeader.TripTCosmicEvent:\n return\n\n if self.ReconPerfEval.NGlobalReconObject > 25:\n return\n\n self.numEvents[\"All\"] += 1\n\n for obj in self.ReconPerfEval.GlobalReconObject:\n if obj.SetOK and obj.StatusString.find(\"success\") != -1:\n # FV and timing cuts, if requested.\n if self.cuttype:\n isMC = (self.BasicHeader.RunID > 100000)\n if not timing_cuts.PassesCut(isMC, self.BasicHeader.RunID, obj.Position.T()):\n continue\n if not volume_cuts.IsInVolume(obj.Position.Vect(), self.cuttype):\n continue\n\n path = obj.SubdetectorString\n new = False\n prev = False\n preprev = False\n summary = grtf_tools.ConvertPathToSummary(path)\n\n if path not in self.percentages[\"ByPos\"]:\n self.percentages[\"ByPos\"][path] = {\"Total\": 0, \"Fail\": 0}\n\n self.percentages[\"ByPos\"][path][\"Total\"] += 1\n failedByPos = False\n\n # Loop over the nodes and check for any that show bad kinks.\n for node in obj.GlobalNodes:\n new = node.NodeState\n\n if not new.SetOK:\n break\n\n if preprev:\n ok = self.FillByPosPlot(summary, preprev, prev, new, obj)\n if not ok:\n failedByPos = True\n\n preprev = prev\n prev = new\n\n # Print the details of this track if it contains a bad track.\n if failedByPos:\n self.percentages[\"ByPos\"][path][\"Fail\"] += 1\n\n if grtf_tools.ContainsTracker(obj.SubdetectorString):\n grtf_tools.PrintEvent(self, \"TRACKER-Failures.txt\", path)\n\n return",
"def checkGPS(self,gps_nsats,gps_hdop,err_ecode,err_subsys):\r\n\r\n if gps_nsats is not None:\r\n if sum(gps_nsats < 10 ) > 0:\r\n self.failures['GPS Failure'] = True\r\n return True\r\n else:\r\n self.failures['GPS Failure'] = False\r\n\r\n if gps_hdop is not None:\r\n if sum(gps_hdop>2) > 0:\r\n self.failures['GPS Failure'] = True\r\n return True\r\n else:\r\n self.failures['GPS Failure'] = False\r\n\r\n self._checkerrcode('GPS Failure',err_ecode,err_subsys,2,11)",
"def on_schedule(self):\n conditions = self.condition_list.get(\"conditions\")\n if all([parse_expr(condition).subs(self.condition_data)\\\n for condition in conditions]):\n self.device_true_time += self.interval\n self.device_status = True\n _log.debug('All condition true time {}'.format(self.device_true_time))\n else:\n self.device_status = False\n _log.debug(\"one of the condition is false\")\n\n rthr = self.device_true_time/ 3600\n if rthr > self.mht:\n self.excess_operation = True\n\n if self.is_midnight(self.input_datetime):\n self.device_true_time = 0\n for device_topic in self.device_topic_list:\n print(device_topic)\n self.publish(device_topic)",
"def check_tsp_feasibility(rt):\n for stop_key, order in rt.tsp_route_dict.items():\n # print(stop_key, order)\n # print(rt.stp_dict[stop_key])\n index = rt.stop_key_index_dict[stop_key]\n if order == 0: # origin depot\n rt.stop[index].actual_tsp_start_time = 0.0\n rt.stop[index].actual_tsp_end_time = 0.0\n prev_stop_key = stop_key\n prev_index = rt.stop_key_index_dict[prev_stop_key]\n else:\n rt.stop[index].actual_tsp_start_time = rt.stop[prev_index].actual_tsp_end_time + \\\n rt.travel_time_dict[prev_stop_key][stop_key]\n rt.stop[index].actual_tsp_end_time = rt.stop[index].actual_tsp_start_time + rt.stop[\n index].planned_service_time\n prev_stop_key = stop_key\n prev_index = rt.stop_key_index_dict[prev_stop_key]\n if rt.stop[index].actual_tsp_end_time > rt.stop[index].end_time_window:\n rt.stop[index].is_tsp_feasible = False\n rt.is_tsp_feasible = False",
"def EGTS_only_perf(GR):\n #Power available\n P_APU = 62 # [kW] Available apu power\n P_sen = 0 # [kW]\n P_comp = 0 # [kW]\n P_av_e = (P_APU-P_sen-P_comp)*1000/2 # [W] APU power available per engine\n\n # Efficiencies powertrain\n n_circuit = 0.97\n n_gear = 0.9875 # Gear efficiency (torque loss -> power loss)\n amount_gears = 2\n n_emotor = 0.95 # Electricmotor efficiency (electrical loss - power loss)\n\n # Airplane characteristics\n w_rad_air = 1.27/2 # [m] wheel radius aircraft MLG wheels\n m_plane = 97400 # [kg] MRW\n weight_ratio = 0.952 # [-] Landing gear weight distribution ratio\n Roll_fric = 0.02 # [-] Rolling friction coefficient of airplane wheels\n\n # Engine output torque for available power at different RPM calculation\n P_av_e_out = n_circuit*n_emotor*P_av_e # [W] engine output power\n T_egts_w_em = np.array([500]) # [Nm] engine output torque\n\n v_slow = np.arange(0, 8.1, 0.1) # [kts] Velocity range\n v_slow = v_slow*0.514444 # to m/s\n w_slow = v_slow/w_rad_air # [rad/s] corresponding rotational speed wheels\n w_slow_eng = w_slow*GR # [rad/s] corresponding rotational speed engine\n for i in range(1, len(w_slow_eng)):\n # Enough power hence full torque\n if P_av_e_out/w_slow_eng[i] > 500:\n T_egts_w_em = np.append(T_egts_w_em, [500])\n # in sufficient power hence less torque\n elif P_av_e_out/w_slow_eng[i] < 500 and P_av_e_out/w_slow_eng[i] > 0:\n T_egts_w_em = np.append(T_egts_w_em, [P_av_e_out/w_slow_eng[i]])\n # not enough power\n else:\n T_egts_w_em = np.add(T_egts_w_em, [0])\n\n # Torque en power @ wheels = engine * gear efficiency\n T_egts_w_r = n_gear**amount_gears*GR*T_egts_w_em # [W] wheel power\n F_egts_w = T_egts_w_r/w_rad_air # [Nm] engine output torque\n\n # Resultant acceleration calculation\n # Determining friction for resultant acceleration calculation\n N_mlg = m_plane*weight_ratio*9.81 # [N] Total normal force on the MLG\n N_mlg_w = N_mlg/4 # [N] Normal force per MLG wheel\n N_nlg = m_plane*(1-weight_ratio)*9.81 # [N] Total normal force of car\n F_fric = Roll_fric*N_mlg + Roll_fric*N_nlg # [N] Total force req to move plane at acceleration\n\n # Resultant force\n F_acc = 2*F_egts_w-F_fric # [N]\n\n # Resultant acceleration\n a_acc_slow = F_acc/m_plane # [m/s2]\n # Cut-off insignificant accelerations\n v_slow = v_slow[np.where(a_acc_slow >= 0.005)]\n a_acc_slow = a_acc_slow[np.where(a_acc_slow >= 0.005)]\n\n # Determine time intervals for velocity intervals w corresponding acceleration profile\n time = np.array([0])\n for i in range(1, len(v_slow)):\n time = np.append(time, [v_slow[i]/a_acc_slow[i]])\n\n # Plot\n# gs = gridspec.GridSpec(2, 2) # Define figure layout\n# fig = plt.figure(\"EGTS Only Performance\")\n# fig.suptitle(\" EGTS Only Performance \\n Pushback\")\n#\n# # Pushback velocity\n# ax1 = fig.add_subplot(gs[0, 0])\n# ax1.set_title(\"Velocity\")\n# ax1.set_xlabel(\"Time [s]\")\n# ax1.set_ylabel(\"Velocity [m/s]\")\n# ax1.plot(time[0:31], v_slow[0:31], color='g')\n# ax1.set_yticks([0, 0.5, 1, 1.5])\n# ax = ax1.twinx()\n# ax.plot(time[0:31], v_slow[0:31], color='g')\n# ax.set_ylabel(\"Velocity [kts]\")\n# ax.set_yticks(np.array([0, 0.5144, 2*0.5144, 3*0.5144]))\n# ax.set_yticklabels(['0', '1', '2', '3'])\n# # Pushback Acceleration graphs\n# ax2 = fig.add_subplot(gs[0, 1])\n# ax2.set_title(\"Acceleration\")\n# ax2.set_xlabel(\"Time [s]\")\n# ax2.set_ylabel(\"Acceleration [$m/s^2$]\")\n# ax2.set_ylim(0, max(a_acc_slow)+0.2)\n# ax2.plot(time[0:31], a_acc_slow[0:31], color='r')\n#\n# # Slow taxi title\n# ax0 = fig.add_subplot(gs[1, :])\n# 
ax0.axis('off')\n# ax0.set_title(\"Slow Taxi\", pad=20)\n# # Slow taxi\n# ax3 = fig.add_subplot(gs[1, 0])\n# ax3.set_title(\"Velocity\")\n# ax3.set_xlabel(\"Time [s]\")\n# ax3.set_ylabel(\"Velocity [m/s]\")\n# ax3.plot(time, v_slow, color='g')\n# ax3.plot(time, [2.88 for i in time], color='gray', linestyle='--')\n# ax3.set_yticks([0, 0.5, 1, 1.5, 2, 2.5, 3])\n# ax = ax3.twinx()\n# ax.set_ylabel(\"Velocity [kts]\")\n# ax.set_yticks(np.array([0, 0.5144, 2*0.5144, 3*0.5144, 4*0.5144, 5*0.5144, 6*0.5144]))\n# ax.set_yticklabels(['0', '1', '2', '3', '4', '5', '6'])\n# # Pushback Acceleration graphs\n# ax4 = fig.add_subplot(gs[1, 1])\n# ax4.set_title(\"Acceleration\")\n# ax4.set_xlabel(\"Time [s]\")\n# ax4.set_ylabel(\"Acceleration [$m/s^2$]\")\n# ax4.set_ylim(0, max(a_acc_slow)+0.2)\n# ax4.plot(time, a_acc_slow, color='r')\n\n # Plot & Save\n# fig.tight_layout()\n# fig.subplots_adjust(top=0.88)\n# fig.savefig('EGTS_Only_Perf', bbox_inches='tight')\n #plt.show()\n return a_acc_slow, F_acc, v_slow, time",
"def _valid_frequency(self) -> None:\n\n lower_granularity = [\"T\", \"S\", \"L\", \"U\", \"N\"]\n if self.freq is None:\n self.freq = self.data.infer_freq_robust()\n if self.freq == \"H\" or (\n isinstance(self.freq, pd.Timedelta) and self.freq.value == 3600000000000\n ):\n msg = \"Input data is hourly data.\"\n logging.info(msg)\n return\n if isinstance(self.freq, str):\n for level in lower_granularity:\n if isinstance(self.freq, str) and level in self.freq:\n msg = \"Input data granularity is {} and we can continue processing using aggregation function.\".format(\n self.freq\n )\n logging.info(msg)\n elif isinstance(self.freq, pd.Timedelta) and self.freq.value < 3600000000000:\n pass\n else:\n msg = \"Time series should be of hourly or finer granularity.\"\n logging.error(msg)\n raise ValueError(msg)\n\n if self.aggregate is None:\n msg = \"Aggregation method is missing.\"\n logging.error(msg)\n raise ValueError(msg)\n elif self.aggregate in [\"min\", \"max\", \"sum\", \"mean\"]:\n msg = \"Aggregation method is {}.\".format(self.aggregate)\n logging.info(msg)\n return\n else:\n msg = \"Aggregation methd {} is not implemented.\".format(self.aggregate)\n logging.error(msg)\n raise ValueError(msg)",
"def verify_thruster_flags(self):\n THRUSTER_FIRING_FLAG = 1048576\n if 'CAMPAIGN' in self.tpf[0].header and self.tpf[0].header['CAMPAIGN'] > 2:\n thruster_firings = ((self.tpf[1].data['QUALITY'] & THRUSTER_FIRING_FLAG) > 0).sum()\n campaign_length = self.tpf[1].header['TELAPSE'] # days\n # Expect at least one thruster firing per day\n assert thruster_firings > campaign_length",
"def _verifyDataMakeSense(self):\n if (self.fermenting_from_timestamp > self.fermenting_to_timestamp) | (self.conditioning_from_timestamp > self.conditioning_to_timestamp) | (self.dryhopping_from_timestamp > self.dryhopping_to_timestamp): \n raise BeerException('A \"start\" date is after its matching \"end\" date')\n if (self.fermenting_to_timestamp > self.conditioning_from_timestamp) & (self.conditioning_from_timestamp > SECONDS_IN_ONE_YEAR):\n raise BeerException('Fermentation date is after the conditioning date') \n if (self.fermenting_to_timestamp > self.dryhopping_from_timestamp) & (self.dryhopping_from_timestamp > SECONDS_IN_ONE_YEAR):\n raise BeerException('Fermentation date is after the dry-hopping date') \n if (self.dryhopping_to_timestamp > self.conditioning_from_timestamp) & (self.conditioning_from_timestamp > SECONDS_IN_ONE_YEAR):\n raise BeerException('Dry-hopping date is after the conditioning date')\n if hasattr(self, 'rating'):\n if (self.rating < 0) | (self.rating > 10):\n raise BeerException('Rating must be between 0 and 10')",
"def checkConditions(self, condFuncs):\n self.u.updateSensorVals()\n \n \n cnd = {\"df\": self.u.lastDistF, \"db\": self.u.lastDistB}\n \n if len(condFuncs) == 0:\n return False\n \n for c in condFuncs:\n if c(cnd):\n return True\n return False",
"def data_checks():\n for func in [read_adult, read_bank, read_compas, read_german, read_sqf,\n read_synthetic]:\n xtr, xte, ytr, yte, ztr, zte = func()\n\n if np.any(xtr[:, 0] != 1.) or np.any(xte[:, 0] != 1.):\n print(\"WARNING: intercept issue in {}\".format(func.__name__))\n if np.any((ytr != 1) & (ytr != 0)) or np.any((yte != 1) & (yte != 0)):\n print(\"WARNING: label issue in {}\".format(func.__name__))\n if np.any(np.std(xtr[:, 1:], 0) == 0) or np.any(np.std(xte[:, 1:], 0) == 0):\n print(\"WARNING: constant column in X {}\".format(func.__name__))\n if np.any(np.std(ztr, 0) == 0) or np.any(np.std(zte, 0) == 0):\n print(\"WARNING: constant column in Z {}\".format(func.__name__))\n if np.std(ytr) == 0 or np.std(yte) == 0:\n print(\"WARNING: constant column in y {}\".format(func.__name__))\n\n print(\"Done running checks.\")",
"def check_rules(trace_events, event_definitions):\n print(\"\\n- Checking that rules are applied...\")\n report = []\n for event_type, rule in event_definitions.items():\n print(\"\\t- Checking rule: %d %s\" % (event_type, str(rule)))\n is_undefined = False\n undefined_appearances = None\n filtered_trace_events = __filter_event_type__(trace_events, event_type)\n if EVENT_LABEL in rule:\n event_ok = True\n accumulated_events = __accumulate_events__(\n filtered_trace_events, rule[EVENT_LABEL]\n )\n for event, appearances in accumulated_events.items():\n if rule[EVENT_LABEL][event] == -1:\n is_undefined = True\n undefined_appearances = appearances\n elif isinstance(rule[EVENT_LABEL][event], list):\n if appearances not in rule[EVENT_LABEL][event]:\n report.append(\n \"ERROR: Unexpected type %d event %d appearances found: %d (Expected %d)\"\n % (\n event_type,\n event,\n appearances,\n str(rule[EVENT_LABEL][event]),\n )\n )\n event_ok = False\n elif appearances != rule[EVENT_LABEL][event]:\n report.append(\n \"ERROR: Unexpected type %d event %d appearances found: %d (Expected %d)\"\n % (event_type, event, appearances, rule[EVENT_LABEL][event])\n )\n event_ok = False\n else:\n pass # ok\n if is_undefined:\n print(\n \"\\t\\t- UNDEFINED appearances (%s) %d\"\n % (EVENT_LABEL, undefined_appearances)\n )\n elif event_ok:\n print(\"\\t\\t- OK appearances (%s)\" % EVENT_LABEL)\n else:\n print(\"\\t\\t- ERROR appearances (%s)\" % (EVENT_LABEL))\n # TODO: Check amount of zeros - some of the runtime 0 events are emitted as 0 (constraints)\n # if RANGE_LABEL not in rule:\n # expected_zeros = accumulated_events[0]\n # amount_zeros = 0\n # for event, appearances in accumulated_events.items():\n # if event != 0:\n # amount_zeros += appearances\n # if expected_zeros != amount_zeros:\n # report.append(\"ERROR: Unexpected amount of zeros in type %d found %d (Expected %d)\" % (event_type, amount_zeros, expected_zeros))\n\n is_undefined_range = False\n undefined_appearances_range = None\n if RANGE_LABEL in rule:\n range_ok = True\n accumulated_range = __accumulate_range__(filtered_trace_events)\n expected_amount = rule[RANGE_LABEL][2]\n found_appearances = len(accumulated_range)\n if expected_amount == -1:\n is_undefined_range = True\n undefined_appearances_range = found_appearances\n elif isinstance(expected_amount, list):\n if found_appearances not in expected_amount:\n report.append(\n \"ERROR: Unexpected event range of type %s found: %s (Expected %s)\"\n % (str(event_type), str(found_appearances), str(expected_amount))\n )\n range_ok = False\n elif found_appearances != expected_amount:\n report.append(\n \"ERROR: Unexpected event range of type %s found: %s (Expected %s)\"\n % (str(event_type), str(found_appearances), str(expected_amount))\n )\n range_ok = False\n if is_undefined_range:\n print(\n \"\\t\\t- UNDEFINED appearances (%s) %s\"\n % (RANGE_LABEL, str(undefined_appearances_range))\n )\n elif range_ok:\n print(\"\\t\\t- OK appearances (%s)\" % RANGE_LABEL)\n else:\n print(\"\\t\\t- ERROR appearances (%s)\" % (RANGE_LABEL))\n\n # if event 0 is undefined and range is undefined, check that they match\n if is_undefined and is_undefined_range:\n if undefined_appearances == undefined_appearances_range:\n print(\"\\t\\t- OK UNDEFINED appearances match\")\n else:\n print(\"\\t\\t- ERROR UNDEFINED appearances do not match\")\n elif is_undefined and not is_undefined_range:\n print(\"ERROR: undefined event appearances and not associated range\")\n elif not is_undefined and is_undefined_range:\n print(\"ERROR: 
undefined event range appearances and not associated event 0\")\n else:\n pass\n return report",
"def get_ts_energy(common_relax_workchain):\n if not isinstance(common_relax_workchain, WorkChainNode):\n return ValueError('The input is not a workchain (instance of `WorkChainNode`)')\n if common_relax_workchain.process_class != CastepCommonRelaxWorkChain:\n return ValueError('The input workchain is not a `CastepCommonRelaxWorkChain`')\n\n castep_base_wc = common_relax_workchain.get_outgoing(link_type=LinkType.CALL_WORK).one().node\n e_ks = castep_base_wc.outputs.output_parameters['total energy']\n free_e = castep_base_wc.outputs.output_parameters['free energy']\n\n ts = e_ks - free_e #pylint: disable=invalid-name\n\n return ts",
"def tick_timers(self):\r\n\r\n if self.cooldown_timer < self.cooldown_ticks:\r\n self.cooldown_timer += 1\r\n return\r\n\r\n if self.cast_timer < self.cast_time_ticks:\r\n self.cast_timer += 1\r\n return",
"def check(self, Fp_Ed, report):\n a = Q(D(1000), 'mm') # 1 meter as standard (distance btw stiffeners)\n alpha = a / self.pro_hw\n if alpha < Q(D(np.sqrt(2))):\n I_st_min = (D(1.5) * self.pro_hw ** 3 * self.pro_tw ** 3) / a ** 2\n else:\n I_st_min = D(0.75) * self.pro_hw ** 3 * self.pro_tw ** 3 / a ** 2\n epsilon = np.sqrt(Q(D(275), 'MPa') / self.fy)\n I_st = D(1 / 12) * self.pro_tw ** 3 * 2 * 15 * epsilon * self.pro_tw +\\\n D(1 / 12) * self.thickness * (2 * self.width + self.pro_tw) ** 3\n\n desc_string = \"CHECKING STIFFENER\"\n report.addLine(101, 30, desc_string, \"\")\n\n if I_st > I_st_min:\n desc_string = \"Ultrastiff | OK\"\n calc_string = \"I_st = {} > I_st_min = {}\".format(I_st, I_st_min)\n report.addLine(200, 30, desc_string, calc_string)\n return True\n else:\n desc_string = \"Ultrastiff | FAIL\"\n calc_string = \"I_st = {} < I_st_min = {}\".format(I_st, I_st_min)\n report.addLine(500, 30, desc_string, calc_string)\n return False",
"def _check_label_times(self):\n self._check_target_columns()\n self._check_target_types()",
"def TT_proc(self):\n self.TT_wait_for_rdy()#Wait until the time tagger has finished tagging or has timed out\n ftt0 = self.TT_DELAY_DATA.read(ch2_data)\n stimet0 = (ftt0) * FTIME #Calculate the fine time offset of the t0 signal\n plog.debug(\"T0FT: \"+bin(ftt0))\n dels = self.TT_DELAY_DATA.read(ch1_data)#Fine times for channels 0-3 (Each is concatenated in binary)\n #Calculating the fine time offsets for each channel\n stimet1 = ((dels&0xFF))*FTIME\n stimet2 = ((dels&0xFF00)>>8)*FTIME\n stimet3 = ((dels&0xFF0000) >> 16)*FTIME\n stimet4 = ((dels&0xFF000000)>>24)*FTIME\n plog.debug(\"T1FT: \"+bin((dels&0xFF)))\n #Include fine time offsets with the coarse times\n cttime1 = self.TT_DATA0.read(ch1_data)\n ctime1 = cttime1/DET_REF_CLK - stimet1 + stimet0\n ctime2 = self.TT_DATA0.read(ch2_data)/DET_REF_CLK - stimet2 + stimet0\n ctime3 = self.TT_DATA1.read(ch1_data)/DET_REF_CLK - stimet3 + stimet0\n ctime4 = self.TT_DATA1.read(ch2_data)/DET_REF_CLK - stimet4 + stimet0\n plog.debug(\"T1CT: \"+str(cttime1))\n timeouts = (self.TT_UTIL.read(ch1_data))#Read time outs\n #Store all information in dictionary and return it\n outdict = {\"T1\": ctime1, \"T2\": ctime2, \"T3\": ctime3, \"T4\": ctime4, \"T1s\": timeouts&0b1, \"T2s\": (timeouts&0b10)>>1,\"T3s\": (timeouts&0b100)>>2,\"T4s\": (timeouts&0b1000)>>3}\n return outdict"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check whether the TS species passes all checks other than ones specified in ``exemptions``.
|
def ts_passed_all_checks(species: 'ARCSpecies',
exemptions: Optional[List[str]] = None,
verbose: bool = False,
) -> bool:
exemptions = exemptions or list()
for check, value in species.ts_checks.items():
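        # A non-exempt check that failed fails the species overall, except 'e_elect', which is waived when the E0 check passed.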
if check not in exemptions and not value and not (check == 'e_elect' and species.ts_checks['E0']):
if verbose:
logger.warning(f'TS {species.label} did not pass the all checks, status is:\n{species.ts_checks}')
return False
return True
|
[
"def test_chk_species_fail(self):\n pass",
"def check_exon(self):\n global check, error_details\n for row_index, row in self.primer_df.iterrows():\n for char in str(row['Exon']):\n if char in specials:\n check += 1\n error = \"Special character found in column 'Exon', see row %s in file\" % (row_index + 4)\n error_details.append(error)",
"def is_opt_extant_species(value: Any) -> bool:\n return (value == NoSpecies) or Species.is_extant_species(value)",
"def test_check_ess_settings(self):\n server_names = servers.keys()\n ess_settings1 = {'gaussian': [server_names[0]], 'molpro': [server_names[1], server_names[0]],\n 'qchem': [server_names[0]]}\n ess_settings2 = {'gaussian': server_names[0], 'molpro': server_names[1], 'qchem': server_names[0]}\n ess_settings3 = {'gaussian': server_names[0], 'molpro': [server_names[1], server_names[0]],\n 'qchem': server_names[0]}\n ess_settings4 = {'gaussian': server_names[0], 'molpro': server_names[1], 'qchem': server_names[0]}\n ess_settings5 = {'gaussian': 'local', 'molpro': server_names[1], 'qchem': server_names[0]}\n\n ess_settings1 = common.check_ess_settings(ess_settings1)\n ess_settings2 = common.check_ess_settings(ess_settings2)\n ess_settings3 = common.check_ess_settings(ess_settings3)\n ess_settings4 = common.check_ess_settings(ess_settings4)\n ess_settings5 = common.check_ess_settings(ess_settings5)\n\n ess_list = [ess_settings1, ess_settings2, ess_settings3, ess_settings4, ess_settings5]\n\n for ess in ess_list:\n for soft, server_list in ess.items():\n self.assertTrue(soft in ['gaussian', 'molpro', 'qchem'])\n self.assertIsInstance(server_list, list)\n\n with self.assertRaises(SettingsError):\n ess_settings6 = {'nosoft': ['server1']}\n common.check_ess_settings(ess_settings6)\n with self.assertRaises(SettingsError):\n ess_settings7 = {'gaussian': ['noserver']}\n common.check_ess_settings(ess_settings7)",
"def is_extant_species(value: Any) -> bool:\n return isinstance(value, Species) and cast(Species, value).is_extant",
"def test_has_ec(self):\n test1 = \"hypothetical protein\"\n test2 = \"DNA polymerase IV (EC 2.7.7.7)\"\n test3 = \"Sigma-X negative effector (EC 3 and more)\"\n test4 = \"Hypothetical protein TEPIRE1_21570 (predicted: PeP phosphonomutase (predicted EC 2.7.8.23) (predicted EC 4.1.3.30))\"\n test5 = \"Glutaminase (EC 3.5.-.-)\"\n test6 = \"Histidinol-phosphatase (EC 3.1.3.-)\"\n test7 = \" (predicted EC 2.3.1.1)\"\n test8 = \"hypothetical protein (predicted: MULTISPECIES: GNAT family N-acetyltransferase [Geobacillus] (predicted EC 2.3.1.1))\"\n test9 = \"Aminodeoxychorismate lyase, EC 4.1.3.38\"\n test10 = \"Aminodeoxychorismate lyase, EC: 4.1.3.38\"\n test11 = \"Aminodeoxychorismate lyase, EC: -.-.-.-\"\n test12 = \"Histidinol-phosphatase (EC -.1.3.1)\"\n test13 = \"DNA polymerase IV (Web Scaped EC 2.7.7.7)\"\n \n self.assertFalse(Annot_Reader.has_ec(test1))\n self.assertTrue(Annot_Reader.has_ec(test2))\n self.assertFalse(Annot_Reader.has_ec(test3))\n self.assertTrue(Annot_Reader.has_ec(test4))\n self.assertTrue(Annot_Reader.has_ec(test5))\n self.assertTrue(Annot_Reader.has_ec(test6))\n self.assertTrue(Annot_Reader.has_ec(test7))\n self.assertTrue(Annot_Reader.has_ec(test8))\n self.assertTrue(Annot_Reader.has_ec(test9))\n self.assertTrue(Annot_Reader.has_ec(test10))\n self.assertFalse(Annot_Reader.has_ec(test11))\n self.assertFalse(Annot_Reader.has_ec(test12))\n self.assertTrue(Annot_Reader.has_ec(test13))\n # Run on test genome annotation\n orig = currentdir + '\\\\test_files\\\\' \"test_genome_annotation.xlsx\"\n cpy = currentdir + '\\\\test_files\\\\' \"test_genome_annotation_cpy.xlsx\"\n self.assertTrue(os.path.isfile(orig))\n if os.path.isfile(cpy):\n os.remove(cpy)\n self.assertFalse(os.path.isfile(cpy))\n # Now copy the file\n shutil.copyfile(orig, cpy)\n self.assertTrue(os.path.isfile(cpy))\n email = None\n min_pct_idnt = 97.0\n min_qry_cvr = 95.0\n max_blast_hits = 10\n max_uniprot_hits = 50\n args = {\n '--src' : orig,\n '--dest' : cpy,\n '--sheet': 0,\n '--visible' : False,\n '--keywords' : None,\n '--load_job' : None,\n '--email' : email, \n '--min_pct_idnt' : min_pct_idnt,\n '--min_qry_cvr' : min_qry_cvr,\n '--max_blast_hits' : max_blast_hits,\n '--max_uniprot_hits' : max_uniprot_hits,\n }\n reader = Annot_Reader(args)\n reader.autosave_filename = 'test_autosave.txt'\n self.assertTrue(reader.has_ec(reader.read(0, 'function')))\n self.assertTrue(reader.has_ec(reader.read(1, 'function')))\n self.assertFalse(reader.has_ec(reader.read(2, 'function')))\n self.assertTrue(reader.has_ec(reader.read(3, 'function')))\n self.assertTrue(reader.has_ec(reader.read(4, 'function')))\n self.assertTrue(reader.has_ec(reader.read(5, 'function')))\n self.assertTrue(reader.has_ec(reader.read(6, 'function')))\n self.assertTrue(reader.has_ec(reader.read(7, 'function')))\n self.assertTrue(reader.has_ec(reader.read(8, 'function')))\n self.assertTrue(reader.has_ec(reader.read(9, 'function')))\n self.assertFalse(reader.has_ec(reader.read(10, 'function')))\n self.assertFalse(reader.has_ec(reader.read(11, 'function')))\n self.assertFalse(reader.has_ec(reader.read(12, 'function')))\n self.assertFalse(reader.has_ec(reader.read(13, 'function')))\n self.assertFalse(reader.has_ec(reader.read(14, 'function')))\n self.assertTrue(reader.has_ec(reader.read(15, 'function')))\n self.assertTrue(reader.has_ec(reader.read(16, 'function')))\n self.assertTrue(reader.has_ec(reader.read(17, 'function')))\n self.assertTrue(reader.has_ec(reader.read(18, 'function')))",
"def check_gene(self):\n global check, error_details\n for row_index, row in self.primer_df.iterrows():\n for char in row['Gene']:\n if char in specials:\n check += 1\n error = \"Special character found in column 'Gene', see row %s in file\" % (row_index + 4)\n error_details.append(error)",
"def check(self, partnames):\n\n for i in partnames:\n if i not in shader_part:\n return False\n\n return True",
"def test_cems_selection():\n cems_selection = pudl.etl.create_non_cems_selection(pudl.etl.default_assets)\n assert AssetKey(\"hourly_emissions_epacems\") not in cems_selection.resolve(\n pudl.etl.default_assets\n ), \"hourly_emissions_epacems or downstream asset present in selection.\"",
"def data_checks():\n for func in [read_adult, read_bank, read_compas, read_german, read_sqf,\n read_synthetic]:\n xtr, xte, ytr, yte, ztr, zte = func()\n\n if np.any(xtr[:, 0] != 1.) or np.any(xte[:, 0] != 1.):\n print(\"WARNING: intercept issue in {}\".format(func.__name__))\n if np.any((ytr != 1) & (ytr != 0)) or np.any((yte != 1) & (yte != 0)):\n print(\"WARNING: label issue in {}\".format(func.__name__))\n if np.any(np.std(xtr[:, 1:], 0) == 0) or np.any(np.std(xte[:, 1:], 0) == 0):\n print(\"WARNING: constant column in X {}\".format(func.__name__))\n if np.any(np.std(ztr, 0) == 0) or np.any(np.std(zte, 0) == 0):\n print(\"WARNING: constant column in Z {}\".format(func.__name__))\n if np.std(ytr) == 0 or np.std(yte) == 0:\n print(\"WARNING: constant column in y {}\".format(func.__name__))\n\n print(\"Done running checks.\")",
"def check_inputs(args):\n check_fail = False\n check_fail = check_sample(args.base, args.bSample)\n check_fail = check_sample(args.comp, args.cSample)\n return check_fail",
"def test_multiple(self):\n for cationstring in [\"MgSiO2\",\"MgSO4\", \"CaCO3\",\n \"Na2Mg3Al2Si8O22(OH)2\",]:\n with self.subTest(cationstring=cationstring):\n self.assertTrue(len(get_cations(cationstring))>1)",
"def has_hungry_veg(self):\n return all(self.verify_species(i, Species.is_hungry) for i in range(len(self.species_list)))",
"def check(self):\n inrange = easydev.check_range\n inlist = easydev.check_param_in_list\n # check validity of the settings\n inlist(self.include_MSI_factor, [False, True], \"MSI\")\n inrange(self.feature_factor_threshold, 0, np.inf)\n inrange(self.MSI_factor_threshold, 0, np.inf)\n\n # all those methods are from statsmodels.stats.multitest.multipletests\n inlist(\n self.pvalue_correction_method,\n [\n \"bonferroni\",\n \"sidak\",\n \"holm-sidak\",\n \"simes-hochberg\",\n \"hommel\",\n \"fdr_bh\",\n \"fdr_tsbj\",\n \"fdr_tskby\",\n \"fdr\",\n ],\n \"pvalue correction method\",\n )\n inlist(self.equal_var_ttest, [True, False], \"equal_var_ttest\")\n inrange(self.minimum_nonna_ic50, 0, np.inf)\n inrange(self.FDR_threshold, 0, 100)\n inrange(self.pvalue_threshold, 0, np.inf)\n inrange(self.effect_threshold, 0, np.inf)\n\n # for now, if MSI is False, this cannot be a PANCAN analysis\n # but a cancer specific analysis\n if self.include_MSI_factor is False:\n assert self.analysis_type != \"PANCAN\", (\n \"If MSI factor is not included, the analysis must be cancer\"\n + \" specific (i.e., a tissue must be set.\"\n )\n\n valid_reg_meth = [\"OLS\", \"ElasticNet\", \"Lasso\", \"Ridge\"]\n inlist(self.regression_method, valid_reg_meth)\n\n inlist(self.pvalue_correction_level, [True, False])",
"def cfcheck(**das):\n return True",
"def check_consistent(filtered_mutants):\r\n # if any mutant test gets filtered to 0 elements,\r\n # all of these test samples were identified in normal tests\r\n # this can only happen with an erroneous NORMAL or MUTANT genotype call.\r\n for test_set in filtered_mutants:\r\n if len(test_set) == 0:\r\n # return False flag for failure\r\n return False\r\n\r\n # return true if check passes\r\n return True",
"def is_exceptional(args):\n # Exception paradigms cannot be encoded using this function\n if \"template_name\" not in args:\n print(\"is_exceptional:\", args)\n assert \"template_name\" in args\n name = args[\"template_name\"]\n return name in (\"fi-decl\", \"fi-decl-pron\", \"fi-conj\", \"fi-conj-table\")",
"def test_exergy_analysis(self):\n # carry out exergy analysis\n ean = ExergyAnalysis(\n self.nw, E_P=[self.nw.busses['total output power']],\n E_F=[self.nw.busses['heat input']],\n E_L=[self.nw.busses['exergy loss']])\n ean.analyse(pamb=self.pamb, Tamb=self.Tamb)\n\n # generate Grassmann diagram\n links, nodes = ean.generate_plotly_sankey_input(disaggregate_flows=True)\n\n # check if exergy product value in links is equal to total power\n # output\n position = links['target'].index(nodes.index('E_P'))\n power_links = round(links['value'][position], 0)\n power_bus = round(-self.nw.busses['total output power'].P.val, 0)\n msg = (\n 'The exergy product value in the links (' + str(power_links) +\n ') must be equal to the power on the respective bus (' +\n str(power_bus) + ').')\n assert power_links == power_bus, msg",
"def check_sex(records):\n return [check_sex_record(record) for record in records]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check that the TS electronic energy is above both reactant and product wells in a ``reaction``. Sets the respective energy parameter 'e_elect' in the ``TS.ts_checks`` dictionary.
|
def check_ts_energy(reaction: 'ARCReaction',
verbose: bool = True,
) -> None:
# Check whether E0 values are already known, e.g. from Arkane species YAML files
check_rxn_e0(reaction=reaction)
if reaction.ts_species.ts_checks['E0']:
return
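    # Well energies are stoichiometry-weighted sums of the species e_elect values; None if any species value is missing.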
r_e_elect = None if any([spc.e_elect is None for spc in reaction.r_species]) \
else sum(spc.e_elect * reaction.get_species_count(species=spc, well=0) for spc in reaction.r_species)
p_e_elect = None if any([spc.e_elect is None for spc in reaction.p_species]) \
else sum(spc.e_elect * reaction.get_species_count(species=spc, well=1) for spc in reaction.p_species)
ts_e_elect = reaction.ts_species.e_elect
if verbose and all([val is not None for val in [r_e_elect, p_e_elect, ts_e_elect]]):
min_e = extremum_list([r_e_elect, p_e_elect, ts_e_elect], return_min=True)
r_text = f'{r_e_elect - min_e:.2f} kJ/mol' if r_e_elect is not None else 'None'
ts_text = f'{ts_e_elect - min_e:.2f} kJ/mol' if ts_e_elect is not None else 'None'
p_text = f'{p_e_elect - min_e:.2f} kJ/mol' if p_e_elect is not None else 'None'
logger.info(
f'\nReaction {reaction.label} (TS {reaction.ts_label}, TSG {reaction.ts_species.chosen_ts}) '
f'has the following path electronic energy:\n'
f'Reactants: {r_text}\n'
f'TS: {ts_text}\n'
f'Products: {p_text}')
if all([val is not None for val in [r_e_elect, p_e_elect, ts_e_elect]]):
# We have all params, we can make a quantitative decision.
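        # Require the TS to lie at least 1 kJ/mol above each well (the same kJ/mol units used in the logging above).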
if ts_e_elect > r_e_elect + 1.0 and ts_e_elect > p_e_elect + 1.0:
# TS is above both wells.
reaction.ts_species.ts_checks['e_elect'] = True
return
# TS is not above both wells.
if verbose:
logger.error(f'TS of reaction {reaction.label} has a lower electronic energy value than expected.')
reaction.ts_species.ts_checks['e_elect'] = False
return
# We don't have any params (some are ``None``)
if verbose:
logger.info('\n')
logger.warning(f"Could not get electronic energy for all species in reaction {reaction.label}.\n")
# We don't really know.
reaction.ts_species.ts_checks['e_elect'] = None
if 'Could not determine TS e_elect relative to the wells; ' not in reaction.ts_species.ts_checks['warnings']:
reaction.ts_species.ts_checks['warnings'] += 'Could not determine TS e_elect relative to the wells; '
|
[
"def check_rxn_e0(reaction: 'ARCReaction',\n verbose: bool = True,\n ) -> Optional[bool]:\n if reaction.ts_species.ts_checks['E0']:\n return True\n r_e0 = sum_list_entries([r.e0 for r in reaction.r_species],\n multipliers=[reaction.get_species_count(species=r, well=0) for r in reaction.r_species])\n p_e0 = sum_list_entries([p.e0 for p in reaction.p_species],\n multipliers=[reaction.get_species_count(species=p, well=1) for p in reaction.p_species])\n ts_e0 = reaction.ts_species.e0\n\n if verbose and all([val is not None for val in [r_e0, p_e0, ts_e0]]):\n min_e0 = extremum_list([r_e0, p_e0, ts_e0], return_min=True)\n r_text = f'{r_e0 - min_e0:.2f} kJ/mol' if r_e0 is not None else 'None'\n ts_text = f'{ts_e0 - min_e0:.2f} kJ/mol' if ts_e0 is not None else 'None'\n p_text = f'{p_e0 - min_e0:.2f} kJ/mol' if p_e0 is not None else 'None'\n logger.info(\n f'\\nReaction {reaction.label} (TS {reaction.ts_label}, TSG {reaction.ts_species.chosen_ts}) '\n f'has the following path E0 values:\\n'\n f'Reactants: {r_text}\\n'\n f'TS: {ts_text}\\n'\n f'Products: {p_text}')\n if any(e0 is None for e0 in [r_e0, p_e0, ts_e0]):\n return None\n if r_e0 >= ts_e0 or p_e0 >= ts_e0:\n reaction.ts_species.ts_checks['E0'] = False\n return False\n reaction.ts_species.ts_checks['E0'] = True\n return True",
"def check_ts(reaction: 'ARCReaction',\n verbose: bool = True,\n job: Optional['JobAdapter'] = None,\n checks: Optional[List[str]] = None,\n rxn_zone_atom_indices: Optional[List[int]] = None,\n ):\n checks = checks or list()\n for entry in checks:\n if entry not in ['energy', 'freq', 'IRC', 'rotors']:\n raise ValueError(f\"Requested checks could be 'energy', 'freq', 'IRC', or 'rotors', got:\\n{checks}\")\n\n if 'energy' in checks or not reaction.ts_species.ts_checks['e_elect']:\n check_ts_energy(reaction=reaction, verbose=verbose)\n\n if 'freq' in checks or (not reaction.ts_species.ts_checks['normal_mode_displacement'] and job is not None):\n check_normal_mode_displacement(reaction, job=job)\n\n if 'rotors' in checks or (ts_passed_all_checks(species=reaction.ts_species, exemptions=['E0', 'warnings', 'IRC'])\n and job is not None):\n invalidate_rotors_with_both_pivots_in_a_reactive_zone(reaction, job,\n rxn_zone_atom_indices=rxn_zone_atom_indices)",
"def test_compare_energies(self):\n fluxes = np.array(self.spectrum[1])\n energies = events.assign_energies(1000, self.spectrum)\n energies = [int(energy) for energy in energies]\n\n # Histogram energies to get shape approximation\n gen_energies = ((np.array(energies) - 1) / 1).astype(int)\n\n lc = np.bincount(energies)\n\n # Remove first entry as it contains occurences of '0' element\n lc = lc[1:7]\n\n # Calculate probabilities and compare\n lc_prob = (lc/float(sum(lc)))\n fluxes_prob = fluxes/float(sum(fluxes))\n assert np.all(np.abs(lc_prob - fluxes_prob) < 3 * np.sqrt(fluxes_prob))",
"def ts_passed_all_checks(species: 'ARCSpecies',\n exemptions: Optional[List[str]] = None,\n verbose: bool = False,\n ) -> bool:\n exemptions = exemptions or list()\n for check, value in species.ts_checks.items():\n if check not in exemptions and not value and not (check == 'e_elect' and species.ts_checks['E0']):\n if verbose:\n logger.warning(f'TS {species.label} did not pass the all checks, status is:\\n{species.ts_checks}')\n return False\n return True",
"def test_exergy_analysis(self):\n # carry out exergy analysis\n ean = ExergyAnalysis(\n self.nw, E_P=[self.nw.busses['total output power']],\n E_F=[self.nw.busses['heat input']],\n E_L=[self.nw.busses['exergy loss']])\n ean.analyse(pamb=self.pamb, Tamb=self.Tamb)\n\n # generate Grassmann diagram\n links, nodes = ean.generate_plotly_sankey_input(disaggregate_flows=True)\n\n # check if exergy product value in links is equal to total power\n # output\n position = links['target'].index(nodes.index('E_P'))\n power_links = round(links['value'][position], 0)\n power_bus = round(-self.nw.busses['total output power'].P.val, 0)\n msg = (\n 'The exergy product value in the links (' + str(power_links) +\n ') must be equal to the power on the respective bus (' +\n str(power_bus) + ').')\n assert power_links == power_bus, msg",
"async def test_temp_change_heater_on_outside_tolerance(\n hass: HomeAssistant, setup_comp_2\n) -> None:\n calls = _setup_switch(hass, False)\n await common.async_set_temperature(hass, 30)\n _setup_sensor(hass, 27)\n await hass.async_block_till_done()\n assert len(calls) == 1\n call = calls[0]\n assert call.domain == HASS_DOMAIN\n assert call.service == SERVICE_TURN_ON\n assert call.data[\"entity_id\"] == ENT_SWITCH",
"def validate_heater_shaker_temperature(celsius: float) -> float:\n if HEATER_SHAKER_TEMPERATURE_MIN <= celsius <= HEATER_SHAKER_TEMPERATURE_MAX:\n return celsius\n else:\n raise InvalidTargetTemperatureError(\n f\"Cannot set Heater-Shaker to {celsius} °C.\"\n f\" Valid range is {HEATER_SHAKER_TEMPERATURE_MIN}-\"\n f\"{HEATER_SHAKER_TEMPERATURE_MAX} °C.\"\n )",
"def energyRequirement(world, action):",
"def poe_power_test(self, **kwargs):\n # If product definition does not specify PoE (irrespective of any PoE equipment connections),\n # then do not run this step.\n if 'poe' not in self._ud.uut_config:\n log.warning(\"The 'poe' data dict is not defined per the product_definition.\")\n log.warning(\"This test will be disabled.\")\n return aplib.DISABLED\n\n poe = self._ud.uut_config.get('poe', {})\n log.debug(\"PoE UUT Config: {0}\".format(poe))\n if not poe:\n log.error(\"The 'poe' product definition entry is empty!\")\n return aplib.FAIL\n poe_ports = poe.get('uut_ports', None)\n\n # Now check PoE Loadbox driver\n if not self._equip.poe_loadbox:\n return handle_no_poe_equip()\n\n # Inputs\n poe_type = kwargs.get('poe_type', poe.get('type', None))\n poe_volt_range = kwargs.get('poe_volt_range', poe.get('volt_range', (47.0, 57.0)))\n poe_current_range = kwargs.get('poe_current_range', poe.get('current_range', (200, 1200)))\n powerholdtime = kwargs.get('powerholdtime', poe.get('powerholdtime', 60000))\n disconnecttimeout = kwargs.get('disconnecttimeout', poe.get('disconnecttimeout', 60000))\n poweronsetuptimeout = kwargs.get('poweronsetuptimeout', poe.get('poweronsetuptimeout', 30000))\n disconnecttype = kwargs.get('disconnecttype', poe.get('disconnecttype', 1))\n icutcode = kwargs.get('icutcode', poe.get('icutcode', None))\n\n if poe_type not in self._equip.poe_loadbox.LOAD_CLASSES.keys():\n log.error(\"PoE Type for testing is not recognized.\")\n return aplib.FAIL\n\n if not self._mode_mgr.is_mode('DIAG'):\n log.warning(\"Wrong mode ({0}) for this operation. Mode 'DIAG' is required.\".format(self._mode_mgr.current_mode))\n return aplib.FAIL\n uut_prompt = self._mode_mgr.uut_prompt_map['DIAG']\n\n # Phase 1: Cfg UUT and Get Power Budget\n # -------------------------------------\n aplib.set_container_text('PoE Power Test: UUT Cfg')\n log.info('STEP: PoE Power Test -- Phase1 UUT Cfg.')\n self._operate_poe_uut(action='CFG', poe_type=poe_type, poe_params=poe)\n if self._ud.uut_status.get('poe_pwr_budget_groups', 0) == 0:\n # Set power budget if not previously set.\n self._ud.uut_status['poe_pwr_budget_groups'] = self._operate_poe_uut(action='BUDGET', poe_type=poe_type, poe_ports=poe_ports)\n poe_pwr_budget_groups = self._ud.uut_status['poe_pwr_budget_groups']\n\n # Phase 2: Set PoE Equipment Cfg\n # ------------------------------\n aplib.set_container_text('PoE Power Test: Equip Cfg')\n log.info('STEP: PoE Power Test -- Phase2 Equip Cfg.')\n self._equip.poe_loadbox.uut_poe_type = poe_type\n self._equip.poe_loadbox.echo_msg(\"PoE Power Test (Volt Meas)\")\n self._equip.poe_loadbox.show_equipment()\n self._equip.poe_loadbox.reset()\n self._equip.poe_loadbox.set_power_load(ieee=True)\n self._equip.poe_loadbox.set_class(load_class=self._equip.poe_loadbox.LOAD_CLASSES[poe_type])\n self._equip.poe_loadbox.set_load_on()\n\n # Phase 3: Turn on PoE Power and Measure Voltage\n # ----------------------------------------------\n aplib.set_container_text('PoE Power Test: ON')\n log.info('STEP: PoE Power Test -- Phase3 ON.')\n results = []\n for group in range(1, poe_pwr_budget_groups + 1):\n active_poe_ports = self._build_pwr_budget_port_list(ports=poe_ports, group_index=group,\n poe_pwr_budget_groups=poe_pwr_budget_groups)\n title = \"PoE Port SubGroup {0}/{1}\".format(group, poe_pwr_budget_groups) if poe_pwr_budget_groups > 1 else \"PoE Port Group {0}\".format(group)\n log.info(\" \")\n log.info(title)\n log.info(\"-\" * len(title))\n log.debug(\"Active PoE Ports: {0}\".format(active_poe_ports))\n 
log.debug(\"1. PoE ON\")\n self._set_poe_on(poe_type=poe_type, active_poe_ports=active_poe_ports)\n log.debug(\"2. PoE Events\")\n self._operate_poe_uut(action='EVENTS', poe_type=poe_type, poe_ports=active_poe_ports)\n log.debug(\"3. PoE Measure Voltage at LoadBox\")\n results.append(self._measure_poe_volt_test(active_poe_ports=active_poe_ports, poe_volt_range=poe_volt_range))\n log.debug(\"4. PoE Measure UUT Power\")\n results.append(self._measure_uut_poe_power(poe_type=poe_type, active_poe_ports=active_poe_ports, poe_current_range=poe_current_range))\n log.debug(\"5. PoE OFF\")\n self._set_poe_off(poe_type=poe_type, active_poe_ports=active_poe_ports)\n\n if not all(results):\n log.warning(\"The PoePowerTest will NOT be run since the Power Measure Tests failed.\")\n self._equip.poe_loadbox.set_load_off()\n self._equip.poe_loadbox.disconnect()\n return aplib.FAIL\n\n # Phase 4: Run the Diag PoePowerTest\n # ----------------------------------\n log.info('STEP: PoE Power Test -- Phase4 Diag.')\n self._equip.poe_loadbox.echo_msg(\"PoE Power Test (Diag)\")\n testallports = 1 if poe_pwr_budget_groups == 1 else 0\n results = []\n for group in range(1, poe_pwr_budget_groups + 1):\n active_poe_ports = self._build_pwr_budget_port_list(ports=poe_ports, group_index=group,\n poe_pwr_budget_groups=poe_pwr_budget_groups)\n title = \"PoE Port SubGroup {0}/{1}\".format(group, poe_pwr_budget_groups) if poe_pwr_budget_groups > 1 else \"PoE Port Group {0}\".format(group)\n log.info(\" \")\n log.info(title)\n log.info(\"-\" * len(title))\n log.debug(\"Active PoE Ports: {0}\".format(active_poe_ports))\n active_poe_ports_list = active_poe_ports.split(',')\n port_count = len(active_poe_ports_list)\n poe_params = {'testallports': str(testallports), 'portgroupsize': port_count,\n 'portnum': active_poe_ports_list[0],\n 'disconnecttype': disconnecttype, 'disconnecttimeout': disconnecttimeout,\n 'poweronsetuptimeout': poweronsetuptimeout, 'powerholdtime': powerholdtime,\n 'icutcode': icutcode}\n if self._operate_poe_uut(action='CFG', poe_type=poe_type, poe_params=poe_params, poe_ports=active_poe_ports):\n ret = self._run_poe_diag_test(command='PoePowerTest')\n else:\n log.error(\"Cannot config UUT for PoE operation.\")\n ret = False\n results.append(ret)\n\n ret = all(results)\n\n # Phase 5: Disconnect PoE Equipment\n # ----------------------------------\n log.info('STEP: PoE Power Test -- Phase5 OFF.')\n aplib.set_container_text('PoE Power Test: OFF')\n self._equip.poe_loadbox.set_load_off()\n self._equip.poe_loadbox.disconnect()\n\n aplib.set_container_text('PoE Power Test')\n log.info('STEP: PoE Power Test = {0}'.format('PASSED.' if ret else 'FAILED.'))\n\n return aplib.PASS if ret else (aplib.FAIL, \"PoE Poer load is bad.\")",
"async def test_temp_change_ac_on_outside_tolerance(\n hass: HomeAssistant, setup_comp_3\n) -> None:\n calls = _setup_switch(hass, False)\n await common.async_set_temperature(hass, 25)\n _setup_sensor(hass, 30)\n await hass.async_block_till_done()\n assert len(calls) == 1\n call = calls[0]\n assert call.domain == HASS_DOMAIN\n assert call.service == SERVICE_TURN_ON\n assert call.data[\"entity_id\"] == ENT_SWITCH",
"async def test_temp_change_heater_trigger_on_long_enough(\n hass: HomeAssistant, setup_comp_6\n) -> None:\n fake_changed = datetime.datetime(1970, 11, 11, 11, 11, 11, tzinfo=dt_util.UTC)\n with freeze_time(fake_changed):\n calls = _setup_switch(hass, False)\n await common.async_set_temperature(hass, 30)\n _setup_sensor(hass, 25)\n await hass.async_block_till_done()\n assert len(calls) == 1\n call = calls[0]\n assert call.domain == HASS_DOMAIN\n assert call.service == SERVICE_TURN_ON\n assert call.data[\"entity_id\"] == ENT_SWITCH",
"def checkUpgrades(self, towers):\n for t in towers:\n if self != t and t.type != 1:\n if self not in t.upgrades:\n distance = sqrt((t.center[0] - self.center[0]) *\n (t.center[0] - self.center[0]) +\n (t.center[1] - self.center[1]) *\n (t.center[1] - self.center[1]))\n if distance <= self.radius:\n t.damage *= self.power\n t.damage = round(t.damage, 3)\n t.upgrades.append(self)",
"def test_custom_electricity_mix():\n\n # Passing four mixes instead of 6\n mix_1 = np.zeros((4, 21))\n mix_1[:, 0] = 1\n # Passing a mix inferior to 1\n mix_2 = np.zeros((6, 21))\n mix_2[:, 0] = 0.9\n\n # Passing a mix superior to 1\n mix_3 = np.zeros((6, 21))\n mix_3[:, 0] = 1\n mix_3[:, 1] = 0.1\n\n mixes = [mix_1, mix_2, mix_3]\n\n for i, mix in enumerate(mixes):\n if i == 0:\n with pytest.raises(ValueError) as wrapped_error:\n ic = InventoryTruck(\n tm,\n method=\"recipe\",\n indicator=\"endpoint\",\n background_configuration={\"custom electricity mix\": mix},\n )\n assert wrapped_error.type == ValueError\n\n else:\n InventoryTruck(\n tm,\n method=\"recipe\",\n indicator=\"endpoint\",\n background_configuration={\"custom electricity mix\": mix},\n )",
"def check(self, Fp_Ed, report):\n a = Q(D(1000), 'mm') # 1 meter as standard (distance btw stiffeners)\n alpha = a / self.pro_hw\n if alpha < Q(D(np.sqrt(2))):\n I_st_min = (D(1.5) * self.pro_hw ** 3 * self.pro_tw ** 3) / a ** 2\n else:\n I_st_min = D(0.75) * self.pro_hw ** 3 * self.pro_tw ** 3 / a ** 2\n epsilon = np.sqrt(Q(D(275), 'MPa') / self.fy)\n I_st = D(1 / 12) * self.pro_tw ** 3 * 2 * 15 * epsilon * self.pro_tw +\\\n D(1 / 12) * self.thickness * (2 * self.width + self.pro_tw) ** 3\n\n desc_string = \"CHECKING STIFFENER\"\n report.addLine(101, 30, desc_string, \"\")\n\n if I_st > I_st_min:\n desc_string = \"Ultrastiff | OK\"\n calc_string = \"I_st = {} > I_st_min = {}\".format(I_st, I_st_min)\n report.addLine(200, 30, desc_string, calc_string)\n return True\n else:\n desc_string = \"Ultrastiff | FAIL\"\n calc_string = \"I_st = {} < I_st_min = {}\".format(I_st, I_st_min)\n report.addLine(500, 30, desc_string, calc_string)\n return False",
"async def test_temp_change_ac_off_within_tolerance(\n hass: HomeAssistant, setup_comp_3\n) -> None:\n calls = _setup_switch(hass, True)\n await common.async_set_temperature(hass, 30)\n _setup_sensor(hass, 29.8)\n await hass.async_block_till_done()\n assert len(calls) == 0",
"def hypothesis_test_for_single_eer(roc, eer0, level, verbose = True):\n\n assert roc.estimated_eer and roc.bootstraped_eers, \"You must call get_confidence_interval before\"\n\n e = roc.bootstraped_eers - roc.estimated_eer + eer0\n\n\n p = (1 + np.sum(e<=roc.estimated_eer))/float(len(roc.bootstraped_eers)+1)\n better = p < level\n\n if verbose:\n print \"EER comparison\"\n print \"==============\"\n print \"Estimated EER: %0.6f\" % roc.estimated_eer\n print \"Comparison EER0: %0.6f\" % eer0\n print \" H0: EER=EER0\"\n print \" H1: EER<EER0\"\n print \"p-value: %0.6f\" % p\n if better:\n print \"Rejection of H0 => EER < EER0\"\n print \"EER is significantly less than %0.6f\" %eer0\n else:\n print \"No rejection of H0 => EER = EER0\"\n print\n\n return better",
"async def test_temp_change_ac_on_within_tolerance(\n hass: HomeAssistant, setup_comp_3\n) -> None:\n calls = _setup_switch(hass, False)\n await common.async_set_temperature(hass, 25)\n _setup_sensor(hass, 25.2)\n await hass.async_block_till_done()\n assert len(calls) == 0",
"async def test_temp_change_heater_off_outside_tolerance(\n hass: HomeAssistant, setup_comp_2\n) -> None:\n calls = _setup_switch(hass, True)\n await common.async_set_temperature(hass, 30)\n _setup_sensor(hass, 35)\n await hass.async_block_till_done()\n assert len(calls) == 1\n call = calls[0]\n assert call.domain == HASS_DOMAIN\n assert call.service == SERVICE_TURN_OFF\n assert call.data[\"entity_id\"] == ENT_SWITCH",
"def check_alarms(self):\r\n\r\n # Low level alarm\r\n # Check if the low level alarm is configured\r\n if self.min_level_alarm != None:\r\n if (self.current_level <= self.min_level_alarm):\r\n if (not self.level_alarm_low_triggered\r\n and self.level_growing == False):\r\n self.generate_alarm(LEVEL_ALARM, MINIMUM_LIMIT)\r\n self.level_alarm_low_triggered = True\r\n self.level_alarm_high_triggered = False\r\n else:\r\n self.level_alarm_low_triggered = False\r\n\r\n # High level alarm\r\n # Check if the high level alarm is configured\r\n if self.max_level_alarm != None:\r\n if(self.current_level >= self.max_level_alarm):\r\n if (not self.level_alarm_high_triggered\r\n and self.level_growing == True):\r\n self.generate_alarm(LEVEL_ALARM, MAXIMUM_LIMIT)\r\n self.level_alarm_high_triggered = True\r\n self.level_alarm_low_triggered = False\r\n else:\r\n self.level_alarm_high_triggered = False\r\n\r\n # Low temperature alarm\r\n # Check if the low temperature alarm is configured\r\n if self.min_temperature_alarm != None:\r\n if (self.current_temperature <= self.min_temperature_alarm):\r\n if (not self.temperature_alarm_low_triggered\r\n and self.temperature_growing == False):\r\n self.generate_alarm(TEMPERATURE_ALARM, MINIMUM_LIMIT)\r\n self.temperature_alarm_low_triggered = True\r\n self.temperature_alarm_high_triggered = False\r\n else:\r\n self.temperature_alarm_low_triggered = False\r\n\r\n # High temperature alarm\r\n # Check if the high temperature alarm is configured\r\n if self.max_temperature_alarm != None:\r\n if(self.current_temperature >= self.max_temperature_alarm):\r\n if (not self.temperature_alarm_high_triggered\r\n and self.temperature_growing == True):\r\n self.generate_alarm(TEMPERATURE_ALARM, MAXIMUM_LIMIT)\r\n self.temperature_alarm_high_triggered = True\r\n self.temperature_alarm_low_triggered = False\r\n else:\r\n self.temperature_alarm_high_triggered = False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check the E0 values between wells and a TS in a ``reaction`` using ZPE from statmech.
|
def compute_and_check_rxn_e0(reaction: 'ARCReaction',
species_dict: dict,
project_directory: str,
kinetics_adapter: str,
output: dict,
sp_level: 'Level',
freq_scale_factor: float = 1.0,
) -> Optional[bool]:
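    # Every non-monoatomic species needs a frequency job output (or a provided YAML file); otherwise E0 cannot be evaluated and the check is inconclusive.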
for spc in reaction.r_species + reaction.p_species + [reaction.ts_species]:
folder = 'rxns' if species_dict[spc.label].is_ts else 'Species'
freq_path = os.path.join(project_directory, 'output', folder, spc.label, 'geometry', 'freq.out')
if not spc.yml_path and not os.path.isfile(freq_path) and not species_dict[spc.label].is_monoatomic():
return None
considered_labels = list()
rxn_copy = reaction.copy()
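    # Use the statmech adapter to compute E0 (ZPE-corrected electronic energy) for every unique species that does not already have it.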
for species in rxn_copy.r_species + rxn_copy.p_species + [rxn_copy.ts_species]:
if species.label in considered_labels or species.e0:
continue
considered_labels.append(species.label)
statmech_adapter = statmech_factory(statmech_adapter_label=kinetics_adapter,
output_directory=os.path.join(project_directory, 'output'),
output_dict=output,
bac_type=None,
sp_level=sp_level,
freq_scale_factor=freq_scale_factor,
species=species,
)
statmech_adapter.compute_thermo(kinetics_flag=True,
e0_only=True,
skip_rotors=True,
)
e0_pass = check_rxn_e0(reaction=rxn_copy)
if not e0_pass:
if rxn_copy.ts_species.ts_guesses_exhausted:
return False
return True # Switch TS.
return False # Don't switch TS.
|
[
"def check_rxn_e0(reaction: 'ARCReaction',\n verbose: bool = True,\n ) -> Optional[bool]:\n if reaction.ts_species.ts_checks['E0']:\n return True\n r_e0 = sum_list_entries([r.e0 for r in reaction.r_species],\n multipliers=[reaction.get_species_count(species=r, well=0) for r in reaction.r_species])\n p_e0 = sum_list_entries([p.e0 for p in reaction.p_species],\n multipliers=[reaction.get_species_count(species=p, well=1) for p in reaction.p_species])\n ts_e0 = reaction.ts_species.e0\n\n if verbose and all([val is not None for val in [r_e0, p_e0, ts_e0]]):\n min_e0 = extremum_list([r_e0, p_e0, ts_e0], return_min=True)\n r_text = f'{r_e0 - min_e0:.2f} kJ/mol' if r_e0 is not None else 'None'\n ts_text = f'{ts_e0 - min_e0:.2f} kJ/mol' if ts_e0 is not None else 'None'\n p_text = f'{p_e0 - min_e0:.2f} kJ/mol' if p_e0 is not None else 'None'\n logger.info(\n f'\\nReaction {reaction.label} (TS {reaction.ts_label}, TSG {reaction.ts_species.chosen_ts}) '\n f'has the following path E0 values:\\n'\n f'Reactants: {r_text}\\n'\n f'TS: {ts_text}\\n'\n f'Products: {p_text}')\n if any(e0 is None for e0 in [r_e0, p_e0, ts_e0]):\n return None\n if r_e0 >= ts_e0 or p_e0 >= ts_e0:\n reaction.ts_species.ts_checks['E0'] = False\n return False\n reaction.ts_species.ts_checks['E0'] = True\n return True",
"def check_ts_energy(reaction: 'ARCReaction',\n verbose: bool = True,\n ) -> None:\n # Check whether E0 values are already known, e.g. from Arkane species YAML files\n check_rxn_e0(reaction=reaction)\n if reaction.ts_species.ts_checks['E0']:\n return\n\n r_e_elect = None if any([spc.e_elect is None for spc in reaction.r_species]) \\\n else sum(spc.e_elect * reaction.get_species_count(species=spc, well=0) for spc in reaction.r_species)\n p_e_elect = None if any([spc.e_elect is None for spc in reaction.p_species]) \\\n else sum(spc.e_elect * reaction.get_species_count(species=spc, well=1) for spc in reaction.p_species)\n ts_e_elect = reaction.ts_species.e_elect\n\n if verbose and all([val is not None for val in [r_e_elect, p_e_elect, ts_e_elect]]):\n min_e = extremum_list([r_e_elect, p_e_elect, ts_e_elect], return_min=True)\n r_text = f'{r_e_elect - min_e:.2f} kJ/mol' if r_e_elect is not None else 'None'\n ts_text = f'{ts_e_elect - min_e:.2f} kJ/mol' if ts_e_elect is not None else 'None'\n p_text = f'{p_e_elect - min_e:.2f} kJ/mol' if p_e_elect is not None else 'None'\n logger.info(\n f'\\nReaction {reaction.label} (TS {reaction.ts_label}, TSG {reaction.ts_species.chosen_ts}) '\n f'has the following path electronic energy:\\n'\n f'Reactants: {r_text}\\n'\n f'TS: {ts_text}\\n'\n f'Products: {p_text}')\n\n if all([val is not None for val in [r_e_elect, p_e_elect, ts_e_elect]]):\n # We have all params, we can make a quantitative decision.\n if ts_e_elect > r_e_elect + 1.0 and ts_e_elect > p_e_elect + 1.0:\n # TS is above both wells.\n reaction.ts_species.ts_checks['e_elect'] = True\n return\n # TS is not above both wells.\n if verbose:\n logger.error(f'TS of reaction {reaction.label} has a lower electronic energy value than expected.')\n reaction.ts_species.ts_checks['e_elect'] = False\n return\n # We don't have any params (some are ``None``)\n if verbose:\n logger.info('\\n')\n logger.warning(f\"Could not get electronic energy for all species in reaction {reaction.label}.\\n\")\n # We don't really know.\n reaction.ts_species.ts_checks['e_elect'] = None\n if 'Could not determine TS e_elect relative to the wells; ' not in reaction.ts_species.ts_checks['warnings']:\n reaction.ts_species.ts_checks['warnings'] += 'Could not determine TS e_elect relative to the wells; '",
"def test_e0(self):\n self.assertAlmostEqual(self.thermodata.E0.value_si, self.E0, 6)",
"def test_temperature_0(self):\n self.assertAlmostEqual(self.singleExponentialDown.T0.value_si, self.T0, 4)",
"def GetEiT0(ws_name,EiGuess):\n\talg=GetEi(InputWorkspace=ws_name,Monitor1Spec=\"1\",Monitor2Spec=\"2\",EnergyEstimate=EiGuess)\t\t#Run GetEi algorithm\n\t[Ei,Tzero]=[float(alg.getPropertyValue(\"IncidentEnergy\")),-float(alg.getPropertyValue(\"Tzero\"))]\t\t#Extract incident energy and T0\n\treturn [Ei,Tzero]",
"def test_IEEEP370_SE_NZC_2xThru(self):\n self.s2xthru = rf.Network(os.path.join(self.test_dir, 's2xthru.s2p'))\n self.fdf = rf.Network(os.path.join(self.test_dir, 'fdf.s2p'))\n self.dm_nzc = rf.IEEEP370_SE_NZC_2xThru(dummy_2xthru = self.s2xthru, \n name = '2xthru')\n residuals = self.dm_nzc.s_side1.inv ** \\\n self.s2xthru ** self.dm_nzc.s_side2.flipped().inv\n # insertion loss magnitude deviate from 1.0 from less than 0.1 dB\n il_mag = 20.*np.log10(np.abs(residuals.s[:, 1, 0] + 1e-12))\n self.assertTrue(np.max(np.abs(il_mag)) <= 0.1, 'residual IL magnitude')\n # insertion loss phase deviate from 0 degree from less than 1 degree\n il_phase = np.angle(residuals.s[:, 1, 0]) * 180/np.pi\n self.assertTrue(np.max(np.abs(il_phase)) <= 1.0, 'residual IL Phase')",
"def test_T0(self):\n self.assertAlmostEqual(self.stick.T0.value_si, self.T0, 6)",
"def test_IEEEP370_SE_ZC_2xThru(self):\n self.s2xthru = rf.Network(os.path.join(self.test_dir, 's2xthru.s2p'))\n self.fdf = rf.Network(os.path.join(self.test_dir, 'fdf.s2p'))\n self.dm_zc = rf.IEEEP370_SE_ZC_2xThru(dummy_2xthru = self.s2xthru, \n dummy_fix_dut_fix = self.fdf, \n bandwidth_limit = 10e9, \n pullback1 = 0, pullback2 = 0,\n leadin = 0,\n NRP_enable = False,\n name = 'zc2xthru')\n residuals = self.dm_zc.s_side1.inv ** \\\n self.s2xthru ** self.dm_zc.s_side2.flipped().inv\n # insertion loss magnitude deviate from 1.0 from less than 0.2 dB\n il_mag = 20.*np.log10(np.abs(residuals.s[:, 1, 0] + 1e-12))\n self.assertTrue(np.max(np.abs(il_mag)) <= 0.2, 'residual IL magnitude')\n # insertion loss phase deviate from 0 degree from less than 45 degree\n # too much tolerance here allowed as for now\n il_phase = np.angle(residuals.s[:, 1, 0]) * 180/np.pi\n self.assertTrue(np.max(np.abs(il_phase)) <= 2.0, 'residual IL Phase')",
"def tox_alert(mol):\r\n if np.any([mol.HasSubstructMatch(alert) for alert in alert_mols]):\r\n score = 0\r\n else:\r\n score = 1\r\n return score",
"def test_T0(self):\n self.assertAlmostEqual(self.surfarr.T0.value_si, self.T0, 6)",
"def check_flux_signs(self, ss, fluxes, keq, enzymes=None, tol=1e-9):\n if not(0. in ss or 0. in keq):\n ln_ss = numpy.log(ss)\n allowed_directions = numpy.dot(self._N.T, ln_ss) - numpy.log(keq) < tol # sum n_i * ln(s_i) < ln(q) ?\n else:\n # the above is numerically not feasible if we have zero entries\n prod = numpy.ones(len(keq))\n for i, row in enumerate(self._N):\n if ss[i] == 0.:\n continue\n prod *= ss[i] ** row\n allowed_directions = prod - keq < tol # mathematically equivalent to the above (hopefylly:)\n real_directions = fluxes >= tol\n diff = allowed_directions - real_directions\n\n if enzymes:\n zeroflux = flux < tol\n enzymezero = enzymes < tol\n bothzero = zeroflux * enzymezero\n diff = diff * 1 - bothzero # if an enzyme and a flux is zero, the eq const doesnt matter\n\n if sum(diff) > 0:\n sys.stderr.write(\"Flux for reaction %s has wrong sign.\\n\" %diff.tolist().index(True))\n return False\n return True",
"def test_model(self):\n power_ebsilon = -31.769\n power_tespy = round(\n self.nw.busses['total output power'].P.val / 1e6, 3)\n msg = (\n 'The total power calculated (' + str(power_tespy) + ') does not '\n 'match the power calculated with the EBSILON model (' +\n str(power_ebsilon) + ').')\n assert power_tespy == power_ebsilon, msg\n\n T_c79_ebsilon = 296.254\n T_c79_tespy = round(self.nw.get_conn('79').T.val, 3)\n msg = (\n 'The temperature at connection 79 calculated (' +\n str(T_c79_tespy) + ') does not match the temperature calculated '\n 'with the EBSILON model (' + str(T_c79_ebsilon) + ').')\n assert T_c79_tespy == T_c79_ebsilon, msg",
"def test_Ea(self):\n self.assertAlmostEqual(self.stick.Ea.value_si * 0.001, self.Ea, 6)",
"def test_h0_rejected(self):\n x1 = [10, 11, 12, 13]\n x2 = [5, 6, 8, 9]\n h0r = welchs_ttest(x1, x2, alpha=0.05)[\"h0_rejected\"]\n\n assert h0r",
"def test_sp(self):\n self.job_5.execute_incore()\n self.assertTrue(os.path.isfile(self.job_5.local_path_to_output_file))\n e_elect = parse_e_elect(self.job_5.local_path_to_output_file, software='xtb')\n self.assertAlmostEqual(e_elect, -28229.8803, places=2)",
"def energyError(self, EN,E0):\n return abs( (EN - E0)/ E0)",
"def test_positivity(self):\n t = np.arange(-10, 50, step = 0.3)\n I = exponential(t, tzero = self.tzero, amp = self.amp, tconst = self.tconst)\n\n self.assertTrue(np.all(I > 0))",
"def get_ekcorrects(self, zf, zs):\n\n zf_grid = self.get_zf_grid(zf)\n if zf_grid == False:\n raise ValueError(\n 'Cannot fetch e+k correction for given formation redshift because it has not been gridded!')\n\n return zf_grid.get_obs_mags(zs) - zf_grid.rest[0]",
"def test_Ea(self):\n self.assertAlmostEqual(self.surfarr.Ea.value_si * 0.001, self.Ea, 6)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check the E0 values between wells and a TS in a ``reaction``, assuming that E0 values are available.
|
def check_rxn_e0(reaction: 'ARCReaction',
verbose: bool = True,
) -> Optional[bool]:
if reaction.ts_species.ts_checks['E0']:
return True
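    # Well E0 values are stoichiometry-weighted sums over the respective reactant and product species.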
r_e0 = sum_list_entries([r.e0 for r in reaction.r_species],
multipliers=[reaction.get_species_count(species=r, well=0) for r in reaction.r_species])
p_e0 = sum_list_entries([p.e0 for p in reaction.p_species],
multipliers=[reaction.get_species_count(species=p, well=1) for p in reaction.p_species])
ts_e0 = reaction.ts_species.e0
if verbose and all([val is not None for val in [r_e0, p_e0, ts_e0]]):
min_e0 = extremum_list([r_e0, p_e0, ts_e0], return_min=True)
r_text = f'{r_e0 - min_e0:.2f} kJ/mol' if r_e0 is not None else 'None'
ts_text = f'{ts_e0 - min_e0:.2f} kJ/mol' if ts_e0 is not None else 'None'
p_text = f'{p_e0 - min_e0:.2f} kJ/mol' if p_e0 is not None else 'None'
logger.info(
f'\nReaction {reaction.label} (TS {reaction.ts_label}, TSG {reaction.ts_species.chosen_ts}) '
f'has the following path E0 values:\n'
f'Reactants: {r_text}\n'
f'TS: {ts_text}\n'
f'Products: {p_text}')
if any(e0 is None for e0 in [r_e0, p_e0, ts_e0]):
return None
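    # The E0 check passes only if the TS lies above both the reactant and the product wells.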
if r_e0 >= ts_e0 or p_e0 >= ts_e0:
reaction.ts_species.ts_checks['E0'] = False
return False
reaction.ts_species.ts_checks['E0'] = True
return True
|
[
"def check_ts_energy(reaction: 'ARCReaction',\n verbose: bool = True,\n ) -> None:\n # Check whether E0 values are already known, e.g. from Arkane species YAML files\n check_rxn_e0(reaction=reaction)\n if reaction.ts_species.ts_checks['E0']:\n return\n\n r_e_elect = None if any([spc.e_elect is None for spc in reaction.r_species]) \\\n else sum(spc.e_elect * reaction.get_species_count(species=spc, well=0) for spc in reaction.r_species)\n p_e_elect = None if any([spc.e_elect is None for spc in reaction.p_species]) \\\n else sum(spc.e_elect * reaction.get_species_count(species=spc, well=1) for spc in reaction.p_species)\n ts_e_elect = reaction.ts_species.e_elect\n\n if verbose and all([val is not None for val in [r_e_elect, p_e_elect, ts_e_elect]]):\n min_e = extremum_list([r_e_elect, p_e_elect, ts_e_elect], return_min=True)\n r_text = f'{r_e_elect - min_e:.2f} kJ/mol' if r_e_elect is not None else 'None'\n ts_text = f'{ts_e_elect - min_e:.2f} kJ/mol' if ts_e_elect is not None else 'None'\n p_text = f'{p_e_elect - min_e:.2f} kJ/mol' if p_e_elect is not None else 'None'\n logger.info(\n f'\\nReaction {reaction.label} (TS {reaction.ts_label}, TSG {reaction.ts_species.chosen_ts}) '\n f'has the following path electronic energy:\\n'\n f'Reactants: {r_text}\\n'\n f'TS: {ts_text}\\n'\n f'Products: {p_text}')\n\n if all([val is not None for val in [r_e_elect, p_e_elect, ts_e_elect]]):\n # We have all params, we can make a quantitative decision.\n if ts_e_elect > r_e_elect + 1.0 and ts_e_elect > p_e_elect + 1.0:\n # TS is above both wells.\n reaction.ts_species.ts_checks['e_elect'] = True\n return\n # TS is not above both wells.\n if verbose:\n logger.error(f'TS of reaction {reaction.label} has a lower electronic energy value than expected.')\n reaction.ts_species.ts_checks['e_elect'] = False\n return\n # We don't have any params (some are ``None``)\n if verbose:\n logger.info('\\n')\n logger.warning(f\"Could not get electronic energy for all species in reaction {reaction.label}.\\n\")\n # We don't really know.\n reaction.ts_species.ts_checks['e_elect'] = None\n if 'Could not determine TS e_elect relative to the wells; ' not in reaction.ts_species.ts_checks['warnings']:\n reaction.ts_species.ts_checks['warnings'] += 'Could not determine TS e_elect relative to the wells; '",
"def compute_and_check_rxn_e0(reaction: 'ARCReaction',\n species_dict: dict,\n project_directory: str,\n kinetics_adapter: str,\n output: dict,\n sp_level: 'Level',\n freq_scale_factor: float = 1.0,\n ) -> Optional[bool]:\n for spc in reaction.r_species + reaction.p_species + [reaction.ts_species]:\n folder = 'rxns' if species_dict[spc.label].is_ts else 'Species'\n freq_path = os.path.join(project_directory, 'output', folder, spc.label, 'geometry', 'freq.out')\n if not spc.yml_path and not os.path.isfile(freq_path) and not species_dict[spc.label].is_monoatomic():\n return None\n considered_labels = list()\n rxn_copy = reaction.copy()\n for species in rxn_copy.r_species + rxn_copy.p_species + [rxn_copy.ts_species]:\n if species.label in considered_labels or species.e0:\n continue\n considered_labels.append(species.label)\n statmech_adapter = statmech_factory(statmech_adapter_label=kinetics_adapter,\n output_directory=os.path.join(project_directory, 'output'),\n output_dict=output,\n bac_type=None,\n sp_level=sp_level,\n freq_scale_factor=freq_scale_factor,\n species=species,\n )\n statmech_adapter.compute_thermo(kinetics_flag=True,\n e0_only=True,\n skip_rotors=True,\n )\n e0_pass = check_rxn_e0(reaction=rxn_copy)\n if not e0_pass:\n if rxn_copy.ts_species.ts_guesses_exhausted:\n return False\n return True # Switch TS.\n return False # Don't switch TS.",
"def test_e0(self):\n self.assertAlmostEqual(self.thermodata.E0.value_si, self.E0, 6)",
"def test_T0(self):\n self.assertAlmostEqual(self.stick.T0.value_si, self.T0, 6)",
"def GetEiT0(ws_name,EiGuess):\n\talg=GetEi(InputWorkspace=ws_name,Monitor1Spec=\"1\",Monitor2Spec=\"2\",EnergyEstimate=EiGuess)\t\t#Run GetEi algorithm\n\t[Ei,Tzero]=[float(alg.getPropertyValue(\"IncidentEnergy\")),-float(alg.getPropertyValue(\"Tzero\"))]\t\t#Extract incident energy and T0\n\treturn [Ei,Tzero]",
"def check_scf_criteria(self):\n tols = ['toldfe', 'tolwfr', 'toldff', 'tolrff', 'tolvrs']\n nonzeros = 0\n for i in tols:\n if i in self.params.keys() and self.params[i].as_val() is not None:\n if self.params[i].as_val(t=float, dim=0) != 0.0:\n nonzeros += 1\n if nonzeros == 1:\n return True\n else:\n print(\"========================================\\n\")\n print(\" WARNING !!!\\n\")\n print(\"========================================\\n\")\n print(\"you must set one and only one of variables\\n\")\n print(\"below to differ from zero.\\n\")\n print(\"[toldfe, tolwfr, toldff, tolrff, tolvrs]\\n\")\n #print(nonzeros)\n sys.exit(1)",
"def test_temperature_0(self):\n self.assertAlmostEqual(self.singleExponentialDown.T0.value_si, self.T0, 4)",
"def check_scf_criteria(self):\n tols = ['toldfe', 'tolwfr', 'toldff', 'tolrff', 'tolvrs']\n nonzeros = 0\n for i in tols:\n if i in self.params.keys() and self.params[i] is not None:\n if self.params[i] != 0.0:\n nonzeros += 1\n if nonzeros == 1:\n return True\n else:\n print(\"========================================\\n\")\n print(\" WARNING !!!\\n\")\n print(\"========================================\\n\")\n print(\"you must set one and only one of variables\\n\")\n print(\"below to differ from zero.\\n\")\n print(\"[toldfe, tolwfr, toldff, tolrff, tolvrs]\\n\")\n #print(nonzeros)\n sys.exit(1)",
"def tox_alert(mol):\r\n if np.any([mol.HasSubstructMatch(alert) for alert in alert_mols]):\r\n score = 0\r\n else:\r\n score = 1\r\n return score",
"def check_ts(reaction: 'ARCReaction',\n verbose: bool = True,\n job: Optional['JobAdapter'] = None,\n checks: Optional[List[str]] = None,\n rxn_zone_atom_indices: Optional[List[int]] = None,\n ):\n checks = checks or list()\n for entry in checks:\n if entry not in ['energy', 'freq', 'IRC', 'rotors']:\n raise ValueError(f\"Requested checks could be 'energy', 'freq', 'IRC', or 'rotors', got:\\n{checks}\")\n\n if 'energy' in checks or not reaction.ts_species.ts_checks['e_elect']:\n check_ts_energy(reaction=reaction, verbose=verbose)\n\n if 'freq' in checks or (not reaction.ts_species.ts_checks['normal_mode_displacement'] and job is not None):\n check_normal_mode_displacement(reaction, job=job)\n\n if 'rotors' in checks or (ts_passed_all_checks(species=reaction.ts_species, exemptions=['E0', 'warnings', 'IRC'])\n and job is not None):\n invalidate_rotors_with_both_pivots_in_a_reactive_zone(reaction, job,\n rxn_zone_atom_indices=rxn_zone_atom_indices)",
"def check_flux_signs(self, ss, fluxes, keq, enzymes=None, tol=1e-9):\n if not(0. in ss or 0. in keq):\n ln_ss = numpy.log(ss)\n allowed_directions = numpy.dot(self._N.T, ln_ss) - numpy.log(keq) < tol # sum n_i * ln(s_i) < ln(q) ?\n else:\n # the above is numerically not feasible if we have zero entries\n prod = numpy.ones(len(keq))\n for i, row in enumerate(self._N):\n if ss[i] == 0.:\n continue\n prod *= ss[i] ** row\n allowed_directions = prod - keq < tol # mathematically equivalent to the above (hopefylly:)\n real_directions = fluxes >= tol\n diff = allowed_directions - real_directions\n\n if enzymes:\n zeroflux = flux < tol\n enzymezero = enzymes < tol\n bothzero = zeroflux * enzymezero\n diff = diff * 1 - bothzero # if an enzyme and a flux is zero, the eq const doesnt matter\n\n if sum(diff) > 0:\n sys.stderr.write(\"Flux for reaction %s has wrong sign.\\n\" %diff.tolist().index(True))\n return False\n return True",
"def test_tzero_limits(self):\n t = np.arange(-10, 50, step = 0.3)\n I = exponential(t, tzero = self.tzero, amp = self.amp, tconst = self.tconst)\n\n # Check that all values before time-zero are the amplitude\n self.assertTrue(np.all(np.equal(I[t<self.tzero], self.amp)))\n self.assertTrue(np.all(np.less(I[t>self.tzero], self.amp)))",
"def test_T0(self):\n self.assertAlmostEqual(self.surfarr.T0.value_si, self.T0, 6)",
"def check(self, currentTau):\n voltages=[]\n for i in self.preSynapses:\n voltages.append(i.pre.sumInputs)\n #print(voltages)\n m=max(voltages)\n if m==0:\n return\n \n for i in range(len(voltages)):\n voltages[i]-=m\n for i in range(len(voltages)):\n if voltages[i]<0:\n self.preSynapses[i].pre.sumInputs=0\n else:\n self.preSynapses[i].pre.sumInputs=max(self.preSynapses[i].pre.sumInputs\\\n ,self.preSynapses[i].pre.threshold)",
"def err_vals_all_zero(output_hdul):\n\n result = np.all(output_hdul[\"ERR\"].data == 0)\n return result",
"def ts_passed_all_checks(species: 'ARCSpecies',\n exemptions: Optional[List[str]] = None,\n verbose: bool = False,\n ) -> bool:\n exemptions = exemptions or list()\n for check, value in species.ts_checks.items():\n if check not in exemptions and not value and not (check == 'e_elect' and species.ts_checks['E0']):\n if verbose:\n logger.warning(f'TS {species.label} did not pass the all checks, status is:\\n{species.ts_checks}')\n return False\n return True",
"def _check_temperature_consistency(self):\n temp = -1.0\n none_count = 0\n for cf in self.cfuncs:\n if cf is not None:\n T = cf.get_temperature()\n if temp < 0.0:\n temp = T\n if T != temp:\n raise Exception(\"Temperature of \"+\n \"CorrelationFunctionMatrix is not consistent\")\n else:\n none_count += 1\n\n if none_count > 1:\n raise Exception()\n \n return temp",
"def test_is_temperature_valid(self):\n T_data = np.array([200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 4000])\n valid_data = np.array([False, True, True, True, True, True, True, True, True, False], np.bool)\n for T, valid in zip(T_data, valid_data):\n valid0 = self.stick.is_temperature_valid(T)\n self.assertEqual(valid0, valid)",
"def test__validate_application_actioned__0():\n application_actioned = DateTime(2016, 9, 9)\n \n for input_parameter, expected_output in (\n (None, None),\n (application_actioned, application_actioned),\n ):\n output = validate_application_actioned(input_parameter)\n vampytest.assert_is(output, expected_output)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check the normal mode displacement by identifying bonds that break and form and comparing them to the expected RMG template, if available.
|
def check_normal_mode_displacement(reaction: 'ARCReaction',
job: Optional['JobAdapter'],
amplitudes: Optional[Union[float, List[float]]] = None,
):
if job is None:
return
if reaction.family is None:
rmgdb.determine_family(reaction)
amplitudes = amplitudes or [0.1, 0.2, 0.4, 0.6, 0.8, 1]
amplitudes = [amplitudes] if isinstance(amplitudes, float) else amplitudes
reaction.ts_species.ts_checks['normal_mode_displacement'] = False
rmg_reactions = get_rmg_reactions_from_arc_reaction(arc_reaction=reaction) or list()
freqs, normal_modes_disp = parser.parse_normal_mode_displacement(path=job.local_path_to_output_file, raise_error=False)
if not len(normal_modes_disp):
return
largest_neg_freq_idx = get_index_of_abs_largest_neg_freq(freqs)
    # True only if H2 (a two-atom species with both atoms being hydrogen) is among the reactants or products.
    bond_lone_hs = any(len(spc.mol.atoms) == 2 and spc.mol.atoms[0].element.symbol == 'H'
                       and spc.mol.atoms[1].element.symbol == 'H' for spc in reaction.r_species + reaction.p_species)
# bond_lone_hs = False
xyz = parser.parse_xyz_from_file(job.local_path_to_output_file)
if not xyz['coords']:
xyz = reaction.ts_species.get_xyz()
done = False
for amplitude in amplitudes:
xyz_1, xyz_2 = displace_xyz(xyz=xyz, displacement=normal_modes_disp[largest_neg_freq_idx], amplitude=amplitude)
dmat_1, dmat_2 = xyz_to_dmat(xyz_1), xyz_to_dmat(xyz_2)
dmat_bonds_1 = get_bonds_from_dmat(dmat=dmat_1,
elements=xyz_1['symbols'],
tolerance=1.5,
bond_lone_hydrogens=bond_lone_hs)
dmat_bonds_2 = get_bonds_from_dmat(dmat=dmat_2,
elements=xyz_2['symbols'],
tolerance=1.5,
bond_lone_hydrogens=bond_lone_hs)
got_expected_changing_bonds = False
for i, rmg_reaction in enumerate(rmg_reactions):
r_label_dict = get_atom_indices_of_labeled_atoms_in_an_rmg_reaction(arc_reaction=reaction,
rmg_reaction=rmg_reaction)[0]
if r_label_dict is None:
continue
expected_breaking_bonds, expected_forming_bonds = reaction.get_expected_changing_bonds(r_label_dict=r_label_dict)
if expected_breaking_bonds is None or expected_forming_bonds is None:
continue
got_expected_changing_bonds = True
breaking = [determine_changing_bond(bond, dmat_bonds_1, dmat_bonds_2) for bond in expected_breaking_bonds]
forming = [determine_changing_bond(bond, dmat_bonds_1, dmat_bonds_2) for bond in expected_forming_bonds]
if len(breaking) and len(forming) \
and not any(entry is None for entry in breaking) and not any(entry is None for entry in forming) \
and all(entry == breaking[0] for entry in breaking) and all(entry == forming[0] for entry in forming) \
and breaking[0] != forming[0]:
reaction.ts_species.ts_checks['normal_mode_displacement'] = True
done = True
break
if not got_expected_changing_bonds and not reaction.ts_species.ts_checks['normal_mode_displacement']:
reaction.ts_species.ts_checks['warnings'] += 'Could not compare normal mode displacement to expected ' \
'breaking/forming bonds due to a missing RMG template; '
reaction.ts_species.ts_checks['normal_mode_displacement'] = True
break
if not len(rmg_reactions):
# Just check that some bonds break/form, and that this is not a torsional saddle point.
warning = f'Cannot check normal mode displacement for reaction {reaction} since a corresponding ' \
f'RMG template could not be generated'
logger.warning(warning)
reaction.ts_species.ts_checks['warnings'] += warning + '; '
if any(bond not in dmat_bonds_2 for bond in dmat_bonds_1) \
or any(bond not in dmat_bonds_1 for bond in dmat_bonds_2):
reaction.ts_species.ts_checks['normal_mode_displacement'] = True
break
if done:
break
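
# Illustrative-only sketch of the displacement/bond-comparison idea used above, kept
# self-contained on purpose: a toy collinear geometry, a hand-made imaginary-mode vector,
# and a flat 1.8 Angstrom distance cutoff stand in for ARC's displace_xyz / xyz_to_dmat /
# get_bonds_from_dmat helpers. All numbers are hypothetical.
import numpy as np

def toy_bonds(coords, cutoff=1.8):
    # Return the set of sorted atom-index pairs closer than `cutoff` (Angstrom).
    dmat = np.linalg.norm(coords[:, None, :] - coords[None, :, :], axis=-1)
    n = len(coords)
    return {(i, j) for i in range(n) for j in range(i + 1, n) if dmat[i, j] < cutoff}

coords = np.array([[0.0, 0.0, 0.0],   # atom 0
                   [1.2, 0.0, 0.0],   # atom 1, e.g. a migrating H
                   [2.4, 0.0, 0.0]])  # atom 2
mode = np.array([[0.0, 0.0, 0.0],
                 [0.7, 0.0, 0.0],     # only atom 1 moves along the imaginary mode
                 [0.0, 0.0, 0.0]])

for amplitude in (0.2, 0.6, 1.0):     # increase the amplitude until a bond change shows up
    bonds_1 = toy_bonds(coords + amplitude * mode)
    bonds_2 = toy_bonds(coords - amplitude * mode)
    # The two displaced images are interchangeable, so 'breaking' vs. 'forming' labels are
    # arbitrary here; what matters (as in the function above) is that the two sets differ.
    print(amplitude, 'only in image 1:', bonds_1 - bonds_2, 'only in image 2:', bonds_2 - bonds_1)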
|
[
"def test_d_regularization_dims(self):\n template_rxn_map = self.family.get_reaction_matches(thermo_database=self.database.thermo, estimate_thermo=False)\n\n for entry in self.family.groups.entries.values():\n if entry.children == []:\n continue\n # set of violations, one atom or one bond is allowed to be in violation (if it was just created)\n vio_obj = set()\n pgrp = entry.item\n exts = pgrp.get_extensions()\n for grp, grpc, name, typ, indc in exts:\n if typ == 'intNewBondExt' or typ == 'extNewBondExt':\n continue\n else:\n val, boo = self.family.eval_ext(entry, grp, name, template_rxn_map)\n if val != np.inf:\n continue\n atms = grp.atoms\n if typ == 'bondExt':\n bd = grp.get_bond(atms[indc[0]], atms[indc[1]])\n bds = bd.reg_dim[1]\n if boo and bds != [] and not (set(bd.order) <= set(bds)):\n logging.error('bond regularization dimension missed')\n vio_obj.add((tuple(indc), tuple(bds), tuple(bd.order), typ))\n elif typ == 'atomExt':\n atypes = atms[indc[0]].reg_dim_atm[1]\n atype = atms[indc[0]].atomtype\n if boo and atypes != [] and not (set(atype) <= set(atypes)):\n logging.error('atomtype regularization dimension missed')\n vio_obj.add((tuple(indc), tuple(atypes), tuple(atype), typ))\n elif typ == 'elExt':\n us = atms[indc[0]].reg_dim_u[1]\n u = atms[indc[0]].radical_electrons\n if boo and us != [] and not (set(u) <= set(us)):\n logging.error('unpaired electron regularization dimension missed')\n vio_obj.add((tuple(indc), tuple(us), tuple(u), typ))\n elif typ == 'ringExt':\n rs = atms[indc[0]].reg_dim_r[1]\n if 'inRing' in atms[indc[0]].props.keys():\n r = atms[indc[0]].props['inRing']\n else:\n r = [True, False]\n if boo and rs != [] and not (set(r) <= set(rs)):\n logging.error('in ring regularization dimension missed')\n vio_obj.add((tuple(indc), tuple(rs), tuple(r), typ))\n else:\n raise ValueError('extension type {0} not identified within test'.format(typ))\n\n self.assertTrue(len(vio_obj) <= 1,\n 'there were {0} regularization violations at, {1}'.format(len(vio_obj), vio_obj))",
"def test_boundary_nondimensional_force(self):\r\n rgn = np.random.rand()\r\n model = random_crack_model(varepsilon=800)\r\n compare = model.p_0(rgn, [1, 1])[0]\r\n self.assertAlmostEqual(\r\n model.p(rgn)[0], compare, delta=np.abs(1e-1*compare)\r\n )\r\n model = random_crack_model(N=100, varepsilon=800)\r\n compare = 3*model.kappa/model.N**3*(rgn - 1)\r\n self.assertAlmostEqual(\r\n model.p(rgn)[0], compare, delta=np.abs(1e-1*compare)\r\n )",
"def test_no_nonbonded_force(self, sage):\n\n del sage._parameter_handlers[\"Constraints\"]\n del sage._parameter_handlers[\"ToolkitAM1BCC\"]\n del sage._parameter_handlers[\"LibraryCharges\"]\n del sage._parameter_handlers[\"Electrostatics\"]\n del sage._parameter_handlers[\"vdW\"]\n\n methane = MoleculeWithConformer.from_smiles(\"C\")\n\n openmm_system = Interchange.from_smirnoff(sage, [methane]).to_openmm()\n\n for force in openmm_system.getForces():\n if isinstance(force, NonbondedForce):\n pytest.fail(\"A NonbondedForce was found in the OpenMM system.\")\n elif isinstance(force, PeriodicTorsionForce):\n assert force.getNumTorsions() == 0\n elif isinstance(force, HarmonicBondForce):\n assert force.getNumBonds() == 4\n elif isinstance(force, HarmonicAngleForce):\n assert force.getNumAngles() == 6\n else:\n pytest.fail(f\"Unexpected force found, type: {type(force)}\")",
"def test_two_gaussian_potential_correct_bc_force(self):\n\n newcoords1 = round(two_gaussian_potential_bc(vnew, f2, 4.5)[1], 5)\n self.assertEqual(newcoords1, -50.0)\n newcoords2 = round(two_gaussian_potential_bc(vnew, f2, -4.5)[1], 5)\n self.assertEqual(newcoords2, 50.0)\n newcoords3 = round(two_gaussian_potential_bc(vnew, f2, 0.5)[1], 5)\n self.assertEqual(newcoords3, f2)",
"def calculate_potential_field(self, pos_drones, pos_obstacles):\n alpha = beta = 0.005\n # --- Repulsion drones\n for position in pos_drones:\n distance = (self.location - position).magnitude()\n if 0 < distance < OBSERVABLE_RADIUS:\n # Proporcional to the distance. The closer the stronger needs to be\n f_repulsion = (position - self.location).normalize() / distance \n #f_repulsion = derivativeBivariate(alpha, beta, position, self.location) / SAMPLE_TIME\n #f_repulsion = limit(f_repulsion, SEEK_FORCE)\n self.applyForce(-f_repulsion)\n\n # --- Repulsion obstacles \n for position in pos_obstacles:\n distance = (self.location - position).magnitude()\n if 0 < distance < OBSERVABLE_RADIUS:\n # Proporcional to the distance. The closer the stronger needs to be\n f_repulsion = 2*(position - self.location).normalize() / sqrt(distance)\n #f_repulsion = derivativeBivariate(alpha, beta, position, self.location) / SAMPLE_TIME\n #f_repulsion = limit(f_repulsion, SEEK_FORCE)\n self.applyForce(-f_repulsion)\n\n # --- Repulsion walls\n # Distance to Bottom\n distance = UPPER_Y - self.location[1] \n # Proporcional to the distance. The closer the stronger needs to be\n if distance > 0:\n f_repulsion = pygame.math.Vector2(0,2) / sqrt(distance)\n else:\n f_repulsion = pygame.math.Vector2(0,2) * SEEK_FORCE\n self.applyForce(-f_repulsion)\n \n # Distance to Top\n distance = self.location[1] - LOWER_Y \n # Proporcional to the distance. The closer the stronger needs to be\n if distance > 0:\n f_repulsion = pygame.math.Vector2(0,-2) / sqrt(distance)\n else:\n f_repulsion = pygame.math.Vector2(0,-2) * SEEK_FORCE\n self.applyForce(-f_repulsion)",
"def test_DrudeMass(self):\n\n psf = CharmmPsfFile('systems/ala3_solv_drude.psf')\n crd = CharmmCrdFile('systems/ala3_solv_drude.crd')\n params = CharmmParameterSet('systems/toppar_drude_master_protein_2013e.str')\n system = psf.createSystem(params, drudeMass=0)\n trueMass = [system.getParticleMass(i) for i in range(system.getNumParticles())]\n drudeMass = 0.3*amu\n system = psf.createSystem(params, drudeMass=drudeMass)\n adjustedMass = [system.getParticleMass(i) for i in range(system.getNumParticles())]\n drudeForce = [f for f in system.getForces() if isinstance(f, DrudeForce)][0]\n drudeParticles = set()\n parentParticles = set()\n for i in range(drudeForce.getNumParticles()):\n params = drudeForce.getParticleParameters(i)\n drudeParticles.add(params[0])\n parentParticles.add(params[1])\n for i in range(system.getNumParticles()):\n if i in drudeParticles:\n self.assertEqual(0*amu, trueMass[i])\n self.assertEqual(drudeMass, adjustedMass[i])\n elif i in parentParticles:\n self.assertEqual(trueMass[i]-drudeMass, adjustedMass[i])\n else:\n self.assertEqual(trueMass[i], adjustedMass[i])",
"def test_boundary_nondimensional_end_separation(self):\r\n rgn = np.random.rand()\r\n model = random_crack_model(varepsilon=800)\r\n compare = model.v_0(rgn, [1, 1])[0]\r\n self.assertAlmostEqual(\r\n model.v(rgn)[0], compare, delta=np.abs(1e-1*compare)\r\n )\r\n model = random_crack_model(N=100, varepsilon=800)\r\n compare = 1 + model.N**3/3/model.kappa*rgn\r\n self.assertAlmostEqual(\r\n model.v(rgn)[0], compare, delta=np.abs(1e-1*compare)\r\n )",
"def verify_correct_chemical_labels(self):\n if self.temperature_bounds[1] < self.temperature_bounds[0]:\n temporary = self.light_chemical\n self.light_chemical = self.heavy_chemical\n self.heavy_chemical = temporary\n\n temporary = self.temperature_bounds[0]\n # The bounds need to be extended in the proper direction; including correcting the original safety extension\n self.temperature_bounds[0] = self.temperature_bounds[1] - 2\n self.temperature_bounds[1] = temporary + 2",
"def test_NoLongRangeCorrection(self):\n parameters = CharmmParameterSet(\n 'systems/charmm-solvated/envi.str',\n 'systems/charmm-solvated/m14.rtf',\n 'systems/charmm-solvated/m14.prm'\n )\n psf = CharmmPsfFile('systems/charmm-solvated/isa_wat.3_kcl.m14.psf')\n psf.setBox(3.0584*nanometers,3.0584*nanometers,3.0584*nanometers)\n system = psf.createSystem(parameters, nonbondedMethod=PME)\n for force in system.getForces():\n if isinstance(force, CustomNonbondedForce):\n self.assertFalse(force.getUseLongRangeCorrection())\n if isinstance(force, NonbondedForce):\n self.assertFalse(force.getUseDispersionCorrection())",
"def sanity_check_geometry(self):\n g = self.geometry\n\n has_normal = False\n\n for ni in range(g.nnode):\n if (g.t_type[ni] & BCTYPE_MASK) == NODE_CLEAN_S_TERMINAL:\n if g.t_mu[ni] != 0:\n raise ValueError, \\\n (\"Superconducting terminal %d has \" +\n \"a finite bias voltage %g. Non-stationary \" +\n \"effects are unsupported.\") % ( ni, g.t_mu[ni] )\n elif (g.t_type[ni] & BCTYPE_MASK) == NODE_CLEAN_N_TERMINAL:\n has_normal = True\n\n if not has_normal and self.get_equilibrium_T() is None:\n raise ValueError, \"No normal terminals in the setup, and \" \\\n \"a non-equilibrium situation was specified. This code \" \\\n \"does not solve this class of problems: in these cases \" \\\n \"inelastic relaxation may be important for sub-gap \" \\\n \"transport. Please specify an equilibrium situation \" \\\n \"instead.\"",
"def areNormalGenerated(self) -> \"SbBool\":\n return _coin.SoReorganizeAction_areNormalGenerated(self)",
"def test_get_expected_changing_bonds(self):\n self.rxn11.determine_family(self.rmgdb)\n expected_breaking_bonds, expected_forming_bonds = self.rxn11.get_expected_changing_bonds(\n r_label_dict={'*1': 1, '*2': 2, '*3': 6})\n self.assertEqual(expected_breaking_bonds, [(2, 6)])\n self.assertEqual(expected_forming_bonds, [(1, 6)])",
"def iterateBuoyancyCorrect(self):\n thermexp = np.array(self.massInfo['coexpans']) # 1\n deltatemp = self.temperature.flatten() - 20 # 3\n thermexp = np.absolute(np.dot(self.config, np.diag(thermexp))) # 4\n withinstd = self.massInfo['balstd'][0]\n c1 = self.volumes\n self.c1 = c1\n #print c1\n flag = True\n counter = 0\n while flag:\n betahat = np.dot(self.config, np.diag(self.correct.flatten()))\n volume = np.nan_to_num((np.dot(self.config, np.diag(self.masses)) + .001 * betahat) / np.absolute(\n np.dot(self.config, np.diag(self.density))))\n c2 = volume * (1 + np.dot(thermexp.T, np.diag(deltatemp)).T)\n #print (np.absolute(c1 - c2) < (.01 * withinstd)).all()\n if (np.absolute(c1 - c2) < (.01 * withinstd)).all():\n flag = False\n else:\n counter += 1\n if counter == 10:\n flag = False\n c1 = c2\n self.correct2 = self.correct\n self.corrected2 = self.corrected\n self.volumes = c2\n self.correctBuoyancy()\n self.doCorrections()\n self.computeFinal()\n print c2",
"def check_backtrack(self):\n differential = self.character.stats[4] - self.dungeonlevel\n if differential < 0:\n cutoff = float(3 - differential) / float(6 - 6 * differential)\n else:\n cutoff = float(3 + 5 * differential) / float(6 + 6 * differential)\n return random.random() < cutoff",
"def test_bond_check_false():\n bond_length = 3.0\n observed = ga.bond_check(bond_length)\n assert observed == False",
"def test_ImplicitSolventParameters(self):\n system = self.psf_x.createSystem(self.params, implicitSolvent=GBn,\n solventDielectric=50.0,\n soluteDielectric = 0.9)\n for force in system.getForces():\n if isinstance(force, NonbondedForce):\n self.assertEqual(force.getReactionFieldDielectric(), 1.0)",
"def dust_detect(self):\n self.df[\"dust\"] = (self.df[\"aod_1020nm\"] > 0.3) & (\n self.df[\"440-870_angstrom_exponent\"] < 0.6\n )",
"def verify_bound_basic_opt(self, mode_set):\n bond_port_0 = self.create_bonded_device(mode_set, SOCKET_0, True)\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[1])\n\n mode_value = self.get_bond_mode(bond_port_0)\n self.verify('%d' % mode_set in mode_value,\n \"Setting bonding mode error\")\n\n bond_port_1 = self.create_bonded_device(mode_set, SOCKET_0)\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[0])\n self.add_slave_to_bonding_device(bond_port_1, True, self.dut_ports[0])\n\n OTHER_MODE = mode_set + 1 if not mode_set else mode_set - 1\n self.set_mode_for_bonding_device(bond_port_0, OTHER_MODE)\n self.set_mode_for_bonding_device(bond_port_0, mode_set)\n\n self.add_slave_to_bonding_device(bond_port_0, False, self.dut_ports[2])\n time.sleep(5)\n self.set_primary_for_bonding_device(bond_port_0, self.dut_ports[2])\n\n self.remove_slave_from_bonding_device(bond_port_0, False, self.dut_ports[2])\n primary_now = self.get_bond_primary(bond_port_0)\n self.verify(int(primary_now) == self.dut_ports[1],\n \"Reset primary slave failed after removing primary slave\")\n\n for bond_port in [bond_port_0, bond_port_1]:\n self.remove_all_slaves(bond_port)\n\n self.dut.send_expect(\"quit\", \"# \")\n self.launch_app()",
"def test_Cutoff(self):\n\n for top in (self.psf_c, self.psf_x, self.psf_v):\n for method in [CutoffNonPeriodic]:\n system = top.createSystem(self.params, nonbondedMethod=method,\n nonbondedCutoff=2*nanometer,\n constraints=HBonds)\n cutoff_distance = 0.0*nanometer\n cutoff_check = 2.0*nanometer\n for force in system.getForces():\n if isinstance(force, NonbondedForce):\n cutoff_distance = force.getCutoffDistance()\n self.assertEqual(cutoff_distance, cutoff_check)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Determine whether a bond breaks or forms in a TS. Note that ``bond`` and all bond entries in ``dmat_bonds_1``/``dmat_bonds_2`` must already be sorted from the smaller to the larger atom index.
|
def determine_changing_bond(bond: Tuple[int, ...],
dmat_bonds_1: List[Tuple[int, int]],
dmat_bonds_2: List[Tuple[int, int]],
) -> Optional[str]:
    if len(bond) != 2 or any(not isinstance(entry, int) for entry in bond):
        raise ValueError(f'Expected a bond to be represented by a tuple of length 2 with int entries, '
                         f'got {bond} of length {len(bond)} with entry types {[type(entry) for entry in bond]}.')
if bond not in dmat_bonds_1 and bond in dmat_bonds_2:
return 'forming'
if bond in dmat_bonds_1 and bond not in dmat_bonds_2:
return 'breaking'
return None
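
# Illustrative use of determine_changing_bond() with hand-made, pre-sorted bond lists
# (the atom indices below are hypothetical):
dmat_bonds_1 = [(0, 1), (1, 2)]   # bonds found in the first displaced geometry
dmat_bonds_2 = [(1, 2), (2, 3)]   # bonds found in the second displaced geometry

print(determine_changing_bond((0, 1), dmat_bonds_1, dmat_bonds_2))  # 'breaking'
print(determine_changing_bond((2, 3), dmat_bonds_1, dmat_bonds_2))  # 'forming'
print(determine_changing_bond((1, 2), dmat_bonds_1, dmat_bonds_2))  # None (present in both)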
|
[
"def isin_bond(self):\n return 'bond' in self.flags",
"def has_bond(self, id):\n for row in self._group.bonds.where(\n 'id == value', condvars={'value': id}):\n return True\n return False",
"def same_bond_topology(bt1, bt2):\n natoms = len(bt1.atom)\n if len(bt2.atom) != natoms:\n return False\n nbonds = len(bt1.bond)\n\n if len(bt2.bond) != nbonds:\n return False\n for i, t1 in enumerate(bt1.atom):\n if t1 != bt2.atom[i]:\n return False\n for i, b1 in enumerate(bt1.bond):\n b2 = bt2.bond[i]\n if b1.atom_a != b2.atom_a:\n return False\n if b1.atom_b != b2.atom_b:\n return False\n if b1.bond_type != b2.bond_type:\n return False\n return True",
"def validate_bond_HTM(self, bond_holding):\n self.assertEqual(len(bond_holding), 3) # should have 3 positions\n self.assertEqual(self.count_zero_holding_bond(bond_holding), 1)\n\n i = 0\n for bond in bond_holding:\n i = i + 1\n\n if (i == 1): # the first bond\n self.assertEqual(bond['isin'], 'XS0508012092')\n self.assertEqual(bond['name'], '(XS0508012092) China Overseas Finance 5.5%')\n self.assertEqual(bond['currency'], 'USD')\n self.assertEqual(bond['accounting_treatment'], 'HTM')\n self.assertAlmostEqual(bond['par_amount'], 400000)\n self.assertEqual(bond['is_listed'], 'TBC')\n self.assertEqual(bond['listed_location'], 'TBC')\n self.assertAlmostEqual(bond['fx_on_trade_day'], 8.0024)\n self.assertAlmostEqual(bond['coupon_rate'], 5.5/100)\n self.assertEqual(bond['coupon_start_date'], datetime(2017,5,10))\n self.assertEqual(bond['maturity_date'], datetime(2020,11,10))\n self.assertAlmostEqual(bond['average_cost'], 95.75)\n self.assertAlmostEqual(bond['amortized_cost'], 98.25302)\n self.assertAlmostEqual(bond['book_cost'], 383000)\n self.assertAlmostEqual(bond['interest_bought'], 0)\n self.assertAlmostEqual(bond['amortized_value'], 393012.08)\n self.assertAlmostEqual(bond['accrued_interest'], 4766.67)\n self.assertAlmostEqual(bond['amortized_gain_loss'], 10012.08)\n self.assertAlmostEqual(bond['fx_gain_loss'], 15831.6)\n\n if (i == 2): # the second bond\n self.assertEqual(bond['isin'], 'USG59606AA46')\n self.assertEqual(bond['name'], '(USG59606AA46) Mega Advance Investment 5%')\n self.assertEqual(bond['currency'], 'USD')\n self.assertEqual(bond['accounting_treatment'], 'HTM')\n self.assertAlmostEqual(bond['par_amount'], 400000)\n self.assertEqual(bond['is_listed'], 'TBC')\n self.assertEqual(bond['listed_location'], 'TBC')\n self.assertAlmostEqual(bond['fx_on_trade_day'], 8.0024)\n self.assertAlmostEqual(bond['coupon_rate'], 5.0/100)\n self.assertEqual(bond['coupon_start_date'], datetime(2017,5,12))\n self.assertEqual(bond['maturity_date'], datetime(2021,5,12))\n self.assertAlmostEqual(bond['average_cost'], 96.25)\n self.assertAlmostEqual(bond['amortized_cost'], 98.2888)\n self.assertAlmostEqual(bond['book_cost'], 385000)\n self.assertAlmostEqual(bond['interest_bought'], 0)\n self.assertAlmostEqual(bond['amortized_value'], 393155.2)\n self.assertAlmostEqual(bond['accrued_interest'], 4222.22)\n self.assertAlmostEqual(bond['amortized_gain_loss'], 8155.2)\n self.assertAlmostEqual(bond['fx_gain_loss'], 15914.28)\n\n if (i == 3): # the last bond, an empty position\n self.assertEqual(bond['isin'], 'USG2108YAA31')\n self.assertEqual(bond['name'], '(USG2108YAA31) China Resource Land Ltd 4.625%')\n self.assertEqual(bond['currency'], 'USD')\n self.assertEqual(bond['accounting_treatment'], 'HTM')\n self.assertAlmostEqual(bond['par_amount'], 0)\n self.assertEqual(len(bond), 5) # should have no more fields",
"def has_same_bonds(self, other_atoms):\n if len(self) != len(other_atoms):\n return False\n if len(self.bonds) != len(other_atoms.bonds):\n return False\n for (i, atom) in enumerate(self.atoms):\n other = other_atoms.atoms[i]\n # print(\"{}={}\".format(i, atom.index))\n atom_neighbors = {n.index for n in atom.neighbors}\n other_neighbors = {n.index for n in other.neighbors}\n # print(atom_neighbors, other_neighbors)\n if atom_neighbors == other_neighbors:\n continue\n else:\n return False\n return True",
"def hasBond(self, atom1, atom2, jMatrix = None):\n for eachBond in self.getBonds():\n if eachBond.getAtom1() == atom1 or eachBond.getAtom2() == atom1:\n if eachBond.getAtom1() == atom2 or eachBond.getAtom2() == atom2:\n if jMatrix != None and eachBond.getJMatrix() != None:\n if eachBond.getJMatrix().all() == jMatrix.all():\n print \"True\"\n return True\n elif (not jMatrix) and (not eachBond.getJMatrix()):\n print \"True\"\n return True\n return False",
"def is_H_bond(self, atom_A, atom_D):\n \n \"\"\" CG -\n acceptor:atom1\n donors:atom2\n source RDOCK\n X-A - - - D-Y\n is Hbond if:\n 2.5 < dist (A-D) < 3.5\n 90<angle(YDA)<150\n \"\"\"\n \n #KC# result file written to compare with modeller function\n fileout=open(self.model.filename[:-4]+\"_angles.txt\",'a')\n \n #KC#CG# H bond research\n if atom_A.get_parent()!=atom_D.get_parent():\n NS=NeighborSearch([atom for atom in self.model.atoms if atom_D!=atom])\n is_hbond=False\n for atom_Y in NS.search(center=atom_D.get_coord(), radius=parametres.cutoff_bonds):\n angle=calcul_angle(atom_Y.get_coord(), atom_D.get_coord(), atom_A.get_coord())\n angle=min(angle, 360-angle)\n if 90 < angle < 150 and 2.5 < atom_D-atom_A < 3.5:\n fileout.write(str(atom_D.get_parent())+'\\t'+str(atom_D)+'\\t'+str(atom_A.get_parent())+'\\t'+str(atom_A)+'\\t'+str(angle)+'\\n')\n is_hbond=True\n break\n if is_hbond:\n return True\n fileout.close()\n \n return False",
"def bond_features(bond):\n bt = bond.GetBondType()\n fbond = [\n bt == Chem.rdchem.BondType.SINGLE, bt == Chem.rdchem.BondType.DOUBLE,\n bt == Chem.rdchem.BondType.TRIPLE, bt == Chem.rdchem.BondType.AROMATIC,\n (bond.GetIsConjugated() if bt is not None else 0),\n (bond.IsInRing() if bt is not None else 0)\n ]\n fbond += get_onehot(int(bond.GetStereo()), list(range(6)))\n return fbond",
"def is_single_fragment(bond_topology):\n\n natoms = len(bond_topology.atom)\n nbonds = len(bond_topology.bond)\n # Some special cases are easy.\n if natoms == 1:\n return True\n if natoms == 2 and nbonds == 1:\n return True\n if natoms == 3 and nbonds == 2:\n return True\n if natoms == nbonds and natoms <= 4:\n return True\n\n connection_matrix = bonded(bond_topology)\n\n # Any row with all zero means a detached atom.\n if np.sum(connection_matrix.any(axis=1)) != natoms:\n return False\n\n # For each atom, the neighbours.\n attached: List[Any] = []\n for i in range(0, natoms):\n attached.append(np.ravel(np.argwhere(connection_matrix[i,])))\n\n # neighbours = np.argwhere(connection_matrix > 0)\n\n visited = np.zeros(natoms, dtype=np.int32)\n # Mark anything with a single connection as visited.\n # Record the index of an atom that has multiple connections.\n a_multiply_connected_atom = -1\n for i in range(0, natoms):\n if bond_topology.atom[i] == dataset_pb2.BondTopology.AtomType.ATOM_H:\n visited[i] = 1\n continue\n\n if len(attached[i]) > 1:\n a_multiply_connected_atom = i\n continue\n\n # A singly connected heavy atom. Mark visited if not of a two atom fragment.\n if len(attached[attached[i][0]]) > 1:\n visited[i] = 1\n\n if a_multiply_connected_atom < 0: # Cannot happen\n return False\n\n number_visited = np.count_nonzero(visited) + visit(\n attached, a_multiply_connected_atom, visited)\n return number_visited == natoms",
"def _get_bond_info(self, bond):\n # blank bond string is single or aromatic\n # empty or_types in Chemical Environments are treated as ~ bonds\n if bond == \"\":\n and_types = list()\n or_types = [('-', []), (':', [])]\n return or_types, and_types\n\n # AND types indicated by ; at the end\n split = bond.split(';')\n and_types = list()\n for a in split[1:]:\n and_types += re.findall(self.bond_regs, a)\n\n # or_types are divided by ,\n or_list = split[0].split(',')\n or_types = list()\n for OR in or_list:\n if OR == '~':\n continue\n or_divide = re.findall(self.bond_regs, OR)\n if len(or_divide) > 0:\n or_types.append((or_divide[0], or_divide[1:]))\n\n return or_types, and_types",
"def bonds_compatible(self, mapping):\n # Get the bonds to the already mapped graph\n sub_atom_bonds = set(self.sub.atoms[mapping[0]].bonds).intersection(self.sub_atoms_mapped)\n master_atom_bonds = set(self.master.atoms[mapping[1]].bonds).intersection(self.master_atoms_mapped)\n # Convert the sub atoms to master atoms\n master_atom_bonds_from_sub = set(self.sub_to_master(atom) for atom in sub_atom_bonds)\n\n return master_atom_bonds == master_atom_bonds_from_sub",
"def generate_bond_features(mol: Chem.rdchem.Mol, bond: Chem.rdchem.Bond) -> np.ndarray:\n bond_features = np.zeros(NUMBER_BOND_GRAPH_FEATURES)\n bond_type = BONDS.index(bond.GetBondType())\n bond_features[bond_type] = 1.0\n\n # Is the bond in the same ring\n bond_features[4] = float(bond.IsInRing())\n\n # Is the bond conjugated\n bond_features[5] = float(bond.GetIsConjugated())\n\n # Distance\n begin = bond.GetBeginAtom().GetIdx()\n end = bond.GetEndAtom().GetIdx()\n bond_features[6] = Chem.rdMolTransforms.GetBondLength(mol.GetConformer(), begin, end)\n\n return bond_features",
"def stubbify_bonds(self):\n # If we are dealing with an object that contains a bond pattern, the degree of a node has no meaning.\n # The degree is used only for VF2 isomorphism checking, but not for pattern embeddings.\n self.bonds = set()\n bonds = {}\n for name in self.agents:\n degree = 0\n for site in self.agents[name]:\n link = self.agents[name][site]['bond']\n if link != '.':\n if is_number(link):\n degree += 1\n if link in bonds:\n [(name1, site1)] = bonds[link]\n # self.agents[name1][site1]['bond'] = name + self.bondsep + site\n # self.agents[name][site]['bond'] = name1 + self.bondsep + site1\n self.agents[name1][site1]['bond'] = ''.join([name, self.bondsep, site])\n self.agents[name][site]['bond'] = ''.join([name1, self.bondsep, site1])\n b = sorted([(name1, site1), (name, site)], key=lambda i: i[0])\n self.bonds.add(tuple(b)) # collect unique bonds\n else:\n bonds[link] = [(name, site)]\n elif self.bondsep in self.agents[name][site]['bond']:\n degree += 1\n else:\n # bond state is a ghost, or '_', or '#'\n degree = -1 # reset and flag, just in case\n self.is_pattern = True\n\n self.info[name]['degree'] = degree",
"def bond_features(bond: Chem.rdchem.Bond, args) -> List[Union[bool, int, float]]:\n bond_fdim = get_bond_fdim(args)\n\n if bond is None:\n fbond = [1] + [0] * (bond_fdim - 1)\n else:\n bt = bond.GetBondType()\n fbond = [\n 0, # bond is not None\n bt == Chem.rdchem.BondType.SINGLE,\n bt == Chem.rdchem.BondType.DOUBLE,\n bt == Chem.rdchem.BondType.TRIPLE,\n bt == Chem.rdchem.BondType.AROMATIC,\n (bond.GetIsConjugated() if bt is not None else 0),\n (bond.IsInRing() if bt is not None else 0)\n ]\n # if args.chiral_features:\n # fbond += onek_encoding_unk(int(bond.GetStereo()), list(range(6))) # remove global cis/trans tags\n return fbond",
"def test_bond_check_false():\n bond_length = 3.0\n observed = ga.bond_check(bond_length)\n assert observed == False",
"def is_hydrogen_bond(self, wildcards=False):\n if wildcards:\n for order in self.order:\n if abs(order) <= 1e-9:\n return True\n else:\n return False\n else:\n return abs(self.order[0]) <= 1e-9 and len(self.order) == 1",
"def get_bonds(self, atoms):\n cutoffs = CutoffList(self.data['cutoffs'])\n self.update_neighbor_list(atoms)\n\n types = atoms.get_types()\n tags = atoms.get_tags()\n cell = atoms.get_cell()\n positions = atoms.get_positions()\n bond_list = []\n bond_types = []\n for i, atom in enumerate(atoms):\n iname = types[tags[i]]\n indices, offsets = self.nl.get_neighbors(i)\n for j, offset in zip(indices, offsets):\n if j <= i:\n continue # do not double count\n jname = types[tags[j]]\n cut = cutoffs.value(iname, jname)\n if cut is None:\n if self.warnings > 1:\n print('Warning: cutoff %s-%s not found'\n % (iname, jname))\n continue # don't have it\n dist = np.linalg.norm(atom.position - atoms[j].position\n - np.dot(offset, cell))\n if dist > cut:\n continue # too far away\n name, val = self.bonds.name_value(iname, jname)\n if name is None:\n if self.warnings:\n print('Warning: potential %s-%s not found'\n % (iname, jname))\n continue # don't have it\n if name not in bond_types:\n bond_types.append(name)\n bond_list.append([bond_types.index(name), i, j])\n return bond_types, bond_list",
"def test_remove_h_bonds(self):\n test_mol = self.mHBonds.generate_h_bonded_structures()[0]\n test_mol.remove_h_bonds()\n\n for i, atm1 in enumerate(test_mol.atoms):\n for j, atm2 in enumerate(test_mol.atoms):\n if j < i and test_mol.has_bond(atm1, atm2):\n bd = test_mol.get_bond(atm1, atm2)\n self.assertNotAlmostEqual(bd.order, 0.1)",
"def is_interface_in_bond(self, interface_name):\n # In the case of bond with a single member\n if interface_name == \"\":\n return False\n\n if ((self.management_interface_configured and\n self.lag_management_interface and\n (interface_name == self.lag_management_interface_member0 or\n interface_name == self.lag_management_interface_member1))\n or\n (self.external_oam_interface_configured and\n self.lag_external_oam_interface and\n (interface_name == self.lag_external_oam_interface_member0 or\n interface_name == self.lag_external_oam_interface_member1))\n or\n (self.cluster_host_interface_configured and\n self.lag_cluster_host_interface and\n (interface_name == self.lag_cluster_host_interface_member0 or\n interface_name == self.lag_cluster_host_interface_member1))):\n return True\n else:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Invalidate rotors in which both pivots are included in the reactive zone.
|
def invalidate_rotors_with_both_pivots_in_a_reactive_zone(reaction: 'ARCReaction',
job: 'JobAdapter',
rxn_zone_atom_indices: Optional[List[int]] = None,
):
rxn_zone_atom_indices = rxn_zone_atom_indices or get_rxn_zone_atom_indices(reaction, job)
if not reaction.ts_species.rotors_dict:
reaction.ts_species.determine_rotors()
rxn_zone_atom_indices_1 = convert_list_index_0_to_1(rxn_zone_atom_indices)
for key, rotor in reaction.ts_species.rotors_dict.items():
if rotor['pivots'][0] in rxn_zone_atom_indices_1 and rotor['pivots'][1] in rxn_zone_atom_indices_1:
rotor['success'] = False
if 'pivTS' not in rotor['invalidation_reason']:
rotor['invalidation_reason'] += 'Pivots participate in the TS reaction zone (code: pivTS). '
logging.info(f"\nNot considering rotor {key} with pivots {rotor['pivots']} in TS {reaction.ts_species.label}\n")
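
# Standalone illustration of the pivot test above, using a plain dict that mimics the
# fields accessed on reaction.ts_species.rotors_dict (all values here are hypothetical):
rxn_zone_atom_indices_1 = [3, 4, 7]   # 1-indexed atoms in the reactive zone
rotors_dict = {0: {'pivots': [3, 4], 'success': None, 'invalidation_reason': ''},
               1: {'pivots': [4, 9], 'success': None, 'invalidation_reason': ''}}
for key, rotor in rotors_dict.items():
    if rotor['pivots'][0] in rxn_zone_atom_indices_1 and rotor['pivots'][1] in rxn_zone_atom_indices_1:
        rotor['success'] = False
        rotor['invalidation_reason'] += 'Pivots participate in the TS reaction zone (code: pivTS). '

print(rotors_dict[0]['success'], rotors_dict[1]['success'])  # False None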
|
[
"def rotate_box(self):\r\n self._rotate = not self._rotate",
"def combineRotations(circuit,wires):\n #initialize a list to store the indices that will be deleted after the loop to not affect the stored sequence of indices\n indicesToPop=[]\n for j in range(len(wires)):\n i=0\n n=len(wires[j])\n while i<n-1:\n #checking if there exists a consecutive rotation gate about the same axis\n if wires[j][i][:2]==wires[j][i+1][:2]:\n \n #getting the instruction index that stored in the wires list \n index1=int(wires[j][i][2:])\n index2=int(wires[j][i+1][2:])\n \n #getting the two angles of rotations to combine them\n angle1=circuit.data[index1][0].params[0]\n angle2=circuit.data[index2][0].params[0]\n Sum=angle1+angle2\n \n #combining the two angles in one gate and delete the other gate\n circuit.data[index2][0].params[0]=Sum\n indicesToPop.append(index1)\n \n #update the wires list data and redirect the i index \n del wires[j][i]\n n=n-1\n i=i-1\n \n #checking if the combination of the angles equals 2PI (rotate and back to the same position)\n if round(Sum,2)==round(2*PI,2):\n indicesToPop.append(index2)\n \n #delete the 2PI rotation and redirect the i index\n del wires[j][i+1]\n n=n-1\n i=i-2\n \n i=i+1\n \n #sorting the indices in descending order to pop the biggest index first\n indicesToPop=sorted(indicesToPop, reverse=True)\n #removing the elements\n [circuit.data.pop(i) for i in indicesToPop]\n \n return circuit",
"def reorientCamera(camera: 'SoCamera', rot: 'SbRotation') -> \"void\":\n return _coin.SoScXMLSpinTarget_reorientCamera(camera, rot)",
"def rotate(self):\n self.currtetri.rotate()\n if self.collision():\n self.currtetri.rotate(False)\n else:\n self.get_resource('sound', 'RotateSound').play()",
"def brute_force_transducer_angle(shot1, shot2, channelnum1, channelnum2,\n start_time, end_time,\n A1s=linspace(18,23,30),\n A2s=linspace(18,23,30), \n plot_output=1,\n norm=1):\n #Get copies of the real channels\n sh1ch1 = deepcopy(shot1.get_channel(channelnum1))\n sh1ch2 = deepcopy(shot1.get_channel(channelnum2))\n sh2ch1 = deepcopy(shot2.get_channel(channelnum1))\n sh2ch2 = deepcopy(shot2.get_channel(channelnum2))\n\n #Now get some dummy combined velocity things.\n vel1 = deepcopy(shot1.get_velocity(channelnum1, channelnum2))\n vel2 = deepcopy(shot2.get_velocity(channelnum1, channelnum2))\n \n start_idx = vel1.get_index_near_time(start_time)\n end_idx = vel1.get_index_near_time(end_time)\n\n def err_func(A1, A2):\n #First modify the As in the relevant dummy channels.\n sh1ch1.A = A1\n sh2ch1.A = A1\n sh1ch2.A = A2\n sh2ch2.A = A2\n \n #Need to update the radius vectors, too\n sh1ch1.calculate_radius()\n sh1ch2.calculate_radius()\n sh2ch1.calculate_radius()\n sh2ch2.calculate_radius()\n \n #Now reprocess the dummy velocities.\n vel1.gen_velocity_two_transducers(sh1ch1, sh1ch2)\n vel2.gen_velocity_two_transducers(sh2ch1, sh2ch2)\n \n #Find indices of radius bounds\n indx_rmin = vel1.get_index_near_radius(11.0)\n indx_rmax = vel1.get_index_near_radius(19.0)\n nindx = indx_rmax - indx_rmin\n \n #And calculate the norm for the differences between the\n #forward and backwards shots, divided by the total number of\n #radial samples. The degree of the norm is specified by the\n #'norm' parameter (1=L1, 2=L2, 3=L3, etc.). We multiply the vr\n #difference by 10 assuming that vr ~ 0.10*vtheta so that we\n #give ~equal weights to both components.\n\n vt_diff = abs(mean(vel1.vtheta[start_idx:end_idx, indx_rmin:indx_rmax],\n axis=0) +\n mean(vel2.vtheta[start_idx:end_idx, indx_rmin:indx_rmax],\n axis=0))\n \n vr_diff = 10*abs(mean(vel1.vr[start_idx:end_idx, indx_rmin:indx_rmax],\n axis=0) -\n mean(vel2.vr[start_idx:end_idx, indx_rmin:indx_rmax],\n axis=0))\n \n total_err = ((vr_diff**norm).sum() +\n (vt_diff**norm).sum())**(1.0/norm)/(nindx)\n \n return total_err\n\n errors = ones([A2s.size, A1s.size])*nan\n if(plot_output):\n fig = figure()\n\n totalpts = errors.size\n print \"Entering loop....\"\n for i in range(0, A1s.size):\n for j in range(0, A2s.size):\n errors[j, i] = err_func(A1s[i], A2s[j])\n if(plot_output):\n fig.clear()\n ax = fig.add_subplot(111)\n cp = ax.contourf(A1s, A2s, errors, 50)\n fig.colorbar(cp)\n ax.set_xlabel(\"A for Channel %d\" % channelnum1)\n ax.set_ylabel(\"A for Channel %d\" % channelnum2)\n draw()\n sys.stdout.write('\\x1b[1A\\x1b[2K\\x1b[J')\n print \"%d of %d positions analyzed\" % (i*A2s.size + j + 1,\n totalpts)\n\n if(plot_output):\n fig.clear()\n ax = fig.add_subplot(111)\n cp = ax.contourf(A1s, A2s, errors, 30)\n fig.colorbar(cp)\n ax.set_xlabel(\"A for Channel %d\" % channelnum1)\n ax.set_ylabel(\"A for Channel %d\" % channelnum2)\n return (A1s, A2s, errors)",
"def invalidate_members(self):\n self._computed_members = None\n d = self._REGISTRY._groups\n for name in self._used_by:\n d[name].invalidate_members()",
"def clean_orbitals(self):\n while abs(self.orbitals[-1].fill) < 0.01:\n self.orbitals = self.orbitals[:-1]",
"def SoScXMLSpinTarget_reorientCamera(camera: 'SoCamera', rot: 'SbRotation') -> \"void\":\n return _coin.SoScXMLSpinTarget_reorientCamera(camera, rot)",
"def unset_am(self):\n for i in self._rdbk_pv + self._cset_pv + self._rset_pv:\n i.auto_monitor = False",
"def rotate_components(rx, ry, rz, nodes=None):\n if nodes is None:\n nodes = cmds.ls(sl=True) or []\n for node in nodes:\n pivot = cmds.xform(node, q=True, rp=True, ws=True)\n cmds.rotate(rx, ry, rz, '{0}.cv[*]'.format(node), r=True, p=pivot, os=True, fo=True)",
"def set_angles(self, acc, mag, time_T):\n # ------------------------------------\n self.DT = time_T\n acc = np.array([acc[0], acc[1], acc[2]]).transpose()\n self.Orientation_acc[:, 2] = acc\n self.Orientation_acc[:, 1] = np.cross(acc, np.array([mag[0], mag[1], mag[2]]).transpose())\n self.Orientation_acc[:, 0] = np.cross(self.Orientation_acc[:, 1], acc)\n self.Orientation_acc[:, 0] = self.Orientation_acc[:, 0] / np.linalg.norm(self.Orientation_acc[:, 0])\n self.Orientation_acc[:, 1] = self.Orientation_acc[:, 1] / np.linalg.norm(self.Orientation_acc[:, 1])\n self.Orientation_acc[:, 2] = self.Orientation_acc[:, 2] / np.linalg.norm(self.Orientation_acc[:, 2])\n\n tmp = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]], float)\n mat = np.matmul(tmp,self.Orientation_acc)\n quat = quaternion.from_rotation_matrix(self.Orientation_acc)\n q = quaternion.as_float_array(quat)\n # --------------------------------------\n self.yaw_a = (math.atan2(2.0 * (q[1] * q[2] - q[0] * q[3]),\n -1 + 2 * (q[0] * q[0] + q[1] * q[1])))\n pitch_a = (-math.asin(2.0 * (q[1] * q[3] + q[0] * q[2])))\n roll_a = (math.atan2(2.0 * (-q[0] * q[1] + q[2] * q[3]),\n -1 + 2 * (q[0] * q[0] + q[1] * q[1])))\n #print(self.yaw_a)\n # -----------------------------------------\n \"\"\"yaw_g = (math.atan2(2.0 * (q_gy[1] *q_gy[2] - q_gy[0] * q_gy[3]),\n -1+2*(q_gy[0] * q_gy[0] + q_gy[1] * q_gy[1])))\n pitch_g = (-math.asin(2.0 * (q_gy[1] * q_gy[3] + q_gy[0] * q_gy[2])))\n roll_g = (math.atan2(2.0 * (-q_gy[0] * q_gy[1] + q_gy[2] * q_gy[3]),\n -1+2*(q_gy[0] * q_gy[0] + q_gy[1] * q_gy[1])))\n #print(math.degrees(roll_a),math.degrees(pitch_a),math.degrees(yaw_a),math.degrees(roll_g),math.degrees(pitch_g),math.degrees(yaw_g))\n #-----------------------------------------\n #q_final = 0.8*self.q+(1-0.8)*self.quat_gy\n #self.q_final = q_final\n #-----------------------------------------\n\n yaw = (math.atan2(2.0 * (q_final[1] *q_final[2] - q_final[0] * q_final[3]),\n -1+2*(q_final[0] * q_final[0] + q_final[1] * q_final[1])))\n pitch = (-math.asin(2.0 * (q_final[1] * q_final[3] + q_final[0] * q_final[2])))\n roll = (math.atan2(2.0 * (-q_final[0] * q_final[1] + q_final[2] * q_final[3]),\n -1+2*(q_final[0] * q_final[0] + q_final[1] * q_final[1])))\n #print(math.degrees(roll),math.degrees(pitch),math.degrees(yaw))\n plt.pause(0.001)\"\"\"",
"def clean_up_sensors_for_actors(self, current_actor_ids: Set[str], renderer):\n # This is not good enough by itself since actors can keep alive sensors that are not in use by an agent\n old_actor_ids = set(self._sensor_states)\n missing_actors = old_actor_ids - current_actor_ids\n\n for aid in missing_actors:\n self.remove_sensors_by_actor_id(aid)\n\n for sensor in self._scheduled_sensors:\n sensor.teardown(renderer=renderer)\n\n self._scheduled_sensors.clear()",
"def cleanup(self):\n plots = self.traverse(lambda x: x, [Plot])\n for plot in plots:\n if not isinstance(plot, (GenericCompositePlot, GenericElementPlot, GenericOverlayPlot)):\n continue\n for stream in set(plot.streams):\n stream._subscribers = [\n (p, subscriber) for p, subscriber in stream._subscribers\n if not util.is_param_method(subscriber) or\n util.get_method_owner(subscriber) not in plots\n ]",
"def rotated_measurement_operators(rotations: List[qtp.Qobj],\n Fs: List[qtp.Qobj]) -> List[List[qtp.Qobj]]:\n return [[U.dag() * F * U for U in rotations] for F in Fs]",
"def _undo_trajectory(self):\n for t in self._traj:\n self._mask.__setitem__(t, 0)",
"def reference_is_rotated(self):\n if (\n np.allclose(self.axis_u, (1, 0, 0))\n and np.allclose(self.axis_v, (0, 1, 0))\n and np.allclose(self.axis_w, (0, 0, 1))\n ):\n return False\n return True",
"def clear_actors(self):\n for renderer in self:\n renderer.clear_actors()",
"def removeRegions(self):\n for i in range(len(self.verticalStripes)):\n for j in range(len(self.horizontalStripes)):\n region = Region(self.verticalStripes[i], self.horizontalStripes[j])\n self.regions.append(region)\n region.removeRegion(self)",
"def test_change_referential_vf_waveform_aper():\n\n tf = 1\n k = 1\n r = 1\n rotation_speed = 60 * k / r\n Time = DataLinspace(\n name=\"time\",\n unit=\"s\",\n initial=0,\n final=tf / (2 * k),\n number=200,\n include_endpoint=False,\n symmetries={\"antiperiod\": 2 * k},\n )\n time = Time.get_values(is_smallestperiod=True)\n Angle = DataLinspace(\n name=\"angle\",\n unit=\"rad\",\n initial=0,\n final=2 * np.pi / (2 * r),\n number=200,\n include_endpoint=False,\n symmetries={\"antiperiod\": 2 * r},\n )\n angle = Angle.get_values(is_smallestperiod=True)\n\n z = DataPattern(name=\"z\", unit=\"m\", values=np.array([0]))\n\n xangle, xtime = np.meshgrid(angle, time)\n\n A0 = 2\n field = A0 * np.cos(2 * np.pi * k * xtime + r * xangle) + 2 * A0 * np.cos(\n 2 * np.pi * 3 * k * xtime + 3 * r * xangle\n )\n\n Field = VectorField(\n name=\"test field\",\n components={\n \"radial\": DataTime(\n name=\"radial test field\",\n symbol=\"X\",\n unit=\"T\",\n axes=[Time, Angle, z],\n values=field[:, :, None],\n ),\n \"tangential\": DataTime(\n name=\"tangential test field\",\n symbol=\"X\",\n unit=\"T\",\n axes=[Time, Angle, z],\n values=field[:, :, None],\n ),\n },\n )\n\n # Change field to rotating referential\n Field_R = Field.change_referential(\n -rotation_speed, is_waveform=True, sym_t_new={\"period\": k}\n )\n\n # Change field back to static referential\n Field_RR = Field_R.change_referential(\n rotation_speed, is_waveform=True, sym_t_new={\"antiperiod\": 2 * k}\n )\n\n field_RR = Field_RR.get_rphiz_along(\n \"time[smallestperiod]\", \"angle[smallestperiod]\"\n )[\"radial\"]\n\n assert_almost_equal(field, field_RR, decimal=5)\n\n if is_show_fig:\n\n Field.plot_2D_Data(\n \"time[smallestperiod]\",\n \"angle[0]\",\n data_list=[Field_RR],\n linestyles=[\"solid\", \"dotted\"],\n )\n\n Field.plot_2D_Data(\n \"angle[smallestperiod]\",\n \"time[0]\",\n data_list=[Field_RR],\n linestyles=[\"solid\", \"dotted\"],\n )\n\n Field.plot_3D_Data(\"time\", \"angle\", is_same_size=True)\n\n Field_R.plot_3D_Data(\"time\", \"angle\", is_same_size=True)\n\n Field_RR.plot_3D_Data(\"time\", \"angle\", is_same_size=True)\n\n Field.plot_3D_Data(\"freqs\", \"wavenumber\", is_same_size=True)\n\n Field_R.plot_3D_Data(\"freqs\", \"wavenumber\", is_same_size=True)\n\n Field_RR.plot_3D_Data(\"freqs\", \"wavenumber\", is_same_size=True)\n\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the reaction zone atom indices by parsing normal mode displacement.
|
def get_rxn_zone_atom_indices(reaction: 'ARCReaction',
job: 'JobAdapter',
) -> List[int]:
freqs, normal_mode_disp = parser.parse_normal_mode_displacement(path=job.local_path_to_output_file,
raise_error=False)
normal_mode_disp_rms = get_rms_from_normal_mode_disp(normal_mode_disp, freqs, reaction=reaction)
num_of_atoms = get_expected_num_atoms_with_largest_normal_mode_disp(normal_mode_disp_rms=normal_mode_disp_rms,
ts_guesses=reaction.ts_species.ts_guesses,
reaction=reaction) \
+ round(reaction.ts_species.number_of_atoms ** 0.25) # Peripheral atoms might get in the way
indices = sorted(range(len(normal_mode_disp_rms)), key=lambda i: normal_mode_disp_rms[i], reverse=True)[:num_of_atoms]
return indices
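
# The index selection above reduces to picking the positions of the num_of_atoms largest
# RMS values; a standalone example with made-up numbers:
normal_mode_disp_rms = [0.05, 0.80, 0.10, 0.65, 0.02]
num_of_atoms = 2
indices = sorted(range(len(normal_mode_disp_rms)),
                 key=lambda i: normal_mode_disp_rms[i],
                 reverse=True)[:num_of_atoms]
print(indices)  # [1, 3] -- the atoms with the largest displacement RMS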
|
[
"def get_atom_indices(self):\n return (range(1, self.natm+1), self)",
"def positions(self):\n return get_positions(as_numpy=True).reshape((self.natom, 3))",
"def find_indices(universe, atoms, molname, natoms):\n indices = []\n atoms = np.array(atoms)\n mol_atoms = universe.atoms[universe.atoms.moltypes == molname]\n n_mols = len(set(mol_atoms.molnums))\n for idx in range(0, n_mols):\n pairs = mol_atoms.indices[atoms + idx * natoms]\n indices.append(pairs)\n return indices",
"def get_positions(grofile, trajfile, atom_range):\n\n\n\n # traj = md.load_trr(trr, top=gro)\n traj = md.load(trajfile, top=grofile)\n\n # positions = []\n # u = MDAnalysis.Universe(gro, trr)\n # for frame in u.trajectory:\n # positions+= [frame.positions]\n # positions = np.array(positions)[:,atom_range[0]:atom_range[1]]\n \n \n # residues = traj.topology.residues\n\n # atoms = list(traj.topology.atoms) #[:num_atoms*nmol]\n \n # atoms = np.compress([not ('-W' in str(atom)) and not ('ION' in str(atom)) for atom in atoms], atoms, axis=0)\n \n\n positions = traj.xyz[:,atom_range[0]:atom_range[1]]\n\n\n return positions",
"def _get_nem_zones(self) -> tuple:\r\n\r\n # Extract nem zones from existing generators dataset\r\n zones = tuple(self.existing_units.loc[:, ('PARAMETERS', 'NEM_ZONE')].unique())\r\n\r\n # There should be 16 zones\r\n assert len(zones) == 16, 'Unexpected number of NEM zones'\r\n\r\n return zones",
"def read_pos(in_name):\n atoms = read_xyz(in_name)[-1]\n\n return atoms",
"def offset_to_shard(self):\n unravel_index = self.mesh.unravel_index()\n locations = [None] * self.mesh.size\n for offset, mesh_loc in unravel_index.items():\n loc = []\n for dim_sharding in self.sharding_specs:\n if dim_sharding == UNSHARDED:\n loc.append(0)\n else:\n loc.append(mesh_loc[dim_sharding])\n locations[offset] = tuple(loc)\n\n return locations",
"def get_indices_section(self):\n return np.unique(self.sv_map.volume_surf_coordinates['triangles'])",
"def actuator_coords(self):\n\n mask = np.ones((11, 11), np.bool)\n for i in range(0, 3):\n for j in range(3 - i):\n mask[i, j] = False\n mask = np.bitwise_and(mask, mask[::-1])\n mask = np.bitwise_and(mask, mask[:, ::-1])\n rs = np.stack(np.where(mask)).T - 5\n return rs",
"def get_nucleic(self):\n with open(self.filename) as pdb:\n atoms = [atom(line) for line in pdb if re.search\n ('(^ATOM)\\s*\\S*\\s*\\S*\\s*'\n '(DA5|DA3|DA|DT5|DT3|DT|DG5|DG3|DG|DC5|DC3|DC)', line)]\n return atoms",
"def get_residue_positions(array, indices):\n starts = get_residue_starts(array, add_exclusive_stop=True)\n return get_segment_positions(starts, indices)",
"def get_ctcf_indices(self):\n\n \"gets CTCF positions\"\n ctcf_ob = TFChip(cfg, chr)\n data = ctcf_ob.get_ctcf_data()\n data = data.filter(['start'], axis=1)\n\n \"converts to cumulative indices\"\n cum_pos = get_cumpos(self.cfg, self.chr)\n data[\"start\"] = data[\"start\"] + cum_pos\n indices = np.array(data[\"start\"])\n return indices",
"def dimension_positions(self):\n return [dim.position for dim in self]",
"def get_chemical_indices(self):\n return self.indices",
"def listof_positions(self):\n l = []\n for rnum in range(self.rnum_min, self.rnum_max+1):\n if rnum in self._pieces:\n l.append(rnum)\n return l",
"def get_indices(self, modes: str, *indices: int) -> Union[int, List[int]]:\n logger = logging.getLogger(__name__)\n output = []\n for mode, index in zip(reversed(modes), indices):\n\n logger.warning(\"Getting value %r: %d\", mode, index)\n if mode == \"0\":\n index = self[index]\n logger.warning(\" from position: %d\", index)\n elif mode == \"1\":\n pass\n elif mode == \"2\":\n index = self[index]+self.offset\n logger.warning(\" using relative base %d\", self.offset)\n logger.warning(\" from position: %d\", index)\n\n output.append(index)\n logger.warning(\" referencing value: %d\", self[index])\n\n if len(output) == 1:\n output = output[0]\n return output",
"def get_mass_indices(self, masses: Sequence[str]):\n order_dict = dict()\n for i, v in enumerate(self.channel_masses):\n order_dict.update({v: i})\n return [order_dict[m] for m in masses]",
"def number_of_atoms(dxyz):\n natms_pattern = maybe(WHITESPACES).join(\n [STRING_START, named_capturing(UNSIGNED_INTEGER, 'natms'), LINE_END])\n match = re.search(natms_pattern, dxyz, re.MULTILINE)\n assert match\n gdct = match.groupdict()\n natms = int(gdct['natms'])\n return natms",
"def get_pos(token, morph):\n return [c.name for c in pos if c.match(token, morph)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the root mean squares of the normal mode displacements. Use atom mass weights if ``reaction`` is given.
|
def get_rms_from_normal_mode_disp(normal_mode_disp: np.ndarray,
freqs: np.ndarray,
reaction: Optional['ARCReaction'] = None,
) -> List[float]:
mode_index = get_index_of_abs_largest_neg_freq(freqs)
nmd = normal_mode_disp[mode_index]
masses = reaction.get_element_mass() if reaction is not None else [1] * len(nmd)
rms = list()
for i, entry in enumerate(nmd):
rms.append(((entry[0] ** 2 + entry[1] ** 2 + entry[2] ** 2) ** 0.5) * masses[i] ** 0.55)
return rms
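
# Worked example of the per-atom weighting above with made-up displacement vectors and
# masses (no ARCReaction is available here, so masses are supplied by hand):
import numpy as np

nmd = np.array([[0.1, 0.0, 0.0],    # displacement of atom 0 in the imaginary mode
                [0.3, 0.4, 0.0]])   # displacement of atom 1
masses = [1.008, 12.011]            # e.g., H and C

rms = [float(np.linalg.norm(entry)) * mass ** 0.55 for entry, mass in zip(nmd, masses)]
print(rms)  # atom 1 dominates: |(0.3, 0.4, 0.0)| = 0.5, further boosted by 12.011 ** 0.55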
|
[
"def rms(x):\n import numpy as np\n return np.sqrt(np.mean(x*x))",
"def compute_norm_mode(mode, molecule):\n norm = 0.0\n for iatom, displacement in enumerate(mode.displacements):\n for xyz in displacement:\n norm += xyz**2 * molecule.masses[iatom]/U_TO_AMU\n return np.sqrt(norm)",
"def mean(self):\n nelem = 0\n sum = 0.\n for win in self._data:\n nelem += win.size\n sum += win.sum()\n return sum / float(nelem)",
"def _mean(self):\n mat = self._factorize(self.matrix, self.xdef)\n mat = self._rdc_x(mat, self.xdef)\n ysects = self._by_ysect(mat, self.ydef)\n return np.expand_dims([np.nansum(ymat[:, 0] /\n np.nansum(ymat[:, -1]))\n for ymat in ysects], 1).T",
"def calc_mean_std(self):\n\n # get ob_next sets from memory\n memory_len = len(self._memory)\n all_obs_next = []\n col_len = len(self._memory[memory_len - 1].obs_nex)\n \n for i in range(memory_len):\n all_obs_next.append(self._memory[i].obs_nex)\n \n # cacualte average and standard diviation for each features \n return (np.mean(np.array(all_obs_next).reshape(memory_len, \n col_len).transpose(), axis=1), \n np.std(np.array(all_obs_next).reshape(memory_len, \n col_len).transpose(), axis=1))",
"def compute_mean_norms(self,mean_norms):\n res = []\n for mean_norm in mean_norms:\n a = mean_norm.values()\n res.append(np.dot(a,a))\n return res",
"def eval_rmsre(dataset,xc,psp):\n rmsre = 0.\n N = 0\n for mol in build_computed_mol_list(dataset):\n sre = eval_sre_molecule(dataset,mol,xc,psp)\n if not (sre is None):\n rmsre+=sre\n N+=1\n if N>0 : rmsre = rmsre/N\n return np.sqrt(rmsre)",
"def flatness(self):\n asArray = self.view(numpy.ndarray)\n geometricMean = scipy.stats.mstats.gmean(abs(asArray), axis=0)\n arithmeticMean = self.mean()\n\n return geometricMean / arithmeticMean",
"def find_norm_mean():\n all_attrs = attrs.index.values\n count = .0\n norm_sum = .0\n\n for a in all_attrs:\n new_norm = np.linalg.norm(attrs.loc[a].as_matrix())\n norm_sum += new_norm\n count += 1\n norm_sum /= count\n return norm_sum",
"def mean_euclidean_distance(self):\n rval = sum([n.polar.rho for n in self._nodes]) / len(self._nodes)\n return rval",
"def averagemass(self):\n if self._averagemass is None:\n self.loadextendedcompoundinfo()\n return self._averagemass",
"def normal_modes(masses, mass_weighted_normal_modes):\n list_m = []\n normal_modes = np.zeros_like(mass_weighted_normal_modes)\n nions = np.size(masses)\n for a in range(nions):\n x = 1.0 / math.sqrt(masses[a])\n atom = [x, x, x]\n list_m.append(atom)\n # end of loop of ions\n array_m = np.array(list_m)\n for index, mode in enumerate(mass_weighted_normal_modes):\n normal_modes[index] = mode * array_m\n return normal_modes",
"def meanSolar():",
"def _normal_scores_average(self):\n average_scores = npi.group_by(self.normal_score_matrix[:, 0], self.normal_score_matrix[:, 3], np.mean)\n\n return average_scores",
"def oscillator_strengths(normal_modes, born_charges):\n # Each mode has a 3x3 oscillator strength\n nmodes = np.size(normal_modes, 0)\n oscillator_strengths = np.zeros((nmodes, 3, 3))\n for imode, mode in enumerate(normal_modes):\n # We calculate the dipole induced by displacement of each atom along the normal mode\n z_imode = np.zeros(3)\n for atom, born in enumerate(born_charges):\n # atom is the atom index\n # born contains the polarisability tensor [a1x a1y a1z] [a2x a2y a2z] [a3x a3y a3z]]\n # where 1, 2, 3 are the directions of the field and x, y, z are the coordinates of the atom\n z_imode = z_imode + np.dot(born, mode[atom]) # the displacement is an array [x, y, z]\n # end for\n # The oscillator strength matrix is the outer product of z\n oscillator_strengths[imode] = np.outer(z_imode, z_imode)\n # end for\n return oscillator_strengths",
"def norm(self) -> float:\n return np.sqrt(self.inner_product(self).real)",
"def get_expected_num_atoms_with_largest_normal_mode_disp(normal_mode_disp_rms: List[float],\n ts_guesses: List['TSGuess'],\n reaction: Optional['ARCReaction'] = None,\n ) -> int:\n num_of_atoms = reaction.get_number_of_atoms_in_reaction_zone() if reaction is not None else None\n if num_of_atoms is not None:\n return num_of_atoms\n families = list(set([tsg.family for tsg in ts_guesses]))\n num_of_atoms = max([get_rxn_normal_mode_disp_atom_number(rxn_family=family,\n reaction=reaction,\n rms_list=normal_mode_disp_rms,\n )\n for family in families])\n return num_of_atoms",
"def get_mean_errors(self):\n return self.equation_nodes[\"mean\"]",
"def _get_mean_simu_moments(self):\n moment_sim_list = []\n for ix in range(self.X_t.shape[1]):\n moment_sim = self._get_simu_moments_once(self.X_t[:, ix], self.U_t[:, ix], self.V_t[:, ix], self.rspread_t[:, ix]) \n moment_sim_list.append(moment_sim) \n\n # Compute mean of moments over all simulations\n mean_moments = np.array(moment_sim_list).mean(axis=0)\n return mean_moments"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the index of the negative frequency with the largest absolute value (i.e., the most negative frequency).
|
def get_index_of_abs_largest_neg_freq(freqs: np.ndarray) -> Optional[int]:
if not len(freqs) or all(freq > 0 for freq in freqs):
return None
return list(freqs).index(min(freqs))
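
# Quick check of the helper above on illustrative frequency arrays:
import numpy as np

print(get_index_of_abs_largest_neg_freq(np.array([-1523.4, -12.8, 56.1, 844.0])))  # 0
print(get_index_of_abs_largest_neg_freq(np.array([10.0, 20.0])))                   # None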
|
[
"def argmax(array: list) -> int:\n index, value = max(enumerate(array), key=lambda x: x[1])\n return index",
"def indexOfMax(list):\r\n max = -np.Infinity\r\n index = 0\r\n i = 0\r\n for value in list:\r\n if value > max:\r\n max = value\r\n index = i\r\n i += 1\r\n return index",
"def max_frequency(self):\n max = 0\n for term, frequency in self.vocabulary.items() :\n if frequency > max :\n max = frequency\n return max",
"def find_largest(in_array, n_largest):\n new_arr = np.copy(in_array)\n shape = np.shape(new_arr)\n indices = []\n while len(indices) < n_largest:\n flat_index = np.argmax(new_arr)\n folded_index = np.unravel_index(flat_index,shape)\n indices.append(folded_index)\n new_arr[folded_index] = 0\n return indices",
"def max_confidence_index(self) -> int:\n return self.probabilities.index(self.max_confidence)",
"def most_freq(seq):\n return Counter(seq).most_common(1)[0][0]",
"def find_longest_negative_sequence(arr):\n sequences = []\n sequence = 0\n indexes = []\n first_index = None\n last_index = None\n for i, element in enumerate(arr): \n# if element < 0 and i > limit:\n if element < 0:\n if first_index == None: first_index = i\n sequence += 1\n elif sequence > 0:\n last_index = i\n sequences.append(sequence)\n indexes.append((first_index, last_index))\n sequence = 0\n first_index = None\n \n max_sequence = max(sequences)\n max_index = sequences.index(max_sequence)\n return indexes[max_index]",
"def findTheHighestWithWorkload(self):\n hidx = -1\n # Find first row with non-zero entry at 0 = remaining workload\n for i in range(self.n):\n if self.statusTable[i][0] != 0:\n hidx = i\n break\n return hidx",
"def argmax_of_array(array):\n\n # Simple but does not support random selection in the case of more than one largest values. \n ind = int(np.argmax(array))\n \n return ind",
"def best_idx(self):\n target = self.data_matrix[-1]\n best_idx = -1\n diff_val = 999999999\n\n # iterate over all but the last data column\n for i in range(len(self.data_matrix)-1):\n col = self.data_matrix[i]\n diff = self._diff(col, target)\n\n if diff < diff_val:\n diff_val = diff\n best_idx = i\n\n return best_idx",
"def largest_int(number_list):\n is_largest = []\n for item in number_list:\n is_largest = sorted(number_list, reverse=True)\n #apologies if this is too off-book, had a hard time\n #remembering how to do a reverse sort and looked it up.\n try:\n return is_largest[0]\n except IndexError:\n return None\n #returns index 0 of a reverse-numerically-sorted list",
"def mode(self):\n\n values = self.frequencies\n highest_frequency = max(values.values())\n if len([v for v in values if values[v] == highest_frequency]) == 1:\n return values.most_common()[0][0]",
"def argmax(l):\n return max(enumerate(l), key=lambda x: x[1])[0]",
"def get_index_of_largest_feature_map(self) -> int:\n return int(np.argmin(self.strides))",
"def largest_element(arr: List[int]) -> int:\n return(max(arr))",
"def largestIndices(array, n):\n # flatten the array\n flatArray = array.flatten()\n indices = np.argpartition(flatArray, -n)[-n:]\n indices = indices[np.argsort(-flatArray[indices])]\n return np.unravel_index(indices, array.shape)",
"def max_raw_frequency(terms):\n max = 0\n for term, frequency in terms:\n if frequency > max:\n max = frequency\n\n return max",
"def most_similar_image():\n most_similar_index = -1\n return most_similar_index",
"def largest_indices(x, K):\n indices = jnp.argsort(jnp.abs(x))\n return indices[:-K-1:-1]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the number of atoms that are expected to have the largest normal mode displacement for the TS (considering all families). This is a wrapper for ``get_rxn_normal_mode_disp_atom_number()``. It is theoretically possible that TSGuesses of the same species will belong to different families.
|
def get_expected_num_atoms_with_largest_normal_mode_disp(normal_mode_disp_rms: List[float],
ts_guesses: List['TSGuess'],
reaction: Optional['ARCReaction'] = None,
) -> int:
num_of_atoms = reaction.get_number_of_atoms_in_reaction_zone() if reaction is not None else None
if num_of_atoms is not None:
return num_of_atoms
families = list(set([tsg.family for tsg in ts_guesses]))
num_of_atoms = max([get_rxn_normal_mode_disp_atom_number(rxn_family=family,
reaction=reaction,
rms_list=normal_mode_disp_rms,
)
for family in families])
return num_of_atoms
|
[
"def get_rxn_normal_mode_disp_atom_number(rxn_family: Optional[str] = None,\n reaction: Optional['ARCReaction'] = None,\n rms_list: Optional[List[float]] = None,\n ) -> int:\n default = 3\n if rms_list is not None \\\n and (not isinstance(rms_list, list) or not all(isinstance(entry, float) for entry in rms_list)):\n raise TypeError(f'rms_list must be a non empty list, got {rms_list} of type {type(rms_list)}.')\n family = rxn_family\n if family is None and reaction is not None and reaction.family is not None:\n family = reaction.family.label\n if family is None:\n logger.warning(f'Cannot deduce a reaction family for {reaction}, assuming {default} atoms in the reaction zone.')\n return default\n content = read_yaml_file(os.path.join(ARC_PATH, 'data', 'rxn_normal_mode_disp.yml'))\n number_by_family = content.get(rxn_family, default)\n if rms_list is None or not len(rms_list):\n return number_by_family\n entry = None\n rms_list = rms_list.copy()\n for i in range(number_by_family):\n entry = max(rms_list)\n rms_list.pop(rms_list.index(entry))\n if entry is not None:\n for rms in rms_list:\n if (entry - rms) / entry < 0.12:\n number_by_family += 1\n return number_by_family",
"def get_max_mag_observed(self):\r\n\t\tmag_bin_centers = self.get_magnitude_bin_centers()\r\n\t\tMmax = mag_bin_centers[np.array(self.occurrence_rates) > 0][-1]\r\n\t\treturn Mmax",
"def getLargestNodeNumber():\n arcpy.env.workspace = WORKING_GDB\n arcpy.env.overwriteOutput = True\n arcpy.Statistics_analysis(SOURCE_NETWORK_SHAPEFILE,'node_number_stats',[['A','MAX'],['B','MAX']],'')\n rows = arcpy.SearchCursor('node_number_stats')\n for row in rows: #only one row\n max_node = max(row.MAX_A,row.MAX_B)\n return max_node",
"def get_largest_ring_size(self, molecule):\n cycle_list = molecule.GetRingInfo().AtomRings()\n if cycle_list:\n cycle_length = max([len(j) for j in cycle_list])\n else:\n cycle_length = 0\n return cycle_length",
"def getHighestOccupancyLocation(self, anAtom):\n occupancy = anAtom.occupancy\n location = anAtom.altLoc\n for atom in self.atoms.values():\n if anAtom.model == atom.model:\n if anAtom.resSeq == atom.resSeq:\n if atom.altLoc != anAtom.altLoc:\n if atom.altLoc != ' ' and atom.occupancy > anAtom.occupancy:\n occupancy = atom.occupancy\n location = atom.altLoc\n return location",
"def max_nosso_numero(self):\n return self._max_nosso_numero",
"def RandomMaxMcGroupCount(self):\n return self._RandomMaxMcGroupCount",
"def max() -> int:\n return max(*[x.value for x in IntentValue._member_map_.values()])",
"def get_max_distance_residue(self, residue):\n row = self.index_map[residue]\n maxdist_index = Numeric.argmax(self.dist_matrix[row])\n maxdist_residue = self.reverse_index_map[maxdist_index]\n return maxdist_residue",
"def mode(self):\n\n values = self.frequencies\n highest_frequency = max(values.values())\n if len([v for v in values if values[v] == highest_frequency]) == 1:\n return values.most_common()[0][0]",
"def _get_max_item_count(self):\n return 7 - len(self.constants)",
"def get_max_distance(self) -> int:\n return max(self.graph.get_node_distances(self.origin), key=itemgetter(0))[0]",
"def most_repeated(self) -> xr.DataArray:\n if not self._is_memoized('_most_repeated'):\n self._most_repeated = ss.mode(self._ds.values.flatten())[0][0]\n return self._most_repeated",
"def get_mass_max(self):\n return self._mass_max",
"def compute_atom_count(self):\n self.frame['atom_count'] = self.atom.cardinal_groupby().size()",
"def get_most(self, game, app):\n dots = {}\n kinds = app.get_dot_kinds()\n for kind in kinds:\n dots[kind] = []\n for pos in app.get_grid():\n if game.grid[pos].get_dot().get_kind() == kind:\n dots[kind].append(pos)\n most_kind, most_value = max(dots.items(), key = lambda x: len(set(x[1])))\n return most_kind",
"def max(self):\n return __class__.MAX_POS",
"def calculate_max_seq_num(self):\n return ceildiv(\n len(self._output),\n WORDS_PER_FULL_PACKET_WITH_SEQUENCE_NUM * BYTES_PER_WORD)",
"def _getMaxCellIds(self):\n maxCellId = 0\n for node in self.grid.iter():\n maxLocalCellId = max(node.get('grid').returnParameter('cellIDs').keys())\n maxCellId = maxLocalCellId if maxLocalCellId > maxCellId else maxCellId\n\n return maxCellId"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the number of atoms expected to have the largest normal mode displacement per family. If ``rms_list`` is given, also include atoms with an RMS value close to the lowest RMS still considered.
|
def get_rxn_normal_mode_disp_atom_number(rxn_family: Optional[str] = None,
reaction: Optional['ARCReaction'] = None,
rms_list: Optional[List[float]] = None,
) -> int:
default = 3
if rms_list is not None \
and (not isinstance(rms_list, list) or not all(isinstance(entry, float) for entry in rms_list)):
raise TypeError(f'rms_list must be a non empty list, got {rms_list} of type {type(rms_list)}.')
family = rxn_family
if family is None and reaction is not None and reaction.family is not None:
family = reaction.family.label
if family is None:
logger.warning(f'Cannot deduce a reaction family for {reaction}, assuming {default} atoms in the reaction zone.')
return default
content = read_yaml_file(os.path.join(ARC_PATH, 'data', 'rxn_normal_mode_disp.yml'))
number_by_family = content.get(rxn_family, default)
if rms_list is None or not len(rms_list):
return number_by_family
entry = None
rms_list = rms_list.copy()
for i in range(number_by_family):
entry = max(rms_list)
rms_list.pop(rms_list.index(entry))
if entry is not None:
for rms in rms_list:
if (entry - rms) / entry < 0.12:
number_by_family += 1
return number_by_family
|
[
"def get_expected_num_atoms_with_largest_normal_mode_disp(normal_mode_disp_rms: List[float],\n ts_guesses: List['TSGuess'],\n reaction: Optional['ARCReaction'] = None,\n ) -> int:\n num_of_atoms = reaction.get_number_of_atoms_in_reaction_zone() if reaction is not None else None\n if num_of_atoms is not None:\n return num_of_atoms\n families = list(set([tsg.family for tsg in ts_guesses]))\n num_of_atoms = max([get_rxn_normal_mode_disp_atom_number(rxn_family=family,\n reaction=reaction,\n rms_list=normal_mode_disp_rms,\n )\n for family in families])\n return num_of_atoms",
"def get_largest_ring_size(self, molecule):\n cycle_list = molecule.GetRingInfo().AtomRings()\n if cycle_list:\n cycle_length = max([len(j) for j in cycle_list])\n else:\n cycle_length = 0\n return cycle_length",
"def mue_finder(fz_list, force_list):\n max_force = (max(min(force_list), max(force_list), key=abs))\n mue = max_force / fz_list[force_list.index(max_force)]\n return abs(mue)",
"def get_max_mag_observed(self):\r\n\t\tmag_bin_centers = self.get_magnitude_bin_centers()\r\n\t\tMmax = mag_bin_centers[np.array(self.occurrence_rates) > 0][-1]\r\n\t\treturn Mmax",
"def get_mean_size(smiles_list):\n size = []\n for smiles in smiles_list:\n mol = Chem.MolFromSmiles(smiles)\n num_atoms = mol.GetNumAtoms()\n size.append(num_atoms)\n\n return np.mean(size), np.std(size)",
"def findMaxGram(query_list):\n if len(query_list) > 10:\n max_gram = 10\n else:\n max_gram = len(query_list)\n return max_gram",
"def get_rms_from_normal_mode_disp(normal_mode_disp: np.ndarray,\n freqs: np.ndarray,\n reaction: Optional['ARCReaction'] = None,\n ) -> List[float]:\n mode_index = get_index_of_abs_largest_neg_freq(freqs)\n nmd = normal_mode_disp[mode_index]\n masses = reaction.get_element_mass() if reaction is not None else [1] * len(nmd)\n rms = list()\n for i, entry in enumerate(nmd):\n rms.append(((entry[0] ** 2 + entry[1] ** 2 + entry[2] ** 2) ** 0.5) * masses[i] ** 0.55)\n return rms",
"def max_fold_enrichment (self):\n peaks = self.peaks\n chrs = peaks.keys()\n chrs.sort()\n x = 0\n for chrom in chrs:\n if peaks[chrom]:\n m = max([i[7] for i in peaks[chrom]])\n if m>x:\n x=m\n return x",
"def calc_spclmbs_max_dist(self):\n # For brevity, \"dimensionless\" prefix omitted from \"position\" and \"motive\" variable names.\n # I am using the suffix \"vr\" to denote \"Virtual cRitical point.\"\n co_motive_vr = self[\"Collector\"][\"nea\"]/ \\\n (physical_constants[\"boltzmann\"] * self[\"Emitter\"][\"temp\"])\n co_position_vr = self[\"motive_data\"][\"dps\"].get_position(co_motive_vr,branch=\"rhs\")\n \n spclbs_max_dist = (co_position_vr * self[\"Emitter\"][\"temp\"]**(3./4)) / \\\n (self[\"Emitter\"].calc_saturation_current_density()**(1./2)) * \\\n ((physical_constants[\"permittivity0\"]**2 * physical_constants[\"boltzmann\"]**3)/ \\\n (2*np.pi*physical_constants[\"electron_mass\"]*physical_constants[\"electron_charge\"]**2))**(1./4)\n \n return spclbs_max_dist",
"def max_spells_cast(self):\r\n return self.data.mostSpellsCast",
"def _get_res_max_z(res: ParmedRes) -> float:\n return max((a.xz for a in res.atoms))",
"def get_max_score(friend_score_list: List[Tuple[str, int]]) -> int:\n\n max = 0\n for t in friend_score_list:\n if t[1] > max:\n max = t[1]\n return max",
"def find_max_persistence(self, persistence_list):\n max_per = 0\n for obj in persistence_list:\n if obj.persistence_count > max_per:\n max_per = obj.persistence_count\n return max_per",
"def max_items_by_mass(self):\n\n if not self.trip_data:\n return np.NaN\n\n i = np.argmax(self.cargo_mass_list)\n return self.trip_data[i].items",
"def max_ranks_per_node(rank_spec):",
"def maxDistEMD_test():\n\n swcList1 = [\"tests/testFiles/toy2.swc\",\n \"tests/testFiles/toy3.swc\"]\n emd_val = maxDistEMD(swcList1)\n assert np.allclose(emd_val, 35.99999099999999)",
"def eval_rmsre(dataset,xc,psp):\n rmsre = 0.\n N = 0\n for mol in build_computed_mol_list(dataset):\n sre = eval_sre_molecule(dataset,mol,xc,psp)\n if not (sre is None):\n rmsre+=sre\n N+=1\n if N>0 : rmsre = rmsre/N\n return np.sqrt(rmsre)",
"def score(motifs):\n columns = [''.join(seq) for seq in zip(*motifs)]\n max_count = sum([max([c.count(nucleotide) for nucleotide in 'ACGT']) for c in columns])\n\n return len(motifs[0])*len(motifs) - max_count",
"def RandomMaxMcGroupCount(self):\n return self._RandomMaxMcGroupCount"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check whether two lists of bonds are equal.
|
def _check_equal_bonds_list(bonds_1: List[Tuple[int, int]],
bonds_2: List[Tuple[int, int]],
) -> bool:
if len(bonds_1) != len(bonds_2):
return False
if all(bond in bonds_2 for bond in bonds_1):
return True
return False
|
[
"def test_equivalent(self):\n for order1 in self.orderList:\n for order2 in self.orderList:\n bond1 = Bond(None, None, order=order1)\n bond2 = Bond(None, None, order=order2)\n if order1 == order2:\n self.assertTrue(bond1.equivalent(bond2))\n self.assertTrue(bond2.equivalent(bond1))\n else:\n self.assertFalse(bond1.equivalent(bond2))\n self.assertFalse(bond2.equivalent(bond1))",
"def has_same_bonds(self, other_atoms):\n if len(self) != len(other_atoms):\n return False\n if len(self.bonds) != len(other_atoms.bonds):\n return False\n for (i, atom) in enumerate(self.atoms):\n other = other_atoms.atoms[i]\n # print(\"{}={}\".format(i, atom.index))\n atom_neighbors = {n.index for n in atom.neighbors}\n other_neighbors = {n.index for n in other.neighbors}\n # print(atom_neighbors, other_neighbors)\n if atom_neighbors == other_neighbors:\n continue\n else:\n return False\n return True",
"def equal_lists(list_a: Union[list, np.ndarray], list_b: Union[list, np.ndarray]):\n return len(list_a) == len(list_b) and all(\n a == b or np.isclose(a, b, equal_nan=True) for a, b in zip(list_a, list_b)\n )",
"def same_bond_topology(bt1, bt2):\n natoms = len(bt1.atom)\n if len(bt2.atom) != natoms:\n return False\n nbonds = len(bt1.bond)\n\n if len(bt2.bond) != nbonds:\n return False\n for i, t1 in enumerate(bt1.atom):\n if t1 != bt2.atom[i]:\n return False\n for i, b1 in enumerate(bt1.bond):\n b2 = bt2.bond[i]\n if b1.atom_a != b2.atom_a:\n return False\n if b1.atom_b != b2.atom_b:\n return False\n if b1.bond_type != b2.bond_type:\n return False\n return True",
"def are_equivalent_pyxb(a_pyxb, b_pyxb):\n return are_equivalent(\n serialize_for_transport(a_pyxb), serialize_for_transport(b_pyxb)\n )",
"def __identical_list(list1: list, list2: list) -> bool:\n\n return (len(set(list1) - set(list2)) == 0 and len(set(list2) - set(list1)) == 0)",
"def test_equivalent(self):\n for index1, element1 in enumerate(element_list[0:10]):\n for index2, element2 in enumerate(element_list[0:10]):\n atom1 = Atom(element=element1, radical_electrons=1, charge=0, label='*1', lone_pairs=0)\n atom2 = Atom(element=element2, radical_electrons=1, charge=0, label='*1', lone_pairs=0)\n if index1 == index2:\n self.assertTrue(atom1.equivalent(atom2))\n self.assertTrue(atom2.equivalent(atom1))\n else:\n self.assertFalse(atom1.equivalent(atom2))\n self.assertFalse(atom2.equivalent(atom1))",
"def _eq__list(self, other):\n\n if self._list_length() != other._list_length():\n return False\n\n elems1, tail1 = self._list_decompose()\n elems2, tail2 = other._list_decompose()\n\n if tail1 != tail2:\n return False\n else:\n for e1, e2 in zip(elems1, elems2):\n if e1 != e2:\n return False\n return True",
"def has_same_info(B, A):\n # This is basically the same as \"less specific\", but easier to reason with\n n_values_A = sum((0 if c.is_empty else 1 for c in A))\n n_values_B = sum((0 if c.is_empty else 1 for c in B))\n if n_values_A > n_values_B:\n return False\n\n for ca, cb in zip(A, B):\n if ca.is_empty:\n continue\n if not ca == cb:\n return False\n return True",
"def test_conserve(symbs_dict):\n return all(\n symbs_dict[i] == symbs_dict[j]\n for i, j in zip(ins, outs)\n )",
"def tibbles_are_equivalent(A, B):\n \n A_copy = A.copy()\n B_copy = B.copy()\n \n Atib = canonicalize_tibble(A_copy)\n Btib = canonicalize_tibble(B_copy)\n \n \n return Atib.equals(Btib)",
"def two_list_should_be_same(list1, list2): \n for i in range (0, len(list1)):\n if (list1[i] != '' and list1[i] != []):\n if list1[i] != list2[i]:\n print 'list1[i]',list1[i],'!=','list2[i]',list2[i]\n return 'success'",
"def _check_all_close(tensors1, tensors2) -> bool:\n assert len(tensors1) == len(tensors2), \\\n 'Must pass in same number of tensors to check if they are equal'\n equal = True\n for t1, t2 in zip(tensors1, tensors2):\n equal = equal and torch.allclose(t1, t2)\n return equal",
"def members_are_equal(cls, point_1, point_2):\n return point_1 == point_2",
"def bonds_compatible(self, mapping):\n # Get the bonds to the already mapped graph\n sub_atom_bonds = set(self.sub.atoms[mapping[0]].bonds).intersection(self.sub_atoms_mapped)\n master_atom_bonds = set(self.master.atoms[mapping[1]].bonds).intersection(self.master_atoms_mapped)\n # Convert the sub atoms to master atoms\n master_atom_bonds_from_sub = set(self.sub_to_master(atom) for atom in sub_atom_bonds)\n\n return master_atom_bonds == master_atom_bonds_from_sub",
"def test_eq_two_lists_same() -> None:\n ll1 = setup_linked_list([108, 148, 165])\n ll2 = setup_linked_list([108, 148, 165])\n\n assert ll1 == ll2",
"def test_equality(self):\n self.assertEqual(self.bond1, self.bond1)\n self.assertNotEqual(self.bond1, self.bond2)\n self.assertNotEqual(self.bond1, self.bond3)\n self.assertNotEqual(self.bond1, self.bond4)",
"def __eq__(self, other):\n return self._pairs == other._pairs",
"def are_equal_pyxb(a_pyxb, b_pyxb):\n return are_equal_xml(a_pyxb.toxml(\"utf-8\"), b_pyxb.toxml(\"utf-8\"))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check that the number of imaginary frequencies makes sense. Theoretically, a TS should only have one "large" imaginary frequency; however, additional imaginary frequencies are allowed if they are very small in magnitude. This method does not consider the normal mode displacement check.
|
def check_imaginary_frequencies(imaginary_freqs: Optional[List[float]]) -> bool:
if imaginary_freqs is None:
# Freqs haven't been calculated for this TS guess, do consider it as an optional candidate.
return True
if len(imaginary_freqs) == 0:
return False
if len(imaginary_freqs) == 1 \
and LOWEST_MAJOR_TS_FREQ < abs(imaginary_freqs[0]) < HIGHEST_MAJOR_TS_FREQ:
return True
else:
return len([im_freq for im_freq in imaginary_freqs if LOWEST_MAJOR_TS_FREQ < abs(im_freq) < HIGHEST_MAJOR_TS_FREQ]) == 1
|
[
"def is_real(self):\n return self.degrees().count(2) == self.number_of_irreducible_components()",
"def has_complex_result(self):\n tfq_sup = self._model.metadata.time_freq_support\n if not tfq_sup:\n return False\n if tfq_sup.complex_frequencies == None:\n return False\n return True",
"def _is_complex(input):\n return input.shape[-1] == 2",
"def test_isSignificantlyComplex(self):\n r = Rates([0,0,0.2j,0,0,0,0,0,0], self.abc_pairs)\n assert r.isSignificantlyComplex()\n assert r.isSignificantlyComplex(0.01)\n assert not r.isSignificantlyComplex(0.2)\n assert not r.isSignificantlyComplex(0.3)\n \n r = Rates([0,0,0.1,0,0,0,0,0,0], self.abc_pairs)\n assert not r.isSignificantlyComplex()\n assert not r.isSignificantlyComplex(1e-30)\n assert not r.isSignificantlyComplex(1e3)",
"def _is_complex(data):\n return (NUMPY and numpy.iscomplex(data).any()) or (isinstance(data, complex))",
"def iterate_fake_complex(c_real, c_imag, max_iters):\n real = 0.0\n imag = 0.0\n for step in range(max_iters):\n real2 = real * real\n imag2 = imag * imag\n if real2 + imag2 > 4.0:\n break\n imag = 2 * real * imag + c_imag\n real = real2 - imag2 + c_real\n return step",
"def _check_for_anisotropy(self):\n types = [type(self.sigma_T), type(self.sigma_S), type(self.sigma_G)]\n\n if (list in types) or (np.ndarray in types):\n self.is_anisotropic = True\n\n if type(self.sigma_G) in [list, np.ndarray]:\n if len(self.sigma_G) != 3:\n raise ValueError(\"Conductivity vector but not with size 3\")\n self.sigma_G = np.array(self.sigma_G)\n else:\n self.sigma_G = np.array([self.sigma_G, self.sigma_G, self.sigma_G])\n if type(self.sigma_T) in [list, np.ndarray]:\n if len(self.sigma_T) != 3:\n raise ValueError(\"Conductivity vector but not with size 3\")\n self.sigma_T = np.array(self.sigma_T)\n else:\n self.sigma_T = np.array([self.sigma_T, self.sigma_T, self.sigma_T])\n\n if type(self.sigma_S) in [list, np.ndarray]:\n if len(self.sigma_S) != 3:\n raise ValueError(\"Conductivity vector but not with size 3\")\n self.sigma_S = np.array(self.sigma_S)\n else:\n self.sigma_S = np.array([self.sigma_S, self.sigma_S, self.sigma_S])\n \n self._anisotropic_saline_scaling()\n if (self.sigma_G[0] == self.sigma_G[1] == self.sigma_G[2]) and \\\n (self.sigma_T[0] == self.sigma_T[1] == self.sigma_T[2]) and \\\n (self.sigma_S[0] == self.sigma_S[1] == self.sigma_S[2]):\n print \"Isotropic conductivities can be given as scalars.\" \n else:\n self.is_anisotropic = False",
"def is_real_num(X):\n def each_elem_true(x):\n try:\n float(x)\n return not (np.isnan(x) or np.isinf(x))\n except:\n return False\n f = np.vectorize(each_elem_true)\n return f(X)",
"def iscomplex(self):\n return np.any(np.iscomplex(self.data))\n # return np.iscomplexobj(self._data)",
"def realityCheck(startPhi, startPi, startEnergy, endPhi, endPi, endEnergy):\n if np.max(np.imag(endPhi)/np.real(endPhi)) > 1e-15:\n raise ConsistencyCheckFailure(\"phi is not real\")\n if np.max(np.imag(endPi)/np.real(endPi)) > 1e-15:\n raise ConsistencyCheckFailure(\"pi is not real\")",
"def _validate_exposure(self, average_counts):\n return abs(average_counts - self._target_counts) <= self._counts_tolerance",
"def test_freq_ir_warns_negative_values(self):\n magnitudes = -torch.ones((1, 30), device=self.device, dtype=self.dtype)\n with self.assertWarnsRegex(UserWarning, \"^.+should not contain negative values.$\"):\n F.frequency_impulse_response(magnitudes)",
"def isreal(self):\n return np.all(np.isreal(self.data))\n # return np.isrealobj(self._data)",
"def s_magnitude(self, img):\n thresh = self.s_magnitude_thresh\n hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\n s_channel = hls[:, :, 2]\n magnitude_binary = np.zeros_like(s_channel)\n magnitude_binary[\n (s_channel >= thresh[0]) & (s_channel <= thresh[1])] = 1\n return magnitude_binary",
"def is_spectrum(self):\n count = 0\n for key in self._get_first_header():\n if key.lower().startswith(\"mca\"):\n count += 1\n if count >= 2:\n return True\n return False",
"def check_ionfrac_lt_1(self, mode):\n return self['ion_frac'][key][:]<=1.0",
"def __validate(self):\n return all(isinstance(x, int) for x in self.spectrum)",
"def test_61_spectral_index_probable_errors_filtering():\n\tcasalog.origin(\"test_61_spectral_index_probable_errors_filtering\")\n\tcasalog.post(\"starting\")\n\n\timmath(imagename=['imgG192_6s_spw0-63_mfs2.image.alpha.error', \n\t 'imgG192_6s_spw0-63_mfs2.image.tt0'],\n\t mode='evalexpr',\n\t expr='IM0[IM1>2E-4]',\n\t outfile='imgG192_6s_spw0-63_mfs2.image.alpha.error.filtered')",
"def complex_frequencies(self):\n return self._get_frequencies(cplx=True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Boolean. Checks if custom dates are set for the desired spend on the account.
|
def has_custom_dates(self):
# return self.desired_spend_start_date != None and self.desired_spend_end_date != None
return False # Temporarily disabling this feature
|
[
"def requirements(self):\n if datetime.datetime.today().day == 15:\n return True\n return False",
"def checkDateOptions(options: Dict, timeStart: datetime, timeStop: datetime) -> bool:\n # now check the user provided dates\n if options[\"start\"] and options[\"start\"] > timeStop:\n # this data has nothing to contribute in the optional date range\n return False\n if options[\"stop\"] and options[\"stop\"] < timeStart:\n # this data has nothing to contribute in the optional date range\n return False\n return True",
"def _compute_is_rented(self):\n for rec in self:\n contract = self.env['gc.contract'] \\\n .search([('apartment.id', '=', rec.id)])\n start_date = fields.Date.from_string(contract.start_date) \\\n or False\n end_date = fields.Date.from_string(contract.end_date) \\\n or False\n current_date = fields.Date.from_string(fields.Date.today())\n available = False\n if start_date:\n if end_date:\n if start_date <= current_date <= end_date:\n available = True\n else:\n if current_date >= start_date:\n available = True\n if available:\n rec.rented = True\n else:\n rec.rented = False",
"def tax_relevant(self):\n return False",
"def interest_time(self):\n return ((self._current_date.day == self._interest_paydate['day']) or\n (self._caccount > 0))\n #return (((self._current_date.day == self._interest_paydate['day']) and\n # (self._current_date.month == self._interest_paydate['month'])) or\n # (self._caccount > 0))",
"def eliagable_for_increment(self, date:date):\n if self.is_starting_salary() and self.date.month > 7 and date.year <= self.date.year+1:\n return False\n else:\n return True",
"def __contains__(self, date):\n return self._first_day <= date <= self._last_day",
"def has_paid_for_current_month(self, current_date=datetime.date.today()):\n if self.paid_until is None:\n return False\n\n return current_date < self.paid_until",
"def test_is_payday_negative2(self):\n date_to_check = date_class(2019,11,22)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == False\n\n date_to_check = date_class(2020,12,18)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == False",
"def can_charge(customer):\n if customer.date_purged is not None:\n return False\n if customer.default_source:\n return True\n return False",
"def _check_date(self, cr, uid, ids):\n for deleg in self.browse(cr, uid, ids):\n if deleg.dismissal_date <= deleg.employee_id.first_employement_date:\n return False\n return True",
"def credit(self, account):\n #stefan\n if self.account >= \"500\": # initialize self.account\n return True\n else:\n return False",
"def has_paid_for_current_month(self, current_date=datetime.date.today()):\n\n return current_date < self.paid_until",
"def is_future_effective(self) -> bool:\n with suppress(AttributeError, TypeError):\n if self._storage.effective_date > self._storage.payment_completion_date:\n return True\n return False",
"def should_deliver_report(args, reporting_config):\n valid_data_type = reporting_config['data_type'] in (args.data_type or DATA_TYPES)\n enterprise_customer_specified = bool(args.enterprise_customer)\n meets_schedule_requirement = is_current_time_in_schedule(\n reporting_config['frequency'],\n reporting_config['hour_of_day'],\n reporting_config['day_of_month'],\n reporting_config['day_of_week']\n )\n return reporting_config['active'] and \\\n valid_data_type and \\\n (enterprise_customer_specified or meets_schedule_requirement)",
"def check_for_additional_transaction(\n self, month: int,\n year: int, amount: int\n ):\n data = self.check_for_transactions(month, year)\n current_amount = 0\n for item in data:\n current_amount += item['amount']\n return self.amount - current_amount >= amount",
"def _positive(self, cr, uid, ids, context=None):\n for fact in self.browse(cr, uid, ids, context=context):\n if fact.week_factor<0 or fact.holiday_factor<0 or fact.maximum<0 :\n return False\n return True",
"def test_is_payday_negative1(self):\n date_to_check = date_class(2020,12,25)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == False",
"def expiration_day_occurred() -> bool:\n different_ticker = self._specific_ticker != self._future_ticker.ticker\n\n if last_date_in_chain in prices_df.index:\n different_prices = not self._chain[fields_list].loc[last_date_in_chain].equals(\n prices_df[fields_list].loc[last_date_in_chain])\n else:\n different_prices = True\n\n return different_ticker or different_prices",
"def condition(self):\n return self.order.paid is not None and \\\n self._is_created_recently() and \\\n self.order.record is not None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|