Dataset columns:
  query      string, 9 to 9.05k characters
  document   string, 10 to 222k characters
  negatives  sequence of strings, 19 to 20 items per row
  metadata   dict
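Each row pairs a docstring-style natural-language query with the source function it describes and a list of hard-negative functions. The sketch below shows one way to read rows with this schema using the Hugging Face datasets library; the dataset path is a placeholder, not the real identifier.

    from datasets import load_dataset

    # "user/code-retrieval-triplets" is a placeholder path, not the actual dataset id.
    ds = load_dataset("user/code-retrieval-triplets", split="train")

    for row in ds:
        query = row["query"]          # docstring-style query (the anchor)
        document = row["document"]    # the source function the query describes (the positive)
        negatives = row["negatives"]  # 19-20 similar but non-matching functions
        metadata = row["metadata"]    # objective spec, e.g. the (query, document, negatives) triplet
        print(f"{query[:60]!r} -> {len(negatives)} negatives")
        break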
Handles incoming event by invoking a specific action according to a request type.
def __handle(event, context) -> Tuple[Optional[Dict[Any, Any]], Optional[str]]:
    serialized_event = json.dumps(event, default=lambda o: "<not serializable>")
    logger.info(f"Got new request. Event: {serialized_event}.")
    action = Action(event)
    if event["RequestType"] == "Create":
        return action.create()
    if event["RequestType"] == "Update":
        return action.update()
    if event["RequestType"] == "Delete":
        return action.delete()
    raise KeyError("Unsupported request type! Type: {}".format(event["RequestType"]))
[ "def dispatch(self, request, attendees, action, event, **kwargs):\n action_func = getattr(self, action, None)\n if callable(action_func):\n return action_func(request, attendees, event, **kwargs)", "def handle_request(self, tpe, obj_dict):\n if tpe == 'DataRequest':\n return self._process_data_request(obj_dict)\n if tpe == 'ConfigRequest':\n return self._process_config_request(obj_dict)\n return warning(f'Unknown command type {tpe}')", "def handle(self):\n request_data = parse_request_json(self.request)\n response = None\n if request_data[SC.MSG_TITLE] == SC.MESSAGE_GET_ROLE:\n response = self.handle_get_role(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_BROADCAST_ROLES:\n response = self.handle_get_network_information(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_PRODUCE_VOTES:\n response = self.handle_produce_votes(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_DISTRIBUTE_VOTES:\n response = self.handle_distribute_votes(request_data)\n else:\n response = self.handle_unexpected_request()\n send_response_json(self.request, response, request_data[SC.MSG_ORIGIN])", "async def __call__(self, client, interaction_event):\n interaction_event_type = interaction_event.type\n if interaction_event_type is INTERACTION_TYPE_APPLICATION_COMMAND:\n await self._dispatch_application_command_event(client, interaction_event)\n \n elif interaction_event_type is INTERACTION_TYPE_MESSAGE_COMPONENT:\n await self._dispatch_component_event(client, interaction_event)\n \n elif interaction_event_type is INTERACTION_TYPE_APPLICATION_COMMAND_AUTOCOMPLETE:\n await self._dispatch_application_command_autocomplete_event(client, interaction_event)\n \n elif interaction_event_type is INTERACTION_TYPE_FORM_SUBMIT:\n await self._dispatch_form_submit_event(client, interaction_event)", "def webhook_handler():\n\n data = json.loads(request.data.decode(\"utf-8\"))\n logger.get_logger().warning(\"Webhook has been called for %s with action %s\", request.headers['x-github-event'],\n data[GithubApiFields.ACTION] if GithubApiFields.ACTION in data else \"\")\n\n if not __signature_valid():\n abort(401, message=\"Invalid signature\")\n\n event_type = request.headers['x-github-event']\n\n if event_type == GithubEventValues.PUSH:\n PushHandler(data).enact()\n\n elif event_type == GithubEventValues.PULL_REQUEST and data[GithubApiFields.ACTION] == GithubApiValues.OPENED:\n PROpenedHandler(data).enact()\n\n elif event_type == GithubEventValues.INSTALLATION and data[GithubApiFields.ACTION] == GithubApiValues.CREATED:\n InstallationCreatedHandler(data).enact()\n\n elif event_type == GithubEventValues.INSTALLATION and data[GithubApiFields.ACTION] == GithubApiValues.DELETED:\n InstallationDeletedHandler(data).enact()\n\n return jsonify({})", "def dispatch(self, event):\n if self._dispatch_map is not None:\n lst = self._dispatch_map.get(type(event), None)\n if lst is None:\n raise ValueError(\"unknown event type: %s\" % type(event))\n for l_ in lst:\n l_(event)", "def handle_request(self,req):\r\n self.process_request(req)", "def _request_handler(self, remote_address: data_type, request: BaseRequestPacket):\n logger.info(\n \"Request received. 
remote_address=%s command_id=%s request_id=%s\",\n remote_address,\n request.command_id,\n request.request_id,\n )\n if request.command_id in self._command_handlers:\n internal_handler = self._command_handlers[request.command_id]\n internal_handler(remote_address, request)\n else:\n logging.error(\"No handler\")", "def handle_request(self, request):\n try:\n handler = self.dispatcher[request.method]\n except KeyError:\n exception = KeyError(\"`{}` is not an available method.\".format(\n request.method))\n self.return_error(Failure(exception), request.id)\n else:\n try:\n if request.id is not None:\n d = maybeDeferred(handler, *request.args, **request.kwargs)\n d.addCallback(self.return_response, request.id)\n d.addErrback(self.return_error, request.id)\n else:\n handler(*request.args, **request.kwargs)\n except Exception as e:\n self.return_error(Failure(e), request.id)", "def handle():\n arguments = pluginsupport.getArguments()\n def __getArgument(arg):\n if not arguments.has_key(arg):\n return None\n val = arguments[arg]\n del arguments[arg]\n return val\n\n if __isAction(arguments):\n action = __getArgument('action')\n log.debug(\"invoking action '%s'\" % action)\n actionHandlers[action].call(arguments)\n else:\n mode = __getArgument('mode') or \"ROOT\"\n handler = modeHandlers[mode]\n log.debug(\"invoking mode %r handler with arguments %r\" % (mode, arguments))\n result = handler.call(arguments)\n log.debug(\"results from mode %r handler: %r\" % (mode, result))\n if handler.playable:\n log.debug(\"playing results for mode %r\" % mode)\n pluginsupport.play(result.items)\n else:\n log.debug(\"listing results for mode %r\" % mode)\n pluginsupport.list(result, handler.getContentType(arguments))\n pluginsupport.done()", "def handle_action(self, param):\n\n action_id = self.get_action_identifier() # action_id determines what function to execute\n self.debug_print(\"%s HANDLE_ACTION action_id:%s parameters:\\n%s\" % (F5_Connector.BANNER, action_id, param))\n\n supported_actions = {\"test connectivity\": self._test_connectivity,\n \"block ip\": self.block_ip,\n \"unblock ip\": self.unblock_ip}\n\n run_action = supported_actions[action_id]\n\n return run_action(param)", "def custom_dispatch(self, *args, **kw):\n self.request.rest_keys = self.request.route_kwargs\n\n action_name = self.request.path.rstrip('/').rsplit('/', 1)[-1]\n generic = not self.resource.get_pk(self.request)\n\n if self.find_action(action_name, generic, self.request.method):\n result = self.call_action_handler(\n self.request.method,\n self.request,\n action_name,\n generic\n )\n else:\n result = self.call_rest_handler(self.request.method, self.request)\n\n return self.response_from_result(result)", "def _event_handler(event_type, slack_event):\n team_id = slack_event[\"team_id\"]\n if event_type == \"message\":\n if slack_event[\"event\"][\"channel_type\"] in [\"channel\", \"group\"]:\n if \"text\" in slack_event[\"event\"] and slack_event[\"event\"][\"text\"] is not None:\n event_id = slack_event[\"event_id\"]\n if not pyBot.check_event_id(event_id):\n try:\n event_text = slack_event[\"event\"][\"text\"]\n match = re.search(r\"\\?(?P<package>\\w+)::(?P<function>\\w+)\", event_text)\n if match:\n print(slack_event)\n channel_id = slack_event[\"event\"][\"channel\"]\n # if message comes from a thread, reply to the thread\n # otherwise, create a new thread \n if \"thread_ts\" in slack_event[\"event\"]:\n thread_id = slack_event[\"event\"][\"thread_ts\"] \n else:\n thread_id = slack_event[\"event\"][\"ts\"] \n pkg = 
match.group('package')\n fun = match.group('function')\n pyBot.update_client(team_id)\n pyBot.documentation_message(pkg, fun, channel_id, thread_id)\n pyBot.store_event_id(event_id)\n return make_response(\"Documentation message sent\", 200,)\n except Exception as err:\n print(err)\n print(slack_event)\n\n # If the event_type does not have a handler\n message = \"You have not added an event handler for the %s\" % event_type\n # Return a helpful error message\n return make_response(message, 200, {\"X-Slack-No-Retry\": 1})", "def invoke_dispatch(self, event_type=None):\n if event_type is None:\n for dispatcher in self.__dispatchers_set:\n if dispatcher.is_deployed:\n dispatcher.dispatch()\n else:\n if self.__dispatchers[event_type].is_deployed:\n dispatcher.dispatch()", "def handler(event, _):\n\n # Verify that this is a request with IAM credentials\n if iam_user_id(event) is None:\n logger.warning({\"message\": \"User ARN not found in event\"})\n return response(\"Unauthorized\", 403)\n\n # Extract the request body\n try:\n body = json.loads(event[\"body\"])\n except Exception as exc: # pylint: disable=broad-except\n logger.warning(\"Exception caught: %s\", exc)\n return response(\"Failed to parse JSON body\", 400)\n\n for key in [\"products\", \"address\"]:\n if key not in body:\n logger.info({\n \"message\": \"Missing '{}' in body\".format(key),\n \"body\": body\n })\n return response(\"Missing '{}' in body\".format(key), 400)\n\n # Calculate the delivery pricing\n pricing = get_pricing(body[\"products\"], body[\"address\"])\n logger.debug({\n \"message\": \"Estimated delivery pricing to {}\".format(pricing),\n \"pricing\": pricing\n })\n\n # Send the response back\n return response({\n \"pricing\": pricing\n })", "def dispatch(self):\r\n for action in Server.instance.actions:\r\n if action.enabled:\r\n try:\r\n action.on_event(self)\r\n except Exception as error:\r\n Log.Error(f\"[{Server.instance.name}] failed to run action {action.name}\")\r\n traceback.print_exc()", "def action(self, req, identity, body={}):\r\n\r\n if len(body) < 1:\r\n raise exc.HTTPBadRequest(_(\"No action specified\"))\r\n\r\n if len(body) > 1:\r\n raise exc.HTTPBadRequest(_(\"Multiple actions specified\"))\r\n\r\n ac = body.keys()[0]\r\n if ac not in self.ACTIONS:\r\n raise exc.HTTPBadRequest(_(\"Invalid action %s specified\") % ac)\r\n\r\n if ac == self.SUSPEND:\r\n self.rpc_client.stack_suspend(req.context, identity)\r\n elif ac == self.RESUME:\r\n self.rpc_client.stack_resume(req.context, identity)\r\n else:\r\n raise exc.HTTPInternalServerError(_(\"Unexpected action %s\") % ac)", "def add_event(self, event_type, event_action): #TODO create a class Action for event_action?\n\n self.data['parameters']['events'].update({ event_type: event_action })", "def dispatch_event(self, event):\n # Dispatch the event to all the associated listeners \n if event.type in list(self._events.keys()):\n listeners = self._events[ event.type ]\n \n for listener in listeners:\n listener( event )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Email administrator a success message with need for manual attachments.
def email_success_attachments(dirname, attachments, addresses, smtp_server, smtp_user, smtp_password):
    # Set up multipart message
    msg = MIMEMultipart()
    msg['Subject'] = '%s requires manual intervention' % dirname
    msg['To'] = ', '.join(addresses)
    msg['From'] = "p2b@localhost"
    msg.preamble = 'You will not see this in a MIME-aware mail reader.\n'
    # Create and add body
    body = "%s/Output.xml is ready to be uploaded.\n" % dirname
    body += "Additionally the following files will need to be manually attached: \n"
    for att in attachments:
        body += os.path.basename(att) + "\n"
    part1 = MIMEText(body, 'plain')
    msg.attach(part1)
    # Send the email using SMTP
    s = smtplib.SMTP(smtp_server, 25)
    if smtp_user and smtp_password:
        s.login(smtp_user, smtp_password)
    s.sendmail("p2b@localhost", addresses, msg.as_string())
    s.quit()
[ "def email_sent():\n tkMessageBox.showinfo(\"Email Sent\", \"Email was sent successfully.\",icon='info')", "def send_alert_attached(subject, flist):\n msg = MIMEMultipart()\n msg['Subject'] = subject\n msg['From'] = mailsender\n msg['To'] = mailreceip\n #message = \"Thank you\"\n msg.attach(MIMEText(\"Galindo Reyes Agustin\", 'plain'))\n \"\"\"for file in flist:\n png_file = file.split('.')[0] + '.png'\n print(png_file)\n fp = open(png_file, 'rb')\n img = MIMEImage(fp.read())\n fp.close()\n msg.attach(img)\"\"\"\n fp = open(\"pred.png\", 'rb')\n img = MIMEImage(fp.read())\n fp.close()\n mserver = smtplib.SMTP(mailserver)\n mserver.starttls()\n # Login Credentials for sending the mail\n mserver.login(mailsender, password)\n\n mserver.sendmail(mailsender, mailreceip, msg.as_string())\n mserver.quit()", "def fulfill(self):\n self.send_email_sc()\n self.status = self.FULFILLED\n self.save()", "def send_feedback_email_task():\n print(\"Sent feedback email\")\n return", "def email_failed():\n tkMessageBox.showinfo(\"Email Failed\", \"Email failed to send.\", icon='warning')", "def on_success(result):\n if verbose:\n print \"Successfully sent an email to %s.\"%to\n reactor.stop()\n os.kill(os.getpid(),9) # suicide", "def notify_owner(sender, instance, created, **kwargs):\n if created:\n video = instance\n channel = None\n if video.channel_id:\n channel = channel.objects.get(id=video.channel_id)\n subject = \"New video uploaded!\"\n from_mail = settings.DEFAULT_FROM_EMAIL\n user = User.objects.get(id=video.uploader_id)\n video_link = \"http://%s/%s\" % ( Site.objects.get_current().domain, video.slug )\n\n message = render_to_string(\"email/collaborate.txt\", {\n 'user': user,\n 'video': video,\n 'video_link': video_link,\n 'channel': channel\n })\n send_mail(subject, message, from_mail, [user.email], fail_silently=False)", "def send_email_with_attachment():\r\n # basic info\r\n smtpServer = \"smtp.163.com\"\r\n account = \"onebigbera@163.com\"\r\n password = \"george9527\"\r\n sender = \"onebigbera@163.com\"\r\n receiver = \"2578288992@qq.com\"\r\n\r\n # instantiation an mail object\r\n message = MIMEMultipart()\r\n message['From'] = sender\r\n message['To'] = receiver\r\n content = \"<html><h4 style='color:red'>亲爱的小心有熊出没:</br>爱可能会迟到,但永远不会缺席!</br></h4><p><span>下面为测试报告,请查看!</span></p></html>\"\r\n subject = '寒冷的季节,温暖的是人心 ^_^ !'\r\n message[\"Subject\"] = Header(subject, 'utf-8')\r\n\r\n # attach the content\r\n message.attach(MIMEText(content, 'html', 'utf-8'))\r\n\r\n # instantiation attachment object\r\n html_path = r'F:\\Testing_Development\\UnittestProjects\\automated_testing\\automated_testing\\module_structure_management\\test_report\\2019-10-12_11_21_57result.html'\r\n # get attachment stream\r\n attachment_1 = MIMEText(open(html_path).read(), 'base64', 'utf-8')\r\n\r\n # set property\r\n attachment_1['Content-Type'] = 'application/octet-stream'\r\n attachment_1['Content-Disposition'] = 'attachment; filename=\"report.html\"'\r\n\r\n message.attach(attachment_1)\r\n\r\n att2 = MIMEText(open(\r\n r'F:\\Testing_Development\\UnittestProjects\\UnittestBasic\\51zxw_selenium_example\\emailSender\\attachment\\test1.jpg',\r\n 'rb').read(), 'base64', 'utf-8')\r\n # set attachment\r\n att2[\"Content-Type\"] = 'application/octet-stream'\r\n att2[\"Content-Disposition\"] = 'attachment; filename=\"test1.jpg\"'\r\n message.attach(att2)\r\n\r\n # txt file\r\n att3 = MIMEText(open(\r\n 
r'F:\\Testing_Development\\UnittestProjects\\UnittestBasic\\51zxw_selenium_example\\emailSender\\attachment\\test.txt',\r\n 'rb').read(), 'base64', 'utf-8')\r\n # attachment setting\r\n att3[\"Content-Type\"] = 'application/octet-stream'\r\n att3[\"Content-Disposition\"] = 'attachment; filename=\"test.txt\"'\r\n message.attach(att3)\r\n\r\n smtp = smtplib.SMTP_SSL(smtpServer, 465)\r\n try:\r\n smtp.helo(smtpServer)\r\n smtp.ehlo(smtpServer)\r\n smtp.login(account, password)\r\n except BaseException as e:\r\n print(e)\r\n\r\n try:\r\n print(\"Begin to send >>>\")\r\n smtp.sendmail(sender, receiver, message.as_string())\r\n print(\"Send finished...\")\r\n except BaseException as e:\r\n print(e)", "def test_users_activation_email_send(self):\n pass", "def respond(sender_id, message_text, attachment_type, attachment_url, postback, quick_reply, context):\n\n new_context = dict(project_id=postback)\n conversation = dict(name='update_project_status', stage='add_image')\n response = dict(message_text=\"Great! Take or upload a image to update your progress\")\n return response, new_context, conversation", "async def send_special_email(\n event_id: str = Form(...),\n subject: str = Form(...),\n message: str = Form(...),\n file: Optional[UploadFile] = Form(None),\n user: dict = Depends(get_current_user),\n):\n if not file:\n file = None\n\n sended = await MailController.send_special(event_id, subject, message, file, user)\n\n if sended == 403:\n exceptions.forbidden_403(\"Operation forbidden\")\n if sended == 404:\n exceptions.not_fount_404(\"Event not found\")\n\n return {\"detail\": \"Email sended\", \"target\": f\"Event: {event_id}\"}", "def notify(run):\n user = User.objects.get(username=run.user)\n addr_to = user.email\n addr_from = settings.EMAIL_ADDRESS\n url = settings.HOST_URL\n url += run.get_absolute_url()\n status = run.get_status_display()\n if status == \"Done\":\n color = \"green\"\n else:\n color = \"red\"\n\n msg = MIMEMultipart('multipart')\n msg['Subject'] = \"Status update for your workflow run\"\n msg['To'] = addr_to\n msg['From'] = addr_from\n html = \"\"\"\n <html>\n <head></head>\n <body>\n <p>Hi {user},</p>\n <p>There is an update in the status of the following workflow run:<p>\n <ul style=\"list-style-type:cicle\">\n <li><b>Run ID:</b> <a href=\"{url}\">{run_id}</a></li>\n <li><b>Workflow name:</b> {workflow_name}</li>\n <li><b>Date:</b> {date}</li>\n <li><b>Current status:</b> <font color=\"{color}\">{status}</font></li>\n </ul>\n <p>\n You can access the results and logfiles or re-run the workflow using the Run ID link above.<br>\n Please do not reply to this email.<br><br>\n Cheers,<br>\n Integrated data analysis platform (IDAP),<br>\n Shahlab Dev Team.\n </p>\n </body>\n </html>\n \"\"\".format(\n user = user.first_name,\n run_id = run.run_id,\n workflow_name = run.get_workflow_display(),\n date = run.date,\n status = status,\n color = color,\n url = url,\n )\n\n body = MIMEText(html, 'html')\n msg.attach(body)\n\n try:\n server = smtplib.SMTP(settings.SMTP_SERVER, settings.SMTP_PORT)\n server.ehlo()\n server.starttls()\n server.login(addr_from, settings.EMAIL_PASSWORD)\n server.sendmail(addr_from, [addr_to], msg.as_string())\n server.close()\n return True\n except:\n traceback.print_exc()\n return False", "def trip_submit(self, *args, **kwargs):\n try:\n email_setting = self.model.objects.get(trigger_name__trigger_name=self.message).email_setting\n except:\n raise ObjectDoesNotExist \n entry = kwargs.get('entry')\n if email_setting.status:\n subject = 
email_setting.subject\n message = email_setting.message\n t = Template(message)\n c = Context(locals())\n final_message = t.render(c)\n msg = EmailMessage(subject, final_message, \"\"\"Cabforpool <support@cabforpool.com>\"\"\", [entry.email])\n msg.content_subtype = \"html\"\n msg.send()", "def send_approval_notification(self):\n if self.channel:\n link = \"\".join([\"http://\", Site.objects.get_current().domain, self.approval_link()])\n message = render_to_string('email/approval_notification.txt', {\n 'video': self,\n 'link': link\n })\n subject = \"Video Approval\"\n self.channel.owner.email_user(subject, message)\n self.is_active = False\n self.save()", "def success(message):\n content = {'type': 'success', 'content': message} \n flash(content)", "def post(self):\n current_user.email_notifications = True\n current_user.save()\n return 'OK', 200", "def activation_sent(request):\n messages.success(request, 'Vérifiez vos mails pour activer votre compte')\n return redirect('website:products')\n # return render(request, 'auth/activation_request.html') # This will activate user’s account", "def on_generate_document_success(self, filename):\n parent = str(Path(filename).parent)\n file = str(Path(filename).name)\n QMessageBox.information(self, 'Success',\n f'Document <a href=\"file:///{parent}\">{file}</a> generated successfully!')", "def sendMailToProprio():" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get value from mapping with handling special cases.
def get_value(self, value):
    if pd.isna(value):
        return None
    if value not in self.mapping.keys():
        return value
    return self.mapping[value]
[ "def try_map(m, value):\n if value in m:\n return m[value]\n else:\n return value", "def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]):\n return {\"value\": traverse_get(mapping, *traverse)}", "def get_value_for(self, instance):\n value = super(ValueMapFullTextAttr, self).get_value_for(instance)\n # handle error if value_map doesn't have mapping for the given value\n return self.value_map.get(value, None)", "def get(self, key: int) -> int:\n return -1 if self.my_map.get(key) == None else self.my_map.get(key)", "def safe_get_value(maybe_dict, key: str):\n if isinstance(maybe_dict, dict):\n return maybe_dict.get(key, None)\n return maybe_dict", "def value_or_none(dictionary: dict, key: str):\n return dictionary[key] if key in dictionary else None", "def enum_value_for(name_to_enum_entry_map: Dict[str, Any], name: str):\n assert name_to_enum_entry_map is not None\n assert len(name_to_enum_entry_map) >= 1\n assert name is not None\n assert name.strip() == name\n\n if name == '':\n result = None\n else:\n try:\n result = name_to_enum_entry_map[name.lower()]\n except KeyError:\n enum_type = type(next(iter(name_to_enum_entry_map.values())))\n valid_names = sorted(name_to_enum_entry_map.keys())\n raise ValueError('name %r for enum %s must be one of: %s'\n % (name, enum_type.__name__, valid_names))\n return result", "def ex_inquiry_map(inquiry):\n return ex_inquiry[inquiry].value", "def get_field(field, row, field_map, default_value=None):\n\n field_info = field_map.get(field)\n\n if not field_info:\n return default_value\n else:\n return row[field_info['idx']]", "def _get_value(obj, key, default=missing):\n if \".\" in key:\n return _get_value_for_keys(obj, key.split(\".\"), default)\n else:\n return _get_value_for_key(obj, key, default)", "def _get_value(json_data: dict, key: str) -> str:\n\n try:\n value = json_data[key]\n except KeyError:\n value = None\n return value", "def get_value(self, code):\n return self.code_to_value_map.get(code)", "def _interp_map(val, val_map):\n return interp(val, list(val_map.keys()), list(val_map.values()))", "def __getitem__(self, item):\n if self.tissue_mapping is None:\n return self.data[item]\n else:\n return self.map_tissue_ids(self.data[item])", "def __getitem__(self, key):\n #retrieve the value\n curValue = self._d[key.lower().strip()]\n \n #check if the value is a bool\n if curValue.strip().lower() in ['yes','true']:\n return True\n if curValue.strip().lower() in ['no','false']:\n return False\n \n #check if value is a int\n if curValue.strip().isdigit():\n return int(curValue)\n \n #try to convert it to a float\n try:\n curValue = float(curValue)\n return curValue\n except ValueError:\n pass\n \n #return it as a string\n return curValue", "def get_value(self, key: str) -> Any:\r\n if self.get_index(key) is None:\r\n return None\r\n return self.hash_table[self.get_index(key)][1]", "def lookup(collection, key, if_none=None):\n if key in collection:\n return collection[key]\n else:\n return if_none", "def get_context_value(ctx, key, type_):\n rval = None\n\n # return None for invalid key\n if key is None:\n return rval\n\n # get default language\n if type_ == '@language' and type_ in ctx:\n rval = ctx[type_]\n\n # get specific entry information\n if key in ctx['mappings']:\n entry = ctx['mappings'][key]\n if entry is None:\n return None\n\n # return whole entry\n if type_ is None:\n rval = entry\n # return entry value for type\n elif type_ in entry:\n rval = entry[type_]\n\n return rval", "def _value_by_key(row, key):\n value = 
row[key].iloc[0]\n if pd.isna(value):\n return None\n return value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the previous hash of block.
def previous_hash(self):
    return self.__previous_hash
[ "def getPreviousBlockHeaderHash(self) -> str:\n return self.blockHeader.prevBlockHeaderHash", "def getPreviousTransactionHash(self) -> str:\n return self.__previousTransactionHash", "def getPriorBlockHash(self):\n return self.parentBlockHash", "def get_previous_block(self):\n # Return previous block\n \n return self.chain[-1]", "def _get_previous_block_root_state_hash(self, blkw):\n if blkw.previous_block_id == NULL_BLOCK_IDENTIFIER:\n return INIT_ROOT_KEY\n else:\n prev_blkw = self._block_cache[blkw.previous_block_id]\n return prev_blkw.state_root_hash", "def __get_current_hash__(self):\n hasher = hashlib.sha256()\n hasher.update(self.previous_hash.encode() + self.data.encode())\n return hasher.hexdigest()", "def getHash(self):\n # using following attributes to find the block hash\n # version, priorBlockHash, target, time and nonce\n blockHash = hashlib.sha256()\n blockHash.update(self.version.to_bytes(32,\"big\"))\n blockHash.update(self.parentBlockHash.to_bytes(32,\"big\"))\n blockHash.update(self.target.to_bytes(32,\"big\"))\n blockHash.update(self.time.to_bytes(32,\"big\"))\n blockHash.update(self.nonce.to_bytes(32,\"big\"))\n\n return int.from_bytes(blockHash.digest(),\"big\")", "def hash(self):\n return sha256_proto(self.block.block_header)", "def block_hash(self, block):\n return block._hash.hexdigest() + \"\\n\"", "def setPreviousBlockHeaderHash(self, prevBlockHash: str):\n self.blockHeader.prevBlockHeaderHash = prevBlockHash", "def header_hash(self): \n return hashlib.sha256((str(self.index) + str(self.timestamp) + str(self.tx) + str(self.previous_block)).encode('utf-8')).hexdigest()", "def get_latest_hash(self):\n db_query = u\"SELECT hash_block FROM block_chain ORDER BY ROWID DESC LIMIT 1;\"\n db_result = self.execute(db_query).fetchone()\n\n return str(db_result[0]) if db_result else ''", "def hash(self, block):\r\n block_string = json.dumps(block, sort_keys=True).encode()\r\n return hashlib.sha256(block_string).hexdigest()", "def get_block_hash(\n recent_block_hashes: Sequence[Hash32],\n current_block_slot_number: int,\n slot: int,\n epoch_length: int) -> Hash32:\n if len(recent_block_hashes) != epoch_length * 2:\n raise ValueError(\n \"Length of recent_block_hashes != epoch_length * 2\"\n \"\\texpected: %s, found: %s\" % (\n epoch_length * 2, len(recent_block_hashes)\n )\n )\n\n slot_relative_position = current_block_slot_number - epoch_length * 2\n return _get_element_from_recent_list(\n recent_block_hashes,\n slot,\n slot_relative_position,\n )", "def getTransactionHash(self) -> str:\n return self.__transactionHash", "def get_latest_block(self):\n return self.chain[-1]", "def prev_version(self):\n if self.prev_key is None: return None\n return self.prev_key.metadata.get('version', None)", "def get_block_hash(self,block_height:int,request_id:str) -> str:\n method = 'getblockhash'\n return self.conn_properties.connect (self.conn_properties, method=method,\n params=[int(block_height)], id=request_id)", "def get_last_hash(self):\n last_commit_hash = subprocess.check_output(\n ['git', 'rev-parse', 'HEAD'],\n universal_newlines=True, cwd=self._destination\n )\n return last_commit_hash.strip()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the transactions of block.
def transactions(self):
    return self.__transactions
[ "def get_transactions():\n response = {\n 'transaction': list(node.blockchain.current_transactions),\n }\n return jsonify(response), 200", "def getTransactionList(self) -> list:\n return self.__transactions", "def get_internal_transaction_by_block(self, block_number):\n\n # TODO handle exception\n tx_list = []\n num_tx = self.get_block_transaction_count_by_number(block_number)\n for i in range(num_tx):\n tx_json = self.get_transaction_by_block_number_and_index(block_number, i)\n if tx_json['result'] is None:\n continue\n\n tx_hash = tx_json['result']['hash']\n\n tx_list_json = self.get_internal_transaction_by_hash(tx_hash)\n if tx_list_json['result'] is None:\n continue\n\n tx_list.extend(tx_list_json['result'])\n\n return tx_list", "def transactions(self):\n return Transaction.objects.filter(tags__slug=self.slug,\n user=self.user).all()", "def transactions(self) -> Transactions:\n return self._incoming_transactions", "def getTxByBlockNumber(self, blockNumber):\n\t\tquery = 'SELECT * from transactions where block_id = %s'\n\t\tself.executeQuery(query, (blockNumber,))\n\t\trawTransactionList = self.fetchAll()\n\t\ttransactionList = []\n\t\tfor rawTransaction in rawTransactionList:\n\t\t\ttransaction = Transaction.Transaction(rawTransaction[1])\n\t\t\ttransaction.setTransactionFromDb(rawTransaction)\n\t\t\ttransactionList.append(transaction)\n\t\treturn transactionList", "def getcommitmenttransactions(url):\n channel = Channel.get(url)\n commitment = channel.signed_commitment()\n return [commitment,]", "def get_transaction(self, block_num: int, trx_in_block: int):\n return self.bts.rpc.get_transaction(block_num, trx_in_block)", "def getFullTxByBlockNumber(self, blockNumber):\n\t\tquery = 'SELECT * from transactions where block_id = %s'\n\t\tself.executeQuery(query, (blockNumber,))\n\t\ttransactionRawList = self.fetchAll()\n\t\ttransactionList = []\n\t\tfor transactionRaw in transactionRawList:\n\t\t\ttransaction = Transaction.Transaction(transactionRaw[1])\n\t\t\ttransaction.setTransactionFromDb(transactionRaw)\n\t\t\ttransaction.inputList = self.getInputByTxId(transaction.id)\n\t\t\ttransaction.outputList = self.getOutputByTxId(transaction.id)\n\t\t\ttransactionList.append(transaction)\n\t\treturn transactionList", "def get_internal_transaction_by_block_range(self, min_block, max_block):\n result_list = []\n for i in range(min_block, max_block):\n result_list.extend(self.get_internal_transaction_by_block(i))\n print(\"block \", i, \" finished.\\n\")\n time.sleep(60)\n return result_list", "def transactions(self):\n self._trans_lock.acquire()\n try:\n tx_copy = copy.deepcopy(list(\n self._hash_transactions_map.values()))\n finally:\n self._trans_lock.release()\n return tx_copy", "def get_txs(self):\n tx_set = {\"transactions\": []}\n for descriptor in ['value_transfer_txns', 'data_request_txns', 'commit_txns', 'reveal_txns', 'tally_txns', 'mint']:\n txs = getattr(self.block.txns, descriptor)\n if descriptor != \"mint\":\n tx_set[descriptor] = [sha256_proto(tx) for tx in txs]\n else:\n tx_set[descriptor] = [sha256_proto(txs)]\n tx_set['transactions'].extend(tx_set[descriptor])\n return tx_set", "def list_transaction(self):\n self.parsed_list_transaction = TransactionList()\n\n self.transaction()\n self.parsed_transaction.compute_hash()\n self.parsed_list_transaction.add(self.parsed_transaction)\n\n self.list_transaction_next()", "def get_coins(self, blockchain):\n coins = []\n for block in blockchain.blocks:\n tx = block.transaction\n for coin in tx.created_coins:\n if coin.wallet_id == 
self.id:\n coins.append(coin)\n if isinstance(tx, CoinCreation):\n continue\n for coin in tx.consumed_coins:\n if coin.wallet_id == self.id:\n coins.remove(coin)\n return coins", "def get_transaction(self, hash: str, block: int = None):\n\n if (hash is None):\n cprint(\"Missing Argument 'hash'?\", \"red\")\n return 0\n\n if (block is None):\n block = w3.eth.blockNumber\n\n cprint(\"transaction {} details = \\n {}\".format(hash, (w3.eth.getTransaction(hash))), \"yellow\") #TODO: make this print pretty json", "def getVendingTransactions(self):\n return self._VendingTransactions", "def get_league_transactions(self):\n return self.query(\n \"https://fantasysports.yahooapis.com/fantasy/v2/league/\" + self.get_league_key() + \"/transactions\",\n [\"league\", \"transactions\"])", "def get_all_exp_transaction(self):\n return self.filter(status=True)", "def test_list_xrp__ripple_transactions_by_block_hash(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the proof of block.
def proof(self):
    return self.__proof
[ "def __create_proof(self):\n\n # Create the block base on which the salt will be concatenated\n base_block_str = ''\n for transaction in self.__transactions:\n base_block_str += str(transaction)\n base_block_str += self.__previous_hash\n\n # Find a salt that creates the right hash\n while True:\n guess_salt = hex(self.__xorshift.getrandbits(self.proof_bitsize)).lstrip('0x')\n guess = base_block_str + guess_salt\n hash_try = self.__hash.hash(guess)\n\n if hash_try.endswith('0' * self.proof_complexity):\n self.__proof = guess_salt\n return", "def proof_of_work():\n last_block = blockchain[-1]\n last_hash = hash_block(last_block)\n proof = 0\n # Try different PoW numbers and return the first valid one\n while not valid_proof(open_transactions, last_hash, proof):\n proof += 1\n return proof", "def proof_of_stake(self, block):\n\t\tbee = Bee(address=block.validator, honeycomb=0)\n\t\thoneycomb, stakes = bee.calculate_balance(self.chain, block.index)\n\t\tif honeycomb < int(block.stake):\n\t\t\treturn None\n\n\t\treturn block", "def verify_non_interactive_proof_showing_protocol(proof,pk,right_side_commit,disclosed_attributes, random_signature, message):\n nb_attr = int((len(pk) - 3) / 2)\n\n R = jsonpickle.decode(proof[0])\n sm = proof[1]\n st = jsonpickle.decode(proof[2])\n random_signature = (jsonpickle.decode(random_signature[0]),jsonpickle.decode(random_signature[1]))\n right_side_commit = jsonpickle.decode(right_side_commit)\n\n #computing challenge from all public info: public key, commitment and R, as well as message m\n #doing SHA256 hash of the concat binary of the public info\n challenge = right_side_commit.to_binary() + R.to_binary() + message\n for i in range(0,len(pk)):\n challenge = challenge + jsonpickle.decode(pk[i]).to_binary()\n challenge = hashlib.sha256(challenge).digest()\n #convert challenge to Bn\n challenge = Bn.from_binary(challenge)\n\n verif = right_side_commit.pow(challenge)\n for i in sm:\n verif = verif * ((random_signature[0].pair(jsonpickle.decode(pk[3 + nb_attr + i]))).pow(jsonpickle.decode(sm[i])))\n verif = verif * (random_signature[0].pair(jsonpickle.decode(pk[1 + nb_attr]))).pow(st)\n\n #need to compute left side to check if it's equal to right side commitment using the bilinear function:\n left_side = random_signature[1].pair(jsonpickle.decode(pk[1 + nb_attr]))\n for i in disclosed_attributes:\n left_side = left_side * ((random_signature[0].pair(jsonpickle.decode(pk[3 + nb_attr + i]))).pow(-Bn.from_binary(disclosed_attributes[i].encode())))\n left_side = left_side / (random_signature[0].pair(jsonpickle.decode(pk[2 + nb_attr])))\n\n #check if verif == R and if left_side == right_side_commitment\n return ((R == verif) and (left_side == right_side_commit))", "def proof_of_stake_v2(self, block):\n\t\tbee = Bee(address=block.validator, honeycomb=0)\n\t\thoneycomb, stakes = bee.calculate_balance(self.chain, block.index)\n\t\tif honeycomb < block.stake:\n\t\t\treturn None\n\n\t\tcomputed_hash = block.compute_hash()\n\t\twhile not int(computed_hash, 16) < (int(Blockchain.threshold, 16)\n\t\t\t\t\t\t\t\t\t\t\t* block.stake):\n\t\t\tblock.nonce += 1\n\t\t\tcomputed_hash = block.compute_hash()\n\n\t\treturn block", "def _proposal_code_of_block(self, block_id):\n\n sql = \"\"\"\nSELECT Proposal_Code\n FROM ProposalCode AS pc\n JOIN Block AS b ON pc.ProposalCode_Id = b.ProposalCode_Id\n WHERE Block_Id=%(block_id)s\n \"\"\"\n df = self._query(sql, params=dict(block_id=block_id))\n\n # sanity check: does the block exist?\n if len(df) == 0:\n raise ValueError(\n \"There 
exists no block with id {block_id}\".format(block_id=block_id)\n )\n\n return df[\"Proposal_Code\"][0]", "def gen_proof(self) -> Proof:\n assert not self.current_goals, \"non empty goal stack\"\n init_goal = self.get_goal_by_id(0)\n return self.gen_proof_for_goal(init_goal)", "def add_block(self, block, proof):\n \n previous_hash = self.last_block.hash\n\n if previous_hash != block.previous_hash:\n return False\n \n if not self.is_valid_proof(block, proof):\n return False\n\n # Add the hash after the PoW\n block.hash = proof\n \n # Add block to the chain\n self.chain.append(block)\n \n return True", "def make_genesis_block():\n block = Block(index=0,\n timestamp=datetime.now().isoformat(),\n data={'proof-of-work': 9, 'transactions': []},\n previous_hash=\"0\")\n return block", "def proofing(self, page):\n\n stages = page.stages\n\n ocrLines = stages[\"line\"]\n normalized = stages[\"normalized\"]\n (h, w) = normalized.shape[:2]\n\n scale = 1 if w == 0 else 1000 / w\n\n def g(m, asStr=True):\n scaledM = m if scale == 1 else int(round(m * scale))\n return str(scaledM) if asStr else scaledM\n\n page.proofW = g(w, asStr=False)\n page.proofH = g(h, asStr=False)\n\n linesHtml = \"\".join(\n TEMPLATE[\"line\"]\n .replace(\"«left»\", g(left))\n .replace(\"«top»\", g(top))\n .replace(\"«width»\", g(right - left))\n .replace(\"«height»\", g(bottom - top))\n .replace(\"«text»\", f\"{ln:>01}\")\n for (stripe, block, ln, left, top, right, bottom) in ocrLines\n )\n\n for stage in (\"char\", \"word\"):\n stageData = stages.get(stage, [])\n boxesHtml = []\n for (\n stripe,\n block,\n ln,\n left,\n top,\n right,\n bottom,\n conf,\n *rest,\n ) in stageData:\n boxesHtml.append(\n TEMPLATE[stage]\n .replace(\"«left»\", g(left))\n .replace(\"«top»\", g(top))\n .replace(\"«width»\", g(right - left))\n .replace(\"«height»\", g(bottom - top))\n .replace(\"«background»\", getProofColor(conf))\n .replace(\"«text»\", \"\".join(rest))\n )\n\n boxesHtml = \"\".join(boxesHtml)\n proofData = (\n TEMPLATE[\"doc\"]\n .replace(\"«width»\", g(w))\n .replace(\"«height»\", g(h))\n .replace(\"«source»\", f\"{page.bare}.{DEFAULT_EXTENSION}\")\n .replace(\"«lines»\", linesHtml)\n .replace(\"«boxes»\", boxesHtml)\n )\n proofStage = f\"proof{stage}\"\n with open(page.stagePath(proofStage), \"w\") as f:\n f.write(proofData)\n stages[proofStage] = f\"see proof at {stage} level\"", "def genproof(publickey, data, authenticators, challenge):\n pass", "def getLastMainChainBlock(self):\n\t\tquery = 'SELECT * from blocks WHERE orphan = False ORDER BY id ASC LIMIT 1'\n\t\tself.executeQuery(query)\n\t\trawBlock = self.fetchOne()\n\t\tif rawBlock is not None:\n\t\t\tblock = Block.Block(None, rawBlock[0], rawBlock[9])\n\t\t\tblock.setBlockFromDb(rawBlock)\n\t\t\treturn block\n\t\treturn rawBlock", "def get_parent(self, block):\n if block.height == 0:\n return None\n return self.get_block(block.parentHash)", "def find_block(self, block):\n startlines = {\n 'surcharge': ('Node Surcharge Summary', 9),\n 'depth': ('Node Depth Summary', 8),\n # todo:\n #'inflow':,\n #'flooding':,\n #'volume':,\n #'loading':,\n #'link_flow':,\n #'classification':,\n #'conduit_surcharge':,\n }\n\n\n blockstart, comment_lines = startlines[block]\n\n return self._find_line(blockstart) + comment_lines #b/c variable comment lines", "def IsBlock(self) -> bool:", "async def verify_proof(self, proof_req: dict, proof: dict) -> str:\n\n logger = logging.getLogger(__name__)\n logger.debug('Verifier.verify_proof: >>> proof_req: {}, proof: {}'.format(\n proof_req,\n proof))\n\n 
claims = proof['identifiers']\n uuid2schema = {}\n uuid2claim_def = {}\n for claim_uuid in claims:\n claim_s_key = schema_key_for(claims[claim_uuid]['schema_key'])\n schema = json.loads(await self.get_schema(claim_s_key))\n uuid2schema[claim_uuid] = schema\n uuid2claim_def[claim_uuid] = json.loads(await self.get_claim_def(\n schema['seqNo'],\n claims[claim_uuid]['issuer_did']))\n\n rv = json.dumps(await anoncreds.verifier_verify_proof(\n json.dumps(proof_req),\n json.dumps(proof),\n json.dumps(uuid2schema),\n json.dumps(uuid2claim_def),\n json.dumps({}))) # revoc_regs_json\n\n logger.debug('Verifier.verify_proof: <<< {}'.format(rv))\n return rv", "def get_current_block(self):\n return self._block", "def currentBlock(self):\n\n index = self.text.index(self.text.marker.mark)\n return self.lang.get_block_of_code(self.text, index)", "def _prove_CP() -> Proof:\n # Optional Task 6.7d\n p = _prove_NNE()\n lines = [line for line in p.lines]\n con1_line = len(lines)-1 # (~~p->p)\n form1_line = lines[-1]\n\n p = prove_specialization(prove_NN(), InferenceRule([], Formula.parse(\"(q->~~q)\")))\n add_lines_of_proof(con1_line, lines, p)\n con2_line = len(lines)-1 # (q->~~q)\n form2_line = lines[-1]\n\n p = prove_specialization(prove_hypothetical_syllogism(), InferenceRule([Formula.parse(\"(~~p->p)\"),\n Formula.parse(\"(p->q)\")],\n Formula.parse(\"(~~p->q)\")))\n\n for line in p.lines:\n new_line = add_lines2(con2_line, form1_line, line)\n lines.append(new_line)\n\n con4_line = len(lines)-1 # (~~p->q)\n form4_line = lines[-1]\n\n p = prove_specialization(prove_hypothetical_syllogism(), InferenceRule([Formula.parse(\"(~~p->q)\"),\n Formula.parse(\"(q->~~q)\")],\n Formula.parse(\"(~~p->~~q)\")))\n\n for line in p.lines:\n new_line = add_lines3(con4_line, form2_line, form4_line, line)\n lines.append(new_line)\n\n p = Proof(InferenceRule([Formula.parse('(p->q)')], Formula.parse('(~~p->~~q)')),\n {MP, I0, I1, D, N}, lines)\n p = remove_assumption(p)\n\n lines = [line for line in p.lines]\n form6_line = lines[-1]\n\n lines.append(Proof.Line(Formula.parse(\"((~~p->~~q)->(~q->~p))\"), N, []))\n con7_line = len(lines)-1 # ((~~p->~~q)->(~q->~p))\n form7_line = lines[-1]\n p = prove_specialization(prove_hypothetical_syllogism(), InferenceRule([Formula.parse(\"((p->q)->(~~p->~~q))\"),\n Formula.parse(\"((~~p->~~q)->(~q->~p))\")],\n Formula.parse(\"((p->q)->(~q->~p))\")))\n add_lines4(con7_line, form2_line, form4_line, form6_line, form7_line, lines, p)\n\n return Proof(InferenceRule([], Formula.parse(\"((p->q)->(~q->~p))\")), {MP, I0, I1, D, N}, lines)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Identify a matching untriaged crash in the db
def check_untriaged_crash(lasttest, crashtrigger, crashfunction, crashbt, fullcrash, testlogs, DBCONN=None):
    newid = 0
    numreports = 0
    if not crashfunction:
        crashfunction = None
    if not lasttest:
        lasttest = None
    dbconn = DBCONN
    try:
        if not dbconn:
            dbconn = psycopg2.connect(dbname="crashinfo", user="crashinfo", password="blah", host="localhost")
        cur = dbconn.cursor()
        # First let's see if we have a matching crash
        cur.execute("SELECT new_crashes.id, count(triage.newcrash_id) as hitcounts FROM new_crashes, triage WHERE new_crashes.reason=%s AND new_crashes.func=%s AND new_crashes.backtrace=%s AND new_crashes.id = triage.newcrash_id group by new_crashes.id", (crashtrigger, crashfunction, crashbt))
        if cur.rowcount > 1:
            print("Error! not supposed to have more than one matching row in new crashes")
        if cur.rowcount > 0:
            row = cur.fetchone()
            newid = row[0]
            numreports = row[1]
        dbconn.commit()
        cur.close()
    except psycopg2.DatabaseError as e:
        print(str(e))
        return (None, None)  # huh, and what am I supposed to do here?
    finally:
        if not DBCONN and dbconn:
            dbconn.close()
    return newid, numreports
[ "def match_reason(self):\n conn, cursor = sqlite_base.sqlite_connect()\n # Read today crashes reasons.\n _td_reasons = self.get_specific_range_crashes() #[table_id, ROWID, CRASH_ID, PROJECT, REASON]\n # Remove duplicate data.\n _uniqueness_l = self.make_uniquenesss_list(_td_reasons) #[{tableid:rowid}, ROWID, 1, CRASH_ID, PROJECT, REASON),()]\n # Get all the reasons that has been logged.\n for _reason in self.search_sql_reason(conn, cursor): #[(ROWID, PROJECT, REASON), ()]\n if _reason.__len__() != 0:\n for _per_reason in _reason:\n # Clean that reason from table reasons.\n _s_clear = self.mutable_remove(str(_per_reason[-2:])) #_per_reason[-2:]要改成_per_reason[-1]吧\n # Traverse today's reasons list.\n for _iro_key, _iro_value in enumerate(_uniqueness_l):\n # Clean that reason from today's list.\n _iro_clear = self.mutable_remove(str(_iro_value[-2:])) #同上\n # Compute similarity.\n _sim_percent = self.compute_similarity(_iro_clear, _s_clear)\n if _sim_percent == 1:\n del _uniqueness_l[_iro_key]\n # Update frequency once 100% match.\n sqlite_base.update(conn, cursor,\n end=False,\n table_name='reasons',\n columns=['FREQUENCY'],\n values=[_iro_value[2]],\n condition=\"WHERE ROWID = '%d'\" % _per_reason[0])\n # Update that all tables data releation with this reason.\n for i in _iro_value[0].keys():\n conditions = 'WHERE '\n _ll = _iro_value[0][i]\n for key, value in enumerate(_ll):\n if key >= 1:\n conditions += ' or '\n conditions += 'ROWID = %d' % value\n sqlite_base.update(conn, cursor,\n end=False,\n table_name='backtrack_%s' % str(i),\n columns=['REASON_ID'],\n values=[_per_reason[0]],\n condition=conditions)\n\n if conn:\n cursor.close()\n conn.close()\n if _uniqueness_l.__len__() != 0:\n # This list is the new crash relative to old data\n return _uniqueness_l\n else:\n # Empty means today's crash already submitted to JIRA server.\n return []", "def get_specific_range_crashes(self):\n _tables_id = self.get_day_from_statistics()\n if _tables_id:\n for table_id in _tables_id:\n conn, cursor = sqlite_base.sqlite_connect()\n # conn, cursor = sqlite_base.sqlite_connect(sql_abs_path=self.statistic_sql)\n _new_reason = sqlite_base.search(conn, cursor,\n columns='ROWID, CRASH_ID, PROJECT, REASON',\n table_name='backtrack_%d' % table_id,\n condition='where INSERT_TIME > %s and REASON NOT NULL' %\n ReportGenerator.get_yesterday_timestamp())\n if _new_reason:\n for _x_reason in _new_reason:\n _x_reason = list(_x_reason)\n _x_reason.insert(0, table_id) #0为插入位置\n yield list(_x_reason)\n else:\n LOG.cri(' %-20s ]-[ Table %s not match this insert time: %s' %\n (LOG.get_function_name(), table_id, ReportGenerator.get_yesterday_timestamp()))\n else:\n # TODO: SQLITE ERROR MESSAGE NOT COMPLETED YET. 
THAT WILL LET THE LOGIC EXCEPTION IN THE CALL METHOD.\n LOG.info(' %-20s ]-[ Look like have not any new crash today: %s' %\n (LOG.get_function_name(), ReportGenerator.get_yesterday_timestamp()))", "def is_crash(mutation: dict) -> bool:\n return 'crashes' in mutation['path'].parent.name", "def _checkFdbEntryMiss(self):\n result = self.duthost.command(\"show mac\")\n out = result['stdout']\n pytest_assert(self.arp_entry[self.dst_ip].lower() not in out.lower(), \"{} present in FDB\"\n .format(self.arp_entry[self.dst_ip]))\n logger.info(\"'{}' not present in fdb as expected\".format(self.arp_entry[self.dst_ip]))", "def validateHost():\n c = conn.cursor()\n # GET LAST SCAN ID\n c.execute(\"select distinct id from scans order by 1 desc limit 1;\")\n row = c.fetchone()\n count = 0\n if row:\n c.execute(\"select * from scans where id = \"+str(row[0])+\" and mac not in (select mac from whitelist);\")\n rows = c.fetchall()\n for row in rows:\n print(\"Intruder detected in scan [%d] IP:[%s] MAC:[%s] VENDOR:[%s]\" % (row[0], row[1], row[2], row[3]))\n count = count+1\n return count", "def crash_check():\n global CURRENT\n # Grab status\n stat = grab_status()\n\n # Check for seg\n if \"SIGSEGV\" in stat:\n return True\n return False", "def test_find_no_match(self):\n conn, cursor = get_db_cursor()\n build = \"toy_build\"\n transcript_dict = talon.make_transcript_dict(cursor)\n conn.close()\n\n edges = ( 14, 15, 16 )\n gene_ID, transcripts = talon.search_for_transcript_prefix(edges, transcript_dict)\n\n # Make sure that no match got returned\n assert gene_ID == None\n conn.close()", "def parse_crashes_field(page):\n crashes_pattern = re.compile(r'Traffic Fatality #(\\d{1,3})')\n return match_pattern(page, crashes_pattern)", "def is_crash_nonproper_and_directional(crash_id: int) -> str:\n if not str(crash_id).isdigit():\n return False\n\n check_nonproper_polygon_query = \"\"\"\n query find_service_road_location($crashId: Int!) 
{\n find_service_road_location_for_centerline_crash(args: {input_crash_id: $crashId})\n {\n location_id\n }\n }\n \"\"\"\n\n try:\n \"\"\"\n We will attempt to find the record through a query using the find_service_road_location_for_centerline_crash function via Hasura.\n If the location_id key does not contain a location_id, then the crash is not a canidate for being linked to a service road location.\n \"\"\"\n response = requests.post(\n HASURA_ENDPOINT,\n data=json.dumps(\n {\n \"query\": check_nonproper_polygon_query,\n \"variables\": {\n \"crashId\": crash_id\n }\n }\n ),\n headers=HEADERS,\n verify=HASURA_SSL_VERIFY\n )\n if (response.json()[\"data\"][\"find_service_road_location_for_centerline_crash\"][0][\"location_id\"] is None):\n return ''\n else:\n return response.json()[\"data\"][\"find_service_road_location_for_centerline_crash\"][0][\"location_id\"]\n except:\n \"\"\"\n In case the response is broken or invalid, we need to:\n - Output the problem for debugging\n - Default to empty string, False by another name, but fitting in the expected str datatype\n \"\"\"\n return False", "def findResIndexes(self):\n self.reactant['solvated']={}\n self.reactant['solvated']['pdb']=self.simdir+'/reactant/solvated/'+self.id+'.premin.pdb'\n nres=162 #protein residues + DHP + NPD + energy-sink\n pdbf=self.reactant['solvated']['pdb']\n ptin=open(pdbf,'r'); l=ptin.readline()\n resindexes=[]; currires=None; catalytic=False\n while l:\n if l[0:5]=='ATOM ':\n iat=int(l[6:11])-1; ires=l[22:26]; resname=l[17:20]\n if not currires:\n currires=ires #initialize residue index\n group=[] #initialize list of atom residues\n if ires!=currires:\n resindexes.append(group)\n currires=ires\n group=[]\n if resname=='Na+' and not catalytic:\n \"\"\"introduce the catalytic site residue\"\"\"\n for index in self.hot_spot['indexes']:\n group.append(index)\n resindexes.append(group); catalytic=True; group=[];\n if iat not in self.hot_spot['indexes']: group.append(iat)\n l=ptin.readline()\n resindexes.append(group) #enter last group\n self.resinfo={'indexes':resindexes,'nres':nres}\n pdb.set_trace()\n return True", "def search_dmiss(statesfile) :\n dmiss = False\n f = open(statesfile, 'r')\n ln = 0\n for line in f.readlines():\n ln = ln + 1\n if re.search(\"dline_loc_miss\", line):\n dmiss = True\n return dmiss", "def find_crashes(carts: List[Cart]) -> List[Vec]:\n cart_at: Dict[Vec, Cart] = {}\n crashes: List[Vec] = []\n\n for cart in carts:\n if not cart.crashed:\n if cart.pos in cart_at:\n crashes.append(cart.pos)\n cart.crashed = True\n cart_at[cart.pos].crashed = True\n else:\n cart_at[cart.pos] = cart\n return crashes", "def foundBug(self):\n pass", "def check_db_entry(self):\n raise NotImplementedError", "def _ds_detect_errors(self):\n clean_cells = []\n dk_cells = []\n\n self.holo_env.logger.info('starting error detection...')\n for err_detector in self.error_detectors:\n temp = err_detector.get_noisy_dknow_dataframe(\n self.holo_env.dataengine.get_table_to_dataframe(\n 'Init', self.dataset))\n clean_cells.append(temp[1])\n dk_cells.append(temp[0])\n\n num_of_error_detectors = len(dk_cells)\n intersect_dk_cells = dk_cells[0]\n union_clean_cells = clean_cells[0]\n for detector_counter in range(1, num_of_error_detectors):\n intersect_dk_cells = intersect_dk_cells.intersect(\n dk_cells[detector_counter])\n union_clean_cells = union_clean_cells.unionAll(\n clean_cells[detector_counter])\n\n self.holo_env.dataengine.add_db_table(\n 'C_clean', union_clean_cells, self.dataset)\n\n self.holo_env.logger.info('The 
table: ' +\n self.dataset.table_specific_name('C_clean') +\n \" has been created\")\n self.holo_env.logger.info(\" \")\n\n self.holo_env.dataengine.add_db_table(\n 'C_dk', intersect_dk_cells, self.dataset)\n\n self.holo_env.logger.info('The table: ' +\n self.dataset.table_specific_name('C_dk') +\n \" has been created\")\n self.holo_env.logger.info(\" \")\n self.holo_env.logger.info('error detection is finished')\n del union_clean_cells\n del intersect_dk_cells\n del self.error_detectors\n return", "def _get_compare_error_string_mismatching(self, idx, mismatching, dm_1_name, dm_2_name):\n self_error_string = str(idx) + '. Following fuses are mismatching between ' + dm_1_name + ' and ' + dm_2_name + ':' + '\\n\\n'\n\n # Create a table of the missing fuses\n tp = TablePrinter([0])\n\n # Header for the table\n tp.insert_data(0, 0, 'S.No.')\n tp.insert_data(0, 1, 'DataModel')\n tp.insert_data(0, 2, 'Address')\n tp.insert_data(0, 3, 'Region_ID')\n tp.insert_data(0, 4, 'Operation')\n tp.insert_data(0, 5, 'Value')\n\n for i, fuse_data in enumerate(mismatching):\n idx = (i * 3) + 1\n\n pre_r_id = '*' if (fuse_data[1][0] != fuse_data[2][0]) else ''\n pre_op = '*' if (fuse_data[1][1] != fuse_data[2][1]) else ''\n pre_val = '*' if (fuse_data[1][2] != fuse_data[2][2]) else ''\n\n # Insert info from dm_1 in the table\n tp.insert_data(idx, 0, str(i + 1))\n tp.insert_data(idx, 1, dm_1_name)\n tp.insert_data(idx, 2, str(fuse_data[0]))\n tp.insert_data(idx, 3, pre_r_id + str(fuse_data[1][0]))\n tp.insert_data(idx, 4, pre_op + str(fuse_data[1][1]))\n tp.insert_data(idx, 5, pre_val + str(fuse_data[1][2]))\n\n # Increase index for second data model\n idx += 1\n\n # Insert info from dm_1 in the table\n tp.insert_data(idx, 1, dm_2_name)\n tp.insert_data(idx, 2, str(fuse_data[0]))\n tp.insert_data(idx, 3, pre_r_id + str(fuse_data[2][0]))\n tp.insert_data(idx, 4, pre_op + str(fuse_data[2][1]))\n tp.insert_data(idx, 5, pre_val + str(fuse_data[2][2]))\n\n self_error_string += tp.get_data()\n return self_error_string", "def find_crash_location(crash_id: int) -> Optional[str]:\n if not str(crash_id).isdigit():\n return None\n\n find_location_query = \"\"\"\n query getLocationAssociation($crash_id: Int!) {\n find_location_for_cr3_collision(args: {id: $crash_id}){\n location_id\n }\n }\n \"\"\"\n\n\n try:\n response = requests.post(\n HASURA_ENDPOINT,\n data=json.dumps(\n {\n \"query\": find_location_query,\n \"variables\": {\n \"crash_id\": crash_id\n }\n }\n ),\n headers=HEADERS,\n verify=HASURA_SSL_VERIFY\n )\n return response.json()[\"data\"][\"find_location_for_cr3_collision\"][0][\"location_id\"]\n except:\n return None", "def check_db_entry(self):\n _date = self.db_entry['date_utc'].strftime('%Y%m%d')\n\n _pipe_names = ['bright_star', 'faint_star']\n\n for _pipe_name in _pipe_names:\n\n try:\n # pipe self\n _path_pipe = os.path.join(self.config['path']['path_archive'], _date, self.id, _pipe_name)\n\n # path exists? if yes -- processing must have occurred (to some extent at least)\n if os.path.exists(_path_pipe):\n # do not check enqueued stuff here. 
make sure 100p.fits exists\n if (_pipe_name in self.db_entry['pipelined']) and \\\n (not self.db_entry['pipelined'][_pipe_name]['status']['enqueued']):\n # check modified date:\n _fits = '100p.fits' if _pipe_name == 'bright_star' \\\n else '{:s}_summed.fits'.format(self.db_entry['_id'])\n if _fits in os.listdir(_path_pipe):\n time_tag = datetime.datetime.utcfromtimestamp(os.stat(os.path.join(_path_pipe, _fits)).st_mtime)\n # time_tag = mdate_walk(_path_pipe)\n # bad time tag? force redo!\n if abs((time_tag -\n self.db_entry['pipelined'][_pipe_name]['last_modified']).total_seconds()) > 1.0:\n\n # make sure nothing propagates accidentally before DB record is updated\n self.db_entry['pipelined'].pop(_pipe_name, None)\n self.db_entry['distributed']['status'] = False\n self.db_entry['distributed']['location'] = []\n _utc_now = utc_now()\n self.db_entry['distributed']['last_modified'] = _utc_now\n\n return {'status': 'ok',\n 'message': 'DB entry for {:s} does not reflect reality'.format(self.id),\n 'db_record_update': ({'_id': self.id},\n {\n '$set': {\n 'distributed.status': False,\n 'distributed.location': [],\n 'distributed.last_modified': _utc_now\n },\n '$unset': {\n 'pipelined.{:s}'.format(_pipe_name): 1\n }\n }\n )\n }\n # path does not exist? make sure it's not present in DB entry and/or not marked 'done'\n elif (_pipe_name in self.db_entry['pipelined']) and \\\n self.db_entry['pipelined'][_pipe_name]['status']['done']:\n\n self.db_entry['pipelined'].pop(_pipe_name, None)\n self.db_entry['distributed']['status'] = False\n self.db_entry['distributed']['location'] = []\n _utc_now = utc_now()\n self.db_entry['distributed']['last_modified'] = _utc_now\n\n return {'status': 'ok', 'message': 'DB entry for {:s} does not reflect reality'.format(self.id),\n 'db_record_update': ({'_id': self.id},\n {\n '$set': {\n 'distributed.status': False,\n 'distributed.location': [],\n 'distributed.last_modified': _utc_now\n },\n '$unset': {\n 'pipelined.{:s}'.format(_pipe_name): 1\n }\n }\n )\n }\n\n except Exception as _e:\n traceback.print_exc()\n\n self.db_entry['pipelined'].pop(_pipe_name, None)\n self.db_entry['distributed']['status'] = False\n self.db_entry['distributed']['location'] = []\n _utc_now = utc_now()\n self.db_entry['distributed']['last_modified'] = _utc_now\n\n return {'status': 'error', 'message': str(_e),\n 'db_record_update': ({'_id': self.id},\n {\n '$set': {\n 'distributed.status': False,\n 'distributed.location': [],\n 'distributed.last_modified': _utc_now\n },\n '$unset': {\n 'pipelined.{:s}'.format(_pipe_name): 1\n }\n }\n )\n }\n\n return {'status': 'ok', 'message': None}", "def test_failure(database):\n\n # test for cfda_number that doesn't exist in the table\n cfda = CFDAProgram(program_number=12.340)\n fabs_1 = FABSFactory(cfda_number='54.321', correction_delete_indicatr='')\n fabs_2 = FABSFactory(cfda_number='AB.CDE', correction_delete_indicatr='c')\n fabs_3 = FABSFactory(cfda_number='11.111', correction_delete_indicatr=None)\n\n errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3, cfda])\n assert errors == 3" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if we have a matching crash and add it; if we have a new one, add a new one
def add_new_crash(lasttest, crashtrigger, crashfunction, crashbt, fullcrash, testlogs, link, CREATETIME=None, DBCONN=None): if not crashfunction: crashfunction = None if not lasttest: lasttest = None dbconn = DBCONN newid, numreports = check_untriaged_crash(lasttest, crashtrigger, crashfunction, crashbt, fullcrash, testlogs, DBCONN=dbconn) if newid is None: # Error? bail out return newid, numreports try: if not dbconn: dbconn = psycopg2.connect(dbname="crashinfo", user="crashinfo", password="blah", host="localhost") cur = dbconn.cursor() if newid == 0: # Need to add it cur.execute("INSERT INTO new_crashes(reason, func, backtrace) VALUES(%s, %s, %s) RETURNING id", (crashtrigger, crashfunction, crashbt)) newid = cur.fetchone()[0] if CREATETIME: cur.execute("INSERT INTO triage(link, testline, fullcrash, testlogs, newcrash_id, created_at) VALUES (%s, %s, %s, %s, %s, %s)", (link, lasttest, fullcrash, testlogs, newid, CREATETIME)) else: cur.execute("INSERT INTO triage(link, testline, fullcrash, testlogs, newcrash_id) VALUES (%s, %s, %s, %s, %s)", (link, lasttest, fullcrash, testlogs, newid)) dbconn.commit() cur.close() except psycopg2.DatabaseError as e: print(str(e)) return (0, 0) # huh, and what am I supposed to do here? finally: if not DBCONN and dbconn: dbconn.close() return (newid, numreports)
[ "def get_specific_range_crashes(self):\n _tables_id = self.get_day_from_statistics()\n if _tables_id:\n for table_id in _tables_id:\n conn, cursor = sqlite_base.sqlite_connect()\n # conn, cursor = sqlite_base.sqlite_connect(sql_abs_path=self.statistic_sql)\n _new_reason = sqlite_base.search(conn, cursor,\n columns='ROWID, CRASH_ID, PROJECT, REASON',\n table_name='backtrack_%d' % table_id,\n condition='where INSERT_TIME > %s and REASON NOT NULL' %\n ReportGenerator.get_yesterday_timestamp())\n if _new_reason:\n for _x_reason in _new_reason:\n _x_reason = list(_x_reason)\n _x_reason.insert(0, table_id) #0为插入位置\n yield list(_x_reason)\n else:\n LOG.cri(' %-20s ]-[ Table %s not match this insert time: %s' %\n (LOG.get_function_name(), table_id, ReportGenerator.get_yesterday_timestamp()))\n else:\n # TODO: SQLITE ERROR MESSAGE NOT COMPLETED YET. THAT WILL LET THE LOGIC EXCEPTION IN THE CALL METHOD.\n LOG.info(' %-20s ]-[ Look like have not any new crash today: %s' %\n (LOG.get_function_name(), ReportGenerator.get_yesterday_timestamp()))", "def check_untriaged_crash(lasttest, crashtrigger, crashfunction, crashbt, fullcrash, testlogs, DBCONN=None):\n newid = 0\n numreports = 0\n if not crashfunction:\n crashfunction = None\n if not lasttest:\n lasttest = None\n dbconn = DBCONN\n try:\n if not dbconn:\n dbconn = psycopg2.connect(dbname=\"crashinfo\", user=\"crashinfo\", password=\"blah\", host=\"localhost\")\n cur = dbconn.cursor()\n # First let's see if we have a matching crash\n cur.execute(\"SELECT new_crashes.id, count(triage.newcrash_id) as hitcounts FROM new_crashes, triage WHERE new_crashes.reason=%s AND new_crashes.func=%s AND new_crashes.backtrace=%s AND new_crashes.id = triage.newcrash_id group by new_crashes.id\", (crashtrigger, crashfunction, crashbt))\n if cur.rowcount > 1:\n print(\"Error! 
not supposed to have more than one matching row in new crashes\")\n if cur.rowcount > 0:\n row = cur.fetchone()\n newid = row[0]\n numreports = row[1]\n dbconn.commit()\n cur.close()\n except psycopg2.DatabaseError as e:\n print(str(e))\n return (None, None) # huh, and what am I supposed to do here?\n finally:\n if not DBCONN and dbconn:\n dbconn.close()\n return newid, numreports", "def is_crash(mutation: dict) -> bool:\n return 'crashes' in mutation['path'].parent.name", "def add_bug(self, request, new_request_data):\n # Extract the body from the new request data\n new_body = utils.get_response_body(new_request_data)\n with open(os.path.join(logger.LOGS_DIR, 'payload_buckets.txt'), 'a') as file:\n # Check to see if we have logged any bugs for this request yet\n if request.method_endpoint_hex_definition not in self._buckets:\n self._buckets[request.method_endpoint_hex_definition] = set()\n # Write the header for this particular request to the log\n file.write(f'{request.method} {request.endpoint_no_dynamic_objects}\\n')\n\n error_str = self._get_error_str(request, new_body) or 'Other'\n if error_str not in self._buckets[request.method_endpoint_hex_definition]:\n if error_str == INVALID_JSON_STR:\n # body is invalid JSON, so just extract what's at the end of the\n # request for logging purposes\n new_body = new_request_data.split(DELIM)[-1]\n self._buckets[request.method_endpoint_hex_definition].add(error_str)\n file.write(f'\\t{error_str}\\n\\t{new_body}\\n\\n')\n return (error_str, new_body)\n return None", "def put(self, crash_report_id):\n pass", "def match_reason(self):\n conn, cursor = sqlite_base.sqlite_connect()\n # Read today crashes reasons.\n _td_reasons = self.get_specific_range_crashes() #[table_id, ROWID, CRASH_ID, PROJECT, REASON]\n # Remove duplicate data.\n _uniqueness_l = self.make_uniquenesss_list(_td_reasons) #[{tableid:rowid}, ROWID, 1, CRASH_ID, PROJECT, REASON),()]\n # Get all the reasons that has been logged.\n for _reason in self.search_sql_reason(conn, cursor): #[(ROWID, PROJECT, REASON), ()]\n if _reason.__len__() != 0:\n for _per_reason in _reason:\n # Clean that reason from table reasons.\n _s_clear = self.mutable_remove(str(_per_reason[-2:])) #_per_reason[-2:]要改成_per_reason[-1]吧\n # Traverse today's reasons list.\n for _iro_key, _iro_value in enumerate(_uniqueness_l):\n # Clean that reason from today's list.\n _iro_clear = self.mutable_remove(str(_iro_value[-2:])) #同上\n # Compute similarity.\n _sim_percent = self.compute_similarity(_iro_clear, _s_clear)\n if _sim_percent == 1:\n del _uniqueness_l[_iro_key]\n # Update frequency once 100% match.\n sqlite_base.update(conn, cursor,\n end=False,\n table_name='reasons',\n columns=['FREQUENCY'],\n values=[_iro_value[2]],\n condition=\"WHERE ROWID = '%d'\" % _per_reason[0])\n # Update that all tables data releation with this reason.\n for i in _iro_value[0].keys():\n conditions = 'WHERE '\n _ll = _iro_value[0][i]\n for key, value in enumerate(_ll):\n if key >= 1:\n conditions += ' or '\n conditions += 'ROWID = %d' % value\n sqlite_base.update(conn, cursor,\n end=False,\n table_name='backtrack_%s' % str(i),\n columns=['REASON_ID'],\n values=[_per_reason[0]],\n condition=conditions)\n\n if conn:\n cursor.close()\n conn.close()\n if _uniqueness_l.__len__() != 0:\n # This list is the new crash relative to old data\n return _uniqueness_l\n else:\n # Empty means today's crash already submitted to JIRA server.\n return []", "def crash_check():\n global CURRENT\n # Grab status\n stat = grab_status()\n\n # Check for seg\n if 
\"SIGSEGV\" in stat:\n return True\n return False", "def find_crashes(carts: List[Cart]) -> List[Vec]:\n cart_at: Dict[Vec, Cart] = {}\n crashes: List[Vec] = []\n\n for cart in carts:\n if not cart.crashed:\n if cart.pos in cart_at:\n crashes.append(cart.pos)\n cart.crashed = True\n cart_at[cart.pos].crashed = True\n else:\n cart_at[cart.pos] = cart\n return crashes", "def panic_has_occured(self,fn):\n\t\tprint(\"++ A panic has occured! ++\", fn) \n\n\t\t# Extract the IP address from the panic log.\n\t\tfound = re.findall(r'(?:[\\d]{1,3})\\.(?:[\\d]{1,3})\\.(?:[\\d]{1,3})\\.(?:[\\d]{1,3})',fn)\n\t\tif found:\n\t\t\tpanic_client = found[len(found)-1] \t# The host which crashed\n\t\t\tprint(panic_client)\t\t\t\n\n\t\t# Save the panic log to the file system. \n\t\tprint(\"++ Backing up panic log ++\") \n\t\tx = str(uuid.uuid4())\n\n\t\tcrash_path = os.path.join(self.crash_path,x)\n\t\tpanic_path = os.path.join(self.panic_path,fn)\n\n\t\tos.makedirs(crash_path)\n\n\t\ttry:\n\t\t\tshutil.move(panic_path,crash_path)\n\t\texcept:\n\t\t\tprint(\"++ ERR: Could not move panic log - This is fatal!\")\n\t\t\tprint(\"++ Check permissions on the panic directory\")\n\t\t\tsys.exit(1)\n\n\t\t# Look up associated log file from the network logger\n\t\tprint(\"++ Getting associated fuzzer log file ++\")\n\t\tlines = self.get_network_logger_data(found[len(found)-1])\n\t\t\n\t\t# If we have found log data\n\t\tif lines:\n\t\t\tprint(len(lines))\n\n\t\t\t# Save the fuzzer logfile within the crash dir. \n\t\t\trepro_path = os.path.join(crash_path,x + \".c\")\n\t\t\tfd = open(repro_path,\"w\")\n\n\t\t\t# Format the log file properly. \n\t\t\tfor l in lines:\n\t\t\t\tfd.write(l + \"\\n\")\n\t\t\tfd.close()\n\n\t\t\t# Try create a repro file.\n\t\t\tself.create_repro(repro_path) \n\n\t\t# Try to backup the binary and python script used, incase we cannot repro from log file only. \n\t\ttry:\n\t\t\tshutil.copy(self.bughunt_start_src,crash_path)\n\t\texcept:\n\t\t\tprint(\"++ Could not copy bughunt python script to crash dir ++\")\n\t\ttry:\n\t\t\tshutil.copy(self.bughunt_binary_src,crash_path)\n\t\texcept:\n\t\t\tprint(\"++ Could not copy bughunt binary to crash dir ++\")\n\t\t\t\n\t\t# TODO: Send it off to database \n\n\t\t# Revert the state back to the snapshot. \n\t\tself.revert_vm(panic_client)\n\n\t\t# Start the fuzzer running again. 
", "def process_missing_ref(self, match):\n message = LogFileMessage()\n message[\"type\"] = f\"Missing {match.group(1)}\"\n message[\"key\"] = match.group(2)\n message[\"page\"] = match.group(3)\n message[\"line\"] = match.group(4)\n\n self.missing_refs.append(message)\n return message", "def mark_as_crash(self, event, filename):\n if filename:\n event.add_attributes({\"crash_app\": filename})\n event.add_tags([\"win_crash\"])", "def test_severity_add_error_already_exists(self):\n self.execute('severity add blocker')\n rv, output = self.execute('severity add blocker')\n self.assertEqual(2, rv, output)\n self.assertExpectedResult(output)", "def parse_crashes_field(page):\n crashes_pattern = re.compile(r'Traffic Fatality #(\\d{1,3})')\n return match_pattern(page, crashes_pattern)", "def try_to_append(self, sub):\n if sub is None:\n return\n if sub.index != self.index:\n return\n self.index += sub.window\n self.substrings.append(sub)\n self.systems.update(sub.get_systems())", "def has_crashes(self, has_crashes):\n\n self._has_crashes = has_crashes", "def foundBug(self):\n pass", "def insertException(self, calException):\n date = calException.day.date()\n if (calException is None or\n date < self.valid_from.date() or\n date > self.valid_until.date()):\n return\n # First delete the old CalendarEntry, then add the new corresponding one\n self.deleteBaseCalendarEntries(from_date=date, to_date=date)\n chg_ctrl = CalendarEntry.MakeChangeControlAttributes()\n CalendarEntry.Create(day=calException.day,\n day_type_id=calException.day_type_id,\n description=calException.description,\n calendar_profile_id=self.cdb_object_id,\n weekday=date2weekday(date),\n cdb_cpersno=chg_ctrl['cdb_cpersno'],\n cdb_mpersno=chg_ctrl['cdb_mpersno'],\n cdb_cdate=chg_ctrl['cdb_cdate'],\n cdb_mdate=chg_ctrl['cdb_mdate'])", "def add(self, match: CompleteMatch) -> bool:\n if match not in self.all_matches:\n self.all_matches.append(match)\n self.complete_matches[match.start][match.external].append(match)\n self.complete_matches[match.start][None].append(match)\n return True\n return False", "def consider_new_frame_type( self, new_frame_type, new_frame_inst): # void\n # comparing with already existing frames\n for frame_type in self.frame_types:\n if frame_type.is_identical_with( new_frame_type):\n frame_type.reconnect_insts(new_frame_type)\n frame_type.add_inst( new_frame_inst)\n break\n else: # no identical frame was found\n self.frame_types.append( new_frame_type)\n\n return new_frame_inst" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Automatically generate formatted inputs and outputs from input_shapes
def make_inputs_outputs(input_shapes, dtype, is_bfloat16=False): input_list = [ np.random.random(shape).astype(dtype) for shape in input_shapes ] output_shape = find_output_shape(input_list) output_list = [ x + np.zeros(output_shape).astype(x.dtype) for x in input_list ] if is_bfloat16: input_list = [ convert_float_to_uint16(input_list[i]) for i in range(len(input_list)) ] output_list = [ convert_float_to_uint16(output_list[i]) for i in range(len(output_list)) ] output_formatted = { "Out": [(f"out{i}", output_list[i]) for i in range(len(output_list))] } input_formatted = { "X": [(f"x{i}", input_list[i]) for i in range(len(input_list))] } return input_formatted, output_formatted
[ "def test_inputs(self):\n assert list(self._iter_input_shapes())", "def infer_shapes(nlp: Pipeline, framework: str) -> Tuple[List[str], List[str], Dict, BatchEncoding]:\n\n def build_shape_dict(name: str, tensor, is_input: bool, seq_len: int):\n if isinstance(tensor, (tuple, list)):\n return [build_shape_dict(name, t, is_input, seq_len) for t in tensor]\n\n else:\n # Let's assume batch is the first axis with only 1 element (~~ might not be always true ...)\n axes = {[axis for axis, numel in enumerate(tensor.shape) if numel == 1][0]: \"batch\"}\n if is_input:\n if len(tensor.shape) == 2:\n axes[1] = \"sequence\"\n else:\n raise ValueError(f\"Unable to infer tensor axes ({len(tensor.shape)})\")\n else:\n seq_axes = [dim for dim, shape in enumerate(tensor.shape) if shape == seq_len]\n axes.update({dim: \"sequence\" for dim in seq_axes})\n\n print(f\"Found {'input' if is_input else 'output'} {name} with shape: {axes}\")\n return axes\n\n tokens = nlp.tokenizer(\"This is a sample output\", return_tensors=framework)\n seq_len = tokens.input_ids.shape[-1]\n outputs = nlp.model(**tokens) if framework == \"pt\" else nlp.model(tokens)\n if isinstance(outputs, ModelOutput):\n outputs = outputs.to_tuple()\n if not isinstance(outputs, (list, tuple)):\n outputs = (outputs,)\n\n # Generate input names & axes\n input_vars = list(tokens.keys())\n input_dynamic_axes = {k: build_shape_dict(k, v, True, seq_len) for k, v in tokens.items()}\n\n # flatten potentially grouped outputs (past for gpt2, attentions)\n outputs_flat = []\n for output in outputs:\n if isinstance(output, (tuple, list)):\n outputs_flat.extend(output)\n else:\n outputs_flat.append(output)\n\n # Generate output names & axes\n output_names = [f\"output_{i}\" for i in range(len(outputs_flat))]\n output_dynamic_axes = {k: build_shape_dict(k, v, False, seq_len) for k, v in zip(output_names, outputs_flat)}\n\n # Create the aggregated axes representation\n dynamic_axes = dict(input_dynamic_axes, **output_dynamic_axes)\n return input_vars, output_names, dynamic_axes, tokens", "def _construct_input_spec(self):", "def create_inputs(config):\n return([(\"fastq/{sample}\" + expand(\"{ending}{suffix}\", \\\n ending=R1_file_ending, suffix=suffix)[0]+\"\"),\n (\"fastq/{sample}\" + expand(\"{ending}{suffix}\", \\\n ending=R2_file_ending, suffix=suffix)[0]+\"\")])", "def write_shape_input_file(\n input_file,\n name,\n structure,\n num_vertices,\n central_atom_id,\n ref_shapes,\n):\n\n title = '$shape run by atools\\n'\n size_of_poly = f'{num_vertices} {central_atom_id}\\n'\n codes = ' '.join(ref_shapes)+'\\n'\n\n structure_string = f'{name}\\n'\n pos_mat = structure.get_position_matrix()\n for atom in structure.get_atoms():\n ele = atom.__class__.__name__\n x, y, z = pos_mat[atom.get_id()]\n structure_string += f'{ele} {x} {y} {z}\\n'\n\n string = title+size_of_poly+codes+structure_string\n\n with open(input_file, 'w') as f:\n f.write(string)", "def build(self, input_shape):\n self._check_if_input_shape_is_none(input_shape)\n self._check_size_of_dimensions(input_shape)\n self._create_input_spec(input_shape)\n self.built = True\n super().build(input_shape)", "def outputs(self) -> Dict[str, TypeShape]:\n raise NotImplementedError()", "def _get_output_shapes(\n self, model_fn: Callable, output_names: Optional[Sequence[str]]\n ) -> Union[Sequence[SHAPE_TYPE], Dict[str, SHAPE_TYPE]]:\n\n pass", "def register_inputs(self, input_node_names, input_node_shapes):\r\n for name, shape in zip(input_node_names, input_node_shapes):\r\n self.register_input(name, shape)", 
"def _populate_from_inputs(self, outputs):\n for output in outputs:\n # If a argument in the string from input\n if '${' in output[0]:\n name = output[0].partition('{')[-1].rpartition('}')[0]\n if name in list(self.src_inputs.keys()):\n for val in self.src_inputs.get(name).split(','):\n out1 = output[0].replace('${%s}' % name, val)\n out2 = output[1]\n out3 = output[2].replace('${%s}' % name, val)\n self.outputs.append((out1, out2, out3))\n else:\n err = '${ found in the output but the format is unknown. \\\nThe format {} can not be read by the spider because {} is not an input.'\n raise AutoSpiderError(err.format(output[0], name))\n else:\n self.outputs.append(output)", "def format_input(self):\n # to get the weight matrix for the embedding layer\n self.get_weights_matrix()\n\n try:\n shutil.rmtree('./data/inputs/word2vec')\n except:\n pass\n os.mkdir('./data/inputs/word2vec')\n\n self.path_sentences = './data/inputs/sentences.txt'\n self.path_labels = './data/inputs/labels.txt'\n self.path_sentences_output = './data/inputs/word2vec/sentences.npy'\n self.path_labels_output = './data/inputs/word2vec/labels.npy'\n\n with open(self.path_sentences, 'r+') as f:\n lines = f.readlines()\n max_lenght = max([len(line.split()) for line in lines])\n sentences = np.zeros((len(lines), max_lenght)) # size = samples x max lenght of sentences\n i = 0\n nb_unknown = 0\n nb_token = 0\n for line in lines:\n sentence_formated = []\n for word in line.split():\n nb_token += 1\n try:\n sentence_formated.append(self.index_dict[word.decode('utf8')])\n except:\n sentence_formated.append(0)\n nb_unknown += 1\n lenght = len(sentence_formated)\n sentences[i, :lenght] = sentence_formated[:lenght]\n i += 1\n print('there was', nb_unknown, 'unknown tokens out of', nb_token, 'total tokens, which account for', int((float(nb_unknown) / float(nb_token))*100), '% of all tokens')\n\n with open(self.path_labels, 'r+') as f:\n lines = f.readlines()\n lines = map(int, lines)\n lb = LabelBinarizer()\n labels = lb.fit_transform(lines)\n # labels = np.zeros((len(lines), 1))\n # i = 0\n # for line in lines:\n # labels[i] = line\n # i += 1\n\n with open(self.path_sentences_output, 'wb') as f:\n np.save(f, sentences)\n with open(self.path_labels_output, 'wb') as f:\n np.save(f, labels)\n\n print('shape of sentences (nb_sample, max_len):', sentences.shape)\n print('shape of labels (nb_sample):', labels.shape)\n return sentences, labels", "def build(self, hp, inputs=None):\n input_node = nest.flatten(inputs)\n meta_interactor_num = self.meta_interactor_num or hp.Choice('meta_interactor_num',\n [1, 2, 3, 4, 5, 6],\n default=3)\n interactors_name = []\n for idx in range(meta_interactor_num):\n tmp_interactor_type = self.interactor_type or hp.Choice('interactor_type_' + str(idx),\n list(self.name2interactor.keys()),\n default='InnerProductInteraction')\n interactors_name.append(tmp_interactor_type)\n\n outputs = [self.name2interactor[interactor_name]().build(hp, input_node)\n for interactor_name in interactors_name]\n\n # DO WE REALLY NEED TO CAT THEM?\n outputs = [tf.keras.layers.Flatten()(node) if len(node.shape) > 2 else node for node in outputs]\n outputs = tf.concat(outputs, axis=1)\n return outputs", "def build(self,input_shape):\r\n self.input_shape = input_shape\r\n return input_shape", "def Network_gen(inputs, topo, outputs, name, input_size, output_size):\n\n # Creates a new file\n topo_done = open(\"%s.vhd\" %name, \"w+\")\n\n topo_string = 'import pandas as pd \\nimport math \\n \\ndef main(): \\n'\n \n in_size = input_size\n 
out_size = output_size\n\n input_list = []\n for i in range(1, inputs):\n # Gero tambem uma lista com os nomes das entradas\n # Gero a primeira camada da minha rede, a camada de inputs\n inputs_list.append('input_%s' %i)\n topo_string = topo_string + '\\n'\n\n topo_string = topo_string + ');'\n \n\n \n for layer in range(len(topo)):\n # Gero cada camada da topologia\n layer_nodes = topo[layer]\n\n for node in range(layer_nodes):\n topo_string = topo_string + ''", "def output_shapes(self, l_in):\r\n shapes = [(self.in_channel, l_in)]\r\n for conv1d_unit in self.conv_layers:\r\n shapes.append(conv1d_unit.output_shape(shapes[-1][1]))\r\n return shapes", "def generate_input_output():\n for solution in get_solutions():\n # get and load solution module\n module_path = solution[0]\n module = _get_module(module_path)\n module_dir, module_name = os.path.split(module_path)\n module_name = get_module_name(module_name)\n # generate input data and obtain output\n input_fpath, output_fpath = map(\n lambda ext: os.path.join(\n module_dir,\n \"{}.{}\".format(module_name, ext)\n ),\n [\"in\", \"out\"]\n )\n # and write them to disk\n with open(input_fpath, \"w\") as input_fout, \\\n open(output_fpath, \"w\") as output_fout:\n for data in module.generate():\n input_fout.write(\"{}\\n\".format(json.dumps(data)))\n output_fout.write(\"{}\\n\".format(\n json.dumps(module.compute(data))\n )\n )", "def inputs(service_template_name, model_storage, logger):\n logger.info('Showing inputs for service template {0}...'.format(service_template_name))\n print_service_template_inputs(model_storage, service_template_name, logger)", "def gen_inputs():\n #generate dates\n d0 = date(1970, 1, 1)\n days = random.randint(1, 20000)\n val_date = d0 + timedelta(days=days)\n days = random.randint(1, 10)\n settle_date = val_date + timedelta(days=days)\n days = random.randint(1, 5000)\n exercise_date = settle_date + timedelta(days=days)\n \n #generate stock, strike, vol\n stock = random.uniform(0, 1000)\n strike = random.uniform(0, 1000)\n vol = random.uniform(0, 2)\n \n put_call = \"put\"\n risk_free = random.uniform(0, 1.0)\n dividend = random.uniform(0, 1.0)\n method = \"PDE\"\n time_steps = 800\n grid_points = 800\n return {\"ValDate\": val_date, \n \"SettleDate\": settle_date,\n \"ExerciseDate\": exercise_date,\n \"Stock\": stock,\n \"Strike\": strike,\n \"Vol\": vol,\n \"PutCall\": put_call,\n \"RiskFreeRate\": risk_free,\n \"Dividend\": dividend,\n \"Method\": method,\n \"TimeSteps\": time_steps,\n \"GridPoints\": grid_points\n }", "def output_shape(arg_name: str, shape: Union[int, Collection[int]]):\n # TODO: how does this work? How do we know the shape before runtime?\n\n def decorator(func):\n _quick_set(func, 'output_shape', arg_name, shape, {})\n return func\n\n return decorator" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload a collection of events to an InfluxDB instance. This uses a POST to upload data to a named database at a URL with a particular timestamp resolution. If present, the user and password are used to do the upload. Events are uploaded in batches of a configurable size. The events in the collection have to be in InfluxDB Line Protocol format.
def upload(upload_spec, collection, context=None): assert upload_spec['type'].lower() == "influxdb" raise_exceptions = upload_spec.get('raise-exceptions', False) user = upload_spec.get('user', None) password = upload_spec.get('password', None) timestamp_resolution = upload_spec.get('timestamp-resolution', 'us') batch_size = upload_spec.get('batch-size', 1000) if context is None: context = {} try: url = upload_spec['url'] except KeyError: logger.error("No url supplied to upload for InfluxDB.") if raise_exceptions: raise return try: database = upload_spec['database'] except KeyError: logger.error("No database supplied to upload for InfluxDB.") if raise_exceptions: raise return url = expand_vars(context, url) database = expand_vars(context, database) # map the precision to InfluxDB. try: precision = {'ns': 'n', 'us': 'u', 'ms': 'ms', 's': 's'}[timestamp_resolution] except KeyError: logger.error("Precision isn't one of ns, us, ms or s: %s", timestamp_resolution) if raise_exceptions: raise return if not url.endswith("/"): url = "{}/".format(url) post_url = "{}write?db={}&precision={}".format( url, database, precision) if user: post_url = "{}&u={}".format(post_url, expand_vars(context, user)) if password: post_url = "{}&p={}".format(post_url, expand_vars(context, password)) # Now got all the possible information to be able to do the uplaods. logger.info( "Starting upload to InfluxDB, database: %s, user: %s, " "precision: %s, maximum batch_size: %s", database, user, timestamp_resolution, batch_size) with collection.events(precision=timestamp_resolution) as events: while True: batch_events = itertools.islice(events, batch_size) batch = "\n".join(b[1] for b in batch_events) if not batch: break # count the number of lines: size = batch.count("\n") + 1 logger.info("Uploading %s records to InfluxDB", size) logger.debug("batch\n%s", batch) # Essentially: curl -i -XPOST 'http://172.16.1.95:8086/write? # db=mydb&precision=u' --data-binary @batch.logs try: result = requests.post(post_url, data=batch) except Exception as e: logger.error("Error raised when uploading batch: %s", str(e)) if raise_exceptions: raise return if result.status_code not in (requests.codes.ok, requests.codes.no_content, requests.codes.accepted): logger.error( "Batch upload failed. status_code: %s", result.status_code) if raise_exceptions: result.raise_for_status() logger.error("Abandoning batch upload to InfluxDB") return logger.info( "Finished upload to InfluxDB, database: %s, user: %s, " "precision: %s", database, user, timestamp_resolution)
[ "def upload_to_google_calendar(events):\n batch = MyCalendarBatchInsert()\n\n for event in events:\n batch.add(event.to_gcal_event())\n\n return batch.execute()", "def influx_upload(self, config, data_filename):\n points = '%s-data-points.json' % config['metric']\n jq_point = config.get('measurements', {}).get('jq', None)\n if not jq_point:\n return\n do_jq(jq_point, data_filename, points)\n with open(points) as points_file:\n try:\n points = json.load(points_file)\n except ValueError:\n print >>sys.stderr, \"No influxdb points to upload.\\n\"\n return\n if not self.influx:\n print >>sys.stderr, (\n 'Skipping influxdb upload of metric %s, no db configured.\\n'\n % config['metric']\n )\n return\n points = [ints_to_floats(point) for point in points]\n self.influx.write_points(points, time_precision='s', batch_size=100)", "def _upload_telemetry(self):\n\n # Are there any entries at all?\n queue_entries = self._data_queue.num_entries()\n if queue_entries >= 1:\n # On every upload report current queue size\n data = {'tb-qsize': queue_entries}\n self._data_queue.add(data)\n\n # Build HTTP query string with queue data\n entries = self._data_queue.first_entries(Things.TELEMETRY_MAX_ITEMS_TO_UPLOAD)\n num_entries = len(entries)\n assert len(entries) >= 0\n\n post_data = list()\n for entry in entries:\n data = {'ts': entry['time'], 'values': entry['data']}\n post_data.append(data)\n\n # Upload the collected data\n res = self._post_data('telemetry', post_data)\n if res:\n # Transmission was ok, remove data from queue\n self._data_queue.remove_first(num_entries)\n logger.debug(f'removing {num_entries} entries from queue')\n else:\n logger.warning('could not upload telemetry data, keeping in queue')\n logger.warning(f'{queue_entries} entries in queue')", "def testUploadUsesBatchSize(self):\n client = DatasetImporter(1)\n client.upload(u'user',\n [{'about': u'hello world', 'values': {u'user/bar': 13}},\n {'about': u'wubble', 'values': {u'user/quux': 42}}])\n self.assertTrue(self.log.getvalue().startswith(\n 'Importing 2 new objects.\\nImported 1/2 new objects.\\n'\n 'Imported 2/2 new objects.\\nImported 2 objects in '))", "def send_events_batch(self, data):\n return self._write_request(self._base_url, 'track/', data, batch=True)", "def test_write_points_batch(self):\n with requests_mock.Mocker() as m:\n m.register_uri(requests_mock.POST,\n \"http://localhost:8086/db/db/series\")\n cli = InfluxDBClient('localhost', 8086,\n 'username', 'password', 'db')\n cli.write_points(data=self.dummy_points, batch_size=2)\n self.assertEqual(1, m.call_count)", "def log_to_influxdb(\n client: influxdb.InfluxDBClient,\n fields: Dict[str, Any],\n tags: Dict[str, Any],\n):\n json_body = [{\n 'measurement': 'jvb_stats',\n 'tags': tags,\n 'fields': fields,\n }]\n client.write_points(json_body)", "def upload_data_to_db(self):\n mongo_uri = f\"mongodb+srv://{constant.DB_USER}:{constant.DB_PASSWORD}@userfeeds.48fue.mongodb.net/admin\"\n with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:\n executor.submit(update_data, mongo_uri, self.users, self.posts, self.comments)", "def insert_into_influxdb(measurement, value):\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n payload = \"{} value={}\".format(measurement, value)\n requests.post(INFLUXDB_URL, headers=headers, data=payload)", "def insert_metric(self, entrylist):\n\n\t\tif type(entrylist) is str:\n\t\t\tentrylist = eval(entrylist)\t\t\n\t\t\n\t\tcount = 0\n\t\tconn = pymongo.Connection(*self.hostaddr)\n\t\tdb = conn[self.dbname]\n\t\tfor 
host, data in entrylist.items():\n\t\t\tcollection = db[host]\n\t\t\tfor entry in data:\n\t\t\t\ttry:\n\t\t\t\t\tcollection.insert({'time':entry[0], 'metrics':entry[1]})\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tlogger.error(str(e))\n\t\t\t\telse:\n\t\t\t\t\tcount += 1\n\t\treturn count", "def publish_events(self, events):\n body = ztreamy.serialize_events(events)\n logging.info(\"Connecting to \" + self.hostname + \" on port \" + str(self.port))\n conn = httplib.HTTPConnection(self.hostname, self.port)\n conn.request('POST', self.path, body, ZtreamyClient._headers)\n response = conn.getresponse()\n if response.status == 200:\n logging.info(\"Got 200 status from \" + self.path)\n logging.info(\"Sent :\" + body)\n return True\n else:\n logging.error(str(response.status) + ' ' + response.reason)\n return False", "def add_to_database(results):\n\n err = CLIENT.write(['%s,hashid=%s warnings=%d,errors=%d,status=\"%s\"' % (DB_NAME, results['hashid'], results['warnings'], results['errors'], results['status'])], {'db':DB_NAME}, protocol='line')\n if not err:\n log_to_file (\"[ERROR] %s fail to post to InfluxDB\" % (results['hashid']))", "def import_events(self, events_file):\n data_file = open(events_file).read()\n events_json = json.loads(data_file)\n\n imported_count = 0\n for event in events_json:\n try:\n clause = self.events_table.insert().values(\n id=event['id'],\n datetime=event['datetime'],\n location=event['location'],\n group_id=event['group_id'],\n title=event['title'],\n url=event['url'],\n photo_url=event['photo_url']\n # description = e['description'],\n )\n self.connection.execute(clause)\n\n print 'One event added. ID: {}'.format(event['id'])\n imported_count += 1\n\n except Exception as error:\n print \"Import Exception: {}\".format(error)\n\n print \"Imported <{}> events\".format(imported_count)\n return {'imported_count': imported_count}", "def test_write_points_batch_multiple_series(self):\n dummy_points = [\n {\"points\": [[\"1\", 1, 1.0], [\"2\", 2, 2.0], [\"3\", 3, 3.0],\n [\"4\", 4, 4.0], [\"5\", 5, 5.0]],\n \"name\": \"foo\",\n \"columns\": [\"val1\", \"val2\", \"val3\"]},\n {\"points\": [[\"1\", 1, 1.0], [\"2\", 2, 2.0], [\"3\", 3, 3.0],\n [\"4\", 4, 4.0], [\"5\", 5, 5.0], [\"6\", 6, 6.0],\n [\"7\", 7, 7.0], [\"8\", 8, 8.0]],\n \"name\": \"bar\",\n \"columns\": [\"val1\", \"val2\", \"val3\"]},\n ]\n expected_last_body = [{'points': [['7', 7, 7.0], ['8', 8, 8.0]],\n 'name': 'bar',\n 'columns': ['val1', 'val2', 'val3']}]\n with requests_mock.Mocker() as m:\n m.register_uri(requests_mock.POST,\n \"http://localhost:8086/db/db/series\")\n cli = InfluxDBClient('localhost', 8086,\n 'username', 'password', 'db')\n cli.write_points(data=dummy_points, batch_size=3)\n self.assertEqual(m.call_count, 5)\n self.assertEqual(expected_last_body, m.request_history[4].json())", "def send_to_delivery_stream(events, stream_name):\n if not events:\n logger.info(\"No events provided: nothing delivered to Firehose\")\n return\n\n records = []\n for event in events:\n if not isinstance(event, str):\n # csv events already have a newline\n event = json.dumps(event) + \"\\n\"\n records.append({\"Data\": event})\n firehose = boto3.client(\"firehose\")\n logger.info(\"Delivering %s records to Firehose stream '%s'\",\n len(records), stream_name)\n resp = firehose.put_record_batch(\n DeliveryStreamName=stream_name,\n Records=records)\n return resp", "def _add_multipart_events(self, events):\n for event in events:\n if not \"multipart\" in event:\n # Not this one... 
skip to the next\n logger.error(\n \" Mismatched multipart record %s in job %s\"\n % (event, self._exec_job_id)\n )\n continue\n if \"integrity_summary\" in event:\n # PM-1390 multipart events\n m = event[\"integrity_summary\"]\n metric = IntegrityMetric(\n type=\"check\", # For time being they always refer to verification\n file_type=\"input\", # should be specified in multipart\n succeeded=m[\"succeeded\"] if \"succeeded\" in m else 0,\n failed=m[\"failed\"] if \"failed\" in m else 0,\n duration=m[\"duration\"] if \"duration\" in m else 0.0,\n )\n self.add_integrity_metric(metric)\n else: # catch all\n self._multipart_events.append(event)", "def send_to_syslog(events, syslog):\r\n for cnt, event in enumerate(events, start=1):\r\n syslog.send(json.dumps(event))\r\n logging.debug('Event %s sent to syslog: %s.', cnt, json.dumps(event))\r\n logging.debug('Total Events: %s ', cnt)", "def upload(self) -> None:\n self.logger.debug(\"Upload data dictionary\")\n for data_dictionary in self.dd_set: # type DataDictionary\n data_dictionary.upload() # raise a relevant exception", "def publish_event(self, event, **kwargs):\n if not event.timestamp:\n event.timestamp = datetime.utcnow\n\n for (key, func) in iteritems(self.metadata_funcs):\n event.metadata[key] = func()\n\n for (name, connection) in iteritems(self._connections):\n try:\n connection.publish_event(event, **kwargs)\n except Exception as ex:\n self._logger.exception(\n \"Exception while publishing event to '%s' connection: %s\", name, ex\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for parsing a sentence with no walls and no period
def test_parse_no_period_if_no_period(self): options = 0 options |= BIT_STRIP | BIT_NO_PERIOD | BIT_RWALL tokens = parse_tokens(self.tokens_no_walls_no_period, options)[0] self.assertTrue(self.cmp_lists(tokens, ['###LEFT-WALL###', 'eagle', 'has', 'wing']))
[ "def check_sentence(text):\n result = re.search(r\"^[A-Z][a-z\\s]*[.?!]$\", text)\n return result != None", "def filter_paragraph(p):\n # Expect a minimum number of words.\n tokens = p.split()\n if len(tokens) < 6:\n return True\n\n # Require some letters.\n if not re.search(_SOME_ALPHA_RE, p):\n return True\n\n # Keep this one at the end, probably the most complicated logic.\n # We try to detect sentences, which should have a minimum of 3 tokens\n # with only alphabetic characters.\n last = 0\n found_sentence = False\n num_alpha = 0\n for i, x in enumerate(tokens):\n if x == '.':\n if i - last > 3 and num_alpha >= 3:\n found_sentence = True\n break\n last = i\n num_alpha = 0\n if re.match(_ONLY_ALPHA_RE, x):\n num_alpha += 1\n if not found_sentence:\n return True\n\n return False", "def is_real_sentence(only_token, sentence):\n \n first_word = \"\"\n if only_token:\n first_word = sentence[0]\n else:\n first_word = sentence[0][0]\n\n if '---------------------' in first_word or first_word == '-DOCSTART-':\n return False\n else:\n return True", "def splitSentences(self,txt):\n \n txt = txt.split()\n #txt = txt.split(\"\\s\") #DM to account for longer documents in formative evaluation - change back for impression sections only\n\n #attribute side header to each corresponding sentence\n sentences = []\n wordLoc = 0\n \n\n while(wordLoc < len(txt) ):\n currentWord = txt[wordLoc]\n if( currentWord[-1] in '.?!' ):\n if( currentWord in self.exceptionTerms ):\n wordLoc += 1\n # per discussion with A.G. dropped this exception, since assuming numbers only use decimal points if there \n # are actual decimal point digits expressed and thus the period would not be the last character of the word.\n #elif( self.digits.intersection(currentWord) and \n #not set('()').intersection(currentWord)): # word doesn't include parentheses. Is this necessary?\n #wordLoc += 1\n else:\n sentences.append(unicode(\" \"+' '.join(txt[:wordLoc+1]))) \n txt = txt[wordLoc+1:]\n wordLoc = 0\n else:\n wordLoc += 1\n\n # if any texts remains (due to failure to identify a final sentence termination,\n # then take all remaining text and put into a sentence\n if( txt ):\n sentences.append(unicode(\" \"+' '.join(txt)) )\n \n #print sentences;raw_input()\n return sentences", "def test_valid_punctuation():\n assert rw('What did they say? Say what again!') == 'say'\n assert rw('I am... that am!') == 'am'", "def generates(self, sentence):\n try:\n parses = self._parser.parse(sentence.get_words())\n return list(parses) != []\n except:\n return False", "def check_end_punctuations(self) -> str:\n if not self.contain_content('!') and not self.contain_content('?') \\\n and not self.contain_content('.'):\n return 'This sentence is not ended with exclamation mark, period mark or question mark.'\n if self.contain_type('SBARQ') or self.contain_type('SQ'):\n if self.find_the_last() == '?':\n return 'The question sentence ended correctly.'\n else:\n if self.find_the_last() == '.' or self.find_the_last() == '!':\n return 'This sentence has a good end punctuation.'\n else:\n return 'The end of this sentence is very likely to have a wrong punctuation.'", "def fix_missing_period(self,line):\n\n if line == \"\": \n return line\n if line[-1] in self.END_TOKENS: \n return line\n return line + \" .\"", "def end_with_punctuation(self, sentence):\n if not re.match(r'[\\.?!]$', sentence[-1]):\n self.error_list.append(\"Every sentence should end with either of '.', '?' 
or '!'.\")", "def extract_statements(\n text=None, \n nlp=None, \n make_sentence=False, \n n_min_word_paragraph=50, \n n_max_word_paragraph=200\n ):\n \n # remove non ASCII characters\n text = remove_non_ascii(text)\n \n \n lines = []\n prev = \"\"\n n_words = 0\n for line in text.split('\\n'):\n # aggregate consecutive lines where text may be broken down\n # only if next line starts with a space or previous does not end with punctation mark and between\n if((line.startswith(' ') or not prev.endswith(('.','?', '!'))) and n_words <= n_max_word_paragraph):\n prev = prev + ' ' + line\n n_words = len(prev.split())\n \n # min words in paragraph\n elif n_words <=n_min_word_paragraph:\n prev = prev + ' ' + line\n n_words = len(prev.split())\n \n else:\n # new paragraph\n lines.append(prev)\n prev = line\n n_words = 0\n \n # don't forget left-over paragraph\n lines.append(prev)\n # clean paragraphs from extra space, unwanted characters, urls, etc.\n # best effort clean up, consider a more versatile cleaner\n sentences = []\n for line in lines:\n \n # removing header number\n line = re.sub(r'^\\s?\\d+(.*)$', r'\\1', line)\n # removing trailing spaces\n line = line.strip()\n # words may be split between lines, ensure we link them back together\n line = re.sub('\\\\s?-\\\\s?', '-', line)\n # remove space prior to punctuation\n line = re.sub(r'\\s?([,:;\\.])', r'\\1', line)\n # ESG contains a lot of figures that are not relevant to grammatical structure\n line = re.sub(r'\\d{5,}', r' ', line)\n # remove mentions of URLs\n line = re.sub(r'((http|https)\\:\\/\\/)?[a-zA-Z0-9\\.\\/\\?\\:@\\-_=#]+\\.([a-zA-Z]){2,6}([a-zA-Z0-9\\.\\&\\/\\?\\:@\\-_=#])*', r' ', line)\n # remove multiple spaces\n line = re.sub('\\\\s+', ' ', line)\n \n # split paragraphs into well defined sentences using spacy\n if make_sentence:\n try:\n for part in list(nlp(line).sents):\n part_strip = str(part).strip()\n # remove senteces with only 30 characters\n if len(part_strip) > 30:\n sentences.append(part_strip)\n except ValueError:\n print(\"Check if nlp model was loaded\")\n else:\n sentences.append(line)\n \n return sentences", "def _process_sentence(self, sentence):\n raise ValueError(\"Please override this class!\")", "def test_tokenize_by_sentence_inappropriate_sentence(self):\n text = '$#&*@#$*#@)'\n\n expected = ()\n actual = tokenize_by_sentence(text)\n self.assertEqual(expected, actual)", "def sentence_punctuation():\n check50.run(\"python3 readability.py\").stdin(\"Congratulations! Today is your day. You're off to Great Places! 
You're off and away!\").stdout(\"Grade\\D+3\", \"Grade 3\\n\").exit(0)", "def dependency_parse(self):\n parsed_sentence = self.parsed\n word_type = None\n multiplier = None\n sentence_deps = set([w.dep_ for w in parsed_sentence])\n for word in parsed_sentence:\n #If there's a word prefaced by a number\n #Add a special case for am, treat it as AM.\n #To somewhat mitigate the special case being incorrectly triggered by the actual word 'am',\n #only do this if there are entities in the sentence\n if list(word.lefts) and \"nummod\" in sentence_deps:\n first_left = list(word.lefts)[0]\n log.debug(\"Found nummod type pair {0} and {1}\".format(word.orth_, first_left.orth_))\n if first_left.is_digit:\n word_type = self.check_time_word(word, \"nummod\")\n multiplier = int(first_left.orth_)\n break\n elif word.dep_ == \"pobj\":\n #Check for a cardinal time\n if cardinal_time_pattern.match(word.orth_):\n word_type, multiplier = self.parse_cardinal_time(word)\n break\n elif date_pattern.match(word.orth_):\n log.debug(\"Found date {0}\".format(word.orth_))\n if \"/\" in word.orth_:\n word_type = \"slashdate\"\n else:\n word_type = \"dotdate\"\n break\n else:\n self.relative == True\n word_type = self.check_time_word(word, \"pobj\")\n break\n #Only use a number for a parse if there's not a significant dependency in the sentence\n elif word.is_digit and \"pobj\" not in sentence_deps:\n word_type, multiplier = self.parse_cardinal_time(word)\n break\n self.word_type = word_type\n self.multiplier = multiplier", "def fix_missing_period(line):\n if \"@highlight\" in line: return line\n if line==\"\": return line\n if line[-1] in END_TOKENS: return line\n return line + \" .\"", "def isValid2ndStage(self):\n strText = self.getTextSentence()\n\n # To filter out web addresses\n if strText.find( \"http\" ) >= 0 \\\n or strText.find( \"www\" ) >= 0 \\\n or strText.find( \"html\" ) >= 0 \\\n or strText.find(\"URL\") >= 0:\n TextCluster.logger.info(\n \"Discard sentence, web address! '%s'\" % strText)\n return False\n\n # regular expression verification by German orthography: https://en.wikipedia.org/wiki/German_orthography\n # pattern = u\"^[a-zA-ZäöüÄÖÜ0-9.,?\\\"'\\-]+$\" # All allowed chars\n # pattern = u\"^[a-zA-ZäöüÄÖÜß]+[.|']?$\" # common char of\n # [a-zäöü] with an optional trailing dot or apostrophe '\n pattern = \"^[a-zA-ZäöüÄÖÜß.']+$\"\n # print( pattern )\n\n recmped = re.compile(pattern) # re compiled\n words = strText.split()\n for word in words:\n # German orthography check\n result = recmped.match(word)\n if result is None:\n TextCluster.logger.info(\"Discard sentence, disobey German orthography rule (%s)! '%s' in '%s'\"\n % (pattern, word, strText))\n return False\n\n # Check for too long word\n if len(word) > MAX_WORD_LENGTH:\n TextCluster.logger.info(\"Discard sentence, too long word '%s' of length '%d'! 
In '%s'\"\n % (word, len(word), strText))\n\n # For temporary debugging: Output all long words for analysis\n if False:\n cmd = 'echo \"' + \\\n word + '\" >> long_words.txt'\n print(cmd)\n os.system(cmd)\n\n return False\n\n return True", "def test_tokenize_by_sentence_empty_sentence(self):\n text = ''\n\n expected = ()\n actual = tokenize_by_sentence(text)\n self.assertEqual(expected, actual)", "def double_quotes(para):\n words = re.split(u'[\\\"\\u201c\\u201d]', para.text)\n # print words\n if len(words) == 1:\n return False\n # if words[0] is not regular phrase, we regard words[1] as the start of the paragragh\n if len(words[0].rstrip().lstrip()) < 7 and re.match(r'[a-zA-Z ]*[a-zA-Z]+[a-zA-Z ]*', words[0]) == None:\n return True\n else:\n return False", "def test_parse_word_stemming_empty(self):\n feed = \"\"\n expected = \"\"\n\n result = StemParser().stem(feed)\n self.assertEqual(expected, result)", "def is_maybe_off_by_one(text, anno):\n span = anno.text_span()\n start = span.char_start\n end = span.char_end\n start_ok = start == 0 or text[start - 1].isspace()\n end_ok = end == len(text) or text[end].isspace()\n return not (start_ok and end_ok)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for parsing out links when LW (left wall) and a period are present
def test_parse_links(self): links = parse_links(self.link_str, ['###LEFT-WALL###', 'dad', 'was', 'not', 'a', 'parent', 'before', '.'], 0) # [0 7 2 (Xp)][0 1 0 (Wd)][1 2 0 (Ss*s)][2 5 1 (Osm)][2 3 0 (EBm)][4 5 0 (Ds**c)][5 6 0 (Mp)][7 8 0 (RW)] self.assertTrue(self.cmp_lists(links, [ (0, 7), (0, 1), (1, 2), (2, 5), (2, 3), (4, 5), (5, 6) ]))
[ "def test_unquoted_link(self):\n txt=\"\"\"hello <a href=www.co.uk>slashdot.org/?a=c&f=m</a> \"\"\"\n\n uris=self.candidate.extractor.extracturis(txt)\n self.assertTrue('www.co.uk' in uris)\n self.assertTrue('slashdot.org/?a=c&f=m' in uris)", "def is_valid_link(link):\n index = link.find('/news/')\n if (index < 0):\n return False\n # Check if NEXT 4 characters are a year\n try:\n next_four = link[index + 6:index + 6 + 4]\n yr = int(next_four)\n if ((yr < 2022) and (yr > 1970)):\n return True\n except:\n return False\n return False", "def links_to_follow(href):\n return href and href.startswith(\"/wiki/\") and \":\" not in href", "def validate_link(link):\n\tpass", "def test_url_parsing_pass():\n\n assert True == url.validate(\"http://example.com\")\n assert True == url.validate(\"http://example.com/\")\n assert True == url.validate(\"http://www.example.com\")\n assert True == url.validate(\"http://www.example.com/\")", "def make_linkable_period(text):\n global period_ctr\n newelem = ET.Element(\"a\")\n newelem.text = \". \"\n uid = \"period_\"+str(period_ctr)\n newelem.attrib[\"id\"] = uid\n newelem.attrib[\"href\"] = \"#\"+uid\n period_ctr += 1\n newelem.tail = text\n return newelem", "def links_matches(link):\n link = link.replace('<','').replace('>','')\n dbpedia_reg = re.compile('http://([a-z]{2}.)?dbpedia.org/resource/.*')\n if dbpedia_reg.match(link):\n return True\n\n return False", "def check_url_format(self):\r\n #m = re.match(\"^http://www.amazon.com/dp/[a-zA-Z0-9]+$\", self.product_page_url)\r\n m = re.match(r\"^http://www.statelinetack.com/.*?$\", self.product_page_url)\r\n return not not m", "def test_reformat_weburl_2(self):\n url = ''\n self.assertEqual(self.cmd.reformat_weburl(url), 'Not available')", "def is_valid_link(self, link, ignored_strs, allowed_pages):\n\n valid_link = True\n i = 0\n\n # Look if link is in allowed pages\n for page in allowed_pages:\n i += 1\n if page in link:\n break\n if i == len(allowed_pages):\n valid_link = False\n\n if valid_link:\n # Search if the link has ignored text in it\n for text in ignored_strs:\n if text in link.lower() or link == './':\n valid_link = False\n break\n return valid_link", "def test_link(self):\n response = self.node.query(type=LINK)\n path = self.node.reply_to.split('/')[-1]\n mylink = [l for l in response.get_dicts()\n if l['owningAddr'] and l['owningAddr'].endswith(path)]\n self.assertTrue(mylink)", "def test_link_talk(self):\n expected = 'href=\"{}\"'.format(r('talk_list'))\n self.assertContains(self.resp, expected)", "def check_url_format(self):\n if re.match('^https?://www.walgreens.com/store/c/.+/ID=prod\\d+-product$', self.product_page_url):\n return True\n return False", "def test_lrg_references(parser, description):\n parser(description)", "def tags_links_testing(self):\n totalLinks = 0\n externalLinks = 0\n\n m = []\n\n meta = self.soup.find_all(\"meta\")\n links = self.soup.find_all(\"link\")\n scripts = self.soup.find_all(\"script\")\n\n for tag in meta:\n for link in re.findall(re.compile(\"\\\"http.*?\\\"\"), str(tag)):\n m.append(link)\n\n for tag in links:\n if tag.has_attr(\"href\") and \"http\" in tag.get(\"href\")[:4]:\n m.append(tag.get(\"href\"))\n\n for tag in scripts:\n if tag.has_attr(\"href\") and \"http\" in tag.get(\"href\")[:4]:\n m.append(tag.get(\"href\"))\n\n for link in m:\n if self.domain not in link:\n externalLinks += 1\n totalLinks += 1\n\n if totalLinks != 0:\n percentage = externalLinks / totalLinks\n if percentage >= 0.81:\n self.tagWeight = 1\n return\n elif percentage >= 
0.05:\n self.tagWeight = 0.5\n return\n\n self.tagWeight = 0\n return", "def test_link_speakers(self):\n expected = 'href=\"{}#speakers\"'.format(r('home'))\n self.assertContains(self.resp, expected)", "def test15_link(self):\n r = Resource(uri='ln1')\n self.assertEqual(r.link('up'), None)\n self.assertEqual(r.link_href('up'), None)\n r.link_set('up', 'uri:up')\n self.assertEqual(r.link('up'), {'rel': 'up', 'href': 'uri:up'})\n self.assertEqual(r.link_href('up'), 'uri:up')\n r.link_set('down', 'uri:down')\n self.assertEqual(r.link('down'), {'rel': 'down', 'href': 'uri:down'})\n self.assertEqual(r.link_href('down'), 'uri:down')\n r.link_set('up', 'uri:up2')\n self.assertEqual(r.link('up'), {'rel': 'up', 'href': 'uri:up2'})\n r.link_add('up', 'uri:up3')\n self.assertEqual(r.link('up'), {'rel': 'up', 'href': 'uri:up2'}) # still get first\n self.assertEqual(r.ln, [{'rel': 'up', 'href': 'uri:up2'},\n {'href': 'uri:down', 'rel': 'down'},\n {'rel': 'up', 'href': 'uri:up3'}])", "def test_reformat_weburl_3(self):\n url = 'http://www.com.com'\n self.assertEqual(self.cmd.reformat_weburl(url), 'http://www.com.com')", "def test_reformat_weburl_1(self):\n url = 'www.google.com.com'\n self.assertEqual(self.cmd.reformat_weburl(url), 'http://www.google.com.com')", "def test_find_hyperlinks_iii(self):\n\n text = \"[[Dummy-APO-Database/GPD/Material_Strength| MS-GPD]]\"\n\n regex_id, hypermatches = find_hyperlinks(text)\n\n exp_hypermatches = [('',\n 'Dummy-APO-Database/',\n 'GPD/Material_Strength',\n '| MS-GPD]]',\n ' MS-GPD'),\n '']\n\n exp_regex_id = 4\n\n self.assertEqual(\n hypermatches,\n exp_hypermatches,\n \"list of hypermatches do not match!\")\n\n self.assertEqual(\n regex_id,\n exp_regex_id,\n \"Expected regex id does not match!\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for parsing postscript with both walls present in the input
def test_parse_postscript_all_walls(self): options = 0 options |= (BIT_RWALL | BIT_CAPS) options &= ~BIT_STRIP tokens, links = parse_postscript(self.post_all_walls, options) pm = parse_metrics(tokens) self.assertEqual(1.0, pm.completely_parsed_ratio) self.assertEqual(0.0, pm.completely_unparsed_ratio) self.assertEqual(1.0, pm.average_parsed_ratio)
[ "def is_body(part):\n if get_content_type(part) == 'text/plain':\n if not is_attachment(part):\n return True\n return False", "def isMeaningfulPost(post):\n # nothing for reshares.\n if post.get('verb', '') == 'share':\n return False\n\n return isMeaningfulContent(re.sub('<.*?>', '', post['object']['content']))", "def parse_postscript(text: str, options: int) -> ([], []):\n p = re.compile('\\[(\\(.+?\\)+?)\\]\\[(.*?)\\]\\[0\\]', re.S)\n\n m = p.match(text.replace(\"\\n\", \"\"))\n\n if m is not None:\n tokens, offset = parse_tokens(m.group(1), options)\n links = parse_links(m.group(2), tokens, offset)\n\n return tokens, links\n\n raise LGParseError(f\"parse_postscript(): regex does not match for:\\n{text}\")", "def test_metadata_filename_matches_postscriptname(self):\n import re\n regex = re.compile(r'\\W')\n\n for x in self.metadata.fonts:\n post_script_name = regex.sub('', x.post_script_name)\n filename = regex.sub('', os.path.splitext(x.filename)[0])\n if filename != post_script_name:\n msg = '\"{0}\" does not match \"{1}\"'\n self.fail(msg.format(x.filename, x.post_script_name))", "def is_image_post(submission):\n return (not submission.is_self) and submission.url.endswith((\".png\", \".jpg\", \".jpeg\", \".gif\"))", "def preprocess_facebook(post):\n app = post.get('application', {}).get('name')\n if ((post.get('type') not in POST_TYPES and\n post.get('status_type') not in STATUS_TYPES) or\n (app and app in APPLICATION_BLACKLIST) or\n # posts with 'story' aren't explicit posts. they're friend approvals or\n # likes or photo tags or comments on other people's posts.\n 'story' in obj):\n logging.info('Skipping %s', post.get('id'))\n return None\n\n # for photos, get a larger version\n image = post.get('image', '')\n if (ptype == 'photo' or stype == 'added_photos') and image.endswith('_s.jpg'):\n post['image'] = image[:-6] + '_o.jpg'\n\n return post", "def parse_post(bs, keywords) :\n # Post Title\n try: \n post_titles = bs.find(\"div\", {\"class\":\"mtm\"}).find(\"a\", {\"class\":\"_8_4r\"})\n if post_titles is None:\n print(\"\\t No title found!\")\n else :\n # TODO: manually traversing to find child, might not work if site changes\n for i in range(4) :\n post_titles = next(post_titles.children)\n post_titles = post_titles.next_sibling\n post_title = post_titles.find(\"span\", {\"class\":False})\n if post_title is None :\n print(\"\\t No title found!\")\n else :\n if keyword_match(\" \".join(list(post_titles.stripped_strings)), keywords) :\n return True\n except AttributeError :\n print(\"\\t AttributeError when trying to find titles: likely titles don't exist\")\n\n\n # Post Content\n post_data = bs.find(\"div\", {\"class\":re.compile(\"userContent\")})\n if post_data is None:\n print(\"No user post content...try checking manually\")\n return False\n\n paragraphs = []\n for p in post_data.find_all(\"p\") :\n paragraphs.extend(list(p.stripped_strings))\n if keyword_match(\" \".join(paragraphs), keywords) :\n return True\n\n # Hidden Texts\n # This should already be handled by the above case, but check just in case\n paragraphs = []\n for hidden in post_data.find_all(\"div\", {\"class\":\"text_exposed_show\"}) :\n for p in hidden.find_all(\"p\") :\n paragraphs.extend(list(p.stripped_strings))\n if keyword_match(\" \".join(paragraphs), keywords):\n return True\n \n return False", "def inScript(text, index, body):\n paths = pathsToText([(fromstring(body), \"\")], text.decode(\"utf-8\"), found=[])\n try:\n path = paths[index]\n return \"script\" in path\n except IndexError:\n return 
False", "def preprocess_twitter(post):\n # TODO\n return post", "def inHTML(text, index, body):\n # if there is a < then lxml will interpret that as a tag, so only search for the stuff before it\n text = text.split(b\"<\")[0]\n paths = pathsToText([(fromstring(body), \"\")], text.decode(\"utf-8\"), found=[])\n try:\n path = paths[index]\n return \"script\" not in path\n except IndexError:\n return False", "def paragraph_is_text_like(p):\n return not isinstance(p, pyth.document.Image)", "def test_parse_raw_siteslinkingin(parse_raw_siteslinkingin):\n msg = 'Re-load of raw SitesLinkingIn Alexa JSON into Kafka error'\n assert parse_raw_siteslinkingin == (68, 68), msg", "def parse_embed_script(self,tokiter,scopes,ends,parse_between=None):\n token=tokiter.next()\n if token.token_type != 'varname':\n self.error('embed',token)\n if token.token_value != 'bash':\n self.error('embed',token,'unknown language \"%s\"'%(\n token.token_value,))\n nametoken=tokiter.next()\n if token.token_type != 'varname':\n self.error('embed script name',token)\n scope=EmbedBash(scopes)\n token=tokiter.next()\n\n while token.token_type==end_of_line_type: token=tokiter.next()\n if token.token_type=='(':\n self.parse_subscope(tokiter,[scope]+scopes,[')'],\n self.parse_between_arguments,\n allow_overwrite=False,\n allow_resolve=False,\n allow_null=True,\n only_scalars=True,\n scope_name='embed script parameters')\n scope=scope.as_parameters(self.con(token,scopes))\n token=tokiter.next()\n while token.token_type==end_of_line_type: token=tokiter.next()\n\n if token.token_type=='{':\n self.parse_subscope(tokiter,[scope]+scopes,['}'],\n self.parse_between_assignments,\n allow_overwrite=True,\n allow_resolve=True,\n allow_null=False,\n allow_use=True,\n only_scalars=True,\n scope_name='embed script variables')\n token=tokiter.next()\n while token.token_type==end_of_line_type: token=tokiter.next()\n\n if token.token_type in [ 'qstring', 'dqstring', 'bracestring' ]:\n scope.settemplate(self.action_string([scope]+scopes,token))\n else:\n self.error('embed script contents',token)\n if parse_between: \n parse_between(tokiter)\n return (nametoken.token_value,scope)", "def test_raw_link(self):\n self.assertTrue(valet.view(self.test_file).find(self.test_file + \"?raw\") >= 0)", "def imagecheck(tweet):\n\tpass", "def is_inline_attachment(part):\n return get_disposition(part) == 'inline'", "def check_script(script):\n\n if not isinstance(script, str):\n raise Exception(\"Wrong script format.\")\n elif len(script)/2 != 20:\n raise Exception(\"Wrong signature length \" + str(len(script)/2))\n else:\n return True", "def identify_rtf_article(line):\n if r'\\par' in line[0:4]:\n return True", "def should_process_post(post: PostSummary, cfg: Config) -> bool:\n url = str(post[\"url\"])\n return all(\n [\n has_enough_upvotes(post, cfg),\n not post[\"archived\"],\n post[\"author\"],\n is_transcribable_youtube_video(url) if is_youtube_url(url) else True,\n ]\n )", "def hasRawText(self, text):\r\n r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\\d)[^>]*?>.*</\\1>',\r\n re.S).sub('', text.strip()).strip()\r\n r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)\r\n return '' != r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for parsing postscript with no walls in the input
def test_parse_postscript_no_walls(self):
    options = 0
    options |= (BIT_RWALL | BIT_CAPS)
    options &= ~BIT_STRIP
    tokens, links = parse_postscript(self.post_no_walls, options)
    pm = parse_metrics(tokens)
    self.assertEqual(1.0, pm.completely_parsed_ratio)
    self.assertEqual(0.0, pm.completely_unparsed_ratio)
    self.assertEqual(1.0, pm.average_parsed_ratio)
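For illustration only, a minimal sketch of the bit-mask option handling used in the test above; the numeric values assigned to BIT_RWALL, BIT_CAPS and BIT_STRIP are invented placeholders (the real constants come from the parser bindings under test), and only the |= / &= ~ pattern mirrors the record.

# Hedged sketch: the flag values below are assumptions, not the library's.
BIT_RWALL = 1 << 0   # assumed placeholder value
BIT_CAPS  = 1 << 1   # assumed placeholder value
BIT_STRIP = 1 << 2   # assumed placeholder value

options = 0
options |= (BIT_RWALL | BIT_CAPS)   # switch two behaviours on
options &= ~BIT_STRIP               # make sure a third is off

assert options & BIT_RWALL and options & BIT_CAPS
assert not options & BIT_STRIP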
[ "def isMeaningfulPost(post):\n # nothing for reshares.\n if post.get('verb', '') == 'share':\n return False\n\n return isMeaningfulContent(re.sub('<.*?>', '', post['object']['content']))", "def is_image_post(submission):\n return (not submission.is_self) and submission.url.endswith((\".png\", \".jpg\", \".jpeg\", \".gif\"))", "def should_process_post(post: PostSummary, cfg: Config) -> bool:\n url = str(post[\"url\"])\n return all(\n [\n has_enough_upvotes(post, cfg),\n not post[\"archived\"],\n post[\"author\"],\n is_transcribable_youtube_video(url) if is_youtube_url(url) else True,\n ]\n )", "def preprocess_facebook(post):\n app = post.get('application', {}).get('name')\n if ((post.get('type') not in POST_TYPES and\n post.get('status_type') not in STATUS_TYPES) or\n (app and app in APPLICATION_BLACKLIST) or\n # posts with 'story' aren't explicit posts. they're friend approvals or\n # likes or photo tags or comments on other people's posts.\n 'story' in obj):\n logging.info('Skipping %s', post.get('id'))\n return None\n\n # for photos, get a larger version\n image = post.get('image', '')\n if (ptype == 'photo' or stype == 'added_photos') and image.endswith('_s.jpg'):\n post['image'] = image[:-6] + '_o.jpg'\n\n return post", "def hasRawText(self, text):\r\n r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\\d)[^>]*?>.*</\\1>',\r\n re.S).sub('', text.strip()).strip()\r\n r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)\r\n return '' != r", "def is_body(part):\n if get_content_type(part) == 'text/plain':\n if not is_attachment(part):\n return True\n return False", "def isPhantom(tagfile):\n return tagfile('phantom_tag', default=False)", "def parse_postscript(text: str, options: int) -> ([], []):\n p = re.compile('\\[(\\(.+?\\)+?)\\]\\[(.*?)\\]\\[0\\]', re.S)\n\n m = p.match(text.replace(\"\\n\", \"\"))\n\n if m is not None:\n tokens, offset = parse_tokens(m.group(1), options)\n links = parse_links(m.group(2), tokens, offset)\n\n return tokens, links\n\n raise LGParseError(f\"parse_postscript(): regex does not match for:\\n{text}\")", "def parse_post(bs, keywords) :\n # Post Title\n try: \n post_titles = bs.find(\"div\", {\"class\":\"mtm\"}).find(\"a\", {\"class\":\"_8_4r\"})\n if post_titles is None:\n print(\"\\t No title found!\")\n else :\n # TODO: manually traversing to find child, might not work if site changes\n for i in range(4) :\n post_titles = next(post_titles.children)\n post_titles = post_titles.next_sibling\n post_title = post_titles.find(\"span\", {\"class\":False})\n if post_title is None :\n print(\"\\t No title found!\")\n else :\n if keyword_match(\" \".join(list(post_titles.stripped_strings)), keywords) :\n return True\n except AttributeError :\n print(\"\\t AttributeError when trying to find titles: likely titles don't exist\")\n\n\n # Post Content\n post_data = bs.find(\"div\", {\"class\":re.compile(\"userContent\")})\n if post_data is None:\n print(\"No user post content...try checking manually\")\n return False\n\n paragraphs = []\n for p in post_data.find_all(\"p\") :\n paragraphs.extend(list(p.stripped_strings))\n if keyword_match(\" \".join(paragraphs), keywords) :\n return True\n\n # Hidden Texts\n # This should already be handled by the above case, but check just in case\n paragraphs = []\n for hidden in post_data.find_all(\"div\", {\"class\":\"text_exposed_show\"}) :\n for p in hidden.find_all(\"p\") :\n paragraphs.extend(list(p.stripped_strings))\n if keyword_match(\" \".join(paragraphs), keywords):\n return True\n \n return False", "def 
has_post_data(self):\n raise Exception('has_post_data() not implemented...')", "def validated(self, post_data):\n if 'subreddit' not in post_data or not isinstance(post_data['subreddit'], str):\n return False\n if 'start' not in post_data or not isinstance(post_data['start'], int):\n return False\n if 'end' not in post_data or not isinstance(post_data['end'], int):\n return False\n\n return True", "def page_empty_check(page) -> bool:\n txt = page.text\n # Check if the page is in content namespace\n if page.namespace().content:\n # Check if the page contains at least 50 characters\n return len(txt) < 50\n\n if not page.is_categorypage():\n txt = textlib.removeLanguageLinks(txt, site=page.site)\n txt = textlib.removeCategoryLinks(txt, site=page.site)\n return len(txt) < 4\n\n return False", "def preprocess_twitter(post):\n # TODO\n return post", "def gotNoBody(self, article):\n\tself.noXmlBody.append(article.pmid)", "def check_raw_postings(raw_postings):\n source_codes = get_source_codes()\n category_codes = get_category_codes()\n category_groups = get_category_groups()\n country_codes = None # Loaded into memory as required.\n state_codes = None # ditto.\n metro_codes = None # ditto.\n region_codes = None # ditto.\n county_codes = None # ditto.\n city_codes = None # ditto.\n locality_codes = None # ditto.\n zip_codes = None # ditto.\n\n results = []\n for raw_posting in raw_postings:\n try:\n posting = {}\n annotations = []\n images = []\n\n if not isinstance(raw_posting, dict):\n raise ParsingException(\"Posting must be an object or \" +\n \"dictionary\")\n\n remaining_fields = set(raw_posting.keys())\n\n parse_field(raw_posting, \"account_id\", posting, \"account_id\",\n remaining_fields, coerce_to_type=\"string\")\n\n parse_field(raw_posting, \"source\", posting, \"source\",\n remaining_fields, required=True,\n foreign_key=source_codes)\n\n parse_field(raw_posting, \"category\", posting, \"category\",\n remaining_fields, foreign_key=category_codes)\n\n if \"category\" in raw_posting:\n posting['category_group_id'] = \\\n category_groups[raw_posting['category'].upper()]\n\n if \"location\" in raw_posting:\n raw_loc = raw_posting['location']\n remaining_fields.remove(\"location\")\n\n remaining_loc_fields = set(raw_loc.keys())\n\n parse_field(raw_loc, \"lat\", posting, \"location_latitude\",\n remaining_loc_fields, coerce_to_type=\"decimal\",\n min_value=-90, max_value=+90)\n\n parse_field(raw_loc, \"long\", posting, \"location_longitude\",\n remaining_loc_fields, coerce_to_type=\"decimal\",\n min_value=-180, max_value=+180)\n\n parse_field(raw_loc, \"accuracy\", posting, \"location_accuracy\",\n remaining_loc_fields, coerce_to_type=\"integer\")\n\n if \"bounds\" in raw_loc:\n # Manually copy across the bounds array.\n posting['location_bounds'] = raw_loc['bounds']\n remaining_loc_fields.remove(\"bounds\")\n\n if \"country\" in raw_loc:\n if country_codes == None:\n country_codes = get_country_codes()\n\n parse_field(raw_loc, \"country\", posting, \"location_country\",\n remaining_loc_fields, foreign_key=country_codes)\n\n if \"state\" in raw_loc:\n if state_codes == None:\n state_codes = get_state_codes()\n\n parse_field(raw_loc, \"state\", posting, \"location_state\",\n remaining_loc_fields, foreign_key=state_codes)\n\n if \"metro\" in raw_loc:\n if metro_codes == None:\n metro_codes = get_metro_codes()\n\n parse_field(raw_loc, \"metro\", posting, \"location_metro\",\n remaining_loc_fields, foreign_key=metro_codes)\n\n if \"region\" in raw_loc:\n if region_codes == None:\n region_codes = 
get_region_codes()\n\n parse_field(raw_loc, \"region\", posting, \"location_region\",\n remaining_loc_fields, foreign_key=region_codes)\n\n if \"county\" in raw_loc:\n if county_codes == None:\n county_codes = get_county_codes()\n\n parse_field(raw_loc, \"county\", posting, \"location_county\",\n remaining_loc_fields, foreign_key=county_codes)\n\n if \"city\" in raw_loc:\n if city_codes == None:\n city_codes = get_city_codes()\n\n parse_field(raw_loc, \"city\", posting, \"location_city\",\n remaining_loc_fields, foreign_key=city_codes)\n\n if \"locality\" in raw_loc:\n if locality_codes == None:\n locality_codes = get_locality_codes()\n\n parse_field(raw_loc, \"locality\", posting,\n \"location_locality\", remaining_loc_fields,\n foreign_key=locality_codes)\n\n if \"zipcode\" in raw_loc:\n if zip_codes == None:\n zip_codes = get_zip_codes()\n\n parse_field(raw_loc, \"zipcode\", posting,\n \"location_zipcode\", remaining_loc_fields,\n foreign_key=zip_codes)\n\n if remaining_loc_fields:\n raise ParsingException(\"Unexpected location field(s): \" +\n \", \".join(remaining_loc_fields))\n\n parse_field(raw_posting, \"external_id\", posting, \"external_id\",\n remaining_fields, required=True,\n coerce_to_type=\"string\")\n\n parse_field(raw_posting, \"external_url\", posting, \"external_url\",\n remaining_fields, coerce_to_type=\"string\")\n\n parse_field(raw_posting, \"heading\", posting, \"heading\",\n remaining_fields, coerce_to_type=\"string\")\n\n parse_field(raw_posting, \"body\", posting, \"body\",\n remaining_fields, coerce_to_type=\"string\")\n\n parse_field(raw_posting, \"html\", posting, \"html\",\n remaining_fields, coerce_to_type=\"string\")\n\n parse_field(raw_posting, \"timestamp\", posting, \"timestamp\",\n remaining_fields, coerce_to_type=\"datetime\")\n\n if \"expires\" in raw_posting:\n parse_field(raw_posting, \"expires\", posting, \"expires\",\n remaining_fields, coerce_to_type=\"datetime\")\n else:\n posting['expires'] = dateHelpers.datetime_in_utc() \\\n + datetime.timedelta(days=7)\n\n parse_field(raw_posting, \"language\", posting, \"language\",\n remaining_fields, coerce_to_type=\"string\")\n\n parse_field(raw_posting, \"price\", posting, \"price\",\n remaining_fields, coerce_to_type=\"float\")\n\n parse_field(raw_posting, \"currency\", posting, \"currency\",\n remaining_fields, coerce_to_type=\"string\")\n\n if \"images\" in raw_posting:\n raw_images = raw_posting['images']\n remaining_fields.remove(\"images\")\n\n if not isinstance(raw_images, (list, tuple)):\n raise ParsingException(\"images must be an array\")\n\n for raw_image in raw_images:\n remaining_image_fields = set(raw_image.keys())\n\n image = {}\n\n parse_field(raw_image, \"full\", image, \"full_url\",\n remaining_image_fields,\n coerce_to_type=\"string\")\n\n parse_field(raw_image, \"full_width\", image, \"full_width\",\n remaining_image_fields,\n coerce_to_type=\"integer\")\n\n parse_field(raw_image, \"full_height\", image, \"full_height\",\n remaining_image_fields,\n coerce_to_type=\"integer\")\n\n parse_field(raw_image, \"thumbnail\", image, \"thumbnail_url\",\n remaining_image_fields,\n coerce_to_type=\"string\")\n\n parse_field(raw_image, \"thumbnail_width\",\n image, \"thumbnail_width\",\n remaining_image_fields,\n coerce_to_type=\"integer\")\n\n parse_field(raw_image, \"thumbnail_height\",\n image, \"thumbnail_height\",\n remaining_image_fields,\n coerce_to_type=\"integer\")\n\n if remaining_image_fields:\n raise ParsingException(\"Unexpected image field(s): \" +\n \", 
\".join(remaining_image_fields))\n\n images.append(image)\n\n if len(images) > 0:\n posting['has_image'] = True\n else:\n posting['has_image'] = False\n\n if \"annotations\" in raw_posting:\n raw_annotations = raw_posting['annotations']\n remaining_fields.remove(\"annotations\")\n\n for key,value in raw_annotations.items():\n if value == None: continue\n\n if not isinstance(key, basestring):\n raise ParsingException(\"Annotation keys must be \" +\n \"strings\")\n\n if not isinstance(value, basestring):\n raise ParsingException(\"Annotation values must be \" +\n \"strings\")\n\n annotations.append(key + \":\" + value)\n\n if \"status\" in raw_posting:\n raw_status = raw_posting['status']\n remaining_fields.remove(\"status\")\n\n remaining_status_fields = set(raw_status.keys())\n\n parse_field(raw_status, \"offered\", posting, \"status_offered\",\n remaining_status_fields, coerce_to_type=\"boolean\")\n\n parse_field(raw_status, \"wanted\", posting, \"status_wanted\",\n remaining_status_fields, coerce_to_type=\"boolean\")\n\n parse_field(raw_status, \"lost\", posting, \"status_lost\",\n remaining_status_fields, coerce_to_type=\"boolean\")\n\n parse_field(raw_status, \"stolen\", posting, \"status_stolen\",\n remaining_status_fields, coerce_to_type=\"boolean\")\n\n parse_field(raw_status, \"found\", posting, \"status_found\",\n remaining_status_fields, coerce_to_type=\"boolean\")\n\n parse_field(raw_status, \"deleted\", posting, \"status_deleted\",\n remaining_status_fields, coerce_to_type=\"boolean\")\n\n if remaining_status_fields:\n raise ParsingException(\"Unexpected status field(s): \" +\n \", \".join(remaining_status_fields))\n\n parse_field(raw_posting, \"immortal\", posting, \"immortal\",\n remaining_fields, coerce_to_type=\"boolean\")\n\n if remaining_fields:\n raise ParsingException(\"Unexpected field(s): \" +\n \", \".join(remaining_fields))\n except ParsingException,e:\n results.append((False, e.err_msg))\n continue\n\n parsed_posting = {'posting' : posting,\n 'annotations' : annotations,\n 'images' : images}\n\n results.append((True, parsed_posting))\n\n return results", "def paragraph_is_text_like(p):\n return not isinstance(p, pyth.document.Image)", "def contains_ps():\n output = check50.run(\"grep -c -w 'ps' typescript\").stdout()\n if output == \"0\\n\":\n help = \"Make sure that you try all commands in the lab. To start the script command so that it appends to you typescript file, use 'script -a typescript'\"\n raise check50.Failure(help)", "def nxmlHasBody(inData):\n #xml = codecs.open(nxmlName, encoding=\"utf8\").read()\n try:\n root = etreeFromXml(inData)\n body = findChild(root, \"body\")\n scans = findChildren(body,\"supplementary-material\", reqAttrName=\"content-type\", reqAttrValue='scanned-pages')\n if body!=None and len(scans)==0:\n logging.debug(\"Found body tag, no scanned pages within it, seems to contain normal fulltext\")\n return True\n else:\n logging.debug(\"No body tag or only scanned pages: No fulltext\")\n return False\n except IOError:\n logging.error(\"IOError while searching for body tag in xml file\")\n return False", "def ignore_bullets(p):\n\n if p.text.strip()[0:2] in ['o ', 'O ']:\n return False", "def test_no_sections_no_footer(self):\n text = 'text'\n result = extract_sections(text, self.site)\n self._extract_sections_tests(result, text, [], '')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for parsing postscript with no links
def test_parse_postscript_no_links(self):
    options = 0
    options |= (BIT_RWALL | BIT_CAPS)
    options &= ~BIT_STRIP
    tokens, links = parse_postscript(self.post_no_links, options)
    self.assertEqual(0, len(links))
[ "def parse_postscript(text: str, options: int) -> ([], []):\n p = re.compile('\\[(\\(.+?\\)+?)\\]\\[(.*?)\\]\\[0\\]', re.S)\n\n m = p.match(text.replace(\"\\n\", \"\"))\n\n if m is not None:\n tokens, offset = parse_tokens(m.group(1), options)\n links = parse_links(m.group(2), tokens, offset)\n\n return tokens, links\n\n raise LGParseError(f\"parse_postscript(): regex does not match for:\\n{text}\")", "def isPhantom(tagfile):\n return tagfile('phantom_tag', default=False)", "def paragraph_is_text_like(p):\n return not isinstance(p, pyth.document.Image)", "def test_unquoted_link(self):\n txt=\"\"\"hello <a href=www.co.uk>slashdot.org/?a=c&f=m</a> \"\"\"\n\n uris=self.candidate.extractor.extracturis(txt)\n self.assertTrue('www.co.uk' in uris)\n self.assertTrue('slashdot.org/?a=c&f=m' in uris)", "def is_image(hyperlink):\n if '.svg' in hyperlink:\n return True", "def is_image_post(submission):\n return (not submission.is_self) and submission.url.endswith((\".png\", \".jpg\", \".jpeg\", \".gif\"))", "def inHTML(text, index, body):\n # if there is a < then lxml will interpret that as a tag, so only search for the stuff before it\n text = text.split(b\"<\")[0]\n paths = pathsToText([(fromstring(body), \"\")], text.decode(\"utf-8\"), found=[])\n try:\n path = paths[index]\n return \"script\" not in path\n except IndexError:\n return False", "def test_link_valign_invalid(self):\n img = pq_img(\n self.p,\n '[[Image:file.png|link=http://example.com|valign=off]]', 'img')\n eq_(None, img.attr('style'))", "def test_raw_link(self):\n self.assertTrue(valet.view(self.test_file).find(self.test_file + \"?raw\") >= 0)", "def test_get_text_without_links(self):\n \"\"\" Jest sobie text html. Z samej rzeczy zawiera znaczniki htmlowe. \"\"\"\n html = '''\\n <p>The halting problem is basically a\\n formal way of asking if you can tell\\n whether or not an arbitrary program\\n will eventually halt.</p>\\n \\n <p>In other words, can you write a\\n program called a halting oracle,\\n HaltingOracle(program, input), which\\n returns true if program(input) would\\n eventually halt, and which returns\\n false if it wouldn't?</p>\\n \\n <p>The answer is: no, you can't.</p>\\n'''\n \"\"\" Za pomoca biblioteki pyquery parsujemy tego htmla \"\"\"\n paragraph = pq(html)\n expected_output = '''The halting problem is basically a\\n formal way of asking if you can tell\\n whether or not an arbitrary program\\n will eventually halt.\\n\\n \\n \\nIn other words, can you write a\\n program called a halting oracle,\\n HaltingOracle(program, input), which\\n returns true if program(input) would\\n eventually halt, and which returns\\n false if it wouldn't?\\n\\n \\n \\nThe answer is: no, you can't.\\n\\n'''\n actual_output = howdoi.get_text(paragraph)\n self.assertEqual(actual_output, expected_output)", "def test_get_page_removes_image_link_from_html(dummy_source):\n result = scr.Scraper._get_page('http://www.test.com/001/page/1',\n dummy_source)\n next_page, html = result[2:]\n assert html.find('a', href=lambda h: h in next_page) is None", "def hasRawText(self, text):\r\n r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\\d)[^>]*?>.*</\\1>',\r\n re.S).sub('', text.strip()).strip()\r\n r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)\r\n return '' != r", "def isMeaningfulPost(post):\n # nothing for reshares.\n if post.get('verb', '') == 'share':\n return False\n\n return isMeaningfulContent(re.sub('<.*?>', '', post['object']['content']))", "def test_metadata_filename_matches_postscriptname(self):\n import re\n regex = 
re.compile(r'\\W')\n\n for x in self.metadata.fonts:\n post_script_name = regex.sub('', x.post_script_name)\n filename = regex.sub('', os.path.splitext(x.filename)[0])\n if filename != post_script_name:\n msg = '\"{0}\" does not match \"{1}\"'\n self.fail(msg.format(x.filename, x.post_script_name))", "def validate_link(link):\n\tpass", "def test_good_page_url():\n page_html = site_parser._get_page_html(\n \"https://www.smashingmagazine.com/category/wallpapers/\",\n )\n assert type(page_html) == BeautifulSoup", "def parse_post(bs, keywords) :\n # Post Title\n try: \n post_titles = bs.find(\"div\", {\"class\":\"mtm\"}).find(\"a\", {\"class\":\"_8_4r\"})\n if post_titles is None:\n print(\"\\t No title found!\")\n else :\n # TODO: manually traversing to find child, might not work if site changes\n for i in range(4) :\n post_titles = next(post_titles.children)\n post_titles = post_titles.next_sibling\n post_title = post_titles.find(\"span\", {\"class\":False})\n if post_title is None :\n print(\"\\t No title found!\")\n else :\n if keyword_match(\" \".join(list(post_titles.stripped_strings)), keywords) :\n return True\n except AttributeError :\n print(\"\\t AttributeError when trying to find titles: likely titles don't exist\")\n\n\n # Post Content\n post_data = bs.find(\"div\", {\"class\":re.compile(\"userContent\")})\n if post_data is None:\n print(\"No user post content...try checking manually\")\n return False\n\n paragraphs = []\n for p in post_data.find_all(\"p\") :\n paragraphs.extend(list(p.stripped_strings))\n if keyword_match(\" \".join(paragraphs), keywords) :\n return True\n\n # Hidden Texts\n # This should already be handled by the above case, but check just in case\n paragraphs = []\n for hidden in post_data.find_all(\"div\", {\"class\":\"text_exposed_show\"}) :\n for p in hidden.find_all(\"p\") :\n paragraphs.extend(list(p.stripped_strings))\n if keyword_match(\" \".join(paragraphs), keywords):\n return True\n \n return False", "def page_empty_check(page) -> bool:\n txt = page.text\n # Check if the page is in content namespace\n if page.namespace().content:\n # Check if the page contains at least 50 characters\n return len(txt) < 50\n\n if not page.is_categorypage():\n txt = textlib.removeLanguageLinks(txt, site=page.site)\n txt = textlib.removeCategoryLinks(txt, site=page.site)\n return len(txt) < 4\n\n return False", "def has_link():\n\treturn False if color_dict.get('link') is None else True", "def is_html(text):\n if text is not None and '<html' in text[:300].lower():\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply a soft harmonic restraint to the given atoms. This modifies the ``ThermodynamicState`` object.
def restrain_atoms(thermodynamic_state, sampler_state, restrained_atoms, sigma=3.0*unit.angstroms):
    K = thermodynamic_state.kT / sigma**2  # Spring constant.
    system = thermodynamic_state.system  # This is a copy.

    # Check that there are atoms to restrain.
    if len(restrained_atoms) == 0:
        raise ValueError('No atoms to restrain.')

    # We need to translate the restrained molecule to the origin
    # to avoid MonteCarloBarostat rejections (see openmm#1854).
    if thermodynamic_state.pressure is not None:
        # First, determine all the molecule atoms. Reference platform is the cheapest to allocate?
        reference_platform = openmm.Platform.getPlatformByName('Reference')
        integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
        context = openmm.Context(system, integrator, reference_platform)
        molecules_atoms = context.getMolecules()
        del context, integrator

        # Make sure the atoms to restrain belong only to a single molecule.
        molecules_atoms = [set(molecule_atoms) for molecule_atoms in molecules_atoms]
        restrained_atoms_set = set(restrained_atoms)
        restrained_molecule_atoms = None
        for molecule_atoms in molecules_atoms:
            if restrained_atoms_set.issubset(molecule_atoms):
                # Convert set to list to use it as numpy array indices.
                restrained_molecule_atoms = list(molecule_atoms)
                break
        if restrained_molecule_atoms is None:
            raise ValueError('Cannot match the restrained atoms to any molecule. Restraining '
                             'two molecules is not supported when using a MonteCarloBarostat.')

        # Translate system so that the center of geometry is in
        # the origin to reduce the barostat rejections.
        distance_unit = sampler_state.positions.unit
        centroid = np.mean(sampler_state.positions[restrained_molecule_atoms,:] / distance_unit, axis=0)
        sampler_state.positions -= centroid * distance_unit

    # Create a CustomExternalForce to restrain all atoms.
    if thermodynamic_state.is_periodic:
        energy_expression = '(K/2)*periodicdistance(x, y, z, x0, y0, z0)^2'  # periodic distance
    else:
        energy_expression = '(K/2)*((x-x0)^2 + (y-y0)^2 + (z-z0)^2)'  # non-periodic distance
    restraint_force = openmm.CustomExternalForce(energy_expression)
    # Adding the spring constant as a global parameter allows us to turn it off if desired
    restraint_force.addGlobalParameter('K', K)
    restraint_force.addPerParticleParameter('x0')
    restraint_force.addPerParticleParameter('y0')
    restraint_force.addPerParticleParameter('z0')
    for index in restrained_atoms:
        parameters = sampler_state.positions[index,:].value_in_unit_system(unit.md_unit_system)
        restraint_force.addParticle(index, parameters)

    # Update thermodynamic state.
    system.addForce(restraint_force)
    thermodynamic_state.system = system
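For context, a self-contained sketch of the same harmonic-restraint idea written directly against OpenMM, without the ThermodynamicState/SamplerState wrappers used above; the function name, the bare system argument and the assumption that positions is an openmm.unit Quantity array are all illustrative, not part of the record's API.

# Hedged sketch: attach a (K/2)*|r - r0|^2 restraint to selected atoms of an
# existing openmm.System. `system`, `positions` and `restrained_atoms` are
# assumed inputs supplied by the caller.
import openmm
from openmm import unit

def add_harmonic_restraint(system, positions, restrained_atoms,
                           temperature=300.0 * unit.kelvin,
                           sigma=3.0 * unit.angstroms):
    kT = unit.MOLAR_GAS_CONSTANT_R * temperature
    K = kT / sigma**2  # spring constant, same definition as in the record above
    force = openmm.CustomExternalForce('(K/2)*((x-x0)^2 + (y-y0)^2 + (z-z0)^2)')
    force.addGlobalParameter('K', K)
    for name in ('x0', 'y0', 'z0'):
        force.addPerParticleParameter(name)
    for index in restrained_atoms:
        # Reference positions in OpenMM's MD unit system (nanometers).
        x0, y0, z0 = positions[index].value_in_unit_system(unit.md_unit_system)
        force.addParticle(index, [x0, y0, z0])
    system.addForce(force)
    return system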
[ "def harmonic_bond(conf, params, box, bond_idxs, param_idxs):\n ci = conf[bond_idxs[:, 0]]\n cj = conf[bond_idxs[:, 1]]\n dij = distance(ci, cj, box)\n kbs = params[param_idxs[:, 0]]\n r0s = params[param_idxs[:, 1]]\n energy = np.sum(kbs/2 * np.power(dij - r0s, 2.0))\n return energy", "def _friction(self):\n\n # calculate water entrainment coefficients\n if self.water_entrainment is True:\n self.ew_link[self.wet_horizontal_links] = get_ew(\n self.U[self.wet_horizontal_links],\n self.Ch_link[self.wet_horizontal_links],\n self.R,\n self.g,\n )\n self.ew_link[self.wet_vertical_links] = get_ew(\n self.U[self.wet_vertical_links],\n self.Ch_link[self.wet_vertical_links],\n self.R,\n self.g,\n )\n self.ew_node[self.wet_nodes] = get_ew(\n self.U_node[self.wet_nodes], self.Ch[self.wet_nodes], self.R, self.g\n )\n else:\n self.ew_link[self.wet_horizontal_links] = 0\n self.ew_link[self.wet_vertical_links] = 0\n self.ew_node[self.wet_nodes] = 0\n\n # calculate friction terms using semi-implicit scheme\n self.u_temp[self.wet_horizontal_links] /= (\n 1\n + (\n self.Cf_link[self.wet_horizontal_links]\n + self.ew_link[self.wet_horizontal_links]\n )\n * self.U[self.wet_horizontal_links]\n * self.dt_local\n / self.h_link[self.wet_horizontal_links]\n )\n self.v_temp[self.wet_vertical_links] /= (\n 1\n + (\n self.Cf_link[self.wet_vertical_links]\n + self.ew_link[self.wet_vertical_links]\n )\n * self.U[self.wet_vertical_links]\n * self.dt_local\n / self.h_link[self.wet_vertical_links]\n )\n self.update_boundary_conditions(\n u=self.u_temp,\n v=self.v_temp,\n u_node=self.u_node_temp,\n v_node=self.v_node_temp,\n )", "def thermodynamics(pyom):\n advect_temperature(pyom)\n advect_salinity(pyom)\n\n if pyom.enable_conserve_energy:\n \"\"\"\n advection of dynamic enthalpy\n \"\"\"\n if pyom.enable_superbee_advection:\n advection.adv_flux_superbee(pyom,pyom.flux_east,pyom.flux_north,pyom.flux_top,pyom.Hd[:,:,:,pyom.tau])\n else:\n advection.adv_flux_2nd(pyom,pyom.flux_east,pyom.flux_north,pyom.flux_top,pyom.Hd[:,:,:,pyom.tau])\n\n pyom.dHd[2:-2, 2:-2, :, pyom.tau] = pyom.maskT[2:-2, 2:-2, :] * (-(pyom.flux_east[2:-2, 2:-2, :] - pyom.flux_east[1:-3, 2:-2, :]) \\\n / (pyom.cost[np.newaxis, 2:-2, np.newaxis] * pyom.dxt[2:-2, np.newaxis, np.newaxis]) \\\n - (pyom.flux_north[2:-2, 2:-2,:] - pyom.flux_north[2:-2, 1:-3, :]) \\\n / (pyom.cost[np.newaxis, 2:-2, np.newaxis] * pyom.dyt[np.newaxis, 2:-2, np.newaxis]))\n pyom.dHd[:,:,0,pyom.tau] += -pyom.maskT[:,:,0] * pyom.flux_top[:,:,0] / pyom.dzt[0]\n pyom.dHd[:,:,1:,pyom.tau] += -pyom.maskT[:,:,1:] * (pyom.flux_top[:,:,1:] - pyom.flux_top[:,:,:-1]) / pyom.dzt[np.newaxis, np.newaxis, 1:]\n\n \"\"\"\n changes in dyn. 
Enthalpy due to advection\n \"\"\"\n aloc = np.zeros((pyom.nx+4, pyom.ny+4, pyom.nz))\n aloc[2:-2, 2:-2, :] = pyom.grav / pyom.rho_0 * (-pyom.int_drhodT[2:-2, 2:-2, :, pyom.tau] * pyom.dtemp[2:-2, 2:-2, :, pyom.tau] \\\n - pyom.int_drhodS[2:-2, 2:-2, :, pyom.tau] * pyom.dsalt[2:-2, 2:-2, :, pyom.tau]) \\\n - pyom.dHd[2:-2, 2:-2, :, pyom.tau]\n\n \"\"\"\n contribution by vertical advection is - g rho w / rho0, substract this also\n \"\"\"\n aloc[:, :, :-1] += -0.25 * pyom.grav / pyom.rho_0 * pyom.w[:, :, :-1, pyom.tau] \\\n * (pyom.rho[:, :, :-1, pyom.tau] + pyom.rho[:, :, 1:, pyom.tau]) \\\n * pyom.dzw[np.newaxis, np.newaxis, :-1] / pyom.dzt[np.newaxis, np.newaxis, :-1]\n aloc[:, :, 1:] += -0.25 * pyom.grav / pyom.rho_0 * pyom.w[:, :, :-1, pyom.tau] \\\n * (pyom.rho[:, :, 1:, pyom.tau] + pyom.rho[:, :, :-1, pyom.tau]) \\\n * pyom.dzw[np.newaxis, np.newaxis, :-1] / pyom.dzt[np.newaxis, np.newaxis, 1:]\n\n if pyom.enable_conserve_energy and pyom.enable_tke:\n \"\"\"\n dissipation by advection interpolated on W-grid\n \"\"\"\n pyom.P_diss_adv[...] = 0.\n diffusion.dissipation_on_wgrid(pyom, pyom.P_diss_adv, aloc=aloc)\n\n \"\"\"\n distribute pyom.P_diss_adv over domain, prevent draining of TKE\n \"\"\"\n fxa = np.sum(pyom.area_t[2:-2, 2:-2, np.newaxis] * pyom.P_diss_adv[2:-2, 2:-2, :-1] \\\n * pyom.dzw[np.newaxis, np.newaxis, :-1] * pyom.maskW[2:-2, 2:-2, :-1]) \\\n + np.sum(0.5 * pyom.area_t[2:-2, 2:-2] * pyom.P_diss_adv[2:-2, 2:-2, -1] \\\n * pyom.dzw[-1] * pyom.maskW[2:-2, 2:-2, -1])\n tke_mask = pyom.tke[2:-2, 2:-2, :-1, pyom.tau] > 0.\n fxb = np.sum(pyom.area_t[2:-2, 2:-2, np.newaxis] * pyom.dzw[np.newaxis, np.newaxis, :-1] * pyom.maskW[2:-2, 2:-2, :-1] * tke_mask) \\\n + np.sum(0.5 * pyom.area_t[2:-2, 2:-2] * pyom.dzw[-1] * pyom.maskW[2:-2, 2:-2, -1])\n pyom.P_diss_adv[...] = 0.\n pyom.P_diss_adv[2:-2, 2:-2, :-1] = fxa / fxb * tke_mask\n pyom.P_diss_adv[2:-2, 2:-2, -1] = fxa / fxb\n\n \"\"\"\n Adam Bashforth time stepping for advection\n \"\"\"\n pyom.temp[:,:,:,pyom.taup1] = pyom.temp[:,:,:,pyom.tau] + pyom.dt_tracer * \\\n ((1.5+pyom.AB_eps)*pyom.dtemp[:,:,:,pyom.tau] - (0.5+pyom.AB_eps)*pyom.dtemp[:,:,:,pyom.taum1]) * pyom.maskT\n pyom.salt[:,:,:,pyom.taup1] = pyom.salt[:,:,:,pyom.tau] + pyom.dt_tracer * \\\n ((1.5+pyom.AB_eps)*pyom.dsalt[:,:,:,pyom.tau] - (0.5+pyom.AB_eps)*pyom.dsalt[:,:,:,pyom.taum1]) * pyom.maskT\n\n \"\"\"\n horizontal diffusion\n \"\"\"\n with pyom.timers[\"isoneutral\"]:\n if pyom.enable_hor_diffusion:\n diffusion.tempsalt_diffusion(pyom)\n if pyom.enable_biharmonic_mixing:\n diffusion.tempsalt_biharmonic(pyom)\n\n \"\"\"\n sources like restoring zones, etc\n \"\"\"\n if pyom.enable_tempsalt_sources:\n diffusion.tempsalt_sources(pyom)\n\n \"\"\"\n isopycnal diffusion\n \"\"\"\n if pyom.enable_neutral_diffusion:\n pyom.P_diss_iso[...] = 0.0\n pyom.dtemp_iso[...] = 0.0\n pyom.dsalt_iso[...] = 0.0\n isoneutral.isoneutral_diffusion_pre(pyom)\n isoneutral.isoneutral_diffusion(pyom,pyom.temp,True)\n isoneutral.isoneutral_diffusion(pyom,pyom.salt,False)\n if pyom.enable_skew_diffusion:\n pyom.P_diss_skew[...] = 0.0\n isoneutral.isoneutral_skew_diffusion(pyom,pyom.temp,True)\n isoneutral.isoneutral_skew_diffusion(pyom,pyom.salt,False)\n\n with pyom.timers[\"vmix\"]:\n \"\"\"\n vertical mixing of temperature and salinity\n \"\"\"\n pyom.dtemp_vmix[...] = pyom.temp[:,:,:,pyom.taup1]\n pyom.dsalt_vmix[...] 
= pyom.salt[:,:,:,pyom.taup1]\n\n a_tri = np.zeros((pyom.nx, pyom.ny, pyom.nz))\n b_tri = np.zeros((pyom.nx, pyom.ny, pyom.nz))\n c_tri = np.zeros((pyom.nx, pyom.ny, pyom.nz))\n d_tri = np.zeros((pyom.nx, pyom.ny, pyom.nz))\n delta = np.zeros((pyom.nx, pyom.ny, pyom.nz))\n\n ks = pyom.kbot[2:-2, 2:-2] - 1\n delta[:, :, :-1] = pyom.dt_tracer / pyom.dzw[np.newaxis, np.newaxis, :-1] * pyom.kappaH[2:-2, 2:-2, :-1]\n delta[:, :, -1] = 0.\n a_tri[:, :, 1:] = -delta[:,:,:-1] / pyom.dzt[np.newaxis, np.newaxis, 1:]\n b_tri[:, :, 1:] = 1 + (delta[:, :, 1:] + delta[:, :, :-1]) / pyom.dzt[np.newaxis, np.newaxis, 1:]\n b_tri_edge = 1 + delta / pyom.dzt[np.newaxis, np.newaxis, :]\n c_tri[:, :, :-1] = -delta[:, :, :-1] / pyom.dzt[np.newaxis, np.newaxis, :-1]\n d_tri[...] = pyom.temp[2:-2, 2:-2, :, pyom.taup1]\n d_tri[:, :, -1] += pyom.dt_tracer * pyom.forc_temp_surface[2:-2, 2:-2] / pyom.dzt[-1]\n sol, mask = utilities.solve_implicit(pyom, ks, a_tri, b_tri, c_tri, d_tri, b_edge=b_tri_edge)\n pyom.temp[2:-2, 2:-2, :, pyom.taup1] = np.where(mask, sol, pyom.temp[2:-2, 2:-2, :, pyom.taup1])\n d_tri[...] = pyom.salt[2:-2, 2:-2, :, pyom.taup1]\n d_tri[:, :, -1] += pyom.dt_tracer * pyom.forc_salt_surface[2:-2, 2:-2] / pyom.dzt[-1]\n sol, mask = utilities.solve_implicit(pyom, ks, a_tri, b_tri, c_tri, d_tri, b_edge=b_tri_edge)\n pyom.salt[2:-2, 2:-2, :, pyom.taup1] = np.where(mask, sol, pyom.salt[2:-2, 2:-2, :, pyom.taup1])\n\n pyom.dtemp_vmix[...] = (pyom.temp[:,:,:,pyom.taup1] - pyom.dtemp_vmix) / pyom.dt_tracer\n pyom.dsalt_vmix[...] = (pyom.salt[:,:,:,pyom.taup1] - pyom.dsalt_vmix) / pyom.dt_tracer\n\n \"\"\"\n boundary exchange\n \"\"\"\n if pyom.enable_cyclic_x:\n cyclic.setcyclic_x(pyom.temp[..., pyom.taup1])\n cyclic.setcyclic_x(pyom.salt[..., pyom.taup1])\n\n with pyom.timers[\"eq_of_state\"]:\n calc_eq_of_state(pyom, pyom.taup1)\n\n \"\"\"\n surface density flux\n \"\"\"\n pyom.forc_rho_surface[...] = (\n density.get_drhodT(pyom,pyom.salt[:,:,-1,pyom.taup1],pyom.temp[:,:,-1,pyom.taup1],np.abs(pyom.zt[-1])) * pyom.forc_temp_surface \\\n + density.get_drhodS(pyom,pyom.salt[:,:,-1,pyom.taup1],pyom.temp[:,:,-1,pyom.taup1],np.abs(pyom.zt[-1])) * pyom.forc_salt_surface \\\n ) * pyom.maskT[:,:,-1]\n\n with pyom.timers[\"vmix\"]:\n pyom.P_diss_v[...] 
= 0.0\n if pyom.enable_conserve_energy:\n \"\"\"\n diagnose dissipation of dynamic enthalpy by vertical mixing\n \"\"\"\n fxa = (-pyom.int_drhodT[2:-2, 2:-2, 1:, pyom.taup1] + pyom.int_drhodT[2:-2, 2:-2, :-1,pyom.taup1]) / pyom.dzw[np.newaxis, np.newaxis, :-1]\n pyom.P_diss_v[2:-2, 2:-2, :-1] += -pyom.grav / pyom.rho_0 * fxa * pyom.kappaH[2:-2, 2:-2, :-1] \\\n * (pyom.temp[2:-2, 2:-2, 1:, pyom.taup1] - pyom.temp[2:-2, 2:-2, :-1,pyom.taup1]) \\\n / pyom.dzw[np.newaxis, np.newaxis, :-1] * pyom.maskW[2:-2, 2:-2, :-1]\n fxa = (-pyom.int_drhodS[2:-2, 2:-2, 1:, pyom.taup1] + pyom.int_drhodS[2:-2, 2:-2, :-1,pyom.taup1]) / pyom.dzw[np.newaxis, np.newaxis, :-1]\n pyom.P_diss_v[2:-2, 2:-2, :-1] += -pyom.grav / pyom.rho_0 * fxa * pyom.kappaH[2:-2, 2:-2, :-1] \\\n * (pyom.salt[2:-2, 2:-2, 1:, pyom.taup1] - pyom.salt[2:-2, 2:-2, :-1,pyom.taup1]) \\\n / pyom.dzw[np.newaxis, np.newaxis, :-1] * pyom.maskW[2:-2, 2:-2, :-1]\n\n fxa = 2 * pyom.int_drhodT[2:-2, 2:-2, -1, pyom.taup1] / pyom.dzw[-1]\n pyom.P_diss_v[2:-2, 2:-2, -1] += - pyom.grav / pyom.rho_0 * fxa * pyom.forc_temp_surface[2:-2 ,2:-2] * pyom.maskW[2:-2, 2:-2, -1]\n fxa = 2 * pyom.int_drhodS[2:-2, 2:-2, -1, pyom.taup1] / pyom.dzw[-1]\n pyom.P_diss_v[2:-2, 2:-2, -1] += - pyom.grav / pyom.rho_0 * fxa * pyom.forc_salt_surface[2:-2 ,2:-2] * pyom.maskW[2:-2, 2:-2, -1]\n\n if pyom.enable_conserve_energy:\n \"\"\"\n determine effect due to nonlinear equation of state\n \"\"\"\n aloc[:,:,:-1] = pyom.kappaH[:,:,:-1] * pyom.Nsqr[:,:,:-1,pyom.taup1]\n pyom.P_diss_nonlin[:,:,:-1] = pyom.P_diss_v[:,:,:-1] - aloc[:,:,:-1]\n pyom.P_diss_v[:,:,:-1] = aloc[:,:,:-1]\n else:\n \"\"\"\n diagnose N^2 pyom.kappaH, i.e. exchange of pot. energy with TKE\n \"\"\"\n pyom.P_diss_v[:,:,:-1] = pyom.kappaH[:,:,:-1] * pyom.Nsqr[:,:,:-1,pyom.taup1]\n pyom.P_diss_v[:,:,-1] = -pyom.forc_rho_surface * pyom.maskT[:,:,-1] * pyom.grav / pyom.rho_0", "def apply_symmetrisation(self):\n\n # get the values to be symmetrised\n for sym_set in self.molecule.symm_hs.values():\n charges, sigmas, epsilons = [], [], []\n for atom_set in sym_set:\n for atom in atom_set:\n charges.append(float(self.non_bonded_force[atom - 1][0]))\n sigmas.append(float(self.non_bonded_force[atom - 1][1]))\n epsilons.append(float(self.non_bonded_force[atom - 1][2]))\n # calculate the average values to be used in symmetry\n charge, sigma, epsilon = sum(charges) / len(charges), sum(sigmas) / len(sigmas), sum(epsilons) / len(epsilons)\n\n # now loop through the atoms again and store the new values\n for atom in atom_set:\n self.non_bonded_force[atom - 1] = [str(charge), str(sigma), str(epsilon)]", "def numeric_force(atoms, a, i, d=0.001):\n p0 = atoms.get_positions()\n p = p0.copy()\n p[a, i] += d\n atoms.set_positions(p, apply_constraint=False)\n eplus = atoms.get_potential_energy()\n p[a, i] -= 2 * d\n atoms.set_positions(p, apply_constraint=False)\n eminus = atoms.get_potential_energy()\n atoms.set_positions(p0, apply_constraint=False)\n return (eminus - eplus) / (2 * d)", "def analyze_harmonic_bond_force(self, force):\n\n # initialize a dict to put energy in\n energy_dict = dict()\n for idx in self.idxs:\n energy_dict[idx] = Quantity(0.0, kilojoule/mole)\n\n n_bonds = force.getNumBonds()\n\n for idx in range(n_bonds):\n atom0, atom1, eq_l, k = force.getBondParameters(idx)\n if (atom0 in self.idxs) and (atom1 in self.idxs):\n dist = self.dist(atom0, atom1)\n energy = 0.5 * k * np.power((dist - eq_l), 2)\n\n # since we consider the bond energy is contributed by two atoms,\n # we divide the enrgy by two\n\n 
energy_dict[atom0] += 0.5 * energy\n energy_dict[atom1] += 0.5 * energy\n\n return energy_dict", "def _apply_external_forces(self, external_f, f, i_X_p):\n for i in range(0, len(f)):\n f[i] -= cs.mtimes(i_X_p[i].T, external_f[i])\n return f", "def zero_modify_moms(moms_func):\n nonlocal prn_eq_0, freq_p0, freq_zm, solve_n_base\n if freq_zm is False:\n return moms_func\n # else work to do\n @wraps(moms_func)\n def wrapped_moms_func(n):\n n_base = solve_n_base(n, freq_p0)\n ans = np.array(moms_func(n_base))\n return (1 - freq_p0) / (1 - prn_eq_0(n_base)) * ans\n return wrapped_moms_func", "def get_binding_free_energy(self, thermodynamic_state):\n\n # Compute thermal energy.\n kT = kB * thermodynamic_state.temperature\n\n # Form the integrand function for integration in reduced units (r/sigma).\n platform = openmm.Platform.getPlatformByName('Reference')\n integrator = openmm.VerletIntegrator(1.0 * unit.femtoseconds)\n context = openmm.Context(self.system, integrator, platform)\n context.setPositions(self.positions)\n\n def integrand_openmm(xvec, args):\n \"\"\"OpenMM implementation of integrand (for sanity checks).\"\"\"\n [context] = args\n positions = unit.Quantity(np.zeros([2,3],np.float32), unit.angstrom)\n integrands = 0.0 * xvec\n for (i, x) in enumerate(xvec):\n positions[1,0] = x * self.sigma\n context.setPositions(positions)\n state = context.getState(getEnergy=True)\n u = state.getPotentialEnergy() / kT # effective energy\n integrand = 4.0*pi*(x**2) * np.exp(-u)\n integrands[i] = integrand\n\n return integrands\n\n def integrand_numpy(x, args):\n \"\"\"NumPy implementation of integrand (for speed).\"\"\"\n u = 4.0*(self.epsilon)*(x**(-12) - x**(-6)) / kT\n integrand = 4.0*pi*(x**2) * np.exp(-u)\n return integrand\n\n # Compute standard state volume\n V0 = (unit.liter / (unit.AVOGADRO_CONSTANT_NA * unit.mole)).in_units_of(unit.angstrom**3)\n\n # Integrate the free energy of binding in unitless coordinate system.\n xmin = 0.15 # in units of sigma\n xmax = 6.0 # in units of sigma\n from scipy.integrate import quadrature\n [integral, abserr] = quadrature(integrand_numpy, xmin, xmax, args=[context], maxiter=500)\n # correct for performing unitless integration\n integral = integral * (self.sigma ** 3)\n\n # Correct for actual integration volume (which exceeds standard state volume).\n rmax = xmax * self.sigma\n Vint = (4.0/3.0) * pi * (rmax**3)\n integral = integral * (V0 / Vint)\n\n # Clean up.\n del context, integrator\n\n # Compute standard state binding free energy.\n binding_free_energy = -kT * np.log(integral / V0)\n\n return binding_free_energy", "def solve(self, extra_constraints=None):\n self.solver.push()\n try:\n if extra_constraints:\n self.solver.add(extra_constraints)\n self.solver.add(*self.soft_constraints.values())\n\n check = self.solver.check()\n if check == z3.sat:\n solution = self.solver.model()\n # deleted_bindings = [\n self.soft_constraints = {s: k for s, k\n in self.soft_constraints.items()\n if z3.is_true(solution[s])}\n return solution\n else:\n raise ConstraintError(self.solver, extra_constraints)\n finally:\n self.solver.pop()", "def relax_system():\n sim = Sim(mesh, Ms, unit_length=1e-9)\n sim.set_m((1, 0, 0))\n sim.alpha = 1\n sim.do_precession = False\n sim.add(Exchange(A))\n sim.add(Demag(solver=\"FK\"))\n sim.relax()\n np.save(initial_m_file, sim.m)", "def local_relax(gas_particles, hydro, gravity_field=None,\n monitor_func=check_energy_conservation,\n bridge_options=dict()):\n if monitor_func == \"energy\":\n monitor_func = monitor_energy\n 
t_end_in_t_dyn = 0.1 # Relax for this many dynamical timescales\n t_end = t_end_in_t_dyn \\\n * gas_particles.dynamical_timescale(mass_fraction=0.9)\n n_steps = 10\n velocity_damp_factor = 1.0 - (2.0*numpy.pi*t_end_in_t_dyn) \\\n /n_steps # Critical damping\n \n in_hydro = hydro.gas_particles.add_particles(gas_particles)\n if gravity_field is None:\n system = hydro\n else:\n system = Bridge(timestep=(t_end/n_steps).as_quantity_in(units.yr),\n **bridge_options)\n system.add_system(hydro, [gravity_field])\n \n for i_step, time in enumerate(t_end * numpy.linspace(1.0/n_steps,\n 1.0, n_steps)):\n system.evolve_model(time)\n hydro.gas_particles.velocity = velocity_damp_factor \\\n * hydro.gas_particles.velocity\n monitor_func(system, i_step, time, n_steps)\n \n return in_hydro.copy()", "def linear_friction(domain):\n\n from math import sqrt\n\n w = domain.quantities['stage'].centroid_values\n z = domain.quantities['elevation'].centroid_values\n h = w-z\n\n uh = domain.quantities['xmomentum'].centroid_values\n# vh = domain.quantities['ymomentum'].centroid_values\n tau = domain.quantities['linear_friction'].centroid_values\n\n xmom_update = domain.quantities['xmomentum'].semi_implicit_update\n# ymom_update = domain.quantities['ymomentum'].semi_implicit_update\n\n N = domain.number_of_elements\n eps = domain.minimum_allowed_height\n g = domain.g #Not necessary? Why was this added?\n\n for k in range(N):\n if tau[k] >= eps:\n if h[k] >= eps:\n \tS = -tau[k]/h[k]\n\n \t#Update momentum\n \txmom_update[k] += S*uh[k]", "def setPeriodic(dim, tags, tagsMaster, affineTransform):\n api_tags_, api_tags_n_ = _ivectorint(tags)\n api_tagsMaster_, api_tagsMaster_n_ = _ivectorint(tagsMaster)\n api_affineTransform_, api_affineTransform_n_ = _ivectordouble(affineTransform)\n ierr = c_int()\n lib.gmshModelMeshSetPeriodic(\n c_int(dim),\n api_tags_, api_tags_n_,\n api_tagsMaster_, api_tagsMaster_n_,\n api_affineTransform_, api_affineTransform_n_,\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshSetPeriodic returned non-zero error code: \",\n ierr.value)", "def thaw_solarheat_dp(self, msid):\n p = 'solarheat__{}__dP_\\d+'.format(msid.lower())\n found = False\n for par in self.model.pars:\n if re.match(p, par.full_name):\n par['frozen'] = False\n found = True\n if not found:\n print('Solarheat \"dP\" parameters not found')", "def apply_impulse(self, p):\n\t\tself.force=p", "def _add_nonbonded_force_terms(self):\n # Add a regular nonbonded force for all interactions that are not\n # changing.\n standard_nonbonded_force = openmm.NonbondedForce()\n self._hybrid_system.addForce(standard_nonbonded_force)\n self._hybrid_system_forces['standard_nonbonded_force'] = standard_nonbonded_force\n\n # Create a CustomNonbondedForce to handle alchemically interpolated\n # nonbonded parameters.\n # Select functional form based on nonbonded method.\n # TODO: check _nonbonded_custom_ewald and _nonbonded_custom_cutoff\n # since they take arguments that are never used...\n if self._nonbonded_method in [openmm.NonbondedForce.NoCutoff]:\n sterics_energy_expression = self._nonbonded_custom(\n self._softcore_LJ_v2)\n elif self._nonbonded_method in [openmm.NonbondedForce.CutoffPeriodic,\n openmm.NonbondedForce.CutoffNonPeriodic]:\n epsilon_solvent = self._old_system_forces['NonbondedForce'].getReactionFieldDielectric()\n r_cutoff = self._old_system_forces['NonbondedForce'].getCutoffDistance()\n sterics_energy_expression = self._nonbonded_custom(\n self._softcore_LJ_v2)\n 
standard_nonbonded_force.setReactionFieldDielectric(\n epsilon_solvent)\n standard_nonbonded_force.setCutoffDistance(r_cutoff)\n elif self._nonbonded_method in [openmm.NonbondedForce.PME,\n openmm.NonbondedForce.Ewald]:\n [alpha_ewald, nx, ny, nz] = self._old_system_forces['NonbondedForce'].getPMEParameters()\n delta = self._old_system_forces['NonbondedForce'].getEwaldErrorTolerance()\n r_cutoff = self._old_system_forces['NonbondedForce'].getCutoffDistance()\n sterics_energy_expression = self._nonbonded_custom(\n self._softcore_LJ_v2)\n standard_nonbonded_force.setPMEParameters(alpha_ewald, nx, ny, nz)\n standard_nonbonded_force.setEwaldErrorTolerance(delta)\n standard_nonbonded_force.setCutoffDistance(r_cutoff)\n else:\n errmsg = f\"Nonbonded method {self._nonbonded_method} not supported\"\n raise ValueError(errmsg)\n\n standard_nonbonded_force.setNonbondedMethod(self._nonbonded_method)\n\n sterics_energy_expression += self._nonbonded_custom_sterics_common()\n\n sterics_mixing_rules = self._nonbonded_custom_mixing_rules()\n\n custom_nonbonded_method = self._translate_nonbonded_method_to_custom(\n self._nonbonded_method)\n\n total_sterics_energy = \"U_sterics;\" + sterics_energy_expression + sterics_mixing_rules\n\n sterics_custom_nonbonded_force = openmm.CustomNonbondedForce(\n total_sterics_energy)\n\n if self._softcore_LJ_v2:\n sterics_custom_nonbonded_force.addGlobalParameter(\n \"softcore_alpha\", self._softcore_LJ_v2_alpha)\n else:\n sterics_custom_nonbonded_force.addGlobalParameter(\n \"softcore_alpha\", self._softcore_alpha)\n\n # Lennard-Jones sigma initial\n sterics_custom_nonbonded_force.addPerParticleParameter(\"sigmaA\")\n # Lennard-Jones epsilon initial\n sterics_custom_nonbonded_force.addPerParticleParameter(\"epsilonA\")\n # Lennard-Jones sigma final\n sterics_custom_nonbonded_force.addPerParticleParameter(\"sigmaB\")\n # Lennard-Jones epsilon final\n sterics_custom_nonbonded_force.addPerParticleParameter(\"epsilonB\")\n # 1 = hybrid old atom, 0 otherwise\n sterics_custom_nonbonded_force.addPerParticleParameter(\"unique_old\")\n # 1 = hybrid new atom, 0 otherwise\n sterics_custom_nonbonded_force.addPerParticleParameter(\"unique_new\")\n\n sterics_custom_nonbonded_force.addGlobalParameter(\n \"lambda_sterics_core\", 0.0)\n sterics_custom_nonbonded_force.addGlobalParameter(\n \"lambda_electrostatics_core\", 0.0)\n sterics_custom_nonbonded_force.addGlobalParameter(\n \"lambda_sterics_insert\", 0.0)\n sterics_custom_nonbonded_force.addGlobalParameter(\n \"lambda_sterics_delete\", 0.0)\n\n sterics_custom_nonbonded_force.setNonbondedMethod(\n custom_nonbonded_method)\n\n self._hybrid_system.addForce(sterics_custom_nonbonded_force)\n self._hybrid_system_forces['core_sterics_force'] = sterics_custom_nonbonded_force\n\n # Set the use of dispersion correction to be the same between the new\n # nonbonded force and the old one:\n if self._old_system_forces['NonbondedForce'].getUseDispersionCorrection():\n self._hybrid_system_forces['standard_nonbonded_force'].setUseDispersionCorrection(True)\n if self._use_dispersion_correction:\n sterics_custom_nonbonded_force.setUseLongRangeCorrection(True)\n else:\n self._hybrid_system_forces['standard_nonbonded_force'].setUseDispersionCorrection(False)\n\n if self._old_system_forces['NonbondedForce'].getUseSwitchingFunction():\n switching_distance = self._old_system_forces['NonbondedForce'].getSwitchingDistance()\n standard_nonbonded_force.setUseSwitchingFunction(True)\n standard_nonbonded_force.setSwitchingDistance(switching_distance)\n 
sterics_custom_nonbonded_force.setUseSwitchingFunction(True)\n sterics_custom_nonbonded_force.setSwitchingDistance(switching_distance)\n else:\n standard_nonbonded_force.setUseSwitchingFunction(False)\n sterics_custom_nonbonded_force.setUseSwitchingFunction(False)", "def restrain(self):\n dofs = self.dofs\n for i in range(6):\n dofs[i] = BoundaryDof()", "def _update_forces_thermostat(self, kinetic_energy):\n # Compute Ekin from tensor\n # kinetic_energy = torch.einsum(\"abii->ab\", kinetic_energy)\n # Compute forces on thermostat (R x M)\n self.t_forces[..., 0] = (\n kinetic_energy - self.degrees_of_freedom_particles * self.kb_temperature\n ) / self.t_masses[..., 0]\n\n # Get kinetic energy of barostat (R x M)\n kinetic_energy_cell = self._compute_kinetic_energy_cell()\n # Compute forces on cell thermostat\n self.t_forces_cell[..., 0] = (\n kinetic_energy_cell - 9.0 * self.kb_temperature\n ) / self.t_masses_cell[..., 0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current market value of the bond.
def market_value(self) -> float:
    return self._market_dirty_price
[ "def _get_val(self):\n return self.stock_owned.dot(self.stock_price) + self.cash_in_hand", "def value(self):\n return self.shares() * self.price()", "def value_current(self):\n # get current value from Stockexchange\n #TODO: Transform to € if $\n value = self.history.iloc[-1]\n if self.info['currency'] != self.currency:\n currency = Converter(\n self.info['currency'], self.currency\n )\n value = currency.convert(value)\n\n return value", "def market_close(self):\n return self._market_close", "def get_asset_value(self, exchange, asset_symbol):\n try:\n instrument_symbol = asset_symbol + \"/USD\"\n bars_list = self.get_latest_bars(exchange, instrument_symbol, 1)\n except KeyError:\n logger.error(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return np.array([getattr(b[1], 'close') for b in bars_list])", "def value(self):\n value = 0.0\n for bond in self._bonds:\n value += bond.value()\n return value", "def calc_value(self, model='bsm'):\n\n time_to_expire = (self.expiration - datetime.date.today()).days / 365\n if model == 'bsm':\n return bsm(self.underlying.price, self.strike, RISK_FREE_RATE, time_to_expire, self.underlying.vol)", "def get_value(self):\n if self.card:\n return self.card.get_value()\n return 0", "def price(self):\n if self.yticker.info.get('regularMarketPrice') is None:\n raise NotFoundError(\n f'Cannot retrieve ticker (\"{self._ticker}\") '\n 'from Yahoo Finance')\n return self.yticker.info['regularMarketPrice']", "def getValue(self) -> \"double const *\":\n return _coin.SbDPRotation_getValue(self)", "def get_buy_price(self) -> float:\n return self.buy_price", "def price(self):\r\n if len(self.trades) > 0:\r\n by_timestamp = sorted(self.trades,\r\n key=lambda trade: trade.timestamp,\r\n reverse=True)\r\n return by_timestamp[0].price_per_share\r\n else:\r\n error_message = \"The Price for the last trade is not available\"\r\n raise AttributeError(error_message)", "def current_values(self):\n\t\t# remove duplicate tickers\n\t\tsymbs = list(set(np.array(self.portfolio['Ticker'])))\n\n\t\tdf_curr = get_current_prices(symbs)\n\t\tsymbs_prices = np.array(get_current_prices(symbs))\n\t\t\n\t\t# update portfolio with duplicates\n\t\tfor symb, symb_price in zip(symbs, symbs_prices):\n\t\t\twhere_same = np.where(self.portfolio[\"Ticker\"]==symb)[0]\n\t\t\tself.portfolio.loc[where_same, \"CurrentPrice\"] = symb_price\n\n\t\tself.current_net_value = np.dot(self.portfolio['CurrentPrice'], self.portfolio['NumShares'])\n\n\t\t## Portfolio without duplicate buys\n\t\tportfolio_reduced = self.portfolio[['Ticker','NumShares','CurrentPrice']]\n\t\tportfolio_reduced = portfolio_reduced.groupby('Ticker').agg({ 'NumShares':np.sum, 'CurrentPrice': 'first'}).reset_index()\n\t\tself.portfolio_reduced = portfolio_reduced", "def bid_price(self, stock=''):\n data = self.quote_data(stock)\n return float(data['bid_price'])", "def get_current_price():\n try:\n return exchange.fetch_ticker(conf.pair)['bid']\n\n except (ccxt.ExchangeError, ccxt.AuthenticationError, ccxt.ExchangeNotAvailable, ccxt.RequestTimeout) as error:\n log.error('Got an error %s %s, retrying in about 5 seconds...', type(error).__name__, str(error.args))\n sleep_for(4, 6)\n return get_current_price()", "def get_current_value(self):\r\n return self.curr_val", "def futures_get_mark_price(self, symbol):\n markprice = 0.0\n \n if symbol in STABLE_COINS:\n return 1.0\n try:\n markprice = self.client.futures_mark_price(symbol=symbol, recvWindow=RECV_WINDOW)['markPrice']\n markprice = float(markprice)\n 
except Exception as e:\n self.handle_exception(e, f\"Could not get futures mark price for {symbol}\")\n return markprice", "def get_price(self):\n\t\treturn self._price_p_night", "def calc_values(self):\n atm_contract_index = (\n np.abs(self.chain[\"strike\"] - self.underlying_price)\n ).idxmin()\n atm_impliedvol = self.chain.iloc[atm_contract_index][\"impvol\"]\n\n # Calculate option value for all options using ATM volatility\n self.chain[\"model_value\"] = self.chain.apply(\n lambda x: bs_price(\n x[\"right\"],\n x[\"underprice\"],\n x[\"strike\"],\n self.dte / 252,\n atm_impliedvol,\n self.risk_free_rate,\n ),\n axis=1,\n )\n self.chain[\"mid_price\"] = (self.chain[\"bid\"] + self.chain[\"ask\"]) / 2\n self.chain[\"skew_premium\"] = self.chain[\"mid_price\"] - self.chain[\"model_value\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the current market value of the bond.
def market_dirty_price(self, value: float):
    self._market_dirty_price = value
[ "def market_price(self, market_price):\n\n self._market_price = market_price", "def market_close(self, market_close):\n\n self._market_close = market_close", "def place_bet(self, amount):\n self.bet = amount", "def market_value(self) -> float:\n return self._market_dirty_price", "def set_buy_price(self, buy_price: float) -> None:\n self.buy_price = buy_price", "def set_sell_price(self, sell_price: float) -> None:\n self.sell_price = sell_price", "def set_contract_amount(self, value):\n (self.driver.find_element(*ProjectFormLoc.FIELD_CONTRACT_AMOUNT).\n send_keys(value))", "def market_open(self, market_open):\n\n self._market_open = market_open", "def setValue(self, *args):\n return _coin.SoMFEngine_setValue(self, *args)", "def setPriceDozen(self,price):\n self.priceDozen=float(price)", "def setValue(self, newvalue: 'double') -> \"void\":\n return _coin.SoSFDouble_setValue(self, newvalue)", "def setValue(self, *args) -> \"void\":\n return _coin.SoMFEngine_setValue(self, *args)", "def setValue(self, value: 'double') -> \"void\":\n return _coin.SoMFDouble_setValue(self, value)", "def set_market_description(self, value):\n self.gui.txt_market_description.clear()\n self.gui.txt_market_description.setText(value)", "def set_balance(self, value):\n self.balance = value # updates player balance after each game", "def setValue(self, *args) -> \"void\":\n return _coin.SoSFBool_setValue(self, *args)", "def at_set(self, new_value):\r\n pass", "def setValue(self, *args):\n return _coin.SoSFEngine_setValue(self, *args)", "def update_balance(self):\n self.balance = self.intrade.get_balance()[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the theoretical dirty value of a bond given a particular discount curve. The theoretical value is V = \sum_i c_i d_i plus the discounted principal, where c_i is the ith coupon and d_i is the discount factor associated with that coupon; d_i is calculated as discount_curve(coupon_payment_date)
def calculate_dirty_value(self, discount_curve: Callable[[date], float], today: date) -> float:
    future_coupons = (c for c in self.coupons if c.ex_date > today)
    discounted_coupon_value = sum(
        discount_curve(c.payment_date) * c.coupon_amount for c in future_coupons
    )
    discounted_face_value = discount_curve(self.maturity) * self.principle
    return discounted_coupon_value + discounted_face_value
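As a quick illustration of the V = \sum_i c_i d_i pricing described in the query, the sketch below discounts a toy bond's cash flows on a flat, continuously compounded curve; the curve helper, the dates and the amounts are all made up for the example and are not part of the record's bond class.

# Hedged sketch: flat-curve discounting of coupons plus principal.
from datetime import date
from math import exp

def flat_discount_curve(rate, today):
    # df(t) = exp(-r * t), with t measured in ACT/365 years from `today`.
    def df(payment_date):
        t = (payment_date - today).days / 365.0
        return exp(-rate * t)
    return df

today = date(2024, 1, 1)
curve = flat_discount_curve(0.04, today)
coupons = [(date(2024, 7, 1), 2.5), (date(2025, 1, 1), 2.5)]  # (payment date, amount), invented
maturity, principle = date(2025, 1, 1), 100.0

dirty_value = sum(curve(d) * c for d, c in coupons) + curve(maturity) * principle
print(f"{dirty_value:.2f}")  # roughly 100.92 with these made-up inputs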
[ "def bond_price(maturity, principal=100, coupon_rate=.03, coupons_per_year=12, discount_rate=.03):\n\n cash_flows = bond_cash_flows(maturity, principal, coupon_rate, coupons_per_year)\n pv = present_value(cash_flows, discount_rate / coupons_per_year)\n\n return pv", "def discount_curve(self, currency):\n return self.map_curves[currency][\"discount\"]", "def PriceFloatingRateBondAddDiscountingCurve(builder, discountingCurve):\n return AddDiscountingCurve(builder, discountingCurve)", "def coupon_payment(notional):\n return 0.05 * notional", "def apply_discount(price, discount):\n return (money_to_float(price)\n .fold(lambda cost:\n (percent_to_float(discount)\n .fold(lambda savings: cost * (1 - savings)))))", "def bond_duration(maturity, principal=100, coupon_rate=0.03, discount_rate=0.03, coupons_per_year=12):\n up_yield = discount_rate-0.0001\n down_yield = discount_rate+0.0001\n bond_price_up = bond_price(maturity=maturity, principal=principal, coupon_rate=coupon_rate, coupons_per_year=coupons_per_year, discount_rate=up_yield)\n bond_price_down = bond_price(maturity=maturity, principal=principal, coupon_rate=coupon_rate, coupons_per_year=coupons_per_year, discount_rate=down_yield)\n bond_price_c = bond_price(maturity=maturity, principal=principal, coupon_rate=coupon_rate, coupons_per_year=coupons_per_year, discount_rate=discount_rate)\n duration = (bond_price_up - bond_price_down)/(2*0.0001*bond_price_c)\n \n return duration", "def cal_npv( cash_flow, \r\n disct_curve,\r\n val_date = \"\" ):\r\n if len(cash_flow) == 0:\r\n print(\"Error in Portfolio--->cal_key_dv01...\")\r\n print(\"Cannot find cash flow table...\")\r\n print(\"Please first run cal_cash_flow...\")\r\n print(\"Return 0...\")\r\n return 0\r\n NPV = 0\r\n cf_loc = 0\r\n if val_date == \"\":\r\n curve_start = disct_curve[0][0]\r\n base_df = 1\r\n else:\r\n curve_start = val_date\r\n loc = 0\r\n while val_date > disct_curve[loc][0]:\r\n loc += 1\r\n pre_point = disct_curve[loc-1]\r\n cur_point = disct_curve[loc]\r\n base_df = interpolation_act( val_date,\r\n pre_point[0],\r\n pre_point[1],\r\n cur_point[0],\r\n cur_point[1] )\r\n while cash_flow[cf_loc][0] < curve_start:\r\n \"\"\" Cash flow may start back dated \r\n make sure NPV caculation only\r\n starts when cash flow is in the current range\r\n \"\"\"\r\n cf_loc += 1\r\n \r\n for loc in range(1, len(disct_curve)):\r\n pre_point = disct_curve[loc-1]\r\n cur_point = disct_curve[loc]\r\n if cf_loc < len(cash_flow):\r\n cf_point = cash_flow[cf_loc] \r\n else:\r\n break\r\n \"\"\" Whenever get a hit walking through all suitable cases\r\n \"\"\"\r\n while cf_point[0] >= pre_point[0] \\\r\n and cf_point[0] < cur_point[0]:\r\n DF = interpolation_act( cf_point[0],\r\n pre_point[0],\r\n pre_point[1],\r\n cur_point[0],\r\n cur_point[1] )\r\n NPV += DF*cf_point[1]/base_df\r\n if cf_loc + 1 >= len(cash_flow):\r\n break\r\n cf_loc += 1\r\n cf_point = cash_flow[cf_loc] \r\n \r\n return NPV", "def discount_update(self, discount, actor):\n\n finance = self.cart['finance']\n try:\n # validate discount value\n try:\n discount = Decimal(discount)\n except:\n discount = Decimal(0)\n\n subtotal = finance['prod_cost'] + finance['shipping_cost']\n if discount > subtotal:\n discount = subtotal\n if discount < 0:\n discount = Decimal(0)\n\n # we store and display discounts as a negative value\n discount *= -1\n c = get_cursor()\n c.execute(\"\"\"\n update cart\n set discount_cost = %s\n where cart_id = %s\"\"\",\n (discount, self.cart['cart_id']))\n finance['discount_cost'] = discount\n 
self.recompute()\n self.log(\"Discount set to {}\".format(discount), actor)\n except Exception as e:\n import traceback\n traceback.print_exc()\n print e.__class__.__name__ + \": \" + str(e)\n raise DbError(\"Internal error\")\n return", "def discount(t,r):\r\n return (1+r)**(-t)", "def discount_price(product, discount):\n if config_value('TAX', 'DEFAULT_VIEW_TAX'):\n return taxed_discount_price(product, discount)\n else:\n return untaxed_discount_price(product, discount)", "def apply_cash_coupon(self,cash_amount):\r\n return self.price - cash_amount", "def discounted_return(self, discount):\n\n discounted_return = 0\n factor = 1\n for step_record in self:\n discounted_return += factor * step_record.reward\n factor *= discount\n return discounted_return", "def DiscountRate(self):\n i = 2\n discountRate = 1\n while i < self.params['time_next_visit']: \n discountRate += 1/math.pow(1+rateDiscount,i)\n i += 1\n discount = math.pow(1+rateDiscount,self.params['CurrentMonth'])\n discountRate /= discount\n return discountRate", "def final_price(self):\n return self.price - self.price * self.discount", "def calc_discounted_return(rewards, discount, final_value):\n seq_len = len(rewards)\n discounted_returns = torch.zeros(seq_len)\n discounted_returns[-1] = rewards[-1] + discount * final_value\n for i in range(seq_len - 2, -1, -1):\n discounted_returns[i] = rewards[i] + discount * discounted_returns[i + 1]\n return discounted_returns", "def discount(self, discount: float) -> None:\n self.price = self.price * discount", "def apply_percent_coupon(self):\r\n return self.price - self.price*self.coupon.percent_amount", "def _get_discounted_payoffs(self, stock_path, dimension):\n if self.mean_type == 'arithmetic':\n avg = (self.start_price / 2. +\n stock_path[:, :-1].sum(1) +\n stock_path[:, -1] / 2.) / \\\n float(dimension)\n elif self.mean_type == 'geometric':\n avg = exp((log(self.start_price) / 2. +\n log(stock_path[:, :-1]).sum(1) +\n log(stock_path[:, -1]) / 2.) /\n float(dimension))\n if self.call_put == 'call':\n y_raw = maximum(avg - self.strike_price, 0)\n else: # put\n y_raw = maximum(self.strike_price - avg, 0)\n y_adj = y_raw * exp(-self.interest_rate * self.exercise_time)\n return y_adj", "def discount_ratio(discount):\n pcnt = discount.percentage\n if pcnt > 1:\n pcnt = pcnt/100\n\n return 1-pcnt" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a date to the first available business date on or after the given date.
def _business_date(reference_date: date) -> date:
    if reference_date.weekday() > 4:
        return FixedRateACGB._business_date(
            reference_date + timedelta(days=1))
    return reference_date
[ "def next_bday(date):\n add_day = 1\n next_bday = datetime.strptime(date, '%Y-%m-%d') + BDay(add_day)\n # check it is not holiday\n while is_holiday(date=next_bday.strftime('%Y-%m-%d')) \\\n or is_offdays(date=next_bday.strftime('%m/%d/%y')):\n add_day += 1\n next_bday = datetime.strptime(date, '%Y-%m-%d') + BDay(add_day)\n\n return next_bday.strftime('%Y-%m-%d')", "def business_date_operation(date, days):\n ret_date = date + relativedelta(days=days)\n # If weekend (saturday/sunday), add another day so that\n # \"days\" number of business days are incremented\n if ret_date.weekday() in [5,6]:\n if days > 0:\n ret_date = ret_date + relativedelta(days=1)\n elif days < 0:\n ret_date = ret_date + relativedelta(days=-1)\n return ret_date", "def start_of_month(adate, holidays=[]):\r\n\tthe1st = date(adate.year, adate.month, 1)\r\n\treturn business_day(the1st, 0, holidays)", "def business_day(adate, n, holidays=[]):\r\n\tif type(adate) == date:\r\n\t\tadate = datetime(adate.year, adate.month, adate.day) \r\n\r\n\tif n > 0:\r\n\t\tinc = 1\r\n\telif n < 0:\r\n\t\tinc = -1\r\n\telse:\r\n\t\tinc = 1\r\n\t\tn = 1\r\n\t\tadate += timedelta(days=-1)\r\n\r\n\twhile n != 0:\r\n\t\tadate += timedelta(days=inc)\r\n\t\tif (adate.weekday() < 5) and (not adate in holidays):\r\n\t\t\tn -= inc\r\n\r\n\treturn adate", "def week_commencing_date(date):\n if not is_week_commencing_date(date):\n return date - datetime.timedelta(days=date.isoweekday() - 1)\n return date", "def closest_biz_day(self, day, forward=True):\n\n if forward:\n delta = timedelta(days=1)\n else:\n delta = timedelta(days=-1)\n while day.weekday() in self.weekends or day in self.holidays:\n day = day + delta\n return day", "def start_date(date):\n if date.month < 12:\n return datetime.date(date.year - 1, date.month + 1, 1)\n else:\n return datetime.date(date.year, 1, 1)", "def first(self) -> datetime.date:\n return self.__dates__[0]", "def _get_first_working_day_of_month(date=None):\n\n if not date:\n date = datetime.date.today()\n\n # Saturday YYYY-MM-03\n # Saturday YYYY-MM-03\n # M-F YYYY-MM-01\n return date.replace(day=3) if (date.weekday == 6) else date.replace(day == 2) if (\n date.weekday == 0) else date.replace(day=1)", "def next_monday(date):\n if date.weekday():\n one_day = datetime.timedelta(days=1)\n return date + ((7 - date.weekday()) * one_day)\n else:\n return date", "def next_vernal_equinox(date):\n return holiday(date, twopi, 0)", "def test_reverseDate_with_first_date(self):\n term = create_term(name=\"Fall 2012\", code=\"Fa12\", start=datetime.date(2012, 8, 13))\n #note under the condition that week starts at 0 and day also starts at 0.\n first_week_first_day = term.reverseDate(term.start)\n self.assertEqual((0, 0), first_week_first_day)", "def first(self, onerror=constants.RAISE) -> Calendar:\n return self.apply(lambda period: period[0], onerror=onerror).combine()", "def _FirstSunday(self, dt):\r\n return dt + datetime.timedelta(days=(6-dt.weekday()))", "def next_winter_solstice(date):\n return holiday(date, twopi, halfpi)", "def next_equinox(date):\n return holiday(date, pi, 0)", "def get_date_object(date=None):\n if date == None:\n return dt.date.today()\n else:\n return dt.datetime.strptime(date, '%Y-%m-%d').date()", "def from_fixed(cls, fixed_date):\n crescent = JAFFA.phasis_on_or_before(fixed_date)\n g_year = GregorianDate.to_year(fixed_date)\n ny = cls.new_year(g_year)\n new_year = cls.new_year(g_year - 1) if (fixed_date < ny) else ny\n month = iround((crescent - new_year) / 29.5) + 1\n year = 
HebrewDate.from_fixed(new_year).year + (1 if month >= HebrewMonth.TISHRI else 0)\n day = fixed_date - crescent + 1\n return HebrewObservationalDate(year, month, day)", "def firstWeekdayOfMonth(year, month):\r\n return weekday(year, month, 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructs a series of annual coupons starting on the first pay date and ending on or before the last pay date.
def construct_yearly_coupon_series(first_pay_date: date, last_pay_date: date,
                                   rate: float, principle: float) -> List[Coupon]:
    coupons = []
    coupon_date = first_pay_date
    amount = principle * rate
    while coupon_date <= last_pay_date:
        ex_date = FixedRateACGB._business_date(
            coupon_date - timedelta(days=9))
        payment_date = FixedRateACGB._business_date(coupon_date)
        coupons.append(Coupon(amount, ex_date, payment_date))
        coupon_date = date(coupon_date.year + 1, coupon_date.month,
                           coupon_date.day)
    return coupons
[ "def construct(first_coupon_pay_date: date, \n second_coupon_pay_date: date, maturity_date: date, \n coupon_rate: float, principle: float):\n coupons_a = FixedRateACGB.construct_yearly_coupon_series(\n first_coupon_pay_date, maturity_date, \n coupon_rate / 2.0, principle)\n coupons_b = FixedRateACGB.construct_yearly_coupon_series(\n second_coupon_pay_date, maturity_date, \n coupon_rate / 2.0, principle)\n\n return FixedRateACGB(principle, coupons_a + coupons_b, maturity_date)", "def make_invoices(self):\n\n billing_schedules = {\"Annual\": 1, \"Two-Pay\": 2, \"Quarterly\": 4, \"Monthly\": 12}\n months_after_eff_date_dict = {\n \"Annual\": 12,\n \"Two-Pay\": 6,\n \"Quarterly\": 3,\n \"Monthly\": 1,\n }\n\n invoices = []\n first_invoice = Invoice(\n self.policy.id,\n self.policy.effective_date, # bill_date\n self.policy.effective_date + relativedelta(months=1), # due\n self.policy.effective_date + relativedelta(months=1, days=14), # cancel\n self.policy.annual_premium,\n )\n invoices.append(first_invoice)\n\n if self.policy.billing_schedule in billing_schedules:\n invoices_quantity = billing_schedules.get(self.policy.billing_schedule)\n first_invoice.amount_due = first_invoice.amount_due / invoices_quantity\n months_between_invoices = months_after_eff_date_dict.get(\n self.policy.billing_schedule\n )\n for i in range(1, invoices_quantity):\n a = i * months_between_invoices\n bill_date = self.policy.effective_date + relativedelta(months=a)\n invoice = Invoice(\n self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium\n / billing_schedules.get(self.policy.billing_schedule),\n )\n invoices.append(invoice)\n else:\n print \"You have chosen a bad billing schedule.\"\n\n for invoice in invoices:\n db.session.add(invoice)\n db.session.commit()", "def create_subscription(self, account, subscription, current_period, next_period):\n\t\t# determine amount of charge, normaly it is just price *-1\n\t\t# but in case of first time use, there may be prorated charges\n\t\tstart_date = subscription.start_date \n\t\t# datetime.datetime.strptime(subscription.start.date,\"%Y-%m-%d\")\n\t\tstart_period = get_next_billing_period_for_date(start_date)\n\n\t\tif (start_period == current_period and start_date.day > 1):\n\t\t# we need to prorate\n\t\t\tnum_of_day_in_month = monthrange(start_date.year, start_date.month)[1]\n\t\t\tprice_per_day = subscription.price / num_of_day_in_month\n\n\t\t\tnum_days_to_charge = (num_of_day_in_month + 1) - start_date.day\n\n\t\t\tamount = ((price_per_day * num_days_to_charge) + subscription.price) * -1\n\t\t\tcharge_memo = _(\"%(description)s charge %(current_period)s - %(next_period)s \"\n\t\t\t\t\t\t\"plus first %(num_days)s days\") % \\\n\t\t\t\t\t\t{'description': subscription.product.description,\n\t\t\t\t\t\t\t'current_period': current_period, 'next_period': next_period, \n\t\t\t\t\t\t\t'num_days': num_days_to_charge}\n\t\telse:\n\t\t\tamount = subscription.price * -1\n\t\t\tcharge_memo = _(\"%(description)s charge %(current_period)s - %(next_period)s\") % \\\n\t\t\t\t\t\t\t{'description': subscription.product.description, \n\t\t\t\t\t\t\t'current_period': current_period, 'next_period': next_period}\n\t\t# append location to memo if needed\n\t\tif (subscription.practice_location != None):\n\t\t\tlocation = \" (%s)\" % (subscription.practice_location.practice_name)\n\t\t\tcharge_memo = charge_memo + location\n\n\t\tsubscription_transaction = 
AccountTransaction(account=account,\n\t\t\t\t\t\t\t\ttx_type='2', # monthly product charge\n\t\t\t\t\t\t\t\tamount=amount,\n\t\t\t\t\t\t\t\tperiod_start=current_period,\n\t\t\t\t\t\t\t\tperiod_end=next_period,\n\t\t\t\t\t\t\t\tmemo=charge_memo)\n\t\tsubscription_transaction.save()\n\n\t\treturn subscription_transaction", "def bond_cash_flows(maturity, principal=100, coupon_rate=0.03, coupons_per_year=12):\n n_coupons=round(maturity*coupons_per_year)\n coupon_times = np.arange(1, n_coupons+1) #Creates an array ranging from 1 to to the no.of coupons + 1, since its not inclusive\n coupon_amt = principal*coupon_rate/coupons_per_year\n cash_flows = pd.Series(data=coupon_amt, index=coupon_times) \n cash_flows.iloc[-1] += principal #Since you will recieve back the principal at maturity\n return cash_flows", "def make_baseline(start_year, records_url):\r\n policy_cl = Policy()\r\n behavior_cl = Behavior()\r\n records_cl = Records(records_url)\r\n calc_cl = Calculator(policy_cl, records_cl, behavior_cl)\r\n for i in range(start_year - 2013):\r\n calc_cl.increment_year()\r\n assert calc_cl.current_year == start_year\r\n calc_cl.calc_all()\r\n return(calc_cl)", "def calculate_payoff_times(self):\n with self.database.transaction():\n current_id = 0\n for Bo in constants.initial_balance_range():\n for r in constants.interest_rate_range():\n for p in constants.monthly_payment_range():\n print(\"Calculating for initial balance {0}, rate {1}, monthly payment {2}\".format(Bo, r, p))\n t = time_until_zero_balance(r, Bo, p)\n if t is not None:\n database.create_point(current_id, Bo, r, p, t)\n current_id += 1", "def test_subsequent_monthly_bills_respect_boundary_dates(self, start, periods, db):\n\n def _get_ymd(dt):\n return dt.year, dt.month, dt.day\n\n def _s2dt(s):\n y, m, d = _s2ymd(s)\n return datetime.datetime(y, m, d)\n\n def _s2ymd(s, delim=\"/\"):\n y, m, d = tuple(int(n) for n in s.split(delim))\n return y, m, d\n\n gen = BillGeneratorFactory(\n period=\"monthly\",\n currency=\"GBP\",\n rate_per_device=1,\n enabled=True,\n active_from_date=_s2dt(start),\n )\n org = BilledOrganizationFactory(\n billed_by=gen,\n )\n\n (\n (this_start, this_end),\n (next_start, next_end),\n (another_start, another_end)\n ) = periods\n\n # Create bill under test\n this_but = org.create_next_bill()\n this_but.save()\n assert _get_ymd(this_but.period_start) == _s2ymd(this_start)\n assert _get_ymd(this_but.period_end) == _s2ymd(this_end)\n\n but_next = org.create_next_bill()\n but_next.save()\n assert _get_ymd(but_next.period_start) == _s2ymd(next_start)\n assert _get_ymd(but_next.period_end) == _s2ymd(next_end)\n\n but_another = org.create_next_bill()\n but_another.save()\n assert _get_ymd(but_another.period_start) == _s2ymd(another_start)\n assert _get_ymd(but_another.period_end) == _s2ymd(another_end)", "def _initialize_futures_chain(self, fields: Union[PriceField, Sequence[PriceField]], start_date: datetime,\n end_date: datetime, frequency: Frequency):\n # Check if single field was provided\n _, got_single_field = convert_to_list(fields, PriceField)\n\n # Get the expiration dates related to the future contracts belonging to the futures chain\n future_tickers_exp_dates_series = self._future_ticker.get_expiration_dates()\n\n # Consider only these future contracts, which may be used to build the futures chain - do not include contracts,\n # which expired before the start_date\n future_tickers_exp_dates_series = future_tickers_exp_dates_series[\n future_tickers_exp_dates_series.index >= start_date\n ]\n # Exclude 
contracts which will not be used while building the current futures chain. All of the newer contracts,\n # which will be used for later futures chains building will be downloaded later anyway, as\n # _initialize_futures_chain() is called after each expiration of a contract.\n current_contract_index = pd.Index(future_tickers_exp_dates_series).get_indexer(\n [self._future_ticker.get_current_specific_ticker()]\n )[0]\n last_ticker_position = min(future_tickers_exp_dates_series.size, current_contract_index + 1)\n future_tickers_exp_dates_series = future_tickers_exp_dates_series.iloc[0:last_ticker_position]\n\n # Download the historical prices\n future_tickers_list = list(future_tickers_exp_dates_series.values)\n futures_data = self._data_provider.get_price(future_tickers_list, fields, start_date, end_date, frequency)\n\n # Store the start_date used for the purpose of FuturesChain initialization\n self._first_cached_date = start_date\n\n for exp_date, future_ticker in future_tickers_exp_dates_series.items():\n\n # Create a data frame and cast it into PricesDataFrame or PricesSeries\n if got_single_field:\n data = futures_data.loc[:, future_ticker]\n else:\n data = futures_data.loc[:, future_ticker, :]\n data = cast_data_array_to_proper_type(data, use_prices_types=True)\n\n # Check if data is empty (some contract may have no price within the given time range) - if so do not\n # add it to the FuturesChain\n if not data.empty:\n # Create the future object and add it to the Futures Chain data structure\n future = FutureContract(ticker=future_ticker,\n exp_date=exp_date,\n data=data\n )\n\n self.loc[exp_date] = future\n\n self.sort_index(inplace=True)", "def cpr_curve_creator(description='.2 ramp 6 for 30, 6'):\n\n periods = str(description).split(',')\n nperiods = 360\n end_period = False\n\n cpr_curve = []\n\n current_period = 1\n\n for period in periods:\n start_cpr = 0\n end_cpr = 0\n period_duration = 0\n cpr_increment = 0\n period_curve = None\n\n if period == periods[-1]:\n end_period = True\n\n period_duration = nperiods + current_period\n words = period.strip().split(' ')\n\n for i in range(len(words)):\n if i == 0:\n start_cpr = float(words[i]) / 100.\n end_cpr = float(words[i]) / 100.\n elif words[i] == 'ramp':\n end_cpr = float(words[i + 1]) / 100.\n elif words[i] == 'for':\n period_duration = float(words[i + 1])\n\n period_curve = np.linspace(start_cpr, end_cpr, period_duration)\n\n cpr_curve.extend(list(period_curve))\n current_period += period_duration\n\n return cpr_curve", "def generate_payoff_range(self, price_array):\n\n payoffs = []\n for price in price_array:\n payoff = self.calc_payoff(price)\n payoffs.append(payoff)\n return np.array(payoffs)", "def forecast_table(self, past, ahead, inc=1):\n last_time = self[self.time_column][-1]\n past_times = self[self.time_column][-past-1:-1]\n fore_time = np.arange(last_time + inc, last_time + inc + ahead, inc)\n def project(lbl):\n m, b = np.polyfit(past_times, self[lbl][-past-1:-1], 1)\n return [m*time + b for time in fore_time]\n xtbl = Table().with_columns([(self.time_column, fore_time)] + [(label, project(label)) for label in self.categories])\n return self.copy().append(xtbl)", "def initialize_contracts(\n self, status=Status.Active, date=None, filter=None, nac=4, ncb=1, we_trade=True, initialize_data=False,\n as_dataframe=False, days_back=90, extra_days=True, data_download=False\n ):\n if date is None:\n date = dt.datetime.today().strftime('%Y-%m-%d')\n df = get_table(date, self.stem, self.future_type, status, we_trade=we_trade, 
nac=nac, ncb=ncb, data_download=data_download)\n # Set Class Variables\n assert not df.empty, 'Chain DataFrame is None for {} - {}, something went wrong!'.format(self.stem, self.future_type)\n self.status = status\n self.chain = df\n self.contracts = list(df['Ticker'])\n # Initialize Data\n if initialize_data:\n self.initialize_data(days_back=days_back, extra_days=extra_days)\n # Filtering\n if filter is not None:\n if isinstance(filter, int):\n if filter > len(df):\n self.log.error('Number of contracts: {} too high: {} - filter ignored!'.format(filter, len(df)))\n if filter > 0:\n df = df.head(filter)\n elif filter < 0:\n df = df.tail(abs(filter))\n elif len(filter) == 1 and filter.isalpha():\n # Filter based on month letter\n filter_df = df['Ticker'].str.contains(filter)\n if not filter_df.any():\n self.log.error('Filter ignored - check that the letter {} is valid!'.format(filter))\n else:\n df = df[filter_df]\n else:\n self.log.error('Filter {} not defined and ignored!')\n # This needs to be repeated... TODO: Find a better way to do this\n self.chain = df\n self.contracts = list(df['Ticker'])\n\n return self.chain if as_dataframe else self.contracts", "def makePayment(self):\n\t\tif len(self.paid) == self.teaserMonths + 1:\n\t\t\tself.rate = self.nextRate\n\t\t\tself.payment = calcPayment(self.owed[-1], self.rate, self.months - self.teaserMonths)\n\t\tMortgage.makePayment(self)", "def create_cashier():\r\n cashier_lst = []\r\n for cnt in range(1,5):\r\n cashier = Cashier(cnt,[],[])\r\n cashier_lst.append(cashier)\r\n\r\n return cashier_lst", "def create_cumulative_ifgs(ifgs_dc, ifg_dates_dc):\n import numpy as np\n from icasar.aux2 import baseline_from_names\n \n # 0: First make the ifgs, v1 that uses that has acquisition 0 to acquisition 0 as a row of 0 displacements at the start\n # ifgs_cum_0 = np.zeros((1, ifgs_dc.shape[1])) # 0 displacement on first acquistion\n # ifgs_cum = np.cumsum(ifgs_dc, axis = 0) # displacement to last date of each daisy chain interferogram. \n # ifgs_cum = np.vstack((ifgs_cum_0, ifgs_cum)) # combine so first acuqisiton has 0 displacmenent. \n # 0b: or ignores a0 to a0:\n ifgs_cum = np.cumsum(ifgs_dc, axis = 0) # displacement to last date of each daisy chain interferogram. \n\n \n # 1: then make the ifg dates. 
\n acq_0 = ifg_dates_dc[0][:8]\n #ifg_dates_cum = [f\"{acq_0}_{acq_0}\"]\n ifg_dates_cum = []\n for ifg_date_dc in ifg_dates_dc:\n ifg_dates_cum.append(f\"{acq_0}_{ifg_date_dc[9:]}\")\n \n return ifgs_cum, ifg_dates_cum", "def start_period(self, date, period):", "def create_new_LB_UB(stock, full_stock):\n\n bands = [30, 60, 90, 180, 360, 720, 1080]\n for b in bands:\n pcols = [\"Previous \" + str(b) + \" days LB\",\n \"Previous \" + str(b) + \" days UB\"]\n stock[pcols] = pd.DataFrame([[0]*len(pcols)], index=stock.index)\n for index, row in stock.iterrows():\n start = row['Date']\n# start = row['Date'] - datetime.timedelta(days=1)\n end = start - datetime.timedelta(days=b)\n specific_dates = full_stock[full_stock.Date.between(end, start)]\n low = specific_dates[\"Close Price\"].min()\n high = specific_dates[\"Close Price\"].max()\n today = row[\"Close Price\"]\n stock.loc[index, pcols] = [low/today, high/today]\n\n bands = [30, 60, 90, 180, 360, 720, 1080]\n for b in bands:\n ncols = [\"Next \" + str(b) + \" days LB\", \"Next \" + str(b) + \" days UB\"]\n stock[ncols] = pd.DataFrame([[0]*len(ncols)], index=stock.index)\n for index, row in stock.iterrows():\n start = row['Date']\n# start = row['Date'] + datetime.timedelta(days=1)\n end = start + datetime.timedelta(days=b)\n specific_dates = full_stock[full_stock.Date.between(start, end)]\n low = specific_dates[\"Close Price\"].min()\n high = specific_dates[\"Close Price\"].max()\n today = row[\"Close Price\"]\n stock.loc[index, ncols] = [low/today, high/today]\n return stock", "def append_gates_sequence_1(circ: Circuit) -> None:\n circ.YYPhase(0.2, 0, 1)\n circ.Ry(0.3, 1)\n circ.Rz(0.15, 0)", "def test_get_next_x_paydays_positive1(self):\n start_date = date_class(2022,11,3)\n x_number_of_paydays = 4\n expected_next_x_paydays = [\n date_class(2022,11,10),\n date_class(2022,11,25),\n date_class(2022,12,9),\n date_class(2022,12,23),\n ]\n\n next_x_paydays_list = self.pay_cycle.get_next_x_paydays(x_number_of_paydays, start_date)\n\n assert len(next_x_paydays_list) == x_number_of_paydays, \\\n f'Got {len(next_x_paydays_list)}, expected {x_number_of_paydays}'\n assert next_x_paydays_list == expected_next_x_paydays, \\\n f'Got {next_x_paydays_list}, expected {expected_next_x_paydays}'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a FixedRateACGB from a minimal set of parameters that describe the bond.
def construct(first_coupon_pay_date: date, second_coupon_pay_date: date,
              maturity_date: date, coupon_rate: float, principle: float):
    coupons_a = FixedRateACGB.construct_yearly_coupon_series(
        first_coupon_pay_date, maturity_date, coupon_rate / 2.0, principle)
    coupons_b = FixedRateACGB.construct_yearly_coupon_series(
        second_coupon_pay_date, maturity_date, coupon_rate / 2.0, principle)
    return FixedRateACGB(principle, coupons_a + coupons_b, maturity_date)
[ "def make_periodic(*args):\n\tif len(args) == 0:\n\t\tnu.warn(\"No BGF file or instance specified.\")\n\t\treturn 0;\n\telif len(args) >= 1:\n\t\tmybgf = args[0]\n\t\tif isinstance(mybgf, str):\n\t\t\tmybgf = bgf.BgfFile(mybgf)\n\n\t\tmybgf.PERIOD = \"111\"\n\t\tmybgf.AXES = \"ZYX\"\n\t\tmybgf.SGNAME = \"P 1 1 1\"\n\t\tmybgf.CELLS = [-1, 1, -1, 1, -1, 1]\n\n\t\tif len(args) == 1:\n\t\t\tmybgf.CRYSTX = [0.0, 0.0, 0.0, 90.0, 90.0, 90.0]\n\t\t\tnu.warn(\"PBC set to [0.0, 0.0, 0.0, 90.0, 90.0, 90.0]\")\n\t\t\treturn mybgf\n\t\telif len(args) >= 2:\n\t\t\tif len(args[-1]) == 3:\n\t\t\t\tmybgf.CRYSTX = args[1] + [90.0, 90.0, 90.0]\n\t\t\telif len(args[-1]) == 6:\n\t\t\t\tmybgf.CRYSTX = args[1]\n\t\t\telse:\n\t\t\t\tnu.warn(\"Wrong pbc provided.\")\n\t\t\t\treturn 0;\n\t\t\tif len(args) == 2:\n\t\t\t\treturn mybgf\n\t\t\telif len(args) == 3:\n\t\t\t\tmybgf.saveBGF(args[1])\n\t\t\t\treturn 1;\n\t\t\telse:\n\t\t\t\tnu.warn(\"Too many parameters (>4) provided.\")\n\t\t\t\treturn 0;", "def newHBondConstraint(self, **attrlinks):\n return HBondConstraint(self, **attrlinks)", "def __init__(self, Bx, By, k, mant_bw, growing_oct=54, diminishing_oct=4):\n\n self.Bx = Bx\n self.By = By\n\n self.k = k\n self.mant_bw = mant_bw\n self.exp_bw = self.Bx - 2 - self.mant_bw # Width of exponent in floating point URNG output representation\n if self.exp_bw < 1:\n raise ValueError(\"Bx - 2 - mant_bw must be > 0\")\n self.growing_oct = growing_oct # max exp\n self.diminishing_oct = diminishing_oct # max exp\n if growing_oct > 2**self.exp_bw:\n raise ValueError(\"Number of growing octaves cannot exceed address space (2^exp_bw)\")\n if diminishing_oct > 2**self.exp_bw:\n raise ValueError(\"Number of diminishing octaves cannot exceed address space (2^exp_bw)\")\n\n self.min_x = 1 / (2 ** (self.growing_oct + 3))\n self.max_x = 0.5 - 1 / (2 ** (self.diminishing_oct + 3))\n self.quantisation_step = (self.laplace_inv_cdf(self.max_x) - self.laplace_inv_cdf(self.min_x)) / 2 ** self.By", "def build_bkg(self):\n try:\n self.param_bphi.x\n print(\"Bphi already built!\")\n except:\n self.calc_field()\n\n print(\"Build bkg\")\n\n R_temp = np.linspace(self.eqdsk.rboxleft, self.eqdsk.rboxleft+self.eqdsk.rboxlength+self.extend_psi_R, self.nR)\n z_temp = np.linspace(-self.eqdsk.zboxlength/2., self.eqdsk.zboxlength/2., self.nz)\n #R_temp = np.linspace(float(np.around(np.min(self.R_w), decimals=2)), float(np.around(np.max(self.R_w), decimals=2)), self.nR)\n #z_temp = np.linspace(float(np.around(np.min(self.z_w), decimals=2)), float(np.around(np.max(self.z_w), decimals=2)), self.nz)\n\n psitemp = self.psi_coeff(R_temp, z_temp)\n\n bphitemp = self.param_bphi(R_temp, z_temp)\n\n self.bkg={'type':'magn_bkg', 'phi0':0, 'nsector':0, 'nphi_per_sector':1,\\\n 'ncoil':0, 'zero_at_coil':1,\\\n 'R':R_temp,'z':z_temp, \\\n 'phimap_toroidal':0, 'phimap_poloidal':0, \\\n 'psi':[],\\\n 'Bphi':bphitemp, 'BR':self.Br, 'Bz':self.Bz, \\\n 'Bphi_pert':self.Bphi_pert, 'BR_pert':self.BR_pert, 'Bz_pert':self.Bz_pert} \n\n self.bkg['psi'] = psitemp*2*np.pi #in ASCOT Bfield, the psi is divided by 2*pi and reverses sign. 
This prevents it from happening \n print(\"remember: I am multiplying psi times 2pi since in ascot it divides by it!\")", "def bond_B(k):\n return (4-k) * 300000", "def PriceFloatingRateBondAddFloatingRateBond(builder, floatingRateBond):\n return AddFloatingRateBond(builder, floatingRateBond)", "def SBM(n, pi = [], B = [], seeds = [], weighted = False, dist = \"\", params = [], acorn = 1234):", "def from_dict(cls, arg: Dict):\n if not isinstance(arg, dict):\n raise TypeError(\"dict expected, got a {}\".format(type(arg)))\n\n keys = set(arg.keys())\n if not keys.issubset(Bond.fields()):\n raise KeyError(\"{}\".format(keys.difference(Bond.fields())))\n\n for value in [\"price\", \"ytm\"]:\n if value not in keys:\n arg[value] = None\n else:\n if not arg[value]:\n arg[value] = None\n\n if \"compounding_frequency\" not in keys:\n arg[\"compounding_frequency\"] = 2\n\n bond = cls(arg[\"par\"], arg[\"maturity_term\"],\n arg[\"coupon\"], arg[\"price\"],\n arg[\"ytm\"], arg[\"compounding_frequency\"])\n\n return bond", "def bond_parameters(self):\n return (self._default_r0, self._default_bond_k)", "def newHBondConstraintItem(self, **attrlinks):\n return HBondConstraintItem(self, **attrlinks)", "def create_bungie_object():\n return bun.BungieData(os.environ['BUNGIE_API_KEY'])", "def _prepare_ligand_BC(self):\n if self.data['BC'].protocol == []:\n\n # Set up the force field\n params_o = self.system.paramsFromAlpha(1.0, 'BC', site=False)\n self.system.setParams(params_o)\n\n # Get starting configurations\n basename = os.path.basename(self.args.FNs['score'])\n basename = basename[:basename.find('.')]\n dirname = os.path.dirname(self.args.FNs['score'])\n minimizedB_FN = os.path.join(dirname, basename + '_minB.nc')\n if os.path.isfile(minimizedB_FN):\n from netCDF4 import Dataset\n dock6_nc = Dataset(minimizedB_FN, 'r')\n minimizedConfigurations = [\n dock6_nc.variables['confs'][n][self.top.inv_prmtop_atom_order_L, :]\n for n in range(dock6_nc.variables['confs'].shape[0])\n ]\n Es = dict([(key, dock6_nc.variables[key][:])\n for key in dock6_nc.variables.keys() if key != 'confs'])\n dock6_nc.close()\n else:\n (minimizedConfigurations, Es) = self._get_confs_to_rescore(site=False, minimize=True)\n\n from netCDF4 import Dataset\n dock6_nc = Dataset(minimizedB_FN, 'w')\n dock6_nc.createDimension('n_confs', len(minimizedConfigurations))\n dock6_nc.createDimension('n_atoms', minimizedConfigurations[0].shape[0])\n dock6_nc.createDimension('n_cartesian', 3)\n dock6_nc.createDimension('one', 1)\n dock6_nc.createVariable('confs', 'f8', ('n_confs', 'n_atoms', 'n_cartesian'))\n for n in range(len(minimizedConfigurations)):\n dock6_nc.variables['confs'][n] = minimizedConfigurations[n][self.top.prmtop_atom_order_L, :]\n for key in Es.keys():\n dock6_nc.createVariable(key, 'f8', ('one', 'n_confs'))\n dock6_nc.variables[key][:] = Es[key]\n dock6_nc.close()\n\n # initializes smart darting for BC\n # and sets the universe to the lowest energy configuration\n self.iterator.initializeSmartDartingConfigurations(\n minimizedConfigurations, 'BC', self.log, self.data)\n if len(minimizedConfigurations) > 0:\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, minimizedConfigurations[-1]))\n self.data['BC'].confs['starting_poses'] = minimizedConfigurations\n\n # Ramp the temperature from 0 to the desired starting temperature using HMC\n self._ramp_T(params_o['T'], normalize=True)\n\n # Run at starting temperature\n seeds = [np.copy(self.top.universe.configuration().array) \\\n for n in 
range(self.args.params['BC']['seeds_per_state'])]\n else:\n seeds = None\n return seeds", "def set_params(self, params):\n super().set_params(params)\n\n params = dict_to_namespace(params)\n self.params.name = getattr(params, 'name', 'BaxAcqFunction')\n self.params.acq_str = getattr(params, \"acq_str\", \"exe\")\n self.params.min_neighbors = getattr(params, \"min_neighbors\", 10)\n self.params.max_neighbors = getattr(params, \"max_neighbors\", 30)\n self.params.dist_thresh = getattr(params, \"dist_thresh\", 1.0)\n self.params.dist_thresh_init = getattr(params, \"dist_thresh_init\", 20.0)\n self.params.dist_thresh_inc = getattr(params, \"dist_thresh_inc\", 0.5)\n self.params.min_n_clust = getattr(params, \"min_n_clust\", 5)", "def testMASWWithGenericBond(self):\r\n bondCalendar = ql.TARGET()\r\n settlementDays = 3\r\n fixingDays = 2\r\n payFixedRate = True\r\n parAssetSwap = True\r\n mktAssetSwap = False\r\n inArrears = False\r\n\r\n ## Fixed Underlying bond (Isin: DE0001135275 DBR 4 01/04/37)\r\n ## maturity doesn't occur on a business day\r\n\r\n fixedBondStartDate1 = ql.Date(4, ql.January, 2005)\r\n fixedBondMaturityDate1 = ql.Date(4, ql.January, 2037)\r\n fixedBondSchedule1 = ql.Schedule(\r\n fixedBondStartDate1,\r\n fixedBondMaturityDate1,\r\n ql.Period(ql.Annual),\r\n bondCalendar,\r\n ql.Unadjusted,\r\n ql.Unadjusted,\r\n ql.DateGeneration.Backward,\r\n False,\r\n )\r\n fixedBondLeg1 = list(\r\n ql.FixedRateLeg(fixedBondSchedule1, ql.ActualActual(ql.ActualActual.ISDA), [self.faceAmount], [0.04])\r\n )\r\n fixedbondRedemption1 = bondCalendar.adjust(fixedBondMaturityDate1, ql.Following)\r\n fixedBondLeg1.append(ql.SimpleCashFlow(100.0, fixedbondRedemption1))\r\n fixedBond1 = ql.Bond(\r\n settlementDays, bondCalendar, self.faceAmount, fixedBondMaturityDate1, fixedBondStartDate1, fixedBondLeg1\r\n )\r\n bondEngine = ql.DiscountingBondEngine(self.termStructure)\r\n swapEngine = ql.DiscountingSwapEngine(self.termStructure, False)\r\n fixedBond1.setPricingEngine(bondEngine)\r\n\r\n fixedBondMktPrice1 = 89.22 ## market price observed on 7th June 2007\r\n fixedBondMktFullPrice1 = fixedBondMktPrice1 + fixedBond1.accruedAmount()\r\n fixedBondParAssetSwap1 = ql.AssetSwap(\r\n payFixedRate,\r\n fixedBond1,\r\n fixedBondMktPrice1,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n parAssetSwap,\r\n )\r\n fixedBondParAssetSwap1.setPricingEngine(swapEngine)\r\n fixedBondParAssetSwapSpread1 = fixedBondParAssetSwap1.fairSpread()\r\n fixedBondMktAssetSwap1 = ql.AssetSwap(\r\n payFixedRate,\r\n fixedBond1,\r\n fixedBondMktPrice1,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n mktAssetSwap,\r\n )\r\n fixedBondMktAssetSwap1.setPricingEngine(swapEngine)\r\n fixedBondMktAssetSwapSpread1 = fixedBondMktAssetSwap1.fairSpread()\r\n\r\n tolerance = 1.0e-13\r\n error1 = abs(fixedBondMktAssetSwapSpread1 - 100 * fixedBondParAssetSwapSpread1 / fixedBondMktFullPrice1)\r\n\r\n self.assertFalse(\r\n error1 > tolerance,\r\n \"wrong asset swap spreads for fixed bond:\"\r\n + \"\\n market asset swap spread: \"\r\n + str(fixedBondMktAssetSwapSpread1)\r\n + \"\\n par asset swap spread: \"\r\n + str(fixedBondParAssetSwapSpread1)\r\n + \"\\n error: \"\r\n + str(error1)\r\n + \"\\n tolerance: \"\r\n + str(tolerance),\r\n )\r\n\r\n ## Fixed Underlying bond (Isin: IT0006527060 IBRD 5 02/05/19)\r\n ## maturity occurs on a business day\r\n\r\n fixedBondStartDate2 = ql.Date(5, ql.February, 2005)\r\n fixedBondMaturityDate2 = ql.Date(5, 
ql.February, 2019)\r\n fixedBondSchedule2 = ql.Schedule(\r\n fixedBondStartDate2,\r\n fixedBondMaturityDate2,\r\n ql.Period(ql.Annual),\r\n bondCalendar,\r\n ql.Unadjusted,\r\n ql.Unadjusted,\r\n ql.DateGeneration.Backward,\r\n False,\r\n )\r\n fixedBondLeg2 = list(\r\n ql.FixedRateLeg(fixedBondSchedule2, ql.Thirty360(ql.Thirty360.BondBasis), [self.faceAmount], [0.05])\r\n )\r\n fixedbondRedemption2 = bondCalendar.adjust(fixedBondMaturityDate2, ql.Following)\r\n fixedBondLeg2.append(ql.SimpleCashFlow(100.0, fixedbondRedemption2))\r\n fixedBond2 = ql.Bond(\r\n settlementDays, bondCalendar, self.faceAmount, fixedBondMaturityDate2, fixedBondStartDate2, fixedBondLeg2\r\n )\r\n fixedBond2.setPricingEngine(bondEngine)\r\n\r\n fixedBondMktPrice2 = 99.98 ## market price observed on 7th June 2007\r\n fixedBondMktFullPrice2 = fixedBondMktPrice2 + fixedBond2.accruedAmount()\r\n fixedBondParAssetSwap2 = ql.AssetSwap(\r\n payFixedRate,\r\n fixedBond2,\r\n fixedBondMktPrice2,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n parAssetSwap,\r\n )\r\n fixedBondParAssetSwap2.setPricingEngine(swapEngine)\r\n fixedBondParAssetSwapSpread2 = fixedBondParAssetSwap2.fairSpread()\r\n fixedBondMktAssetSwap2 = ql.AssetSwap(\r\n payFixedRate,\r\n fixedBond2,\r\n fixedBondMktPrice2,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n mktAssetSwap,\r\n )\r\n fixedBondMktAssetSwap2.setPricingEngine(swapEngine)\r\n fixedBondMktAssetSwapSpread2 = fixedBondMktAssetSwap2.fairSpread()\r\n error2 = abs(fixedBondMktAssetSwapSpread2 - 100 * fixedBondParAssetSwapSpread2 / fixedBondMktFullPrice2)\r\n\r\n self.assertFalse(\r\n error2 > tolerance,\r\n \"wrong asset swap spreads for fixed bond:\"\r\n + \"\\n market asset swap spread: \"\r\n + str(fixedBondMktAssetSwapSpread2)\r\n + \"\\n par asset swap spread: \"\r\n + str(fixedBondParAssetSwapSpread2)\r\n + \"\\n error: \"\r\n + str(error2)\r\n + \"\\n tolerance: \"\r\n + str(tolerance),\r\n )\r\n\r\n ## FRN Underlying bond (Isin: IT0003543847 ISPIM 0 09/29/13)\r\n ## maturity doesn't occur on a business day\r\n\r\n floatingBondStartDate1 = ql.Date(29, ql.September, 2003)\r\n floatingBondMaturityDate1 = ql.Date(29, ql.September, 2013)\r\n floatingBondSchedule1 = ql.Schedule(\r\n floatingBondStartDate1,\r\n floatingBondMaturityDate1,\r\n ql.Period(ql.Semiannual),\r\n bondCalendar,\r\n ql.Unadjusted,\r\n ql.Unadjusted,\r\n ql.DateGeneration.Backward,\r\n False,\r\n )\r\n floatingBondLeg1 = list(\r\n ql.IborLeg(\r\n [self.faceAmount],\r\n floatingBondSchedule1,\r\n self.iborIndex,\r\n ql.Actual360(),\r\n ql.Following,\r\n [fixingDays],\r\n [],\r\n [0.0056],\r\n [],\r\n [],\r\n inArrears,\r\n )\r\n )\r\n floatingbondRedemption1 = bondCalendar.adjust(floatingBondMaturityDate1, ql.Following)\r\n floatingBondLeg1.append(ql.SimpleCashFlow(100.0, floatingbondRedemption1))\r\n floatingBond1 = ql.Bond(\r\n settlementDays,\r\n bondCalendar,\r\n self.faceAmount,\r\n floatingBondMaturityDate1,\r\n floatingBondStartDate1,\r\n floatingBondLeg1,\r\n )\r\n floatingBond1.setPricingEngine(bondEngine)\r\n\r\n ql.setCouponPricer(floatingBond1.cashflows(), self.pricer)\r\n self.iborIndex.addFixing(ql.Date(27, ql.March, 2007), 0.0402)\r\n ## market price observed on 7th June 2007\r\n floatingBondMktPrice1 = 101.64\r\n floatingBondMktFullPrice1 = floatingBondMktPrice1 + floatingBond1.accruedAmount()\r\n floatingBondParAssetSwap1 = ql.AssetSwap(\r\n payFixedRate,\r\n floatingBond1,\r\n floatingBondMktPrice1,\r\n 
self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n parAssetSwap,\r\n )\r\n floatingBondParAssetSwap1.setPricingEngine(swapEngine)\r\n floatingBondParAssetSwapSpread1 = floatingBondParAssetSwap1.fairSpread()\r\n floatingBondMktAssetSwap1 = ql.AssetSwap(\r\n payFixedRate,\r\n floatingBond1,\r\n floatingBondMktPrice1,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n mktAssetSwap,\r\n )\r\n floatingBondMktAssetSwap1.setPricingEngine(swapEngine)\r\n floatingBondMktAssetSwapSpread1 = floatingBondMktAssetSwap1.fairSpread()\r\n error3 = abs(\r\n floatingBondMktAssetSwapSpread1 - 100 * floatingBondParAssetSwapSpread1 / floatingBondMktFullPrice1\r\n )\r\n\r\n self.assertFalse(\r\n error3 > tolerance,\r\n \"wrong asset swap spreads for floating bond:\"\r\n + \"\\n market asset swap spread: \"\r\n + str(floatingBondMktAssetSwapSpread1)\r\n + \"\\n par asset swap spread: \"\r\n + str(floatingBondParAssetSwapSpread1)\r\n + \"\\n error: \"\r\n + str(error3)\r\n + \"\\n tolerance: \"\r\n + str(tolerance),\r\n )\r\n\r\n ## FRN Underlying bond (Isin: XS0090566539 COE 0 09/24/18)\r\n ## maturity occurs on a business day\r\n\r\n floatingBondStartDate2 = ql.Date(24, ql.September, 2004)\r\n floatingBondMaturityDate2 = ql.Date(24, ql.September, 2018)\r\n floatingBondSchedule2 = ql.Schedule(\r\n floatingBondStartDate2,\r\n floatingBondMaturityDate2,\r\n ql.Period(ql.Semiannual),\r\n bondCalendar,\r\n ql.ModifiedFollowing,\r\n ql.ModifiedFollowing,\r\n ql.DateGeneration.Backward,\r\n False,\r\n )\r\n floatingBondLeg2 = list(\r\n ql.IborLeg(\r\n [self.faceAmount],\r\n floatingBondSchedule2,\r\n self.iborIndex,\r\n ql.Actual360(),\r\n ql.ModifiedFollowing,\r\n [fixingDays],\r\n [],\r\n [0.0025],\r\n [],\r\n [],\r\n inArrears,\r\n )\r\n )\r\n floatingbondRedemption2 = bondCalendar.adjust(floatingBondMaturityDate2, ql.ModifiedFollowing)\r\n floatingBondLeg2.append(ql.SimpleCashFlow(100.0, floatingbondRedemption2))\r\n floatingBond2 = ql.Bond(\r\n settlementDays,\r\n bondCalendar,\r\n self.faceAmount,\r\n floatingBondMaturityDate2,\r\n floatingBondStartDate2,\r\n floatingBondLeg2,\r\n )\r\n floatingBond2.setPricingEngine(bondEngine)\r\n\r\n ql.setCouponPricer(floatingBond2.cashflows(), self.pricer)\r\n self.iborIndex.addFixing(ql.Date(22, ql.March, 2007), 0.04013)\r\n ## market price observed on 7th June 2007\r\n floatingBondMktPrice2 = 101.248\r\n floatingBondMktFullPrice2 = floatingBondMktPrice2 + floatingBond2.accruedAmount()\r\n floatingBondParAssetSwap2 = ql.AssetSwap(\r\n payFixedRate,\r\n floatingBond2,\r\n floatingBondMktPrice2,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n parAssetSwap,\r\n )\r\n floatingBondParAssetSwap2.setPricingEngine(swapEngine)\r\n floatingBondParAssetSwapSpread2 = floatingBondParAssetSwap2.fairSpread()\r\n floatingBondMktAssetSwap2 = ql.AssetSwap(\r\n payFixedRate,\r\n floatingBond2,\r\n floatingBondMktPrice2,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n mktAssetSwap,\r\n )\r\n floatingBondMktAssetSwap2.setPricingEngine(swapEngine)\r\n floatingBondMktAssetSwapSpread2 = floatingBondMktAssetSwap2.fairSpread()\r\n error4 = abs(\r\n floatingBondMktAssetSwapSpread2 - 100 * floatingBondParAssetSwapSpread2 / floatingBondMktFullPrice2\r\n )\r\n\r\n self.assertFalse(\r\n error4 > tolerance,\r\n \"wrong asset swap spreads for floating bond:\"\r\n + \"\\n market asset swap spread: \"\r\n + 
str(floatingBondMktAssetSwapSpread2)\r\n + \"\\n par asset swap spread: \"\r\n + str(floatingBondParAssetSwapSpread2)\r\n + \"\\n error: \"\r\n + str(error4)\r\n + \"\\n tolerance: \"\r\n + str(tolerance),\r\n )\r\n\r\n ## CMS Underlying bond (Isin: XS0228052402 CRDIT 0 8/22/20)\r\n ## maturity doesn't occur on a business day\r\n\r\n cmsBondStartDate1 = ql.Date(22, ql.August, 2005)\r\n cmsBondMaturityDate1 = ql.Date(22, ql.August, 2020)\r\n cmsBondSchedule1 = ql.Schedule(\r\n cmsBondStartDate1,\r\n cmsBondMaturityDate1,\r\n ql.Period(ql.Annual),\r\n bondCalendar,\r\n ql.Unadjusted,\r\n ql.Unadjusted,\r\n ql.DateGeneration.Backward,\r\n False,\r\n )\r\n cmsBondLeg1 = list(\r\n ql.CmsLeg(\r\n [self.faceAmount],\r\n cmsBondSchedule1,\r\n self.swapIndex,\r\n ql.Thirty360(),\r\n ql.Following,\r\n [fixingDays],\r\n [],\r\n [],\r\n [0.055],\r\n [0.025],\r\n inArrears,\r\n )\r\n )\r\n cmsbondRedemption1 = bondCalendar.adjust(cmsBondMaturityDate1, ql.Following)\r\n cmsBondLeg1.append(ql.SimpleCashFlow(100.0, cmsbondRedemption1))\r\n cmsBond1 = ql.Bond(\r\n settlementDays, bondCalendar, self.faceAmount, cmsBondMaturityDate1, cmsBondStartDate1, cmsBondLeg1\r\n )\r\n cmsBond1.setPricingEngine(bondEngine)\r\n\r\n ql.setCouponPricer(cmsBond1.cashflows(), self.cmspricer)\r\n self.swapIndex.addFixing(ql.Date(18, ql.August, 2006), 0.04158)\r\n cmsBondMktPrice1 = 88.45 ## market price observed on 7th June 2007\r\n cmsBondMktFullPrice1 = cmsBondMktPrice1 + cmsBond1.accruedAmount()\r\n cmsBondParAssetSwap1 = ql.AssetSwap(\r\n payFixedRate,\r\n cmsBond1,\r\n cmsBondMktPrice1,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n parAssetSwap,\r\n )\r\n cmsBondParAssetSwap1.setPricingEngine(swapEngine)\r\n cmsBondParAssetSwapSpread1 = cmsBondParAssetSwap1.fairSpread()\r\n cmsBondMktAssetSwap1 = ql.AssetSwap(\r\n payFixedRate,\r\n cmsBond1,\r\n cmsBondMktPrice1,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n mktAssetSwap,\r\n )\r\n cmsBondMktAssetSwap1.setPricingEngine(swapEngine)\r\n cmsBondMktAssetSwapSpread1 = cmsBondMktAssetSwap1.fairSpread()\r\n error5 = abs(cmsBondMktAssetSwapSpread1 - 100 * cmsBondParAssetSwapSpread1 / cmsBondMktFullPrice1)\r\n\r\n self.assertFalse(\r\n error5 > tolerance,\r\n \"wrong asset swap spreads for cms bond:\"\r\n + \"\\n market asset swap spread: \"\r\n + str(cmsBondMktAssetSwapSpread1)\r\n + \"\\n par asset swap spread: \"\r\n + str(100 * cmsBondParAssetSwapSpread1 / cmsBondMktFullPrice1)\r\n + \"\\n error: \"\r\n + str(error5)\r\n + \"\\n tolerance: \"\r\n + str(tolerance),\r\n )\r\n\r\n ## CMS Underlying bond (Isin: XS0218766664 ISPIM 0 5/6/15)\r\n ## maturity occurs on a business day\r\n\r\n cmsBondStartDate2 = ql.Date(6, ql.May, 2005)\r\n cmsBondMaturityDate2 = ql.Date(6, ql.May, 2015)\r\n cmsBondSchedule2 = ql.Schedule(\r\n cmsBondStartDate2,\r\n cmsBondMaturityDate2,\r\n ql.Period(ql.Annual),\r\n bondCalendar,\r\n ql.Unadjusted,\r\n ql.Unadjusted,\r\n ql.DateGeneration.Backward,\r\n False,\r\n )\r\n cmsBondLeg2 = list(\r\n ql.CmsLeg(\r\n [self.faceAmount],\r\n cmsBondSchedule2,\r\n self.swapIndex,\r\n ql.Thirty360(),\r\n ql.Following,\r\n [fixingDays],\r\n [0.84],\r\n [],\r\n [],\r\n [],\r\n inArrears,\r\n )\r\n )\r\n cmsbondRedemption2 = bondCalendar.adjust(cmsBondMaturityDate2, ql.Following)\r\n cmsBondLeg2.append(ql.SimpleCashFlow(100.0, cmsbondRedemption2))\r\n cmsBond2 = ql.Bond(\r\n settlementDays, bondCalendar, self.faceAmount, cmsBondMaturityDate2, cmsBondStartDate2, 
cmsBondLeg2\r\n )\r\n cmsBond2.setPricingEngine(bondEngine)\r\n\r\n ql.setCouponPricer(cmsBond2.cashflows(), self.cmspricer)\r\n self.swapIndex.addFixing(ql.Date(4, ql.May, 2006), 0.04217)\r\n cmsBondMktPrice2 = 94.08 ## market price observed on 7th June 2007\r\n cmsBondMktFullPrice2 = cmsBondMktPrice2 + cmsBond2.accruedAmount()\r\n cmsBondParAssetSwap2 = ql.AssetSwap(\r\n payFixedRate,\r\n cmsBond2,\r\n cmsBondMktPrice2,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n parAssetSwap,\r\n )\r\n cmsBondParAssetSwap2.setPricingEngine(swapEngine)\r\n cmsBondParAssetSwapSpread2 = cmsBondParAssetSwap2.fairSpread()\r\n cmsBondMktAssetSwap2 = ql.AssetSwap(\r\n payFixedRate,\r\n cmsBond2,\r\n cmsBondMktPrice2,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n mktAssetSwap,\r\n )\r\n cmsBondMktAssetSwap2.setPricingEngine(swapEngine)\r\n cmsBondMktAssetSwapSpread2 = cmsBondMktAssetSwap2.fairSpread()\r\n error6 = abs(cmsBondMktAssetSwapSpread2 - 100 * cmsBondParAssetSwapSpread2 / cmsBondMktFullPrice2)\r\n\r\n self.assertFalse(\r\n error6 > tolerance,\r\n \"wrong asset swap spreads for cms bond:\"\r\n + \"\\n market asset swap spread: \"\r\n + str(cmsBondMktAssetSwapSpread2)\r\n + \"\\n par asset swap spread: \"\r\n + str(cmsBondParAssetSwapSpread2)\r\n + \"\\n error: \"\r\n + str(error6)\r\n + \"\\n tolerance: \"\r\n + str(tolerance),\r\n )\r\n\r\n ## Zero Coupon bond (Isin: DE0004771662 IBRD 0 12/20/15)\r\n ## maturity doesn't occur on a business day\r\n\r\n zeroCpnBondStartDate1 = ql.Date(19, ql.December, 1985)\r\n zeroCpnBondMaturityDate1 = ql.Date(20, ql.December, 2015)\r\n zeroCpnBondRedemption1 = bondCalendar.adjust(zeroCpnBondMaturityDate1, ql.Following)\r\n zeroCpnBondLeg1 = ql.Leg([ql.SimpleCashFlow(100.0, zeroCpnBondRedemption1)])\r\n zeroCpnBond1 = ql.Bond(\r\n settlementDays,\r\n bondCalendar,\r\n self.faceAmount,\r\n zeroCpnBondMaturityDate1,\r\n zeroCpnBondStartDate1,\r\n zeroCpnBondLeg1,\r\n )\r\n zeroCpnBond1.setPricingEngine(bondEngine)\r\n\r\n ## market price observed on 12th June 2007\r\n zeroCpnBondMktPrice1 = 70.436\r\n zeroCpnBondMktFullPrice1 = zeroCpnBondMktPrice1 + zeroCpnBond1.accruedAmount()\r\n zeroCpnBondParAssetSwap1 = ql.AssetSwap(\r\n payFixedRate,\r\n zeroCpnBond1,\r\n zeroCpnBondMktPrice1,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n parAssetSwap,\r\n )\r\n zeroCpnBondParAssetSwap1.setPricingEngine(swapEngine)\r\n zeroCpnBondParAssetSwapSpread1 = zeroCpnBondParAssetSwap1.fairSpread()\r\n zeroCpnBondMktAssetSwap1 = ql.AssetSwap(\r\n payFixedRate,\r\n zeroCpnBond1,\r\n zeroCpnBondMktPrice1,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n mktAssetSwap,\r\n )\r\n zeroCpnBondMktAssetSwap1.setPricingEngine(swapEngine)\r\n zeroCpnBondMktAssetSwapSpread1 = zeroCpnBondMktAssetSwap1.fairSpread()\r\n error7 = abs(zeroCpnBondMktAssetSwapSpread1 - 100 * zeroCpnBondParAssetSwapSpread1 / zeroCpnBondMktFullPrice1)\r\n\r\n self.assertFalse(\r\n error7 > tolerance,\r\n \"wrong asset swap spreads for zero cpn bond:\"\r\n + \"\\n market asset swap spread: \"\r\n + str(zeroCpnBondMktAssetSwapSpread1)\r\n + \"\\n par asset swap spread: \"\r\n + str(zeroCpnBondParAssetSwapSpread1)\r\n + \"\\n error: \"\r\n + str(error7)\r\n + \"\\n tolerance: \"\r\n + str(tolerance),\r\n )\r\n\r\n ## Zero Coupon bond (Isin: IT0001200390 ISPIM 0 02/17/28)\r\n ## maturity occurs on a business day\r\n\r\n zeroCpnBondStartDate2 = 
ql.Date(17, ql.February, 1998)\r\n zeroCpnBondMaturityDate2 = ql.Date(17, ql.February, 2028)\r\n zerocpbondRedemption2 = bondCalendar.adjust(zeroCpnBondMaturityDate2, ql.Following)\r\n zeroCpnBondLeg2 = ql.Leg([ql.SimpleCashFlow(100.0, zerocpbondRedemption2)])\r\n zeroCpnBond2 = ql.Bond(\r\n settlementDays,\r\n bondCalendar,\r\n self.faceAmount,\r\n zeroCpnBondMaturityDate2,\r\n zeroCpnBondStartDate2,\r\n zeroCpnBondLeg2,\r\n )\r\n zeroCpnBond2.setPricingEngine(bondEngine)\r\n\r\n ## zeroCpnBondPrice2 = zeroCpnBond2.cleanPrice()\r\n ## market price observed on 12th June 2007\r\n zeroCpnBondMktPrice2 = 35.160\r\n zeroCpnBondMktFullPrice2 = zeroCpnBondMktPrice2 + zeroCpnBond2.accruedAmount()\r\n zeroCpnBondParAssetSwap2 = ql.AssetSwap(\r\n payFixedRate,\r\n zeroCpnBond2,\r\n zeroCpnBondMktPrice2,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n parAssetSwap,\r\n )\r\n zeroCpnBondParAssetSwap2.setPricingEngine(swapEngine)\r\n zeroCpnBondParAssetSwapSpread2 = zeroCpnBondParAssetSwap2.fairSpread()\r\n zeroCpnBondMktAssetSwap2 = ql.AssetSwap(\r\n payFixedRate,\r\n zeroCpnBond2,\r\n zeroCpnBondMktPrice2,\r\n self.iborIndex,\r\n self.spread,\r\n ql.Schedule(),\r\n self.iborIndex.dayCounter(),\r\n mktAssetSwap,\r\n )\r\n zeroCpnBondMktAssetSwap2.setPricingEngine(swapEngine)\r\n zeroCpnBondMktAssetSwapSpread2 = zeroCpnBondMktAssetSwap2.fairSpread()\r\n error8 = abs(zeroCpnBondMktAssetSwapSpread2 - 100 * zeroCpnBondParAssetSwapSpread2 / zeroCpnBondMktFullPrice2)\r\n\r\n self.assertFalse(\r\n error8 > tolerance,\r\n \"wrong asset swap spreads for zero cpn bond:\"\r\n + \"\\n market asset swap spread: \"\r\n + str(zeroCpnBondMktAssetSwapSpread2)\r\n + \"\\n par asset swap spread: \"\r\n + str(zeroCpnBondParAssetSwapSpread2)\r\n + \"\\n error: \"\r\n + str(error8)\r\n + \"\\n tolerance: \"\r\n + str(tolerance),\r\n )", "def fix_create_bills(self, fake_billed_org):\n bill_1 = BillFactory(\n generated_by=fake_billed_org.billed_by,\n period_start=weeks_ago(7)(),\n period_end=weeks_ago(6)(),\n )\n bill_2 = BillFactory(\n generated_by=fake_billed_org.billed_by,\n period_start=weeks_ago(6)(),\n period_end=weeks_ago(5)(),\n )\n\n return bill_1, bill_2", "def __init__(self, name, smarts, bonds=..., charges=..., radicals=...) -> None:\n ...", "def get_constant_bn(self, t_slice=0):\n from pgmpy.models import BayesianNetwork\n\n edges = [\n (\n str(u[0]) + \"_\" + str(u[1] + t_slice),\n str(v[0]) + \"_\" + str(v[1] + t_slice),\n )\n for u, v in self.edges()\n ]\n new_cpds = []\n for cpd in self.cpds:\n new_vars = [\n str(var) + \"_\" + str(time + t_slice) for var, time in cpd.variables\n ]\n new_cpds.append(\n TabularCPD(\n variable=new_vars[0],\n variable_card=cpd.cardinality[0],\n values=cpd.get_values(),\n evidence=new_vars[1:],\n evidence_card=cpd.cardinality[1:],\n )\n )\n\n bn = BayesianNetwork(edges)\n bn.add_cpds(*new_cpds)\n return bn", "def __init__(self, d: int = 8):\n # Arge and Vitter distinguish between the branching parameter `a`\n # and a leaf parameter `k`. Following Bender et al., we use k = 1.\n if d <= 4:\n raise ValueError('Balance factor must be >4.')\n self.d = d\n self.deleted = 0\n self.root: WBBNode[K, V] = WBBNode(d=d)", "def bond_A(k):\n return (4-k) * 600000" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Breaks an AudioSegment into chunks that are chunk_length milliseconds long. If chunk_length is 50 then you'll get a list of 50-millisecond-long audio segments back (except the last one, which can be shorter).
def make_chunks(audio_segment, chunk_length):
    number_of_chunks = ceil(len(audio_segment) / float(chunk_length))
    return [audio_segment[i * chunk_length:(i + 1) * chunk_length]
            for i in range(int(number_of_chunks))]
[ "def chunk_audio_pieces(self, pieces, chunk_size):\n left_over = np.array([])\n for piece in pieces:\n if left_over.size == 0:\n combined = piece\n else:\n combined = np.concatenate([left_over, piece], axis=-1)\n for chunk in chunk_audio(combined.T, chunk_size):\n yield chunk.T\n left_over = piece[-(len(piece) % chunk_size):]", "def splitSong(songSegment, start, finish):\n\t# First we make sure that the start time is less than the finish time.\n\tif(finish < start):\n\t\t(start, finish) = (finish, start)\n\t\n\t# Next we check that the song segment is at least 20 seconds long in total.\n\tif(len(songSegment) < 20000):\n\t\treturn songSegment\n\t\n\t# Then we range check the start and stop.\n\telse if(start*1000 >= len(songSegment) or finish*1000 >= len(songSegment)):\n\t\treturn songSegment[len(songSegment)-20000:]\n\t\n\t# Finally we return the selection.\n\telse:\n\t\treturn songSegment[start*1000:finish*1000]", "def split_mp3_file(filename, seconds_to_split):\n\n for split in range(0, 10):\n start = seconds_to_split * (split) * 1000\n end = seconds_to_split * (split + 1) * 1000\n split_song = AudioSegment.from_file(filename, format=\"mp3\")\n splote = split_song[start:end]\n splote.export(str(split) + '-' + filename, format='mp3')", "def chunks(data_list, chunk_size):\n data_info, frequency, bits = data_list\n\n some_data_list = []\n for i in range(0, len(data_info), chunk_size):\n some_data_list.append(data_info[i:i+chunk_size])\n return some_data_list", "def split_on_silence(audio_segment, min_silence_len=1000, silence_thresh=-16, keep_silence=100,\n seek_step=1):\n\n not_silence_ranges = detect_nonsilent(audio_segment, min_silence_len=min_silence_len, silence_thresh=silence_thresh)\n\n chunks = []\n start_points = []\n for start_i, end_i in not_silence_ranges:\n start_i = max(0, start_i - keep_silence)\n end_i += keep_silence\n start_points.append(start_i)\n chunks.append(audio_segment[start_i:end_i])\n\n return chunks, start_points", "def get_audio_chunks(audio, output_folder='audio_chunks'):\n # open the audio file using pydub\n sound = AudioSegment.from_wav(audio)\n # split audio sound where silence is 700 miliseconds or more and get chunks\n chunks = split_on_silence(sound,\n # experiment with this value for your target audio file\n min_silence_len = 1000,\n # adjust this per requirement\n silence_thresh = sound.dBFS-14,\n # keep the silence for 1 second, adjustable as well\n keep_silence=500,\n )\n folder_name = output_folder\n # create a directory to store the audio chunks\n if not os.path.isdir(folder_name):\n os.mkdir(folder_name)\n# whole_text = \"\"\n # process each chunk\n for i, audio_chunk in enumerate(chunks, start=1):\n # export audio chunk and save it in\n # the `folder_name` directory.\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")", "def split_sample(sample, length, n_samples=None):\n (time_steps, pitch_level) = sample.shape\n if n_samples == None:\n n_samples = int(time_steps / length)\n samples = np.zeros((n_samples, length, pitch_level))\n max_start = time_steps - length\n for i in range(0, n_samples):\n start = int(i * max_start / n_samples)\n end = start + length\n samples[i] = sample[start:end, :]\n return samples", "def split(signal, width, step, fs):\n if width <= 0 or step <= 0:\n raise ValueError()\n\n frames = []\n\n step_len = int(step/1000 * fs)\n width_len = int(width/1000 * fs)\n\n if width_len <= 0 or step_len <= 0 or width_len > len(signal):\n raise ValueError(f'{width_len=}, 
{step_len=}, {width_len > len(signal)=}')\n\n for i in range(0, len(signal), step_len):\n f = signal[i:i + width_len]\n if len(f) != width_len:\n break\n frames.append(f)\n\n return np.array(frames)", "def get_class_batch(dataset,clss, chunk_size):\n wav_list = dataset[clss]\n # for each wav, truncate the end if shorter than chunk_size and split\n wav_chunks = np.stack([w[x:x+chunk_size] for w in wav_list\n for x in xrange(0,\n w.shape[0]-(w.shape[0]%chunk_size),\n chunk_size)])\n return wav_chunks", "def split_on_silence_threshold(wav_file):\n abs_path = os.path.dirname(wav_file)\n dest_dir = os.path.join(abs_path, \"custom_split\")\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.info(\"Splitting started: \" + wav_file) \n # Read the file\n audioSegment = AudioSegment.from_wav(wav_file)\n # Calculating the silence threshold\n # Normalizing the audio file belfore finding the threshold\n full_audio_wav = normalize(audioSegment)\n loudness_ms_list = [] # Save the audio levels of all the chunks\n for ms_chunk in full_audio_wav:\n loudness_ms_list.append(round(ms_chunk.dBFS))\n print(\"Audio levels are recorded\", file=sys.stderr)\n # Using pandas df for easier manipulation\n df = pd.DataFrame(loudness_ms_list)\n df[0] = df[df[0] != float(\"-inf\")] # Remove the very low levels\n st = df[0].mean()\n st = st if st < -16 else -16 # Because -16db is default\n # Splits the audio if silence duration is MSL long\n MSL = 500 # minimum silence length in ms\n print(\"Splitting into silence chunks\", file=sys.stderr)\n chunks = split_on_silence(\n full_audio_wav, \n # split on silences longer than 500ms (500ms)\n min_silence_len=MSL, \n # anything under -16 dBFS is considered silence\n silence_thresh=st, \n # keep 200 ms of leading/trailing silence\n keep_silence=200, \n )\n # Saving all the chunks\n print(\"Writing all the files, this may take some time!\", file=sys.stderr)\n for index, chunk in enumerate(chunks):\n chunk_file_name = os.path.join(dest_dir, \"sample_{}.wav\".format(str(index).zfill(10)))\n print(\"Saving the file to \" + chunk_file_name, file=sys.stderr)\n # You can export as mp3 etc, note that it has dependency on ffmpeg\n chunk.export(chunk_file_name, format=\"wav\")", "def extract_chunks(data):\n silence = data[0] == 0\n\n if silence:\n # Track [onset sample number, length] for each silent chunk\n silences = [[0, 0]]\n nonsilences = []\n else:\n # Track (onset sample number, amplitudes) for each nonsilent chunk\n nonsilences = [(0, [])]\n silences = []\n\n for i, amp in enumerate(data):\n if amp > 0:\n # Entering a new nonsilent chunk\n if silence:\n silence = False\n nonsilences.append((i, [amp]))\n else:\n nonsilences[-1][1].append(amp)\n else:\n # Entering a new silent chunk\n if not silence:\n silence = True\n silences.append([i, 1])\n else:\n silences[-1][1] += 1\n return silences, nonsilences", "def _chunking(ds, dim=\"time\", number_chunks=False, chunk_length=False):\n if number_chunks and not chunk_length:\n chunk_length = np.floor(ds[dim].size / number_chunks)\n cmin = int(ds[dim].min())\n elif not number_chunks and chunk_length:\n cmin = int(ds[dim].min())\n number_chunks = int(np.floor(ds[dim].size / chunk_length))\n else:\n raise KeyError(\"set number_chunks or chunk_length to True\")\n c = ds.sel({dim: slice(cmin, cmin + chunk_length - 1)})\n c = c.expand_dims(\"c\")\n c[\"c\"] = [0]\n for i in range(1, number_chunks):\n c2 = ds.sel(\n {dim: slice(cmin + chunk_length * i, cmin + (i + 1) * chunk_length - 1)}\n )\n c2 = c2.expand_dims(\"c\")\n c2[\"c\"] 
= [i]\n c2[dim] = c[dim]\n c = xr.concat([c, c2], \"c\")\n return c", "def chunk_sound(bits):\n global buffer\n buffer = np.append(buffer, bits)\n abs_buffer = np.absolute(buffer)\n # Keep accumulating if not enough silence has been detected\n if len(buffer) <= SILENCE_FRAME_THRESHOLD:\n return np.array([])\n # If enough silence, clear the buffer\n last_timespan = abs_buffer[-SILENCE_FRAME_THRESHOLD:]\n if np.average(last_timespan) < SILENCE_AVR_THRESHOLD:\n # If there is enough sound, return it\n if np.average(abs_buffer) >= OVERALL_THRESHOLD:\n result = buffer\n buffer = np.array([])\n return result\n buffer = np.array([])\n return np.array([])", "def _chunking(binned_pair, size_chunks, max_lag, best_lag):\n\n length = len(binned_pair[0], )\n\n # number of chunks\n n_chunks = math.ceil((length - max_lag) / size_chunks)\n\n # new chunk size, this is to have all chunks of roughly the same size\n size_chunks = math.floor((length - max_lag) / n_chunks)\n\n n_chunks = int(n_chunks)\n size_chunks = int(size_chunks)\n\n chunked = [[[], []] for _ in range(n_chunks)]\n\n # cut the time series according to best_lag\n\n binned_pair_cut = np.array([np.zeros(length - max_lag, dtype=int),\n np.zeros(length - max_lag, dtype=int)])\n\n # choose which entries to consider according to the best lag chosen\n if best_lag == 0:\n binned_pair_cut[0] = binned_pair[0][0:length - max_lag]\n binned_pair_cut[1] = binned_pair[1][0:length - max_lag]\n elif best_lag > 0:\n binned_pair_cut[0] = binned_pair[0][0:length - max_lag]\n binned_pair_cut[1] = binned_pair[1][\n best_lag:length - max_lag + best_lag]\n else:\n binned_pair_cut[0] = binned_pair[0][\n -best_lag:length - max_lag - best_lag]\n binned_pair_cut[1] = binned_pair[1][0:length - max_lag]\n\n # put the cut data into the chunked object\n for iii in range(n_chunks - 1):\n chunked[iii][0] = binned_pair_cut[0][\n size_chunks * iii:size_chunks * (iii + 1)]\n chunked[iii][1] = binned_pair_cut[1][\n size_chunks * iii:size_chunks * (iii + 1)]\n\n # last chunk can be of slightly different size\n chunked[n_chunks - 1][0] = binned_pair_cut[0][\n size_chunks * (n_chunks - 1):length]\n chunked[n_chunks - 1][1] = binned_pair_cut[1][\n size_chunks * (n_chunks - 1):length]\n\n return chunked, n_chunks", "def split_audio(path, audio_file, logfile):\n \n # Parse logfile\n log = pd.read_csv(logfile)\n rel_time = log['Relative Time'][2:].tolist()\n rel_time.pop(1)\n for i in range(0, len(rel_time) - 1):\n t1 = timestring_to_seconds(rel_time[i])\n t2 = timestring_to_seconds(rel_time[i+1])\n\n t1 = t1 * 1000\n t2 = t2 * 1000\n audio = AudioSegment.from_wav(audio_file)\n sliced = audio[t1:t2]\n source = \"computer\" if \"computer\" in audio_file else \"watch\"\n basename = os.path.basename(audio_file)\n basename = os.path.splitext(basename)[0]\n sliced.export(f\"{path}/parts/{source}/{basename}_{i}.wav\", format=\"wav\")", "def chunks(seq, num):\n\n avg = len(seq) / float(num)\n out = []\n last = 0.0\n\n while last < len(seq):\n out.append(seq[int(last):int(last + avg)])\n last += avg\n\n return out", "def calculate_segment_nframes(path, segment_len):\n\n wave_reader, wave_params = read_audio(path)\n window_nframes = int(wave_params.framerate * 0.01) # every window last 0.01 second\n segment_nframes = int(wave_params.framerate * segment_len)\n\n # switch every window by 0.01 second\n # save the frame index of middle of the window to frame_list\n # save maximum value of the window to max_list\n frame = 0\n frame_list, max_list = [], []\n while True:\n if frame >= 
wave_params.nframes:\n break\n fragment = wave_reader.readframes(window_nframes)\n frame_list.append(min(int(frame + window_nframes / 2),\n wave_params.nframes))\n max_list.append(audioop.max(fragment, wave_params.sampwidth))\n frame += window_nframes\n wave_reader.close()\n\n # calculate the threshold by 30 percentile\n max_list_sorted = sorted(max_list)\n threshold = max_list_sorted[int(len(max_list_sorted) * 30. / 100)]\n\n # calculate how many previous windows have maximum values smaller than threshold\n continuous = 0\n continuous_list = []\n for max_val in max_list:\n if max_val < threshold:\n continuous += 1\n else:\n continuous = 0\n continuous_list.append(continuous)\n\n # find frame numbers of breakpoints\n breakpoint_frame_list = []\n while True:\n frame_min = frame_list[0]\n frame_max = frame_min + segment_nframes - window_nframes\n if frame_list[-1] <= frame_max:\n break\n\n for index, frame in enumerate(frame_list):\n if frame > frame_max:\n continuous_max_value = max(continuous_list[:index])\n continuous_max_index = continuous_list.index(continuous_max_value)\n for i in range(continuous_max_index + 1):\n continuous_list[i] = 0\n\n continuous_max_index = int(continuous_max_index - (continuous_max_value - 1) / 2)\n breakpoint_frame_list.append(frame_list[continuous_max_index])\n frame_list = frame_list[continuous_max_index + 1:]\n continuous_list = continuous_list[continuous_max_index + 1:]\n break\n\n # remove too close breakpoints\n i = 1\n while True:\n if len(breakpoint_frame_list) < 2 or i >= len(breakpoint_frame_list):\n break\n if i == 1:\n if breakpoint_frame_list[i] < segment_nframes:\n del breakpoint_frame_list[0]\n else:\n i += 1\n else:\n if breakpoint_frame_list[i] - breakpoint_frame_list[i - 2] < segment_nframes:\n del breakpoint_frame_list[i - 1]\n else:\n i += 1\n\n # calculate nframes_list\n segment_nframes_list = []\n if len(breakpoint_frame_list) > 0:\n segment_nframes_list.append(breakpoint_frame_list[0])\n for i in range(1, len(breakpoint_frame_list)):\n segment_nframes_list.append(breakpoint_frame_list[i] - breakpoint_frame_list[i - 1])\n if len(breakpoint_frame_list) == 0 or breakpoint_frame_list[-1] < wave_params.nframes:\n segment_nframes_list.append(segment_nframes)\n return segment_nframes_list", "def chunk_packets(packets: Iterable[av.Packet], size: int) -> Iterator[List[av.Packet]]:\n chunk = []\n chunk_bytes = 0\n # group packets by into consecutive keyframes and inbetweens\n for is_keyframe, group in groupby(packets, key=attrgetter(\"is_keyframe\")):\n # add all the group packets into the current chunk\n for packet in group:\n if packet.buffer_size:\n chunk.append(packet)\n chunk_bytes += packet.buffer_size\n\n # yield a chunk when:\n # - it doesn't end in a keyframe (it may be needed for the following inbetweens)\n # - and its total size is at least `size`\n if not is_keyframe and chunk_bytes >= size:\n yield chunk\n chunk = []\n chunk_bytes = 0\n\n # yield the last chunk regardless of keyframe/size if non-empty\n if chunk:\n yield chunk", "def chunk_queue(dir_in=\"../audio/chunk_queue\",\n dir_out=\"../audio/wav_chunked\",\n chunk_len=5,\n sr=22050,\n log=True\n ):\n \n for root, dirs, files in os.walk(dir_in):\n for fname in files:\n if not re.match(r'^\\.', fname):\n rel_fpath = os.path.join(root, fname)\n chunk_song(rel_fpath, chunk_len=chunk_len, sr=sr, log=log)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Testing if electron density parsing of a VASP directory is correct.
def test_parse_electrondensity(): # Parse envisionpy.hdf5parser.charge(PATH_TO_HDF5, PATH_TO_VASP_CALC) envisionpy.hdf5parser.unitcell(PATH_TO_HDF5, PATH_TO_VASP_CALC) # Test if the generated HDF5-file contains correct information if os.path.isfile(PATH_TO_HDF5): with h5py.File(PATH_TO_HDF5, 'r') as h5: assert '/CHG' in h5 assert '/UnitCell' in h5 assert '/basis' in h5 assert '/scaling_factor' in h5 # cleanup os.remove(PATH_TO_HDF5)
[ "def _check_density(self,density, num_electrons):\n\n FLOAT_PRECISION = 0.01\n #integrate the density over the spherical space\n #s = float(np.sum(density))\n #s = 4*np.pi * float(np.sum(density * self.grid.gridvec**2 ))\n s = 4*np.pi * integrate.simps(density * self.grid.gridvec**2 ,self.grid.gridvec)\n print(\"the density sums to \",s)\n assert (abs(s - num_electrons) < FLOAT_PRECISION), \\\n \"density should sum to {0} ! got prob={1} instead\".format(num_electrons, s)", "def measure_deviations(electron):\n\n if electron.shape[0] == 0:\n return None, None\n\n # column indices\n ind_trackID = 0\n ind_parentID = 1\n ind_stepnum = 2\n ind_charge = 3\n ind_initpos = range(4, 7)\n ind_finalpos = range(7, 10)\n ind_tracklen = 10\n ind_steplen = 11\n ind_final_E = 12\n ind_dE = 13\n\n energy_keV = geant.measureEnergyKev(electron)\n # tabata_range_um = tabata.extrapolatedRangeSi(energy_keV)\n\n trackID = electron[:, ind_trackID].astype(int)\n parentID, charge = geant.constructParticleTable(\n electron, ind_trackID, ind_parentID, ind_charge)\n\n # (copied from geant.measureExtrapolatedRangeX)\n # exclude electrons induced by secondary photons\n # (e.g. bremsstrahlung)\n # i.e., only include particles with a pure electron ancestry\n # start from all electrons, and remove any with photon ancestors.\n is_valid = charge == -1\n was_valid = np.ones(len(is_valid)) > 0\n # is there a better way to make a boolean array?\n while any(np.logical_xor(is_valid, was_valid)):\n was_valid = is_valid\n is_valid = np.logical_and(\n is_valid,\n np.logical_or(is_valid[parentID], parentID == 0))\n\n is_valid_step = is_valid[trackID]\n\n first_step = list(electron[is_valid_step, ind_stepnum]).index(1)\n initial_pos = electron[first_step, ind_initpos]\n # assume initial direction is along x-axis\n\n offset_vector_mm = (electron[is_valid_step, :][:, ind_finalpos] -\n initial_pos)\n radial_distance_mm = np.sqrt(\n offset_vector_mm[:, 0]**2 +\n offset_vector_mm[:, 1]**2 +\n offset_vector_mm[:, 2]**2)\n atan_y = np.sqrt(\n offset_vector_mm[:, 1]**2 + offset_vector_mm[:, 2]**2)\n atan_x = offset_vector_mm[:, 0]\n deviation_deg = np.arctan2(atan_y, atan_x)\n\n return radial_distance_mm, deviation_deg", "def test_density(self):\n self.ld.compute(self.box, self.pos, self.pos)\n\n # Test access\n self.ld.density\n self.ld.num_neighbors\n self.ld.box\n\n self.assertTrue(self.ld.box == freud.box.Box.cube(10))\n\n npt.assert_array_less(np.fabs(self.ld.density - 10.0), 1.5)\n\n npt.assert_array_less(\n np.fabs(self.ld.num_neighbors - 1130.973355292), 200)", "def test_directory_no_raw(self):\n self.assertFalse(valet.view(self.test_subdir)\n .find(self.test_subdir + \"?raw\") >= 0)", "def test_get_installed_vis():\n path_dirs = test_env.PathDirs()\n invalid_dirs = test_env.PathDirs(vis_dir=\"/tmp/foo\")\n get_status.get_installed_vis(path_dirs)\n get_status.get_installed_vis(invalid_dirs)", "def verify_pddf_thermalutil(dut, mode, thermal_list, version=\"2.0\"):\n thermal_li = thermal_list if isinstance(thermal_list, list) else [thermal_list]\n output = show_pddf_thermalutil(dut, mode)\n if not output:\n st.error(\"PDDF THERMAL UTIL DATA NOT FOUND\")\n return False\n count = 0\n if mode == \"gettemp\":\n for data in output:\n for each_thermal in thermal_li:\n if each_thermal != data[\"temp_sensor\"]:\n st.error(\"Invalid Temp Sensor detected - {}\".format(data[\"temp_sensor\"]))\n count += 1\n elif mode == \"numthermals\":\n if str(len(thermal_list)) != str(output[data]):\n st.error(\"Incorrect Thermal sensors numbers detected - 
{}\".format(output[data]))\n count += 1\n elif mode == \"version\":\n if str(version) not in str(data[\"version\"]):\n st.error(\"Invalid Thermal version detected - {}\".format(data[\"version\"]))\n count += 1\n if count:\n st.error(\"Mismatch in PDDF Thermal UTIL data\")\n return False\n return True", "def check_resolution(self,neuron):\r\n\r\n distances = []\r\n for spline in neuron.data.splines:\r\n for i,point in enumerate(spline.points):\r\n #Skip first node (root node of each spline -> has no parent)\r\n if i == 0:\r\n continue\r\n #Virtual nodes basically skip z-sections, so points are more than 50nm (0.005 in CATMAID coords)) apart in z-direction (y-direction in Blender)\r\n dist = math.fabs(point.co[1] - spline.points[i-1].co[1])\r\n if dist > 0:\r\n distances.append(dist)\r\n\r\n return round(sum(distances)/len(distances),3)", "def check_dens_directint(dfi,pot,tol,dens,\n rmin=None,rmax=None,bins=31):\n rs= numpy.linspace(rmin,rmax,bins)\n intdens= numpy.array([dfi.vmomentdensity(r,0,0) for r in rs])\n expdens= numpy.array([dens(r) for r in rs])\n assert numpy.all(numpy.fabs(intdens/expdens-1.) < tol), \\\n \"Density from direct integration is not equal to the expected value\"\n return None", "def _load_data(self): \n # Every key in self.calcdata['compositions'] is a composition, and each composition contains a list of dict entrees.\n # relaxed_structure, input_structure, magmoms, total_energy. \n \n _is_vasp_calc = lambda fs: 'POSCAR' in fs and 'INCAR' in fs and 'KPOINTS' in fs and 'POTCAR' in fs\n # Load VASP runs from given directories\n \n n_matched = 0\n n_inputs = 0\n new_unassigned_strs = []\n for root,dirs,files in os.walk(self.vaspdir):\n #A calculation directories has only 3 status: \n #accepted: calculation was successful, and already entered into calcdata.mson\n #falied: calculated but not successful, either aborted or can't be read into calcdata.mson\n #For these above two, we don't want to submit a calculation or post-process again.\n #not marked: calculation run not started or not finished yet. Since analyzer is always called\n #after runner, we don't need to worry that analyzer will find unmarked folders.\n\n if _is_vasp_calc(files) and (not 'accepted' in files) and (not 'failed' in files):\n print(\"Loading VASP run in {}\".format(root));\n parent_root = os.path.join(*root.split(os.sep)[0:-1])\n parent_parent_root = os.path.join(*root.split(os.sep)[0:-2])\n with open(os.path.join(parent_parent_root,'composition_by_site')) as compfile:\n composition = json.load(compfile)\n compstring = json.dumps(composition)\n \n if compstring not in self.calcdata['compositions']:\n self.calcdata['compositions'][compstring]=[]\n \n if not os.path.isfile(os.path.join(parent_root,'matrix')):\n print('Warning: matrix presave not found. Will autodetect supercell matrix using structure matcher,\\\n and will suffer from numerical errors!')\n matrix = None\n else:\n with open(os.path.join(parent_root,'matrix')) as mat_file:\n matrix = json.load(mat_file)\n #Check existence of output structure\n try:\n relaxed_struct = Poscar.from_file(os.path.join(root,'CONTCAR')).structure\n except:\n print('Entry {} CONTCAR can not be read. 
Skipping.'.format(root))\n open(os.path.join(root,'failed'),'a').close()\n continue\n\n input_struct = Poscar.from_file(os.path.join(parent_root,'POSCAR')).structure\n \n #Check uniqueness\n strict_sm = StructureMatcher(stol=0.1, ltol=0.1, angle_tol=1, comparator=ElementComparator())\n _is_unique = True\n for entry in self.calcdata['compositions'][compstring]:\n entry_struct = Structure.from_dict(entry['relaxed_structure'])\n if strict_sm.fit(entry_struct,relaxed_struct):\n _is_unique = False\n break\n if not _is_unique:\n print('Entry {} alredy calculated before.'.format(root))\n open(os.path.join(root,'accepted'),'a').close()\n continue\n n_inputs += 1\n \n # Note: the input_struct here comes from the poscar in upper root, rather than fm.0, so \n # it is not deformed.\n \n # Rescale volume to that of unrelaxed structure, this will lead to a better mapping back. \n # I changed it to a rescaling tensor\n relaxed_lat_mat = np.matrix(relaxed_struct.lattice.matrix)\n input_lat_mat = np.matrix(input_struct.lattice.matrix)\n o2i_deformation = Deformation(input_lat_mat.T*relaxed_lat_mat.I.T)\n relaxed_deformed = o2i_deformation.apply_to_structure(relaxed_struct)\n #print(relaxed_deformed,input_struct)\n \n # Assign oxidation states to Mn based on magnetic moments in OUTCAR, first check existence of OUTCAR\n try:\n Out=Outcar(os.path.join(root,'OUTCAR'))\n except:\n print('Entry {} OUTCAR can not be read. Skipping.'.format(root))\n open(os.path.join(root,'failed'),'a').close()\n continue\n \n # Get final energy from OSZICAR or Vasprun. Vasprun is better but OSZICAR is much\n # faster and works fine is you separately check for convergence, sanity of\n # magnetic moments, structure geometry\n with open(os.path.join(root, 'OUTCAR')) as outfile:\n outcar_string = outfile.read()\n if 'reached required accuracy' not in outcar_string:\n print('Entry {} did not converge to required accuracy. Skipping.'.format(root))\n open(os.path.join(root,'failed'),'a').close()\n continue\n TotE=Oszicar(os.path.join(root, 'OSZICAR')).final_energy;\n # Checking convergence\n Mag = []\n for SiteInd,Site in enumerate(relaxed_struct.sites):\n Mag.append(np.abs(Out.magnetization[SiteInd]['tot']));\n \n \n new_entry = {}\n new_entry['input_structure']=input_struct.as_dict()\n new_entry['relaxed_structure']=relaxed_struct.as_dict()\n new_entry['relaxed_deformed']=relaxed_deformed.as_dict()\n new_entry['total_energy']=TotE\n new_entry['magmoms']=Mag\n new_entry['matrix']=matrix\n \n if os.path.isfile(os.path.join(parent_parent_root,'axis')):\n with open(os.path.join(parent_parent_root,'axis')) as axisfile:\n axis = json.load(axisfile)\n if 'axis' not in new_entry:\n new_entry['axis']=axis\n \n new_unassigned_strs.append((compstring,root,new_entry))\n \n if len(new_unassigned_strs)==0:\n print('No new structures appeared. 
Calcdata will not be updated.')\n return\n\n #Charge assignment\n if self.is_charged_ce:\n relaxed_deformed_pool = []\n relaxed_strs_pool = []\n mags = []\n roots = []\n energies = []\n comps = []\n inputs = []\n mats = []\n if 'axis' in new_unassigned_strs[0][2]:\n axis = []\n for compstring,root,new_entry in new_unassigned_strs:\n # Out=Outcar(os.path.join(root,'OUTCAR'))\n Mag=new_entry['magmoms']\n relaxed_struct = Structure.from_dict(new_entry['relaxed_structure'])\n relaxed_deformed = Structure.from_dict(new_entry['relaxed_deformed'])\n # Throw out structures where oxidation states don't make charge balanced.\n \n mags.append(Mag)\n roots.append(root)\n relaxed_strs_pool.append(relaxed_struct)\n relaxed_deformed_pool.append(relaxed_deformed)\n comps.append(compstring)\n inputs.append(Structure.from_dict(new_entry['input_structure']))\n energies.append(new_entry['total_energy'])\n mats.append(new_entry['matrix'])\n if 'axis' in new_entry:\n axis.append(new_entry['axis'])\n \n CA = ChargeAssign(relaxed_strs_pool,mags,algo=self.assign_algo)\n relaxed_strs_assigned = CA.assigned_structures\n relaxed_deformed_assigned = CA.extend_assignments(relaxed_deformed_pool,mags)\n \n for i in range(len(inputs)):\n if relaxed_strs_assigned[i] is not None and relaxed_deformed_assigned[i] is not None:\n # Checking whether structure can be mapped to corr function.\n # This is out deformation tolerance. \n try:\n if mats[i] is not None:\n cesup = self.ce.supercell_from_matrix(mats[i])\n corr=cesup.corr_from_structure(relaxed_deformed_assigned[i])\n else:\n corr=self.ce.corr_from_structure(relaxed_deformed_assigned[i])\n except:\n print(\"Entry {} too far from original lattice. Skipping.\".format(roots[i]))\n open(os.path.join(roots[i],'failed'),'a').close()\n continue\n\n assigned_entry = {}\n assigned_entry['input_structure']=inputs[i].as_dict()\n assigned_entry['relaxed_structure']=relaxed_strs_assigned[i].as_dict()\n assigned_entry['relaxed_deformed']=relaxed_deformed_assigned[i].as_dict()\n assigned_entry['matrix']=mats[i]\n assigned_entry['total_energy']=energies[i]\n assigned_entry['magmoms']=mags[i]\n if 'axis' in new_unassigned_strs[0][2]:\n assigned_entry['axis']=axis[i]\n self.calcdata['compositions'][comps[i]].append(assigned_entry)\n print('Entry {} accepted!'.format(roots[i]))\n open(os.path.join(roots[i],'accepted'),'a').close()\n n_matched+=1\n\n else:\n print(\"Entry {} can not be assigned. Skipping.\".format(roots[i]))\n open(os.path.join(roots[i],'failed'),'a').close()\n continue\n else:\n print('Doing non charged ce.')\n for compstring,root,new_entry in new_unassigned_strs:\n # Checking whether structure can be mapped to corr function.\n # This is out deformation tolerance. \n try:\n if new_entry['matrix'] is not None:\n cesup = self.ce.supercell_from_matrix(new_entry['matrix'])\n corr = cesup.corr_from_structure(Structure.from_dict(new_entry['relaxed_defromed']))\n else:\n corr = self.ce.corr_from_structure(Structure.from_dict(new_entry['relaxed_defromed']))\n except:\n print(\"Entry {} too far from original lattice. Skipping.\".format(root))\n open(os.path.join(root,'failed'),'a').close()\n continue\n\n self.calcdata['compositions'][compstring].append(new_entry)\n open(os.path.join(root,'accepted'),'a').close()\n n_matched+=1\n # Data already deduplicated!\n\n print('{}/{} structures matched in this run. 
Parsed vasp data will be saved into {}.'.format(n_matched,n_inputs,self.calc_data_file))", "def read_vpd(vpd_filepath: str, moreinfo=False) -> vmdstruct.Vmd:\n\tcleanname = core.get_clean_basename(vpd_filepath) + \".vpd\"\n\tcore.MY_PRINT_FUNC(\"Begin reading VPD file '%s'\" % cleanname)\n\t\n\t# read textfile to linelist, no CSV fields to untangle here\n\tlines = core.read_txtfile_to_list(vpd_filepath, use_jis_encoding=True)\n\t\n\t# verify magic header \"Vocaloid Pose Data file\"\n\tif lines[0] != \"Vocaloid Pose Data file\":\n\t\tcore.MY_PRINT_FUNC(\"warning: did not find expected magic header! this might not be a real VPD file!\")\n\t# get rid of the header\n\tlines.pop(0)\n\t\n\t# this var is a state machine that keeps track of what I expect to find next\n\t# if i find anything other than blankspace or what I expect, then err & die\n\tparse_state = 0\n\t\n\t# save this so I know when I'm done reading all the bones the header promised\n\tnum_bones = 0\n\t\n\t# temp vars to hold stuff from previous lines\n\ttemp_title = \"qwertyuiop\"\n\ttemp_name = \"foobar\"\n\ttemp_pos = tuple()\n\ttemp_rot = tuple()\n\ttemp_value = 0.0\n\t\n\t# this is the VMD object that will be ultimately returned\n\tvmd_boneframes = []\n\tvmd_morphframes = []\n\t\n\t# iterate over the remaining lines until end-of-file\n\tfor d,line in enumerate(lines):\n\t\t# vertical whitespace is always acceptable\n\t\tif not line or line.isspace(): continue\n\t\t\n\t\t# if line is not blank, it had better be something good:\n\t\tif parse_state == 0: # 0 = model title\n\t\t\tm = title_re.match(line) # regex match from beginning of line\n\t\t\tif m is None:\n\t\t\t\tcore.MY_PRINT_FUNC(\"Parse err line %d state %d: failed to find model title\" % (d + 2, parse_state))\n\t\t\t\tcore.MY_PRINT_FUNC(\"line = '%s'\" % line)\n\t\t\t\traise RuntimeError()\n\t\t\ttemp_title = m.group(1) # if valid match, then grab the actual title\n\t\t\tif moreinfo: core.MY_PRINT_FUNC(\"...model name = JP:'%s'\" % temp_title)\n\t\t\tparse_state = 10 # next thing to look for is #bones\n\t\t\n\t\telif parse_state == 10: # 10 = #bones\n\t\t\tm = f1_re.match(line) # regex match from beginning of line\n\t\t\tif m is None:\n\t\t\t\tcore.MY_PRINT_FUNC(\"Parse err line %d state %d: failed to find number of bones\" % (d + 2, parse_state))\n\t\t\t\tcore.MY_PRINT_FUNC(\"line = '%s'\" % line)\n\t\t\t\traise RuntimeError()\n\t\t\tnum_bones = int(float(m.group(1))) # if a valid match, then grab the actual # of bones\n\t\t\tif moreinfo: core.MY_PRINT_FUNC(\"...# of boneframes = %d\" % num_bones)\n\t\t\tif num_bones == 0:\tparse_state = 30 # if there are 0 bones then immediately begin with the morphs\n\t\t\telse:\t\t\t\tparse_state = 20 # otherwise look for bones next\n\t\t\n\t\telif parse_state == 20: # 20 = boneA, name\n\t\t\tm = bone_re.match(line) # regex match from beginning of line\n\t\t\tif m is None:\n\t\t\t\tcore.MY_PRINT_FUNC(\"Parse err line %d state %d: failed to find bone name\" % (d + 2, parse_state))\n\t\t\t\tcore.MY_PRINT_FUNC(\"line = '%s'\" % line)\n\t\t\t\traise RuntimeError()\n\t\t\tidx, name = m.group(1,2) # get idx and name\n\t\t\ttemp_name = name\n\t\t\t# can i use idx for anything? 
or is it totally useless?\n\t\t\tparse_state = 21 # next look for quaternion rotation\n\t\t\n\t\telif parse_state == 21: # 21 = boneB, xyz pos\n\t\t\tm = f3_re.match(line) # regex match from beginning of line\n\t\t\tif m is None:\n\t\t\t\tcore.MY_PRINT_FUNC(\"Parse err line %d state %d: failed to find bone XYZ position\" % (d + 2, parse_state))\n\t\t\t\tcore.MY_PRINT_FUNC(\"line = '%s'\" % line)\n\t\t\t\traise RuntimeError()\n\t\t\tpos = m.group(1,2,3) # get all 3 components\n\t\t\ttemp_pos = [float(f) for f in pos] # convert strings to floats\n\t\t\tparse_state = 22 # next look for quaternion rotation\n\t\t\n\t\telif parse_state == 22: # 22 = boneC, xyzw quaternion rotation\n\t\t\tm = f4_re.match(line) # regex match from beginning of line\n\t\t\tif m is None:\n\t\t\t\tcore.MY_PRINT_FUNC(\"Parse err line %d state %d: failed to find bone XYZW rotation\" % (d + 2, parse_state))\n\t\t\t\tcore.MY_PRINT_FUNC(\"line = '%s'\" % line)\n\t\t\t\traise RuntimeError()\n\t\t\tquat = m.group(1,2,3,4) # get all 4 components\n\t\t\tquat = [float(f) for f in quat] # convert strings to floats\n\t\t\tquat.insert(0,quat.pop(-1)) # WXYZW -> XYZW, AKA move tail (w) to head\n\t\t\ttemp_rot = core.quaternion_to_euler(quat) # convert quaternion to euler angles\n\t\t\tparse_state = 23 # next look for closing curly\n\t\t\n\t\telif parse_state == 23: # 23 = boneD, closing curly\n\t\t\tm = close_re.match(line) # regex match from beginning of line\n\t\t\tif m is None:\n\t\t\t\tcore.MY_PRINT_FUNC(\"Parse err line %d state %d: bone item not properly closed\" % (d + 2, parse_state))\n\t\t\t\tcore.MY_PRINT_FUNC(\"line = '%s'\" % line)\n\t\t\t\traise RuntimeError()\n\t\t\t# finish the bone-obj and add to VMD structure\n\t\t\t# this_boneframe = [bname_str, f, xp, yp, zp, xrot, yrot, zrot, phys_off, x_ax, y_ax, z_ax, r_ax, x_ay, y_ay,\n\t\t\t# \t\t\t\t z_ay, r_ay, x_bx, y_bx, z_bx, r_bx, x_by, y_by, z_by, r_by]\n\t\t\tnewframe = vmdstruct.VmdBoneFrame(\n\t\t\t\tname=temp_name, f=0, pos=temp_pos, rot=list(temp_rot), phys_off=False,\n\t\t\t\tinterp=list(core.bone_interpolation_default_linear)\n\t\t\t)\n\t\t\tvmd_boneframes.append(newframe)\n\t\t\tif len(vmd_boneframes) == num_bones:\tparse_state = 30 # if i got all the bones i expected, move to morphs\n\t\t\telse:\t\t\t\t\t\t\t\t\tparse_state = 20 # otherwise, get another bone\n\t\t\n\t\telif parse_state == 30: # 30 = morphA, name\n\t\t\tm = morph_re.match(line) # regex match from beginning of line\n\t\t\tif m is None:\n\t\t\t\tcore.MY_PRINT_FUNC(\"Parse err line %d state %d: failed to find morph name\" % (d + 2, parse_state))\n\t\t\t\tcore.MY_PRINT_FUNC(\"line = '%s'\" % line)\n\t\t\t\traise RuntimeError()\n\t\t\tidx, name = m.group(1,2) # get idx and name\n\t\t\ttemp_name = name\n\t\t\t# can i use idx for anything? 
or is it totally useless?\n\t\t\tparse_state = 31 # next look for value\n\t\t\n\t\telif parse_state == 31: # 31 = morphB, value\n\t\t\tm = f1_re.match(line) # regex match from beginning of line\n\t\t\tif m is None:\n\t\t\t\tcore.MY_PRINT_FUNC(\"Parse err line %d state %d: failed to find morph value\" % (d + 2, parse_state))\n\t\t\t\tcore.MY_PRINT_FUNC(\"line = '%s'\" % line)\n\t\t\t\traise RuntimeError()\n\t\t\tv = m.group(1) # get value\n\t\t\ttemp_value = float(v) # convert strings to floats\n\t\t\tparse_state = 32 # next look for close\n\t\t\n\t\telif parse_state == 32: # 32 = morphC, closing curly\n\t\t\tm = close_re.match(line) # regex match from beginning of line\n\t\t\tif m is None:\n\t\t\t\tcore.MY_PRINT_FUNC(\"Parse err line %d state %d: morph item not properly closed\" % (d + 2, parse_state))\n\t\t\t\tcore.MY_PRINT_FUNC(\"line = '%s'\" % line)\n\t\t\t\traise RuntimeError()\n\t\t\t# finish the morph-obj and add to VMD structure\n\t\t\t# morphframe_list.append([mname_str, f, v])\n\t\t\tnewframe = vmdstruct.VmdMorphFrame(name=temp_name, f=0, val=temp_value)\n\t\t\tvmd_morphframes.append(newframe)\n\t\t\tparse_state = 30 # loop morphs until end-of-file\n\t\t\n\t\telse:\n\t\t\tcore.MY_PRINT_FUNC(\"this should not happen, err & die\")\n\t\t\traise RuntimeError()\n\t\n\tif moreinfo: core.MY_PRINT_FUNC(\"...# of morphframes = %d\" % len(vmd_morphframes))\n\n\t# verify we did not hit end-of-file unexpectedly, looking-for-morphA is only valid ending state\n\tif parse_state != 30:\n\t\tcore.MY_PRINT_FUNC(\"Parse err state %d: hit end-of-file unexpectedly\" % parse_state)\n\t\traise RuntimeError()\n\t\n\t# after hitting end-of-file, assemble the parts of the final returnable VMD-list thing\n\t# builds object \t(header, boneframe_list, morphframe_list, camframe_list, lightframe_list, shadowframe_list, ikdispframe_list)\n\tvmd_retme = vmdstruct.Vmd(\n\t\tvmdstruct.VmdHeader(version=2, modelname=temp_title),\n\t\tvmd_boneframes,\n\t\tvmd_morphframes,\n\t\tlist(), list(), list(), list())\n\t\n\tcore.MY_PRINT_FUNC(\"Done reading VPD file '%s'\" % cleanname)\n\t\n\treturn vmd_retme", "def test_dominant_variant(self):\n assert not self.dominant_variant['Inheritance_model']['AR_hom']\n assert not self.dominant_variant['Inheritance_model']['AR_hom_dn']\n assert self.dominant_variant['Inheritance_model']['AD']\n assert not self.dominant_variant['Inheritance_model']['AD_dn']", "def test_resolution_volume():\n\n instr = gen_std_instr()\n instr.calc_resolution([1, 1, 0, 0])\n\n resvol = (2 * np.pi) ** 2 / np.sqrt(np.linalg.det(instr.RMS))\n\n assert (np.round(resvol, 8) == 1.22e-6)", "def testProcessPathSpecVMDK(self):\n knowledge_base_values = {'year': 2016}\n session = sessions.Session()\n\n test_file_path = self._GetTestFilePath(['image.vmdk'])\n self._SkipIfPathNotExists(test_file_path)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)\n path_spec = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_VMDK, parent=path_spec)\n path_spec = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_TSK, location='/',\n parent=path_spec)\n storage_writer = fake_writer.FakeStorageWriter(session)\n\n expected_event_counters = {\n 'fs:stat': 18}\n\n self._TestProcessPathSpec(\n storage_writer, path_spec, expected_event_counters,\n knowledge_base_values=knowledge_base_values)", "def isValidDir(self,dir):\n if not os.path.isdir(dir):\n print('Check the directory you have supplied')\n return False\n elif 
not os.path.isfile(dir+'/'+self.elastic_name+'.0001'):\n print(\"The directory you supplied does not have a elastic.0001 file!!! \\n If your elastic scan has a different name, please specify as: 'elastic_name'\")\n return False\n elif not os.path.isfile(dir+'/'+self.nixs_name+'.0001'):\n print(\"The directory you supplied does not have a NIXS.0001 file!!! \\n If your raman scan has a different name, please specify as: 'NIXS_name'\")\n return False\n elif not os.path.isfile(dir+'/'+self.wide_name+'.0001'):\n print(\"No wide scans found. Continuing...\")\n return True\n else:\n return True", "def test_star_type_probability_no_Av(self):\n\n # download cached file\n star_prob_fname = download_rename(f\"{self.basename}_star_type_probability.asdf\")\n with asdf.open(star_prob_fname) as af:\n star_prob_info = copy.deepcopy(af.tree)\n\n # edit the 2D PDF file to not have A_V info\n temp_pdf2d_fname = tempfile.NamedTemporaryFile(suffix=\".fits\").name\n temp_hdu_list = []\n with fits.open(self.pdf2d_fname_cache) as hdu:\n for ext in hdu:\n if \"Av+\" in ext.name or \"+Av\" in ext.name:\n continue\n temp_hdu_list.append(ext)\n fits.HDUList(temp_hdu_list).writeto(temp_pdf2d_fname)\n\n # edit the expected output to have NaNs in columns that require A_V\n # (currently, that's all columns)\n expected_star_prob = Table(star_prob_info[\"output\"])\n for col in expected_star_prob.colnames:\n if col == \"ext_O_star\":\n expected_star_prob[col] = np.nan\n if col == \"dusty_agb\":\n expected_star_prob[col] = np.nan\n\n # run star_type_probability\n star_prob = star_type_probability.star_type_probability(\n self.pdf1d_fname_cache, temp_pdf2d_fname, **star_prob_info[\"input\"],\n )\n\n # compare to expected table\n compare_tables(expected_star_prob, Table(star_prob))", "def getVtypeV():\n vtypeEdgeDictSpeedList = {}\n inputFile = open(path.vtypeprobe, 'r')\n for line in inputFile:\n words = line.split('\"')\n if words[0].find(\"<vehicle id=\") != -1 and words[3][0] != ':':\n vtypeEdgeDictSpeedList.setdefault(\n words[3][:-2], []).append(float(words[15]) * 3.6)\n inputFile.close()\n for edge in vtypeEdgeDictSpeedList:\n vtypeEdgeDict[edge] = sum(\n vtypeEdgeDictSpeedList[edge]) / len(vtypeEdgeDictSpeedList[edge])\n print(len(vtypeEdgeDict))", "def test_physical_volumes(self):\n pvs = self.dwrap.phys_vols\n self.assertEqual(1, len(pvs))\n\n pv = pvs[0]\n self.assertEqual('01MUlCTSAgICAgSVBSLTAgICA1RDgyODMwMDAwMDAwMDQw',\n pv.udid)\n self.assertEqual(1089592, pv.capacity)\n self.assertEqual('hdisk1', pv.name)\n self.assertEqual('active', pv.state)\n self.assertFalse(pv.is_fc_backed)\n self.assertTrue(pv.avail_for_use)\n self.assertEqual('SAS RAID 0 Disk Array', pv.description)\n self.assertEqual('U78C9.001.WZS0095-P1-C14-R1-L405D828300-L0',\n pv.loc_code)\n self.assertEqual(22, pv.read_iops_limit)\n self.assertEqual(33, pv.write_iops_limit)", "def _calc_density(self, EigenVecs, num_electrons): \n density = 0\n\n for i in range (0, len(self.occupation_list)):\n #print(\"orbital number - {0} adding occupation: {1}\".format(i, self.occupation_list[i]))\n #density += self.occupation_list[i] * np.power(np.abs(EigenVecs[:, i]), 2)\n density += self.occupation_list[i] * np.abs(EigenVecs[:, i])**2 \n\n self._check_density(density, num_electrons)\n return density", "def _normality_checking(series):\r\n\r\n JB_stat, p, _, __ = jarque_bera(series)\r\n\r\n print(\"\\n--------------------------------------------\\n\")\r\n print(\"Checking Normality of {}\".format(series.name))\r\n print(\"Test Statistic : %.2f, p value : 
%.5f\" % (JB_stat, p))\r\n\r\n alpha = 0.05\r\n\r\n if p > alpha:\r\n\r\n print(\"Data looks Gaussian: fail to reject the Null Hypothesis\")\r\n return False\r\n\r\n else:\r\n\r\n print(\"Data does not look Gaussian: we reject the Null Hypothesis\")\r\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the badge is initialized.
def test_init(): _badgegen = badgegen.BadgeGenerator() assert isinstance(_badgegen, badgegen.BadgeGenerator)
[ "def test_init(self):\n assert self.registration_behaviour.is_registered is False\n assert self.registration_behaviour.registration_in_progress is False\n assert self.registration_behaviour.failed_registration_msg is None\n assert self.registration_behaviour._nb_retries == 0", "def test_create_and_get_badge(self):\n badge = badge_api.create_badge(*self.badge_values)\n\n # test the presence or attributes for a badge\n for attr in self.badge_attrs:\n self.assertIn(attr, badge)\n\n # test attribute values\n attrs = self.badge_attrs\n del attrs[0]\n del attrs[0]\n for key, value in zip(attrs, self.badge_values):\n self.assertEquals(badge[key], value)\n \n # test make sure attributes are equal\n badge2 = badge_api.get_badge(badge['uri'])\n self.assertEqual(badge, badge2)\n\n # test that the badge shows up in drafts\n badges = badge_api.get_user_draft_badges(badge['author_uri'])\n self.assertEquals(len(badges), 1)\n self.assertEquals(badge2, badges[0])", "def test_init(self):\n g = BillGroup()\n\n self.assertTrue(len(g.getPayors()) == 0, 'No payors should be added yet.')\n self.assertTrue(g.getContributionTotal() == 0, 'No contributions should have been made yet.')", "def testInitialization(self):\n self.assertIsNotNone(self.yara_collector)", "def testInitialization(self):\n test_state = state.DFTimewolfState(config.Config)\n collector = gsheets.GoogleSheetsCollector(test_state)\n self.assertIsNotNone(collector)", "def test_consumer_badge_is_valid(self):\n new_consumer = Consumer(badge=0)\n self.assertIs(new_consumer.is_valid_badge_value(), True)", "def test_init(self):\n fw = firewall.FireWall()\n\n self.assertTrue(hasattr(fw, '_rlock'))", "def testInit(self):\n event_tester = EventTester()\n self.assertEqual(event_tester.events, [])", "def test_not_ready(self):\n self.assertFalse(self.notification.ready())", "def test_ready(self):\n notification = self.notification\n notification.destination[\"frequency\"] = 0\n self.assertTrue(notification.ready())", "def testInitialization(self) -> None:\n test_state = state.DFTimewolfState(config.Config)\n processor = gcp_crt.GCPCloudResourceTree(test_state)\n self.assertIsNotNone(processor)", "def test_init(self):\n payload = payloads.DeriveKeyResponsePayload()\n\n self.assertIsNone(payload.unique_identifier)\n self.assertIsNone(payload.template_attribute)", "def test_init(name: str, species: str, level: int):\n Pakuri(name=name, species=species, level=level)\n assert True", "def test_init(self):\r\n\r\n test = Maps()\r\n self.assertEqual(test.drawings, {})\r\n self.assertEqual(test.names, [])\r\n self.assertEqual(len(test.__dict__), 2)", "def test_init(self):\n payload = payloads.DeriveKeyRequestPayload()\n\n self.assertIsNone(payload.object_type)\n self.assertIsNone(payload.unique_identifiers)\n self.assertIsNone(payload.derivation_method)\n self.assertIsNone(payload.derivation_parameters)\n self.assertIsNone(payload.template_attribute)", "def test_init() -> None:\n # As long as this doesn't fail, the test will pass.\n # Note: This is a pedagogical example, we don't usually test this.\n DummyNet()", "def test_setup(self):\n assert self.cosm_trade_handler.setup() is None\n self.assert_quantity_in_outbox(0)", "def test_init(self):\n u = create_upvote()\n self.assertTrue(isinstance(u, Upvote))", "def test_update_badge(self):\n badge = badge_api.create_badge(*self.badge_values)\n attrs = self.badge_attrs\n del attrs[1]\n kwargs = dict(zip(self.badge_attrs, [badge['uri']] + self.badge_values))\n del kwargs['author_uri']\n kwargs['title'] = 'A new title'\n 
badge_api.update_badge(**kwargs)\n badge2 = badge_api.get_badge(badge['uri'])\n self.assertNotEquals(badge, badge2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Imports a CSV of DNA sequences and inserts them into an array
def array(file): sequences = [] recSite = [] freq = [] with open(file, 'r') as csv_file: fileReader = csv.reader(csv_file, delimiter = "|") fileReader.next() # throwaway header row for row in fileReader: strippedRow = row[0].strip(",").split(',') sequences.append(strippedRow[1]) recSite.append(strippedRow[2]) freq.append(int(strippedRow[4])) return sequences, recSite, freq
[ "def load_sequence(filename):\n with open(filename) as f:\n data = []\n for line in f:\n data += [int(n) for n in line.strip(',\\n').split(',')]\n return data", "def importData(filename):\n df = pd.DataFrame(columns = ['LocID', 'Location', 'Biotype', 'nuclA', 'nuclT',\n 'nuclG', 'nuclC', 'nuclN', 'nbTr'])\n dicoTmp = {}\n fastaOrigin = SeqIO.parse(open(filename),'fasta')\n for fasta in fastaOrigin:\n name, seq = fasta.id, str(fasta.seq)\n if name.split(':')[5]:\n location = name.split(':')[1]\n listTrBt = name.split(':')[5].split(';')[0].split('|')\n dicoTrBt = { TrBt.split('-')[0] : TrBt.split('-')[1] for TrBt in listTrBt}\n for tr in dicoTrBt:\n if not ((location == '3UTR' or location == '5UTR') and\n rF.addTypeTr(dicoTrBt[tr]) != 'Coding'):\n #if the annotation is good\n LocID = location+'-'+dicoTrBt[tr]\n if LocID not in dicoTmp:\n dicoTmp[LocID] = {'LocID' : LocID,\n 'Location' : location,\n 'Biotype' : dicoTrBt[tr],\n 'nuclA' : 0, 'nuclT' : 0,\n 'nuclG' : 0, 'nuclC' : 0,\n 'nuclN' : 0, 'nbTr' : [tr]}\n dicoTmp[LocID].update({'nuclA' : dicoTmp[LocID]['nuclA'] + seq.count('A'),\n 'nuclT' : dicoTmp[LocID]['nuclT'] + seq.count('T'),\n 'nuclG' : dicoTmp[LocID]['nuclG'] + seq.count('G'),\n 'nuclC' : dicoTmp[LocID]['nuclC'] + seq.count('C'),\n 'nuclN' : dicoTmp[LocID]['nuclN'] + seq.count('N')})\n dicoTmp[LocID]['nbTr'].append(tr)\n listTodf = []\n for locID in dicoTmp:\n listTodf.append(dicoTmp[locID])\n dfTmp = pd.DataFrame(listTodf)\n df = df.append(dfTmp)\n return(df)", "def load_dataset(path_fasta):\n fasta_sequences = SeqIO.parse(open(path_fasta),'fasta')\n \n for fasta in fasta_sequences:\n desc = fasta.description.split(\" \")\n labels = desc[1].split(\"-\")\n if len(labels) > 2:\n continue\n loclabel, memlabel, sequence = labels[0], labels[1], str(fasta.seq)\n if len(desc) > 2:\n test_loc.append(loclabel)\n test_mem.append(memlabel)\n test_seq.append(sequence)\n else:\n trainval_loc.append(loclabel)\n trainval_mem.append(memlabel)\n trainval_seq.append(sequence)", "def importData(file):\n with open(file,'rb') as csvfile:\n\tdatareader = csv.reader(csvfile, delimiter = ',', quotechar='|')\n examples = []\n\tfor row in datareader:\n pairs = []\n for i in range(len(row)):\n row[i] = row[i].replace(\" \",\"\")\n pairs.append((attris[i], row[i]))\n examples.append(Example(pairs))\n #globalExamples = examples\n return examples", "def csv_to_ndarray(fname): \n\t\ttry:\n\t\t\treturn np.genfromtxt(fname, delimiter=\",\")\t\n\t\texcept Exception, e:\n\t\t\tprint \"Error loading file %s:\" % fname\n\t\t\traise", "def load(self, file):\n seq_fn = open(file, \"r\")\n seq_list = []\n\n for line in seq_fn:\n seq_x = [int(x) for x in line.strip().split(\"\\t\")]\n seq_y = [int(y) for y in line.strip().split(\"\\t\")]\n self.add_sequence(seq_x, seq_y)\n seq_fn.close()", "def extract_data_from_csv(path):\n lemmas = [] # a string array containing the lemmas\n sentences = [] # a string array containing the sentences\n labels = [] # a int array containing their corresponding scores\n\n faulty_samples_counter = 0\n\n with open(path, 'r', encoding=\"utf-8\") as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n # only add a row if it has the syntax we expect it to have\n if _has_valid_syntax(row):\n lemmas.append(str(row['Lemma']))\n sentences.append(str(row['Example']))\n labels.append(int(row['Score']))\n else:\n faulty_samples_counter += 1\n\n if faulty_samples_counter > 0:\n logging.info(str(faulty_samples_counter) + ' faulty samples were found among the given ' +\n 
str(len(sentences)) + '. They will not be considered in the embeddings calculations.')\n\n # sort by sentence length (ascending)\n # we zip and unzip using the same function\n return zip(*sorted(zip(lemmas, sentences, labels), key=lambda x: len(x[1])))", "def load_data(data_file=None):\n\n # Set defaults.\n if data_file is None:\n data_file = 'bin_seq.csv'\n\n bin_array = np.genfromtxt(data_file, delimiter=',')\n\n return(bin_array)", "def getSequencesFromFile(inputFile):\n sequences = io().readFastaFile(inputFile)\n return sequences", "def read_from_csv(csv, seq_name, seq, group=None):\n csv_file = pd.read_csv(csv)\n df = csv_file[[seq_name, seq, group]]\n # with open(\"orf9-1.fasta\", \"w\") as fasta:\n # \tfor index, row in df.iterrows():\n # \t\tfasta.write(\">\"+str(row[seq_name])+\"\\n\"+str(row[seq])+\"\\n\")\n\n # for human 9-1\n for index, row in df.iterrows():\n fasta = \"orf9-1_G0\"+str(row[group])+\".fasta\"\n with open(fasta, \"a\") as fasta:\n fasta.write(\">\"+str(row[seq_name])+\"\\n\"+str(row[seq])+\"\\n\")", "def readinMATRIX(csvpath):\n\n G = []\n with open (csvpath, 'rb') as csvfile:\n myreader = csv.reader(csvfile)\n for row in myreader:\n G.append(row)\n return G", "def read_csv_with_numpy():\n filename = \"C:\\\\Users\\mdjuk\\\\repos\\\\q_python_scripts\\\\digits.csv\"\n\n data = np.loadtxt(filename, delimiter=',')\n\n return(data)", "def load_airportcodes():\n\n the_file = open(\"./seed_data/airportcodes.txt\")\n\n for line in the_file:\n \tsplit_line = line.rstrip().split(\"|\")\n \tlocation = split_line[0]\n \tcode = split_line[1]\n\n # preparing to insert into database\n new_airportcode = AirportCode(code=code, location=location)\n print new_airportcode\n\n db.session.add(new_airportcode)\n \tdb.session.commit()", "def read(filename: str) -> List[Accident]:\n #return [] #stub\n # Template from HtDAP\n \n # loa contains the result so far\n loa = [] # type: List[Accident]\n\n with open(filename) as csvfile:\n \n reader = csv.reader(csvfile)\n next(reader) # skip header line\n\n for row in reader:\n a = Accident(parse_roles(row[1]), parse_int(row[2]))\n loa.append(a)\n \n return loa", "def str2seq(dna_seq_str):\n dna_seq_array = np.asarray(list(dna_seq_str))\n return dna_seq_array", "def import_csv(in_csv, delimit=','):\n with open(in_csv, encoding='utf-8') as source:\n sourcereader = csv.reader(source, delimiter=delimit)\n data_list = []\n for row in sourcereader:\n data_list.append(row)\n return data_list", "def load_raw_cell_lines():\n rna_url = (\n \"https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSM3271040\"\n \"&format=file&file=GSM3271040%5FRNA%5FsciCAR%5FA549%5Fgene%5Fcount.txt.gz\")\n rna_cells_url = (\n \"https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSM3271040\"\n \"&format=file&file=GSM3271040%5FRNA%5FsciCAR%5FA549%5Fcell.txt.gz\"\n )\n rna_genes_url = (\n \"https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSM3271040\"\n \"&format=file&file=GSM3271040%5FRNA%5FsciCAR%5FA549%5Fgene.txt.gz\"\n )\n atac_url = (\n \"https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSM3271041\"\n \"&format=file&file=GSM3271041%5FATAC%5FsciCAR%5FA549%5Fpeak%5Fcount.txt.gz\"\n )\n atac_cells_url = (\n \"https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSM3271041\"\n \"&format=file&file=GSM3271041%5FATAC%5FsciCAR%5FA549%5Fcell.txt.gz\"\n )\n atac_genes_url = (\n \"https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSM3271041\"\n \"&format=file&file=GSM3271041%5FATAC%5FsciCAR%5FA549%5Fpeak.txt.gz\"\n )\n rna_genes = pd.read_csv(rna_genes_url, low_memory=False, index_col=0)\n atac_genes = 
pd.read_csv(atac_genes_url, low_memory=False, index_col=1)\n rna_cells = pd.read_csv(rna_cells_url, low_memory=False, index_col=0)\n atac_cells = pd.read_csv(atac_cells_url, low_memory=False, index_col=0)\n\n with tempfile.TemporaryDirectory() as tempdir:\n rna_file = os.path.join(tempdir, \"rna.mtx.gz\")\n scprep.io.download.download_url(rna_url, rna_file)\n rna_data = scprep.io.load_mtx(rna_file, cell_axis=\"col\").tocsr()\n atac_file = os.path.join(tempdir, \"atac.mtx.gz\")\n scprep.io.download.download_url(atac_url, atac_file)\n atac_data = scprep.io.load_mtx(atac_file, cell_axis=\"col\").tocsr()\n return rna_data, atac_data, rna_cells, atac_cells, rna_genes, atac_genes", "def extract(filepath):\r\n with open(filepath, \"r\") as f:\r\n dataset = f.readlines()\r\n dataset = map(lambda i: i.strip('\\n').split(';'), dataset)\r\n dataset = np.array(list(dataset))\r\n return dataset", "def load_sequences(datf, length):\n dirname = CHROM_DIR\n seqdat = pd.DataFrame()\n for gen, chrom in datf[['genome', 'chromosome']] \\\n .groupby(['genome', 'chromosome']).count().index:\n\n chrom_file = dirname + gen + \"_\" + chrom.strip(\"chr\") + \".fasta\"\n chrom_record = SeqIO.read(chrom_file, 'fasta')\n\n # get rows for organism and chromosome\n startstops = datf.loc[(datf['genome'] == gen) & (datf['chromosome'] == chrom)]\n # retrive motif + indent\n motifs, mstarts, mstops = search_chromosome(chrom_record,\n startstops[\"start\"],\n startstops[\"stop\"],\n startstops[\"strand\"],\n length)\n rows = pd.concat([startstops, motifs, mstarts, mstops], axis=1)\n rows.columns = [\"motif-id\", \"organism\", \"genome\", \"chromosome\", \"start\",\n \"stop\", \"strand\", \"seq\", \"mstart\", \"mstop\"]\n seqdat = seqdat.append(rows, ignore_index=True)\n\n return seqdat" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a DNA sequence and calculates the running average GC content with a default window of 3 base pairs over the length of the sequence
def GC_content(sequence, recLength = 5, overhang = 12, window = 3, ymax = 1, ymin = -1): GC_array = [] maxGC = 100 minGC = 0 # GC percentages for bp in sequence: if bp.capitalize() in ['G', 'C']: GC_array.append(100) else: GC_array.append(0) # window weighting weights = np.repeat(1.0, window)/float(window) runningAverage = np.convolve(GC_array, weights, 'valid') # normalizing data normalize = (ymax - ymin)*(runningAverage - minGC)/(maxGC - minGC) + ymin; # pulling out feature indecies middle = len(runningAverage)/2 + len(runningAverage)%2 - 1 start = middle - recLength/2 end = middle + recLength/2 + 1 return GC_array, runningAverage[start-overhang:end+overhang], normalize[start-overhang:end+overhang]
[ "def GC(seq):\n\tA = seq.count('A')\n\tT = seq.count('T')\n\tC = seq.count('C')\n\tG = seq.count('G')\n\treturn float(C+G) / float(A+T+G+C)", "def GC_content(dna):\n g = dna.count('G')\n c = dna.count('C')\n ret = (g+c)/len(dna)\n return ret", "def get_at_gc_ratio(sequence):\n return get_at_content(sequence) / get_gc_content(sequence)", "def gc_skew(sequence):\n half = len(sequence) // 2\n genome = np.frombuffer(sequence.encode() + sequence.encode(), dtype='uint8')\n g = np.concatenate(([0], np.array(genome == ord('G'), dtype='uint8').cumsum()))\n c = np.concatenate(([0], np.array(genome == ord('C'), dtype='uint8').cumsum()))\n gc = g - c\n skew = gc[half:(half + len(sequence))] - gc[0:len(sequence)] + \\\n gc[(len(sequence) - half):(2 * len(sequence) - half)] - gc[len(sequence):(2 * len(sequence))]\n return skew", "def countLength(infile):\n infile = open(infile,'r')\n fasta_storage = defaultdict(list)\n chr_length = 0\n chr_full = ''\n chr_name = ''\n for line in infile:\n # Makes sure that '\\n' doesn't get added to the chr length\n line = line.rstrip()\n if line[0] == '>' and chr_full != '':\n fasta_storage[chr_name] = [chr_length, gcContent(chr_full)]\n chr_length = 0\n chr_full = ''\n chr_name = line[1:]\n #Use this to see progress:\n #print chr_name\n elif line[0] == '>':\n chr_name = line[1:]\n chr_length = 0\n chr_full = ''\n #See progress:\n #print chr_name\n else:\n chr_length += len(line)\n chr_full += line\n #print fasta_storage\n fasta_storage[chr_name] = [chr_length, gcContent(chr_full)]\n percent_sum = 0\n percent_num = 0\n print 'Chr Name','\\t\\t','Chr Length','\\t\\t','GC Percent'\n for key in fasta_storage.keys():\n print key,'\\t\\t',fasta_storage[key][0],'\\t\\t',fasta_storage[key][1],'%'\n percent_sum += fasta_storage[key][1]\n percent_num += 1\n print 'Average GC Percentage:',(percent_sum/percent_num),'%'", "def GA(seq):\n g_a = sum(seq.count(x) for x in [\"G\", \"A\", \"g\", \"a\", \"S\", \"s\"])\n try:\n return g_a * 100.0 / len(seq)\n except ZeroDivisionError:\n return 0.0", "def GC_content(self):\n gc = self.seq.count(\"G\") + self.seq.count(\"C\")\n return gc/float(len(self))", "def calculate_formal_contig_stats( filename ):\n\t\n\tprint \"calculation of formal assembly stats ... 
please wait!\"\n\tnumber_of_bases_without_N = 0\t#counts all bases without N\n\tnumber_of_gc = 0\t\t#counts occurences of G or C in sequence\n\tcontig_lengths = []\t\t#lengths of all contigs in the assembly; used for calculation of min, max and mean\n\t\n\twith open( filename, 'r' ) as f:\n\t\tfirst_line = f.readline()\n\t\tline = f.readline()\n\t\tsequence = \"\"\n\t\tcounter = 1\n\t\twhile line:\n\t\t\tif line[0] == '>':\t#new header => evaluate current sequence and set back to empty string\n\t\t\t\tfor base in sequence.upper():\n\t\t\t\t\tif base == 'G' or base == 'C':\n\t\t\t\t\t\tnumber_of_gc += 1\n\t\t\t\t\t\tnumber_of_bases_without_N += 1\n\t\t\t\t\telif base == 'A' or base == 'T':\n\t\t\t\t\t\tnumber_of_bases_without_N += 1\n\t\t\t\tcontig_lengths.append( len( sequence ) )\n\t\t\t\tsequence = \"\"\n\t\t\telse:\n\t\t\t\tsequence += line.strip()\n\t\t\tline = f.readline()\n\t\t\tcounter += 1\n\t\t\tif counter % 1000 == 0:\n\t\t\t\tprint str( counter/1000 ) + ' x1000 lines processed'\n\t\t#place block from new header here again (for last sequence in file)\n\t\tfor base in sequence.upper():\n\t\t\tif base == 'G' or base == 'C':\n\t\t\t\tnumber_of_gc += 1\n\t\t\t\tnumber_of_bases_without_N += 1\n\t\t\telif base == 'A' or base == 'T':\n\t\t\t\tnumber_of_bases_without_N += 1\n\t\tcontig_lengths.append( len( sequence ) )\n\t\n\t# --- calculate remaining stats --- #\n\tnumber_of_contigs = len( contig_lengths )\t#counts number of contigs / scaffolds in this assembly\n\ttotal_number_of_bases = sum( contig_lengths )\t#counts all bases in the assembyl\n\tmean_contig_length = total_number_of_bases / number_of_contigs\t#average contig lengths\n\tminimal_contig_length = min( contig_lengths )\n\tmaximal_contig_length = max( contig_lengths )\n\t\n\n\t# --- sort list of contig length decreasing --- #\n\tsorted_contig_lengths = sorted( contig_lengths )[::-1]\t#invert to get it decreasing\n\tN25 = False\n\tN50 = False\n\tN75 = False\n\tN90 = False\n\t\n\tcum_length = total_number_of_bases\n\t\n\tfor contig_length in sorted_contig_lengths:\n\t\tcum_length -= contig_length\n\t\tif cum_length <= 0.1 * total_number_of_bases:\n\t\t\tif not N90:\n\t\t\t\tN90 = contig_length\n\t\telif cum_length <= 0.25 * total_number_of_bases:\n\t\t\tif not N75:\n\t\t\t\tN75 = contig_length\n\t\telif cum_length <= 0.5 * total_number_of_bases:\n\t\t\tif not N50:\n\t\t\t\tN50 = contig_length\n\t\telif cum_length <= 0.75 * total_number_of_bases:\n\t\t\tif not N25:\n\t\t\t\tN25 = contig_length\n\t\n\t\n\tstats = { \t'number_of_contigs': number_of_contigs,\n\t\t\t'mean_contig_length': mean_contig_length,\n\t\t\t'minimal_contig_length': minimal_contig_length,\n\t\t\t'maximal_contig_length': maximal_contig_length,\n\t\t\t'total_number_of_bases': total_number_of_bases,\n\t\t\t'number_of_bases_without_N': number_of_bases_without_N,\n\t\t\t'gc_content': float( number_of_gc ) /number_of_bases_without_N,\n\t\t\t'N25': N25,\n\t\t\t'N50': N50,\n\t\t\t'N75': N75,\n\t\t\t'N90': N90\n\t\t }\n\t\n\tprint \"calculation of formal assembly stats done.\"\t\n\treturn stats", "def get_gc_content(self):\n c = self.sequence.count('C')\n g = self.sequence.count('G')\n return round((c + g) / self.length, 4)", "def average_len(records):\n count = 0\n total_Length = 0\n for i in records:\n count = count + 1\n total_Length = total_Length + len(i.seq)\n average = total_Length/count\n return average", "def gunning_fog_index(mean_sent_length, num_3_syll_words):\n\n # compute formula\n return 0.4 * (mean_sent_length + num_3_syll_words)", "def 
gcContent(chromosome):\n \"\"\"Finds the percentage of Gs and Cs in a chromosome.\n\n :param str chromosome: string, containing a line from a chromosome\n :return: int gc_perc\n \"\"\"\n if chromosome != str(chromosome):\n raise TypeError(\"Chromosome needs to be a string.\")\n if len(chromosome) == 0:\n raise ValueError(\"Chromosome has no length.\")\n gcCount = 0\n chromosome = chromosome.upper()\n gcCount = chromosome.count(\"G\") + chromosome.count(\"C\")\n gc_perc = (gcCount/len(chromosome))*100\n #print gc_perc\n return gc_perc", "def log10_probability(sequence, gc_content):\n probabilities = {\n 'A': log10((1 - gc_content) / 2),\n 'C': log10(gc_content / 2),\n 'G': log10(gc_content / 2),\n 'T': log10((1 - gc_content) / 2),\n }\n return sum(probabilities[c] for c in sequence)", "def get_gc_content(self):\n data = [(f.seq.count(\"C\") + f.seq.count(\"G\")) / len(f.seq) * 100.0 for f in self if f.seq]\n return data", "def gc_map(seq, block_size, gc_thresh):\n\n new_seq = ''\n for i in range(len(seq) // block_size):\n if gc_blocks(seq, block_size)[i] < gc_thresh:\n new_seq += seq[i*block_size:i*block_size + block_size].lower()\n else:\n new_seq += seq[i*block_size:i*block_size + block_size].upper()\n\n return new_seq", "def chop_dna(dna):\n read_len = 150\n max_ovl = 50\n min_coverage = 5\n out = []\n\n dna_len = len(dna)\n base_id = dna.id\n starts = []\n start = 0\n read_n = math.floor((dna_len - max_ovl)/(read_len - max_ovl))\n if read_n > 1:\n ovl_len = (read_len * read_n - dna_len)/(read_n - 1)\n else:\n ovl_len = max_ovl\n\n cnt = 0\n for i in range(read_n):\n for ii in range(min_coverage):\n if i == read_n - 1:\n out_seq = dna[int(start) : ]\n else:\n out_seq = dna[int(start) : int(start + read_len)]\n\n out_seq.id = base_id + \"_\" + str(cnt)\n out_seq.letter_annotations[\"phred_quality\"] = [40] * len(out_seq)\n out.append(out_seq)\n cnt += 1\n\n start += (read_len - ovl_len)\n\n return out", "def unigramSegment(sequence, corpus):\n\n # Init Heap\n heap = []\n usequence = unicode(sequence, 'utf-8')\n nUnseen = corpus.getNUseen(usequence)\n\n # items = [(-corpus.uniProba(usequence[0:i + 1], nUnseen),\n # (usequence[0:i + 1], 0, -1))\n # for i in range(corpus.maxlen)]\n items = [(-corpus.uniProba(usequence[0:i + 1], nUnseen),\n (usequence[0:i + 1], 0, -1))\n for i in range(corpus.maxlen)]\n itemsUnique = removeDuplicate(items)\n map(lambda i: heapq.heappush(heap, i), itemsUnique)\n\n # Iteratively fill in chart\n N = len(usequence)\n chart = N * [()]\n while len(heap) > 0:\n\n # entry: (log-probability, (word, start_position, back-pointer))\n entry = heapq.heappop(heap)\n endindex = entry[1][1] + len(entry[1][0]) - 1\n preventry = chart[endindex]\n\n # Check if preventry exists\n if preventry is not () and preventry[0] < entry[0]:\n continue\n\n # Check previous entry\n chart[endindex] = preventry \\\n if preventry is not () and preventry[0] < entry[0] \\\n else entry\n\n # Add new entries\n newItems = []\n\n lenToCheck = corpus.maxlen \\\n if len(usequence) - corpus.maxlen > endindex \\\n else len(usequence) - endindex - 1\n\n for i in range(lenToCheck):\n low = endindex + 1\n high = endindex + 2 + i\n prevWord = entry[1][0]\n newProb = entry[0] \\\n - corpus.uniProba(usequence[low: high], nUnseen)\n newItems.append((newProb,\n (usequence[low: high], low, entry)))\n newItemsUnique = removeDuplicate(newItems)\n map(lambda i: heapq.heappush(heap, i), newItemsUnique)\n\n # Print output\n return \" \".join(unfoldEntries(chart[-1])), chart[-1][0]", "def gcd_seq(seq):\n return 
reduce(gcd, seq)", "def updateGC(path, db, motifChrom='chr17'):\n mcollection = db[\"hg19\"+motifChrom]\n mcollection.ensure_index(\"gc\",name=\"gc_content\",unique=False,background=True)\n print 'updating GC'\n cursor = mcollection.find()#{\"tf_name\": tfName,\"motif_genomic_regions_info.chr\": motifChrom,\n\t\t#\"motif_score\":{\"$lt\":1e-3}})\n try:\n\tchrnum = int(motifChrom[3:])\n\tfname = 'hg19_chrom%02d' % chrnum\n except:\n\tfname = 'hg19_chrom23'\n with open(os.path.join(path,fname)) as f:\n\tseq_record = SeqIO.read(f,\"fasta\")\n for test in cursor:\n\t #motifChrom = test[\"motif_genomic_regions_info\"][\"chr\"]\n\t motifStart = test[\"genomic_region\"][\"start\"]\n\t motifEnd = test[\"genomic_region\"][\"end\"]\n\t seq = seq_record.seq[motifStart-1:motifEnd]##(]\n\t size = float(motifEnd-motifStart+1)\n\n\t if not size == float(seq.count('C')+seq.count('G')+seq.count('A')+seq.count('T')):\n\t\tgc = (seq.count('C')+seq.count('G')+seq.count('c')+seq.count('g'))/size\n\t\tif size == seq.count('N'):\n\t\t print 'N repeat found', seq\n\t\t #mcollection.update({\"_id\":test[\"_id\"]},{\"$set\":{\"motif_mapability_info.gc_content\": ('NA','repeat')}}, upsert = True)\n\t\tif size == float(seq.count('c')+seq.count('g')+seq.count('a')+seq.count('t')):\n\t #print 'repeat found', gc, seq\n\t\t mcollection.update({\"_id\":test[\"_id\"]},{\"$set\":{\"gc\": (gc,'r')}}, upsert = True)\n\t\tif size > seq.count('N') and seq.count('N') > 0:\n\t\t print 'partial repeat found', seq\n\t\t #mcollection.update({\"_id\":test[\"_id\"]},{\"$set\":{\"motif_mapability_info.gc_content\": ('NA','partial repeat')}}, upsert = True)\n\t\telif seq.count('N') == 0:\n\t\t #print 'partial repeat found', gc, seq\n\t\t mcollection.update({\"_id\":test[\"_id\"]},{\"$set\":{\"gc\": (gc,'pr')}}, upsert = True)\n\t else:\n\t\tgc = (seq.count('C')+seq.count('G'))/size\n\t\t#print 'not repeats', gc, seq\n\t\tmcollection.update({\"_id\":test[\"_id\"]},{\"$set\":{\"gc\": (gc,'nr')}}, upsert = True)\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a DNA sequence and converts it into a list of floats based on the dinucleotide pairs where G = 0010, C = 0.5, T = 0.5, A = 1
def dinucleotide(sequence): frog = [] for i in range(0,(len(sequence)-1)): bp = sequence[i] bp_next = sequence[i+1] bp = bp.capitalize() bp_next = bp_next.capitalize() if bp == 'A': if bp_next == 'A': frog.append([-1,-1,-1,-1]) elif bp_next == 'C': frog.append([-1,-1,-1,1]) elif bp_next == 'G': frog.append([-1,-1,1,-1]) elif bp_next == 'T': frog.append([-1,-1,1,1]) elif bp == 'C': if bp_next == 'A': frog.append([-1,1,-1,-1]) elif bp_next == 'C': frog.append([-1,1,-1,1]) elif bp_next == 'G': frog.append([-1,1,1,-1]) elif bp_next == 'T': frog.append([-1,1,1,1]) elif bp == 'G': if bp_next == 'A': frog.append([1,-1,-1,-1]) elif bp_next == 'C': frog.append([1,-1,-1,1]) elif bp_next == 'G': frog.append([1,-1,1,-1]) elif bp_next == 'T': frog.append([1,-1,1,1]) elif bp == 'T': if bp_next == 'A': frog.append([1,1,-1,-1]) elif bp_next == 'C': frog.append([1,1,-1,1]) elif bp_next == 'G': frog.append([1,1,1,-1]) elif bp_next == 'T': frog.append([1,1,1,1]) frog = np.array(frog).flatten() return frog
[ "def GC(seq):\n\tA = seq.count('A')\n\tT = seq.count('T')\n\tC = seq.count('C')\n\tG = seq.count('G')\n\treturn float(C+G) / float(A+T+G+C)", "def _convert_to_floats(line, start_index=0):\n return [float(f) for f in line.strip().split(' ')[start_index:] if f != '']", "def encode_DNA(seq):\n\tseq2bin_dict = {'A':[0,0], 'C':[0,1], 'G':[1,0], 'T':[1,1]}\n\treturn np.array(sum([seq2bin_dict.get(nuc) for nuc in seq], []))", "def translateSequence(seq):\n aa = ''\n for i in xrange(0, len(seq), 3):\n aa += codonToAminoAcid(seq[i:i+3])\n return aa", "def GA(seq):\n g_a = sum(seq.count(x) for x in [\"G\", \"A\", \"g\", \"a\", \"S\", \"s\"])\n try:\n return g_a * 100.0 / len(seq)\n except ZeroDivisionError:\n return 0.0", "def gha2lst(gha: float | np.ndarray) -> float | np.ndarray:\n lst = gha + const.galactic_centre_lst\n return lst % 24", "def genseq(DNA):\n DNA = DNA.upper()\n res1 = genes(DNA)\n DNA = DNA.translate(str.maketrans('ATGC', 'TACG'))[::-1]\n res2 = genes(DNA)\n return (res1+res2)", "def dna_to_proteins(dna_string):\n\n if os.path.isfile(dna_string):\n d = fasta_read(dna_string)[0][1]\n f_out = True\n else:\n d = dna_string = dna_string.upper()\n f_out = False\n\n d_frames = reading_frames(d)\n proteins = []\n\n for frame in d_frames:\n\n starts = [i for i in subs(frame, \"ATG\") if (i+3)%3 == 0]\n if len(starts) == 0:\n continue\n\n for start in starts:\n s = frame[start:]\n prot = ''\n stop_found = False\n for i in range(0, len(s), 3):\n if dna_codons[s[i:i+3]] == 'Stop':\n stop_found = True\n break\n prot += dna_codons[s[i:i+3]]\n if prot and stop_found:\n proteins += [prot]\n\n proteins = list(set(proteins))\n\n if f_out:\n with open('output_{}'.format(dna_string), 'w') as fout:\n for prot in proteins:\n fout.write(\"{}\\n\".format(prot))\n\n return proteins", "def a2f(self, a):\n if len(a)!=4:\n raise Exception(f\"not a 4-byte array: {a}\")\n a1 = np.asarray(a, dtype=np.int)-0x30\n s = f\"{a1[0]}{a1[1]}.{a1[2]}{a1[3]}\"\n return float(s)", "def f2a(self, f, maxV=60, minV=0):\n if f>maxV or f<minV:\n raise Exception(f\"data out of range: {f}\")\n f = 0.1*f\n a = []\n for i in range(4):\n v = int(f)\n a.append(v+0x30) # 0x30 is 0, 0x31 is 1, ...\n f -= v\n f *= 10\n\n return a", "def convert_genotype_data_to_numeral(genotypes):\n ret = []\n for snp in genotypes:\n if snp == 'A/A':\n ret.append(2)\n elif snp == 'A/B' or snp == 'B/A':\n ret.append(1)\n else:\n ret.append(0)\n\n return ret", "def pairwise_matrix(sequence):\n # Import libraries\n from numpy import zeros\n # Function\n sequence = sequence.replace('U','T')\n pairwise_sequence = []\n for i in range(len(sequence)):\n if i < len(sequence)-1:\n basepair = sequence[i]+sequence[i+1]\n pairwise_sequence.append(basepair)\n matrix = zeros([len(pairwise_sequence),16], dtype=int)\n for i,item in enumerate(pairwise_sequence):\n if item == 'AA':\n matrix[i,0] = 1\n if item == 'AT':\n matrix[i,1] = 1\n if item == 'AC':\n matrix[i,2] = 1\n if item == 'AG':\n matrix[i,3] = 1\n if item == 'TA':\n matrix[i,4] = 1\n if item == 'TT':\n matrix[i,5] = 1\n if item == 'TC':\n matrix[i,6] = 1\n if item == 'TG':\n matrix[i,7] = 1\n if item == 'CA':\n matrix[i,8] = 1\n if item == 'CT':\n matrix[i,9] = 1\n if item == 'CC':\n matrix[i,10] = 1\n if item == 'CG':\n matrix[i,11] = 1\n if item == 'GA':\n matrix[i,12] = 1\n if item == 'GT':\n matrix[i,13] = 1\n if item == 'GC':\n matrix[i,14] = 1\n if item == 'GG':\n matrix[i,15] = 1\n return matrix", "def _cfrac_convergents(S):\n \n n0,n1 = 0,1\n d0,d1 = 1,0\n \n for c in S:\n n0,n1 = n1,c*n1 + n0\n 
d0,d1 = d1,c*d1 + d0\n \n yield Fraction(n1,d1)", "def binarize_sequence(sequence):\n arr = np.zeros((4, 26))\n for i in range(26):\n if sequence[i] == 'A':\n arr[0, i] = 1\n elif sequence[i] == 'C':\n arr[1, i] = 1\n elif sequence[i] == 'G':\n arr[2, i] = 1\n elif sequence[i] == 'T':\n arr[3, i] = 1\n else:\n raise Exception('sequence contains characters other than A,G,C,T \\n%s' % sequence)\n\n return arr", "def table_to_genome(table):\r\n if type(table) == str:\r\n table = pd.read_csv(table, sep = \"\\t\", index_col=False)\r\n c1 = table[\"A1\"]\r\n c2 = table[\"A2\"]\r\n G = [c1[i] + c2[i] for i in range(len(c1))]\r\n return G", "def genes(DNA):\n import re\n res = []\n f = True\n while f:\n gene = re.findall('(ATG(...)+?(TAG|TAA|TGA))', DNA)\n if gene and len(gene[0][0]) >= 15: # and not (len(gene[0][0]) % 3) это уже выполнено у re\n res.append(gene[0][0])\n n = DNA.find('ATG')\n DNA = DNA[n + 3:] # обрезаем первый старт-кодон, чтобы найти более короткий ген, если есть старт-кодон внутри предыдущего гена\n if DNA.find('ATG') == -1:\n f = False\n return res", "def StrToSeq(string):\n if isinstance(string, np.ndarray):\n return string\n arr = np.empty(len(string)).astype(int)\n for i in range(len(string)):\n if ( string[i] == 'A' or string[i] == 'a' ):\n arr[i] = 0\n elif ( string[i] == 'T' or string[i] == 't' ):\n arr[i] = 1\n elif ( string[i] == 'C' or string[i] == 'c' ):\n arr[i] = 2\n elif ( string[i] == 'G' or string[i] == 'g' ):\n arr[i] = 3\n else:\n arr[i] = 0 # For now, undefined nucleotides replaced with A. Beware bias.\n return arr", "def calibrate(gcode, ratio, var=None):\n\tngcode = []\n\tfor line in gcode:\n\t\tl = []\n\t\tfor c in line.split():\n\t\t\tif c.find('X') is 0:\n\t\t\t\taux = c.replace(\"X\",\" \")\n\t\t\t\taux = float(aux)*ratio\n\t\t\t\tc = \"{0:.4}\".format(aux)\n\t\t\tif c.find('Y') is 0:\n\t\t\t\taux = c.replace(\"Y\",\" \")\n\t\t\t\taux = float(aux)*ratio\n\t\t\t\tc = \"{0:.4}\".format(aux)\n\t\t\ttry:\n\t\t\t\tl.append(c)\n\t\t\t\tprint (l)\n\t\t\texcept:\n\t\t\t\tpass\n\t\tngcode.append(l)\n\n\treturn ngcode", "def convert2aa(sequence):\r\n\r\n # sequence = \"\".join([x.upper() for x in sequence]) # converts lowercase to uppercase\r\n\r\n number_of_codons = len(sequence)/3\r\n aa_seq = []\r\n\r\n for nmbr in list(range(1, int(number_of_codons)+1)): # goes through each codon converting it to an aa\r\n\r\n if \"\".join([x.upper() for x in sequence])[nmbr*3-3:nmbr*3] in codon2aa:\r\n aa_seq.append(codon2aa[\"\".join([x.upper() for x in sequence])[nmbr*3-3:nmbr*3]])\r\n else:\r\n aa_seq.append(\"XXX\")\r\n\r\n return \"\".join(aa_seq)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The most basic lineal hash is SHA(SHA(name) + version)
def test_basic(self): a = linealHash('name', 'version') expected = sha1(sha1('name').hexdigest() + 'version').hexdigest() self.assertEqual(a, expected)
[ "def getHash(name):\n return hashlib.md5(name).hexdigest()", "def hash(bytes):\n return unpack(sha256(bytes).digest())", "def test_args(self):\n sample_hash1 = sha1('foo').hexdigest()\n sample_hash2 = sha1('bar').hexdigest()\n \n a = linealHash('name', 'version', [sample_hash1, sample_hash2])\n expected = sha1(linealHash('name', 'version') + sample_hash1 \\\n + sample_hash2).hexdigest()\n self.assertEqual(a, expected, \"With inputs, expected lineal hash to be\"\n \" H(linealHash + input1hash + input2hash)\")", "def hash(*args) -> \"uint32_t\":\n return _coin.SbString_hash(*args)", "def SbString_hash(*args) -> \"uint32_t\":\n return _coin.SbString_hash(*args)", "def deterministic_hash(thing, length=10):\n digest = sha1(json.dumps(hashablize(thing)).encode('ascii')).digest()\n return b32encode(digest)[:length].decode('ascii').lower()", "def hash_sha(a):\n return int.from_bytes(hlib.sha3_512(str(a).encode()).digest(), 'big')", "def stable_hash(self, source, digits=9):\r\n\r\n return int(sha1(source.encode()).hexdigest(), 16) % (10 ** digits)", "def _username_hash(username):\n return hashlib.sha256(username.encode('utf-8')).digest()", "def _genhash( self, fileref ):\n\t\treturn toolbox.md5( fileref )", "def _hash(self: Syscall) -> int:\n return hash(self.name)", "def test_00():\n hs1 = hashlib.sha256()\n hs2 = hashlib.sha256()\n\n # 해쉬는 바이너리로 진행해야 한다\n hs1.update(b\"Nobody inspects\")\n hs2.update(b\"the spammish repetition\")\n\n # 결과는 바이너리로 출력된다\n print(hs1.digest())\n print(hs2.digest(), \"\\n\\n\")\n\n \"\"\"바이너리 스트링 길이 체크 (테스트)\"\"\"\n ss1 = str(hs1.digest()).split(\"\\\\\")\n ss2 = str(hs2.digest()).split(\"\\\\\")\n\n # 리스트 스트링의 갯수 체크\n print(ss1)\n print(ss2)\n\n print(len(ss1))\n print(len(ss2), \"\\n\\n\")\n\n # 바이너리를 핵사로 변경하여 출력 ... 당연히 길이는 동일함!\n print(\"hs1=\", hs1.hexdigest())\n print(\"hs1.digest_siz=\", hs1.digest_size)\n print(\"hs2.digest_siz=\", hs2.digest_size, \"\\n\\n\")\n\n print(\"hs2=\", hs2.hexdigest())\n print(\"hs1.block_size=\", hs1.block_size)\n # hash comparison\n print(\"hs2.block_size=\", hs2.block_size)", "def _source_hash_file(source):\n h = hashlib.sha1()\n h.update(source.encode('utf-8'))\n return h.hexdigest()", "def _make_hash(self, sid, secret):\n return hmac.new(secret, sid, sha).hexdigest()[:8]", "def deterministic_hash(thing, length=10):\n hashable = hashablize(thing)\n jsonned = json.dumps(hashable, cls=NumpyJSONEncoder)\n # disable bandit\n digest = sha1(jsonned.encode('ascii')).digest()\n return b32encode(digest)[:length].decode('ascii').lower()", "def test_hash_r_keyword(self):\n h = scrypt.hash(r=16, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)", "def hashhex(s):\r\n h = hashlib.sha1()\r\n h.update(s.encode('utf-8'))\r\n return h.hexdigest()", "def short_hash(self, length=8):\n return self.hash[:length]", "def get_hash(self):\r\n path = self.files[self.idx_image]\r\n filename = path.split(\"/\")[-1]\r\n with open(path,\"rb\") as f:\r\n hash_object = hashlib.sha512(f.read())\r\n hex_dig = hash_object.hexdigest()\r\n hash = filename + \" \"+ hex_dig\r\n return hash" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If a data point has inputs, the lineal hash of the data point is SHA(SHA(SHA(name) + version) + input1_lineal_hash + input2_lineal_hash)
def test_args(self): sample_hash1 = sha1('foo').hexdigest() sample_hash2 = sha1('bar').hexdigest() a = linealHash('name', 'version', [sample_hash1, sample_hash2]) expected = sha1(linealHash('name', 'version') + sample_hash1 \ + sample_hash2).hexdigest() self.assertEqual(a, expected, "With inputs, expected lineal hash to be" " H(linealHash + input1hash + input2hash)")
[ "def hash_input(self, input_data: Union[bytes, str]) -> bytes:\n return cast(bytes, self.hash_method(self.bytes_from_input(input_data)).digest()) # We know this is always a hashlib hash that returns bytes", "def __get_hash(self, user_input, number_of_lines):\r\n hash_number = 0\r\n for i in range(number_of_lines):\r\n counter = 0\r\n for char in user_input[i]:\r\n hash_number += int(ord(char)) - 65 + counter + i\r\n counter += 1\r\n return hash_number", "def _hash_it(self, _data):\n m = hashlib.sha256(_data)\n if m.hexdigest() not in self.hashed_graphics:\n self.hashed_graphics[m.hexdigest()] = _data\n length = int(len(_data)/1024 + 0.5)\n return m.hexdigest(), \"b'{}Kb'\".format(length)", "def hashGeneretor(inputString):\n\treturn hashlib.sha256(inputString.encode('utf-8')).hexdigest()", "def test_basic(self):\n a = linealHash('name', 'version')\n expected = sha1(sha1('name').hexdigest() + 'version').hexdigest()\n self.assertEqual(a, expected)", "def hash_data(data):\n return hashlib.md5(data).hexdigest()", "def __get_current_hash__(self):\n hasher = hashlib.sha256()\n hasher.update(self.previous_hash.encode() + self.data.encode())\n return hasher.hexdigest()", "def _set_hash_sha1(self, data = None):\n data = self.data\n if not data:\n return None\n if isinstance(data, str):\n data = data.encode() # \n hash_object = hashlib.sha1(data)\n return hash_object.hexdigest()", "def _hash(input, hash_type):\n # the SHA module needs plain byte strings to hash --\n # it chokes on unicode objects with non-ASCII characters\n if isinstance(input, unicode):\n input = input.encode('utf-8')\n\n if hash_type == 'SHA-1':\n s = hashlib.sha1()\n elif hash_type == 'SHA-512':\n s = hashlib.sha512()\n else:\n raise exceptions.InvalidUsageException('The requested hash type %s is not supported. 
Please choose from SHA-1 or SHA-512.'%hash_type)\n\n s.update(input)\n return s.hexdigest()", "def obtain_SHA1(input_string):\n\n # Calculate the SHA-1 hash of both input sequences\n return str(hashlib.sha1(input_string).hexdigest())", "def _hash_it(self, artworkdata):\n #so open artwork read in as bytes\n m = hashlib.sha256(artworkdata)\n length = \"b'{}Kb'\".format(int(len(artworkdata)/1024 + 0.5))\n #so if the hash not a key in hashed_graphics, add it\n if m.hexdigest() not in self.hashed_graphics:\n self.hashed_graphics[m.hexdigest()] = artworkdata\n return m.hexdigest(), length", "def crypto_hash(*args):\n \n # convert every arg into a string\n stringfiedargs = sorted(map(lambda data: json.dumps(data), args))\n\n #generate a single string with all args\n joined_data = ''.join(stringfiedargs)\n\n return hashlib.sha256(joined_data.encode('utf-8')).hexdigest()", "def get_hash(self, descriptor):", "def __calc_model_hash(self, type, training_data) -> str:\n texts = ''.join([text for text in training_data.texts])\n labels = ''.join([label for label in training_data.labels])\n model_str = type.value + texts + labels\n return sha1((model_str).encode('utf-8')).hexdigest()", "def get_working_hash(args):\n if args.dense_track:\n param_str = str(args.grid_size)\n else:\n param_str = str(args.corner_thresh) + \\\n str(args.block_size) + \\\n str(args.sobel_size) + \\\n str(args.free_k) + \\\n str(args.nonm_size) + \\\n str(args.nonm_num)\n\n string = bytearray(args.image_path + args.flow_path + param_str, \"utf8\")\n return hashlib.sha1(string).hexdigest()[:8]", "def _source_hash_file(source):\n h = hashlib.sha1()\n h.update(source.encode('utf-8'))\n return h.hexdigest()", "def doubleHashHex (data):\n\n hasher = hashlib.sha256 ()\n hasher.update (binascii.unhexlify (data))\n data = hasher.digest ()\n\n hasher = hashlib.sha256 ()\n hasher.update (data)\n\n return reverseHex (hasher.hexdigest ())", "def hash(polygon):\n crc = zlib.adler32(polygon.wkb)\n return crc", "def _make_input_signature_hashable(elem):\n try:\n hash(elem)\n except TypeError:\n # TODO(slebedev): consider using nest.\n if isinstance(elem, tuple):\n return tuple(map(_make_input_signature_hashable, elem))\n\n # TFE_Py_EncodeArg weakrefs arguments it does not recognize, and we expect\n # all recognized types to be hashable.\n assert isinstance(elem, weakref.ReferenceType)\n v = elem()\n\n if resource_variable_ops.is_resource_variable(v):\n # We special case variables here to use unique_id as the cache key. This\n # ensures we have to retrace whenever a different variable is passed in.\n # This is needed to support cases where the user may use the id of a\n # variable in the function perhaps as a lookup in a dictionary.\n #\n # This choice leads to more retracing when we could have possibly used the\n # shape and dtype instead. 
However, we expect the number of variables in a\n # program to be bounded, and correspondingly the number of retraces.\n #\n # Note we also include the class name to avoid collisions with strings.\n return v.__class__, v._unique_id # pylint: disable=protected-access\n\n if _is_ndarray(v):\n # Numpy arrays are not hashable, but when calling functions we treat them\n # in the same way as tf.Tensors.\n if not hasattr(v, \"shape\") or not hasattr(v, \"dtype\"):\n # TODO(tomhennigan) De-dup with _as_ndarray in _convert_numpy_inputs.\n v = _as_ndarray(v)\n return tensor_spec.TensorSpec(v.shape, v.dtype)\n\n raise ValueError(\"Arguments to a tf.function must be a nested structure of \"\n \"Tensors, Variables, NumPy arrays, or hashable Python \"\n f\"objects, got {type(v)}.\")\n\n return elem" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
You can make it into a WorkInput
def test_IWorkInput(self): d = Data('joe', 'a', '1', 'xxxx', 'value') i = IWorkInput(d) self.assertEqual(i.name, 'a') self.assertEqual(i.version, '1') self.assertEqual(i.lineage, 'xxxx') self.assertEqual(i.value, 'value') self.assertEqual(i.hash, sha1('value').hexdigest())
[ "def test_inputs(self):\n w = Work('bob', 'a', '1', 'xxxx', [\n ('b', '1', 'xxxx', 'val', 'hash'),\n ['c', '1', 'xxxx', 'val', 'hash'],\n WorkInput('d', '1', 'xxxx', 'val', 'hash'),\n ])\n self.assertEqual(w.inputs, (\n WorkInput('b', '1', 'xxxx', 'val', 'hash'),\n WorkInput('c', '1', 'xxxx', 'val', 'hash'),\n WorkInput('d', '1', 'xxxx', 'val', 'hash'),\n ), \"Should convert all arguments to a WorkInput\")", "def to_action_input(self, action_batch):\n\t\traise NotImplementedError", "def _construct_input_spec(self):", "def make_input(self, *args, **kwargs):\r\n self.add(input.Input(*args, **kwargs))", "def test_IResultInput(self):\n i = WorkInput('a', '1', 'xxxx', 'val', 'hash')\n r = IResultInput(i)\n self.assertEqual(r.name, 'a')\n self.assertEqual(r.version, '1')\n self.assertEqual(r.lineage, 'xxxx')\n self.assertEqual(r.hash, 'hash')", "def _input_data(self, job):\n\n cmd = \"{}/input.sh\".format(\n job.code_dir\n )\n print(\"Will run data submission: \" + cmd)\n try:\n call([\"bash\", cmd])\n except Exception:\n print(\"Failed data input\")", "def get_work():\n\n if work_buffered:\n work = work_buffered.pop(0)\n else:\n line = input.readline()\n if line:\n work = line[:-1]\n else:\n work = None\n\n if work:\n work_pending.append(work)\n elif work_pending:\n work = work_pending.pop(0)\n work_pending.append(work)\n return work", "def handleWork(self, work):\n raise NotImplementedError()", "def workRequirement(world, action):", "def createSubflowWorkItem(process, activity, subflow, execution):", "def Work(self, h):\n\t\treturn _hi.hi_Person_Work(self.handle, h)", "def __init__(self):\n super().__init__()\n self.__work_last_step = {}", "def createWorkItem(participant, process, activity, application):", "def responder_work_ref(self) -> UniqueWorkRef:\n pass", "def getInput(self, *args):\n return _coin.SoFieldConverter_getInput(self, *args)", "def workflow_inputs_command():\n return Command().command(_workflow_inputs).require_migration().with_database(write=False)", "def sender_work_ref(self) -> UniqueWorkRef:\n pass", "def get_input(self):\n run_input = self.describe().get('input', None)\n if run_input is None:\n return run_input\n return json.loads(run_input)", "def input_fn(self, mode):\n task_name = self.config['data']['task'][\"name\"]\n self._task = registers.task[task_name](self.config, mode)\n return self._task" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
You can easily convert an IWorkInput to an IResultInput
def test_IResultInput(self): i = WorkInput('a', '1', 'xxxx', 'val', 'hash') r = IResultInput(i) self.assertEqual(r.name, 'a') self.assertEqual(r.version, '1') self.assertEqual(r.lineage, 'xxxx') self.assertEqual(r.hash, 'hash')
[ "def test_toResult(self):\n w = Work('bob', 'a', '1', 'xxxx', [\n ('a', '1', 'xxxx', 'val', 'hash'),\n ])\n r = w.toResult('the result')\n self.assertEqual(r, Result('bob', 'a', '1', 'xxxx', 'the result', [\n ('a', '1', 'xxxx', 'hash'),\n ]))", "def test_inputs(self):\n w = Work('bob', 'a', '1', 'xxxx', [\n ('b', '1', 'xxxx', 'val', 'hash'),\n ['c', '1', 'xxxx', 'val', 'hash'],\n WorkInput('d', '1', 'xxxx', 'val', 'hash'),\n ])\n self.assertEqual(w.inputs, (\n WorkInput('b', '1', 'xxxx', 'val', 'hash'),\n WorkInput('c', '1', 'xxxx', 'val', 'hash'),\n WorkInput('d', '1', 'xxxx', 'val', 'hash'),\n ), \"Should convert all arguments to a WorkInput\")", "def test_IWorkInput(self):\n d = Data('joe', 'a', '1', 'xxxx', 'value')\n i = IWorkInput(d)\n self.assertEqual(i.name, 'a')\n self.assertEqual(i.version, '1')\n self.assertEqual(i.lineage, 'xxxx')\n self.assertEqual(i.value, 'value')\n self.assertEqual(i.hash, sha1('value').hexdigest())", "def test_inputs(self):\n r = Result('bob', 'a', '1', 'xxxx', 'val', [\n ('b', '1', 'xxxx', 'hash'),\n ['c', '1', 'xxxx', 'hash'],\n ResultInput('d', '1', 'xxxx', 'hash'),\n ])\n self.assertEqual(r.inputs, (\n ResultInput('b', '1', 'xxxx', 'hash'),\n ResultInput('c', '1', 'xxxx', 'hash'),\n ResultInput('d', '1', 'xxxx', 'hash'),\n ), \"Should convert all arguments to a ResultInput\")", "def test_inputs(self):\n r = ResultError('bob', 'a', '1', 'xxxx', 'val', [\n ('b', '1', 'xxxx', 'hash'),\n ['c', '1', 'xxxx', 'hash'],\n ResultInput('d', '1', 'xxxx', 'hash'),\n ])\n self.assertEqual(r.inputs, (\n ResultInput('b', '1', 'xxxx', 'hash'),\n ResultInput('c', '1', 'xxxx', 'hash'),\n ResultInput('d', '1', 'xxxx', 'hash'),\n ), \"Should convert all arguments to a ResultInput\")", "def _assign_inputs_and_outputs(self, execution, execution_data, interface):\n with self.remote_context() as ctx:\n execution._inputs = TypeEngine.literal_map_to_kwargs(\n ctx=ctx,\n lm=self._get_input_literal_map(execution_data),\n python_types=TypeEngine.guess_python_types(interface.inputs),\n )\n if execution.is_complete and not execution.error:\n execution._outputs = TypeEngine.literal_map_to_kwargs(\n ctx=ctx,\n lm=self._get_output_literal_map(execution_data),\n python_types=TypeEngine.guess_python_types(interface.outputs),\n )\n return execution", "def get_input(prompt: str, convert: Callable[[str], T]) -> T:\n return convert(input(prompt))", "def get_work():\n\n if work_buffered:\n work = work_buffered.pop(0)\n else:\n line = input.readline()\n if line:\n work = line[:-1]\n else:\n work = None\n\n if work:\n work_pending.append(work)\n elif work_pending:\n work = work_pending.pop(0)\n work_pending.append(work)\n return work", "def to_action_input(self, action_batch):\n\t\traise NotImplementedError", "def test_toResultError(self):\n w = Work('bob', 'a', '1', 'xxxx', [\n ('a', '1', 'xxxx', 'val', 'hash'),\n ])\n r = w.toResultError('the err')\n self.assertEqual(r, ResultError('bob', 'a', '1', 'xxxx', 'the err', [\n ('a', '1', 'xxxx', 'hash'),\n ]))", "def handleWork(self, work):\n raise NotImplementedError()", "def compute_output(\n self, input: torch.Tensor, adapter: torch.nn.Module, *, module: 'AdapterModuleMixin'\n ) -> torch.Tensor:\n if isinstance(input, (list, tuple)):\n out = adapter(*input)\n elif isinstance(input, dict):\n out = adapter(**input)\n else:\n out = adapter(input)\n return out", "def preproc_output(self, input: I, output: O) -> PO:\n raise Exception(\"Not implemented\")", "def get_input(self):\n run_input = self.describe().get('input', None)\n if run_input is 
None:\n return run_input\n return json.loads(run_input)", "def get_work_order_result(self, work_order_id):\n pass", "def getInput(self, *args):\n return _coin.SoFieldConverter_getInput(self, *args)", "def result_in(self, result_in):\n\n self._result_in = result_in", "def convert_to_workflow(command_line_tool, location=None):\n\n if command_line_tool[\"class\"] == \"Workflow\":\n workflow_tool = command_line_tool\n else:\n workflow_tool = {\n \"class\": \"Workflow\",\n \"cwlVersion\": command_line_tool[\"cwlVersion\"],\n \"inputs\": [],\n \"outputs\": []\n }\n\n for key in [\"requirements\"]:\n if key in command_line_tool:\n workflow_tool[key] = command_line_tool[key]\n\n for input_id, input_data in get_items(command_line_tool[\"inputs\"]):\n workflow_input = {\n \"id\": input_id,\n \"type\": remove_field_from_dict(input_data[\"type\"], \"inputBinding\") # \"type\" in WorkflowInputParameter cannot have \"inputBinding\"\n }\n for key in [\"secondaryFiles\", \"default\"]: # TODO: Do I need to copy format?\n if key in input_data:\n workflow_input[key] = input_data[key]\n workflow_tool[\"inputs\"].append(workflow_input)\n\n for output_id, output_data in get_items(command_line_tool[\"outputs\"]):\n workflow_output = {\n \"id\": output_id,\n \"type\": output_data[\"type\"],\n \"outputSource\": get_rootname(command_line_tool[\"id\"]) + \"/\" + output_id\n }\n # TODO: not sure if I need format here\n # for key in [\"format\"]:\n # if key in output_data:\n # workflow_output[key] = output_data[key]\n workflow_tool[\"outputs\"].append(workflow_output)\n\n workflow_tool[\"steps\"] = [\n {\n \"id\": get_rootname(command_line_tool[\"id\"]),\n \"run\": command_line_tool,\n \"in\": [\n {\n \"id\": input_id, \"source\": input_id\n } for input_id, _ in get_items(workflow_tool[\"inputs\"])\n ],\n \"out\": [\n output_id for output_id, _ in get_items(workflow_tool[\"outputs\"])\n ]\n }\n ]\n\n if location is not None:\n dump_json(workflow_tool, location)\n\n return workflow_tool", "def _work(self, **kwargs):\n \n results = self.work(**kwargs)\n self._complete(results)\n return results" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializing with tuples/lists/WorkInputs should work for the inputs argument.
def test_inputs(self): w = Work('bob', 'a', '1', 'xxxx', [ ('b', '1', 'xxxx', 'val', 'hash'), ['c', '1', 'xxxx', 'val', 'hash'], WorkInput('d', '1', 'xxxx', 'val', 'hash'), ]) self.assertEqual(w.inputs, ( WorkInput('b', '1', 'xxxx', 'val', 'hash'), WorkInput('c', '1', 'xxxx', 'val', 'hash'), WorkInput('d', '1', 'xxxx', 'val', 'hash'), ), "Should convert all arguments to a WorkInput")
[ "def _populate_inputs(self):\n\n self.inputs = Bunch(outfile=None,\n infile=None)", "def __init__(self, inputs=None, outputs=None, data = None):\n if inputs == None:\n self.inputs = []\n else:\n self.inputs = inputs\n\n if outputs == None:\n self.outputs = []\n else:\n self.outputs = outputs\n\n # fill in the rest of the method\n\n self.data = data # to store arbitrary data ", "def _prepare(self, inputs):\n assert isinstance(inputs, (tuple, list))\n # Convert variable into VarBase and feed in training data.\n input_vars = []\n for i, value in enumerate(inputs):\n if isinstance(value, np.ndarray):\n var = core.VarBase(\n value=value,\n name=self.inputs[i].desc.name(),\n persistable=False,\n place=framework._current_expected_place(),\n zero_copy=True)\n elif isinstance(value, core.VarBase):\n var = value\n var.name = self.inputs[i].desc.name()\n else:\n continue\n input_vars.append(var)\n # Create VarBase to receive output data.\n out_vars = []\n for var in self.outputs:\n if not isinstance(var, framework.Variable):\n continue\n var_desc = var.desc\n var_base = core.VarBase(var_desc.dtype(),\n var_desc.shape(),\n var_desc.name(), var_desc.type(), False)\n out_vars.append(var_base)\n\n # Hold forward variables\n tmp_scope_vec = core.VarBase(core.VarDesc.VarType.FP32, [],\n \"program_out_scope\",\n core.VarDesc.VarType.STEP_SCOPES, True)\n\n tmp_scope_vec.value().set_scope(self._inner_scope)\n\n return input_vars, out_vars, tmp_scope_vec", "def _construct_input_spec(self):", "def test_inputs(self):\n r = Result('bob', 'a', '1', 'xxxx', 'val', [\n ('b', '1', 'xxxx', 'hash'),\n ['c', '1', 'xxxx', 'hash'],\n ResultInput('d', '1', 'xxxx', 'hash'),\n ])\n self.assertEqual(r.inputs, (\n ResultInput('b', '1', 'xxxx', 'hash'),\n ResultInput('c', '1', 'xxxx', 'hash'),\n ResultInput('d', '1', 'xxxx', 'hash'),\n ), \"Should convert all arguments to a ResultInput\")", "def inputs_weights_init(self):\n input_user, input_item, input_rating = self.inputs_init()\n user_embeddings, item_embeddings = self.embeddings_layers_init()\n\n return input_user, input_item, input_rating, user_embeddings, item_embeddings", "def make_input(self, *args, **kwargs):\r\n self.add(input.Input(*args, **kwargs))", "def mapInputs2Intermediates():\n input_decs = []\n signal_decs = []\n signal_resets = []\n variable_decs = []\n variable_initializations = []\n variable_assignments = []\n intermediate_assignments = []\n signal_assignments = []\n\n one_assignment_batch = []\n input_list = list(range(NUM_INPUTS))\n intermediate_num = 0\n signal_decs.append(declareOneIntermediate(intermediate_num))\n variable_decs.append(declareOneVariable(intermediate_num))\n variable_initializations.append(initializeFirstStageVariable(intermediate_num))\n signal_assignments.append(assignIntermediate(intermediate_num))\n signal_num = 0\n while len(input_list) > 0:\n if (signal_num % INPUTS_PER_INTERMEDIATE == 0) and (signal_num > 0):\n intermediate_num += 1\n signal_decs.append(declareOneIntermediate(intermediate_num))\n variable_decs.append(declareOneVariable(intermediate_num))\n variable_initializations.append(initializeFirstStageVariable(intermediate_num))\n signal_assignments.append(assignIntermediate(intermediate_num))\n signal_resets.append(resetOneIntermediate(intermediate_num))\n n = input_list[0]\n input_decs.append(declareOneInput(n))\n one_assignment_batch.extend(makeOneIfStatement(n, intermediate_num))\n del(input_list[0])\n signal_num += 1\n intermediate_assignments.append(one_assignment_batch)\n\n return input_decs, signal_decs, 
signal_resets, variable_decs, variable_initializations, intermediate_assignments, signal_assignments", "def prepare_inputs(*inputs, **kwinputs):\n alpha, delta, tau, N = inputs\n meta = kwinputs['meta']\n\n # Organize the parameters to an array. The broadcasting works nicely with constant\n # arguments.\n param_array = np.row_stack(np.broadcast(alpha, delta, tau, N))\n\n # Prepare a unique filename for parallel settings\n filename = '{model_name}_{batch_index}_{submission_index}.txt'.format(**meta)\n np.savetxt(filename, param_array, fmt='%.4f %.4f %.4f %d')\n\n # Add the filenames to kwinputs\n kwinputs['filename'] = filename\n kwinputs['output_filename'] = filename[:-4] + '_out.txt'\n\n # Return new inputs that the command will receive\n return inputs, kwinputs", "def _instantiate_input_states(self, context=None):\n num_values = len(self.monitored_values)\n values = [None] * num_values\n names = self.names or [None] * num_values\n\n # If default_input_value arg (assigned to variable in __init__) was used to specify the size of inputStates,\n # pass those values for use in instantiating inputStates\n if self.variable is not None:\n input_state_sizes = self.variable\n else:\n input_state_sizes = values\n for i, monitored_value, name in zip(range(num_values), self.monitored_values, names):\n values[i] = self._instantiate_input_state_for_monitored_value(input_state_sizes[i],\n monitored_value,\n name,\n context=context)\n\n # If self.variable was not specified, construct from values of inputStates\n if self.variable is None:\n # If all items of self.variable are numeric and of the same length, convert to ndarray\n dim_axis_0 = len(values)\n dim_axis_1 = len(values[0])\n if all((is_numeric(values[i]) and len(values[i])==dim_axis_1) for i in range(dim_axis_0)):\n self.variable = np.zeros((dim_axis_0,dim_axis_1), dtype=float)\n # Otherwise, just use list of values returned from instantiation above\n else:\n self.variable = values.copy()\n\n self.variableClassDefault = self.variable.copy()\n self.inputValue = list(self.variable)", "def test_inputs(self):\n r = ResultError('bob', 'a', '1', 'xxxx', 'val', [\n ('b', '1', 'xxxx', 'hash'),\n ['c', '1', 'xxxx', 'hash'],\n ResultInput('d', '1', 'xxxx', 'hash'),\n ])\n self.assertEqual(r.inputs, (\n ResultInput('b', '1', 'xxxx', 'hash'),\n ResultInput('c', '1', 'xxxx', 'hash'),\n ResultInput('d', '1', 'xxxx', 'hash'),\n ), \"Should convert all arguments to a ResultInput\")", "def fill_in_new_state( self, trans, inputs, state, context=None ):\n context = ExpressionContext( state, context )\n for input in inputs.itervalues():\n state[ input.name ] = input.get_initial_value( trans, context )", "def _parse_list(self, inputs):\n # Lists can only be used as inputs in the case where there is a single input node.\n # Validate that this is true. 
If so, resolve the list into a dict and parse it.\n input_nodes = self.get_nodes_by_role(NodeRole.INPUT)\n if len(input_nodes) == 1:\n _inputs = {next(iter(input_nodes)): inputs}\n else:\n raise CompositionError(\n f\"Inputs to {self.name} must be specified in a dictionary with a key for each of its \"\n f\"{len(input_nodes)} INPUT nodes ({[n.name for n in input_nodes]}).\")\n input_dict, num_inputs_sets = self._parse_dict(_inputs)\n return input_dict, num_inputs_sets", "def pack_args(self, *inputs, **args):\r\n\t\tmodule_input = []\r\n\r\n\t\targ_map = {}\r\n\t\tmodule_args = {}\r\n\t\tmodule_args['arg_map'] = arg_map\r\n\t\tmodule_args['loss_fn'] = self.loss_fn\r\n\t\tmodule_args['transpose'] = True\r\n\t\tinputs_processed = 0\r\n\t\tnum_args = 0\r\n\r\n\t\t# if text is not in args it should be the first object in inputs\r\n\t\tif 'text' in args and args['text'] is not None:\r\n\t\t\tmodule_input.append(args['text'])\r\n\t\telse:\r\n\t\t\tmodule_input.append(inputs[inputs_processed])\r\n\t\t\tinputs_processed += 1\r\n\t\targ_map['x'] = num_args\r\n\t\tnum_args += 1\r\n\r\n\t\t# pass module initial state to module_inputs\r\n\t\tmodule_input.append(self.init_state)\r\n\t\targ_map['hidden_init'] = num_args\r\n\t\tnum_args += 1\r\n\r\n\t\t# get input sequence length\r\n\t\tif 'timesteps' in args and args['timesteps'] is not None:\r\n\t\t\tts = args['timesteps']\r\n\t\telse:\r\n\t\t\tts = inputs[inputs_processed]\r\n\t\t\tinputs_processed += 1\r\n\r\n\t\t# if sequence length is not a tensor move it to module_args so that it doesn't have scatter problems from DataParallel\r\n\t\tif isinstance(ts, int) or type(ts[0]).__module__ == 'numpy':\r\n\t\t\tmodule_args['timesteps'] = ts\r\n\t\telse:\r\n\t\t\tmodule_input.append(ts)\r\n\t\t\targ_map['timesteps'] = num_args\r\n\t\t\tnum_args += 1\r\n\r\n\t\t# pack state persistence mask\r\n\t\tif 'state_mask' in args and args['state_mask'] is not None:\r\n\t\t\tmodule_input.append(args['state_mask'])\r\n\t\t\targ_map['state_mask'] = num_args\r\n\t\t\tnum_args += 1\r\n\r\n\t\t# pack return sequence boolean\r\n\t\tif 'return_sequence' in args:\r\n\t\t\treturn_sequence = args['return_sequence']\r\n\t\telse:\r\n\t\t\treturn_sequence = False\r\n\t\tmodule_args['return_sequence'] = return_sequence\r\n\t\treturn module_input, module_args", "def initialize(self, runInfo, inputs, initDict):\n self._initializeLSpp(runInfo, inputs, initDict)\n self._initializeLSppROM(self.inputs[self.indexes])", "def input_data(self, inputs):\n for i, x in enumerate(inputs):\n self.activations[0][i] = x", "def init_weights(self):\r\n self.weights = [0 for i in range(len(self.inputs[0][0]))]", "def fill_in_defaults(\n inputs: List[CWLObjectType],\n job: CWLObjectType,\n fsaccess: StdFsAccess,\n) -> None:\n debug = _logger.isEnabledFor(logging.DEBUG)\n for e, inp in enumerate(inputs):\n with SourceLine(inputs, e, WorkflowException, debug):\n fieldname = shortname(cast(str, inp[\"id\"]))\n if job.get(fieldname) is not None:\n pass\n elif job.get(fieldname) is None and \"default\" in inp:\n job[fieldname] = copy.deepcopy(inp[\"default\"])\n elif job.get(fieldname) is None and \"null\" in aslist(inp[\"type\"]):\n job[fieldname] = None\n else:\n raise WorkflowException(\n \"Missing required input parameter '%s'\" % shortname(cast(str, inp[\"id\"]))\n )", "def _assign_inputs_and_outputs(self, execution, execution_data, interface):\n with self.remote_context() as ctx:\n execution._inputs = TypeEngine.literal_map_to_kwargs(\n ctx=ctx,\n lm=self._get_input_literal_map(execution_data),\n 
python_types=TypeEngine.guess_python_types(interface.inputs),\n )\n if execution.is_complete and not execution.error:\n execution._outputs = TypeEngine.literal_map_to_kwargs(\n ctx=ctx,\n lm=self._get_output_literal_map(execution_data),\n python_types=TypeEngine.guess_python_types(interface.outputs),\n )\n return execution" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
You can easily convert to a Result
def test_toResult(self): w = Work('bob', 'a', '1', 'xxxx', [ ('a', '1', 'xxxx', 'val', 'hash'), ]) r = w.toResult('the result') self.assertEqual(r, Result('bob', 'a', '1', 'xxxx', 'the result', [ ('a', '1', 'xxxx', 'hash'), ]))
[ "def test_toResultError(self):\n w = Work('bob', 'a', '1', 'xxxx', [\n ('a', '1', 'xxxx', 'val', 'hash'),\n ])\n r = w.toResultError('the err')\n self.assertEqual(r, ResultError('bob', 'a', '1', 'xxxx', 'the err', [\n ('a', '1', 'xxxx', 'hash'),\n ]))", "def _get_result_constructor(self):\n return ResultObject", "def create_result(self):\n raise NotImplementedError(\"Abstract Method:create_result.\")", "def _unpack_result(result):\n if not result:\n return None\n elif len(result) == 1:\n return result[0]\n return result", "def from_result(cls, container, result):\n if result is None:\n raise errors.NoObjectException\n\n elif cls.is_prefix(result):\n return cls.from_prefix(container, result)\n\n elif cls.is_key(result):\n return cls.from_key(container, result)\n\n raise errors.CloudException(\"Unknown boto result type: %s\" %\n type(result))", "def _gotResult(self, result):\n return {\n \"jsonrpc\": \"2.0\",\n \"result\": result,\n \"id\": 1\n }", "def format_result(result):\n if not isinstance(result, tuple):\n return ActionResult(result, None)\n else:\n return ActionResult(*result)", "def read_query_result(self, *args, **kwargs): # real signature unknown\n pass", "def transform_result(task_ex, result):\n if result.is_error():\n return result\n\n action_spec_name = spec_parser.get_task_spec(\n task_ex.spec).get_action_name()\n\n if action_spec_name:\n wf_ex = task_ex.workflow_execution\n wf_spec_name = spec_parser.get_workflow_spec(wf_ex.spec).get_name()\n\n return transform_action_result(\n wf_ex.workflow_name,\n wf_spec_name,\n action_spec_name,\n result\n )\n\n return result", "def test_send_result(self):\n pass", "def to_objects(self, results):\n return results", "def convert(self):\n self.result = self.source\n return True", "def test_typeToMethodResult(self):\n expected = object()\n resolver = ResolverBase()\n resolver.typeToMethod = {54321: lambda query, timeout: expected}\n query = Query(name=b\"example.com\", type=54321)\n queryDeferred = resolver.query(query, 123)\n result = []\n queryDeferred.addBoth(result.append)\n self.assertEqual(expected, result[0])", "def read_single_result():\n # TODO: your code here\n # example return values\n return \"some_table1\", \"some_table2\", \"p1\", \"p2\", \"runtime\"", "def resultAsInt(self, result):\n self.log.debug('result: {result:}', result = result)\n\n try:\n match = self.result_re.match(result)\n if match:\n return int(match.group(1))\n\n raise AGICommandFailure(FAILURE_CODE, result)\n\n except ValueError as err:\n raise AGICommandFailure(FAILURE_CODE, result)", "def GetVectorResult(self):\n ...", "def test_index_result_serialization(self):\n\n # Construct a json representation of a IndexResult model\n index_result_model_json = {}\n index_result_model_json['id'] = 'testString'\n index_result_model_json['name'] = 'testString'\n index_result_model_json['result'] = 'created'\n\n # Construct a model instance of IndexResult by calling from_dict on the json representation\n index_result_model = IndexResult.from_dict(index_result_model_json)\n assert index_result_model != False\n\n # Construct a model instance of IndexResult by calling from_dict on the json representation\n index_result_model_dict = IndexResult.from_dict(index_result_model_json).__dict__\n index_result_model2 = IndexResult(**index_result_model_dict)\n\n # Verify the model instances are equivalent\n assert index_result_model == index_result_model2\n\n # Convert model instance back to dict and verify no loss of data\n index_result_model_json2 = index_result_model.to_dict()\n 
assert index_result_model_json2 == index_result_model_json", "def get_result(self):\n return {\n \"in\": self.input,\n \"out\": self.expected,\n \"actual\": self.actual,\n \"status\": self.status\n }", "def _make_result(self):\n\n return _TestResult(\n self.stream, self.descriptions, self.verbosity, self.elapsed_times\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
You can convert to a ResultError
def test_toResultError(self): w = Work('bob', 'a', '1', 'xxxx', [ ('a', '1', 'xxxx', 'val', 'hash'), ]) r = w.toResultError('the err') self.assertEqual(r, ResultError('bob', 'a', '1', 'xxxx', 'the err', [ ('a', '1', 'xxxx', 'hash'), ]))
[ "def check_result(res, msg=None):\n if not res.status:\n return\n\n # If there was an error, it should be the last operation.\n if res.resarray:\n resop = res.resarray[-1].resop\n else:\n resop = None\n raise BadCompoundRes(resop, res.status, msg)", "def get_error_code(result):\r\n\t\tif result is None:\r\n\t\t\treturn 999\r\n\t\telif result.has_key('error'):\r\n\t\t\treturn result['error']\r\n\t\telif result.has_key('result'):\r\n\t\t\treturn result['result']\r\n\t\telse:\r\n\t\t\treturn 0", "def __call__(self, *args, **kwargs):\n return self.error(*args, **kwargs)", "def _convert_exception(test, exception, errors):\n if isinstance(exception, TimeoutError):\n return (test.result_cls(\"EXTERNAL-TIMEOUT\", errors), [])\n if isinstance(exception, CrashError):\n return (test.result_cls(\"CRASH\", errors), [])\n if isinstance(exception, LeakError):\n # TODO: the internal error is to force a restart, but it doesn't correctly\n # describe what the issue is. Need to find a way to return a \"FAIL\",\n # and restart the content_shell after the test run.\n return (test.result_cls(\"INTERNAL-ERROR\", errors), [])\n raise exception", "def test_unexpected_error_result(self):\n process_result = process_response(self.resp_unexpected_error)\n self.assertEqual(process_result[\"result\"], -1)", "def WithResult(message, result):\n\t\tmessage += \" (result was %s)\" % hexify(result)\n\t\treturn FatalError(message)", "def test_rescue_success():\n def factory(inner_value: int) -> Result[int, Any]:\n return Success(inner_value * 2)\n\n bound = Success(5).rescue(factory)\n\n assert bound == Success(5)\n assert str(bound) == '<Success: 5>'", "def from_exception(e: Exception) -> \"CheckerResult\":\n\n if isinstance(e, EnoException):\n message = str(e)\n return CheckerResult(result=e.result, message=message)\n\n else:\n return CheckerResult(CheckerTaskResult.INTERNAL_ERROR, message=None)", "def fail(error = N_(u'An error occured')):\n def fail_converter(value, state = None):\n if state is None:\n state = states.default_state\n return value, state._(error) if strings.is_basestring(error) else error\n return fail_converter", "def from_result(cls, container, result):\n if result is None:\n raise errors.NoObjectException\n\n elif cls.is_prefix(result):\n return cls.from_prefix(container, result)\n\n elif cls.is_key(result):\n return cls.from_key(container, result)\n\n raise errors.CloudException(\"Unknown boto result type: %s\" %\n type(result))", "def check_result_type(self, result_type):\n if result_type not in self.valid_result_types:\n raise ValueError(\n \"Result type {} not understood, accepted result types are: \\\n {}\".format(result_type, self.valid_result_types))", "def get_error_message(result):\r\n\t\tif result is None:\r\n\t\t\treturn 'Invalid result (connection error)'\r\n\t\telif result.has_key('error') and result['error'] > 0:\r\n\t\t\tif result.has_key('message'):\r\n\t\t\t\treturn result['message']\r\n\t\t\telse:\r\n\t\t\t\treturn BtSyncApi.get_error_text(result['error'])\r\n\t\telif result.has_key('result') and result['result'] > 0:\r\n\t\t\tif result.has_key('message'):\r\n\t\t\t\treturn result['message']\r\n\t\t\telse:\r\n\t\t\t\treturn BtSyncApi.get_error_text(result['result'])\r\n\t\telse:\r\n\t\t\treturn 'No error'", "def test_TestResults_init():\n with pytest.raises(ValueError):\n res1 = test_cases.TestResults([1])", "def handle_failure(query):\n return \"Sorry, we're having trouble finding {query}. 
Can you be more specific?\".format(query=query)", "def invalid_result(self, obj, fmt=None, *args):\n if fmt is not None:\n suffix = \": %s\" % (fmt % args)\n else:\n suffix = \"\"\n raise Exception(\"Not a valid ZGrab2 result\" + suffix)", "def testFieldErrorIsLookupError(self):\n self.assertTrue(issubclass(sqlresult.FieldError, LookupError))", "def to_exception(self):\n for error in OCPPError.__subclasses__():\n if error.code == self.error_code:\n return error(\n description=self.error_description,\n details=self.error_details\n )\n\n raise UnknownCallErrorCodeError(\"Error code '%s' is not defined by the\"\n \" OCPP specification\", self.error_code)", "def make_conversion_error_msg (self, bad_val, err):\n\t\t# TODO: shift to base class\n\t\treturn \"can't convert '%s' to %s\" % (bad_val, self.type_name)", "def unpack_msrest_error(e):\n from typing import Callable\n\n op_err = None\n try:\n err_txt = \"\"\n if isinstance(e.response.text, Callable):\n err_txt = e.response.text()\n else:\n err_txt = e.response.text\n op_err = json.loads(err_txt)\n except (ValueError, TypeError):\n op_err = err_txt\n if not op_err:\n return str(e)\n return op_err" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializing with tuples/lists/ResultInputs should work for the inputs argument.
def test_inputs(self): r = Result('bob', 'a', '1', 'xxxx', 'val', [ ('b', '1', 'xxxx', 'hash'), ['c', '1', 'xxxx', 'hash'], ResultInput('d', '1', 'xxxx', 'hash'), ]) self.assertEqual(r.inputs, ( ResultInput('b', '1', 'xxxx', 'hash'), ResultInput('c', '1', 'xxxx', 'hash'), ResultInput('d', '1', 'xxxx', 'hash'), ), "Should convert all arguments to a ResultInput")
[ "def test_inputs(self):\n r = ResultError('bob', 'a', '1', 'xxxx', 'val', [\n ('b', '1', 'xxxx', 'hash'),\n ['c', '1', 'xxxx', 'hash'],\n ResultInput('d', '1', 'xxxx', 'hash'),\n ])\n self.assertEqual(r.inputs, (\n ResultInput('b', '1', 'xxxx', 'hash'),\n ResultInput('c', '1', 'xxxx', 'hash'),\n ResultInput('d', '1', 'xxxx', 'hash'),\n ), \"Should convert all arguments to a ResultInput\")", "def _prepare(self, inputs):\n assert isinstance(inputs, (tuple, list))\n # Convert variable into VarBase and feed in training data.\n input_vars = []\n for i, value in enumerate(inputs):\n if isinstance(value, np.ndarray):\n var = core.VarBase(\n value=value,\n name=self.inputs[i].desc.name(),\n persistable=False,\n place=framework._current_expected_place(),\n zero_copy=True)\n elif isinstance(value, core.VarBase):\n var = value\n var.name = self.inputs[i].desc.name()\n else:\n continue\n input_vars.append(var)\n # Create VarBase to receive output data.\n out_vars = []\n for var in self.outputs:\n if not isinstance(var, framework.Variable):\n continue\n var_desc = var.desc\n var_base = core.VarBase(var_desc.dtype(),\n var_desc.shape(),\n var_desc.name(), var_desc.type(), False)\n out_vars.append(var_base)\n\n # Hold forward variables\n tmp_scope_vec = core.VarBase(core.VarDesc.VarType.FP32, [],\n \"program_out_scope\",\n core.VarDesc.VarType.STEP_SCOPES, True)\n\n tmp_scope_vec.value().set_scope(self._inner_scope)\n\n return input_vars, out_vars, tmp_scope_vec", "def test_inputs(self):\n w = Work('bob', 'a', '1', 'xxxx', [\n ('b', '1', 'xxxx', 'val', 'hash'),\n ['c', '1', 'xxxx', 'val', 'hash'],\n WorkInput('d', '1', 'xxxx', 'val', 'hash'),\n ])\n self.assertEqual(w.inputs, (\n WorkInput('b', '1', 'xxxx', 'val', 'hash'),\n WorkInput('c', '1', 'xxxx', 'val', 'hash'),\n WorkInput('d', '1', 'xxxx', 'val', 'hash'),\n ), \"Should convert all arguments to a WorkInput\")", "def _initresultsets(self, result_sets):\n if isinstance(result_sets, types.GeneratorType):\n self.generate_resultsets = result_sets\n return\n if result_sets is None:\n result_sets = []\n try:\n self.generate_resultsets = (row for row in result_sets) # generator\n except TypeError:\n raise TypeError(\"Result resultset must be iterable\")", "def __init__(self, inputs=None, outputs=None, data = None):\n if inputs == None:\n self.inputs = []\n else:\n self.inputs = inputs\n\n if outputs == None:\n self.outputs = []\n else:\n self.outputs = outputs\n\n # fill in the rest of the method\n\n self.data = data # to store arbitrary data ", "def _construct_input_spec(self):", "def _parse_list(self, inputs):\n # Lists can only be used as inputs in the case where there is a single input node.\n # Validate that this is true. If so, resolve the list into a dict and parse it.\n input_nodes = self.get_nodes_by_role(NodeRole.INPUT)\n if len(input_nodes) == 1:\n _inputs = {next(iter(input_nodes)): inputs}\n else:\n raise CompositionError(\n f\"Inputs to {self.name} must be specified in a dictionary with a key for each of its \"\n f\"{len(input_nodes)} INPUT nodes ({[n.name for n in input_nodes]}).\")\n input_dict, num_inputs_sets = self._parse_dict(_inputs)\n return input_dict, num_inputs_sets", "def _populate_inputs(self):\n\n self.inputs = Bunch(outfile=None,\n infile=None)", "def _parse_run_inputs(self, inputs):\n # handle user-provided input based on input type. 
return processd inputs and num_inputs_sets\n if not inputs:\n _inputs, num_inputs_sets = self._parse_dict({})\n elif isgeneratorfunction(inputs):\n _inputs, num_inputs_sets = self._parse_generator_function(inputs)\n elif isgenerator(inputs):\n _inputs, num_inputs_sets = self._parse_generator(inputs)\n elif callable(inputs):\n _inputs, num_inputs_sets = self._parse_function(inputs)\n elif type(inputs) == list:\n _inputs, num_inputs_sets = self._parse_list(inputs)\n elif type(inputs) == dict:\n _inputs, num_inputs_sets = self._parse_dict(inputs)\n elif type(inputs) == str:\n _inputs, num_inputs_sets = self._parse_string(inputs)\n else:\n raise CompositionError(\n f\"Provided inputs {inputs} is in a disallowed format. Inputs must be provided in the form of \"\n f\"a dict, list, function, or generator. \"\n f\"See https://princetonuniversity.github.io/PsyNeuLink/Composition.html#composition-run for details and \"\n f\"formatting instructions for each input type.\"\n )\n return _inputs, num_inputs_sets", "def test_IResultInput(self):\n i = WorkInput('a', '1', 'xxxx', 'val', 'hash')\n r = IResultInput(i)\n self.assertEqual(r.name, 'a')\n self.assertEqual(r.version, '1')\n self.assertEqual(r.lineage, 'xxxx')\n self.assertEqual(r.hash, 'hash')", "def mapInputs2Intermediates():\n input_decs = []\n signal_decs = []\n signal_resets = []\n variable_decs = []\n variable_initializations = []\n variable_assignments = []\n intermediate_assignments = []\n signal_assignments = []\n\n one_assignment_batch = []\n input_list = list(range(NUM_INPUTS))\n intermediate_num = 0\n signal_decs.append(declareOneIntermediate(intermediate_num))\n variable_decs.append(declareOneVariable(intermediate_num))\n variable_initializations.append(initializeFirstStageVariable(intermediate_num))\n signal_assignments.append(assignIntermediate(intermediate_num))\n signal_num = 0\n while len(input_list) > 0:\n if (signal_num % INPUTS_PER_INTERMEDIATE == 0) and (signal_num > 0):\n intermediate_num += 1\n signal_decs.append(declareOneIntermediate(intermediate_num))\n variable_decs.append(declareOneVariable(intermediate_num))\n variable_initializations.append(initializeFirstStageVariable(intermediate_num))\n signal_assignments.append(assignIntermediate(intermediate_num))\n signal_resets.append(resetOneIntermediate(intermediate_num))\n n = input_list[0]\n input_decs.append(declareOneInput(n))\n one_assignment_batch.extend(makeOneIfStatement(n, intermediate_num))\n del(input_list[0])\n signal_num += 1\n intermediate_assignments.append(one_assignment_batch)\n\n return input_decs, signal_decs, signal_resets, variable_decs, variable_initializations, intermediate_assignments, signal_assignments", "def _inputs_to_list(self, inputs: InputsType) -> list:\n\n processed_inputs = []\n\n if not isinstance(inputs, (list, tuple)):\n inputs = [inputs]\n\n for single_input in inputs:\n if self.novisual:\n processed_input = copy.deepcopy(single_input)\n if 'img' not in single_input and \\\n 'img_shape' not in single_input:\n raise ValueError(\n 'KIEInferencer in no-visual mode '\n 'requires input has \"img\" or \"img_shape\", but both are'\n ' not found.')\n if 'img' in single_input:\n img = single_input['img']\n if isinstance(img, str):\n img_bytes = mmengine.fileio.get(img)\n img = mmcv.imfrombytes(img_bytes)\n processed_input['img'] = img\n processed_input['img_shape'] = img.shape[:2]\n processed_inputs.append(processed_input)\n else:\n if 'img' not in single_input:\n raise ValueError(\n 'This inferencer is constructed to '\n 'accept image inputs, but 
the input does not contain '\n '\"img\" key.')\n if isinstance(single_input['img'], str):\n processed_input = {\n k: v\n for k, v in single_input.items() if k != 'img'\n }\n processed_input['img_path'] = single_input['img']\n processed_inputs.append(processed_input)\n elif isinstance(single_input['img'], np.ndarray):\n processed_inputs.append(copy.deepcopy(single_input))\n else:\n atype = type(single_input['img'])\n raise ValueError(f'Unsupported input type: {atype}')\n\n return processed_inputs", "def _standardize_args(self, inputs, initial_state, constants, num_constants):\r\n print(\"INPUTS >>>> \", inputs, \" INIT_STATE \", initial_state, \" >> CONSTANTS >>\", constants)\r\n\r\n if isinstance(inputs, list) and len(inputs) > 2:\r\n assert initial_state is None and constants is None\r\n if num_constants is not None:\r\n constants = inputs[-num_constants:]\r\n inputs = inputs[:-num_constants]\r\n initial_state = inputs[2:]\r\n inputs = inputs[:2]\r\n\r\n def to_list_or_none(x):\r\n if x is None or isinstance(x, list):\r\n return x\r\n if isinstance(x, tuple):\r\n return list(x)\r\n return [x]\r\n\r\n initial_state = to_list_or_none(initial_state)\r\n constants = to_list_or_none(constants)\r\n print(\"INPUTS >>>> \", inputs, \" INIT_STATE \", initial_state, \" >> CONSTANTS >>\", constants)\r\n return inputs, initial_state, constants", "def _transfer_tensor_to_tuple(inputs):\n if isinstance(inputs, Tensor):\n return (inputs,)\n\n return inputs", "def initialize(self, runInfo, inputs, initDict):\n self._initializeLSpp(runInfo, inputs, initDict)\n self._initializeLSppROM(self.inputs[self.indexes])", "def __init__(self, id: ID, database: Database, inputs: Tuple[ID, ...], outputs: Tuple[ID, ...]):\r\n\t\tsuper(RawDataToEpochsData, self).__init__(\r\n\t\t\tid, database, inputs, outputs)\r\n\r\n\t\tif inputs[0].get_type() is RawData:\r\n\t\t\tself.set_params = self._set_params_raw\r\n\r\n\t\telse:\r\n\t\t\traise Exception('Input Data type is not RawData\\n'\r\n\t\t\t 'input type={}'.format(inputs[0].get_type()))", "def make_input(self, *args, **kwargs):\r\n self.add(input.Input(*args, **kwargs))", "def _assign_inputs_and_outputs(self, execution, execution_data, interface):\n with self.remote_context() as ctx:\n execution._inputs = TypeEngine.literal_map_to_kwargs(\n ctx=ctx,\n lm=self._get_input_literal_map(execution_data),\n python_types=TypeEngine.guess_python_types(interface.inputs),\n )\n if execution.is_complete and not execution.error:\n execution._outputs = TypeEngine.literal_map_to_kwargs(\n ctx=ctx,\n lm=self._get_output_literal_map(execution_data),\n python_types=TypeEngine.guess_python_types(interface.outputs),\n )\n return execution", "def _instantiate_input_states(self, context=None):\n num_values = len(self.monitored_values)\n values = [None] * num_values\n names = self.names or [None] * num_values\n\n # If default_input_value arg (assigned to variable in __init__) was used to specify the size of inputStates,\n # pass those values for use in instantiating inputStates\n if self.variable is not None:\n input_state_sizes = self.variable\n else:\n input_state_sizes = values\n for i, monitored_value, name in zip(range(num_values), self.monitored_values, names):\n values[i] = self._instantiate_input_state_for_monitored_value(input_state_sizes[i],\n monitored_value,\n name,\n context=context)\n\n # If self.variable was not specified, construct from values of inputStates\n if self.variable is None:\n # If all items of self.variable are numeric and of the same length, convert to ndarray\n 
dim_axis_0 = len(values)\n dim_axis_1 = len(values[0])\n if all((is_numeric(values[i]) and len(values[i])==dim_axis_1) for i in range(dim_axis_0)):\n self.variable = np.zeros((dim_axis_0,dim_axis_1), dtype=float)\n # Otherwise, just use list of values returned from instantiation above\n else:\n self.variable = values.copy()\n\n self.variableClassDefault = self.variable.copy()\n self.inputValue = list(self.variable)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
You can easily convert an IResult to IData
def test_IData(self):
    r = Result('bob', 'a', '1', 'xxxx', 'val', [])
    d = IData(r)
    self.assertEqual(d.entity, 'bob')
    self.assertEqual(d.name, 'a')
    self.assertEqual(d.version, '1')
    self.assertEqual(d.lineage, 'xxxx')
    self.assertEqual(d.value, 'val')
[ "def getResultData(self):\n return self.result", "def test_IResultInput(self):\n i = WorkInput('a', '1', 'xxxx', 'val', 'hash')\n r = IResultInput(i)\n self.assertEqual(r.name, 'a')\n self.assertEqual(r.version, '1')\n self.assertEqual(r.lineage, 'xxxx')\n self.assertEqual(r.hash, 'hash')", "def test_toResult(self):\n w = Work('bob', 'a', '1', 'xxxx', [\n ('a', '1', 'xxxx', 'val', 'hash'),\n ])\n r = w.toResult('the result')\n self.assertEqual(r, Result('bob', 'a', '1', 'xxxx', 'the result', [\n ('a', '1', 'xxxx', 'hash'),\n ]))", "def create_result(self):\n raise NotImplementedError(\"Abstract Method:create_result.\")", "def convert(self):\n self.result = self.source\n return True", "def _gotResult(self, result):\n return {\n \"jsonrpc\": \"2.0\",\n \"result\": result,\n \"id\": 1\n }", "def from_result_proxy(self, proxy, result):\n if not result:\n return None\n\n data = dict(zip(proxy.keys(), result))\n return self.model(engine=self.engine, **data)", "def read_query_result(self, *args, **kwargs): # real signature unknown\n pass", "def serialize_results_data(self):\n if not self.has_results_data:\n return {}\n results = self.data.get('results', {\"format\": \"json\", \"data\": {}})\n if not isinstance(results, dict):\n return {}\n\n if results.get('format', None) == \"json\":\n return results.get('data', {})\n elif results.get('format', None) == \"joblib\":\n try:\n buf = io.BytesIO()\n buf.write(base64.b64decode(results.get('data', None)))\n buf.seek(0)\n data = joblib.load(buf)\n jsons = json.dumps(data, cls=DictNumpyEncoder)\n except Exception as e:\n log(f\"Error serializing results data: {e}\")\n jsons = \"{}\"\n return json.loads(jsons)\n else:\n return {}", "def from_result(cls, container, result):\n if result is None:\n raise errors.NoObjectException\n\n elif cls.is_prefix(result):\n return cls.from_prefix(container, result)\n\n elif cls.is_key(result):\n return cls.from_key(container, result)\n\n raise errors.CloudException(\"Unknown boto result type: %s\" %\n type(result))", "def secondResultItem(self, result):\n\n match = self.result_re.match(result)\n if match:\n result = int(match.group(1))\n data = match.group(2)\n return data\n\n raise AGICommandFailure(FAILURE_CODE, result)", "def _get_result_constructor(self):\n return ResultObject", "def to_objects(self, results):\n return results", "def persist_result(result_data, analysis_result_id, result_name):\n result = ReadStatsResult(**result_data)\n persist_result_helper(result, analysis_result_id, result_name)", "def make_response(self, data, *args, **kwargs):\n # we've already got a response, eg, from jsonify\n if isinstance(data, Response):\n return (data, *args)\n\n if isinstance(data, (list, tuple)) and len(data) and isinstance(data[0], Model):\n model_name = data[0].__class__.__name__\n if model_name in self.serializers_many:\n data = self.serializers_many[model_name].dump(data).data\n\n # we got the result of serializer.dump(obj)\n if isinstance(data, MarshalResult):\n data = data.data\n\n # we got plain python data types that need to be serialized\n return super().make_response(data, *args, **kwargs)", "def test__Resolved__to_data():\n guild_id = 202211050030\n entity_id = 202211050031\n interaction_event = InteractionEvent(guild_id = guild_id)\n \n attachment = Attachment.precreate(entity_id)\n channel = Channel.precreate(entity_id)\n role = Role.precreate(entity_id)\n message = Message.precreate(entity_id)\n user = User.precreate(entity_id)\n \n resolved = Resolved(\n attachments = [attachment],\n channels = 
[channel],\n roles = [role],\n messages = [message],\n users = [user],\n )\n \n data = {\n 'attachments': {str(attachment.id): attachment.to_data(defaults = True, include_internals = True)},\n 'channels': {str(channel.id): channel.to_data(defaults = True, include_internals = True)},\n 'roles': {str(role.id): role.to_data(defaults = True, include_internals = True)},\n 'messages': {str(message.id): message.to_data(defaults = True, include_internals = True)},\n 'users': {str(user.id): user.to_data(defaults = True, include_internals = True)},\n 'members': {},\n }\n \n vampytest.assert_eq(\n resolved.to_data(\n defaults = True,\n interaction_event = interaction_event,\n ),\n data,\n )", "def setResult(self, result):\r\n self.result = result", "def data(prod, data):\n\n (result, _) = prod.read(data)\n return result", "def test_index_result_serialization(self):\n\n # Construct a json representation of a IndexResult model\n index_result_model_json = {}\n index_result_model_json['id'] = 'testString'\n index_result_model_json['name'] = 'testString'\n index_result_model_json['result'] = 'created'\n\n # Construct a model instance of IndexResult by calling from_dict on the json representation\n index_result_model = IndexResult.from_dict(index_result_model_json)\n assert index_result_model != False\n\n # Construct a model instance of IndexResult by calling from_dict on the json representation\n index_result_model_dict = IndexResult.from_dict(index_result_model_json).__dict__\n index_result_model2 = IndexResult(**index_result_model_dict)\n\n # Verify the model instances are equivalent\n assert index_result_model == index_result_model2\n\n # Convert model instance back to dict and verify no loss of data\n index_result_model_json2 = index_result_model.to_dict()\n assert index_result_model_json2 == index_result_model_json" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializing with tuples/lists/ResultInputs should work for the inputs argument.
def test_inputs(self):
    r = ResultError('bob', 'a', '1', 'xxxx', 'val', [
        ('b', '1', 'xxxx', 'hash'),
        ['c', '1', 'xxxx', 'hash'],
        ResultInput('d', '1', 'xxxx', 'hash'),
    ])
    self.assertEqual(r.inputs, (
        ResultInput('b', '1', 'xxxx', 'hash'),
        ResultInput('c', '1', 'xxxx', 'hash'),
        ResultInput('d', '1', 'xxxx', 'hash'),
    ), "Should convert all arguments to a ResultInput")
[ "def test_inputs(self):\n r = Result('bob', 'a', '1', 'xxxx', 'val', [\n ('b', '1', 'xxxx', 'hash'),\n ['c', '1', 'xxxx', 'hash'],\n ResultInput('d', '1', 'xxxx', 'hash'),\n ])\n self.assertEqual(r.inputs, (\n ResultInput('b', '1', 'xxxx', 'hash'),\n ResultInput('c', '1', 'xxxx', 'hash'),\n ResultInput('d', '1', 'xxxx', 'hash'),\n ), \"Should convert all arguments to a ResultInput\")", "def _prepare(self, inputs):\n assert isinstance(inputs, (tuple, list))\n # Convert variable into VarBase and feed in training data.\n input_vars = []\n for i, value in enumerate(inputs):\n if isinstance(value, np.ndarray):\n var = core.VarBase(\n value=value,\n name=self.inputs[i].desc.name(),\n persistable=False,\n place=framework._current_expected_place(),\n zero_copy=True)\n elif isinstance(value, core.VarBase):\n var = value\n var.name = self.inputs[i].desc.name()\n else:\n continue\n input_vars.append(var)\n # Create VarBase to receive output data.\n out_vars = []\n for var in self.outputs:\n if not isinstance(var, framework.Variable):\n continue\n var_desc = var.desc\n var_base = core.VarBase(var_desc.dtype(),\n var_desc.shape(),\n var_desc.name(), var_desc.type(), False)\n out_vars.append(var_base)\n\n # Hold forward variables\n tmp_scope_vec = core.VarBase(core.VarDesc.VarType.FP32, [],\n \"program_out_scope\",\n core.VarDesc.VarType.STEP_SCOPES, True)\n\n tmp_scope_vec.value().set_scope(self._inner_scope)\n\n return input_vars, out_vars, tmp_scope_vec", "def test_inputs(self):\n w = Work('bob', 'a', '1', 'xxxx', [\n ('b', '1', 'xxxx', 'val', 'hash'),\n ['c', '1', 'xxxx', 'val', 'hash'],\n WorkInput('d', '1', 'xxxx', 'val', 'hash'),\n ])\n self.assertEqual(w.inputs, (\n WorkInput('b', '1', 'xxxx', 'val', 'hash'),\n WorkInput('c', '1', 'xxxx', 'val', 'hash'),\n WorkInput('d', '1', 'xxxx', 'val', 'hash'),\n ), \"Should convert all arguments to a WorkInput\")", "def _initresultsets(self, result_sets):\n if isinstance(result_sets, types.GeneratorType):\n self.generate_resultsets = result_sets\n return\n if result_sets is None:\n result_sets = []\n try:\n self.generate_resultsets = (row for row in result_sets) # generator\n except TypeError:\n raise TypeError(\"Result resultset must be iterable\")", "def __init__(self, inputs=None, outputs=None, data = None):\n if inputs == None:\n self.inputs = []\n else:\n self.inputs = inputs\n\n if outputs == None:\n self.outputs = []\n else:\n self.outputs = outputs\n\n # fill in the rest of the method\n\n self.data = data # to store arbitrary data ", "def _construct_input_spec(self):", "def _parse_list(self, inputs):\n # Lists can only be used as inputs in the case where there is a single input node.\n # Validate that this is true. If so, resolve the list into a dict and parse it.\n input_nodes = self.get_nodes_by_role(NodeRole.INPUT)\n if len(input_nodes) == 1:\n _inputs = {next(iter(input_nodes)): inputs}\n else:\n raise CompositionError(\n f\"Inputs to {self.name} must be specified in a dictionary with a key for each of its \"\n f\"{len(input_nodes)} INPUT nodes ({[n.name for n in input_nodes]}).\")\n input_dict, num_inputs_sets = self._parse_dict(_inputs)\n return input_dict, num_inputs_sets", "def _populate_inputs(self):\n\n self.inputs = Bunch(outfile=None,\n infile=None)", "def _parse_run_inputs(self, inputs):\n # handle user-provided input based on input type. 
return processd inputs and num_inputs_sets\n if not inputs:\n _inputs, num_inputs_sets = self._parse_dict({})\n elif isgeneratorfunction(inputs):\n _inputs, num_inputs_sets = self._parse_generator_function(inputs)\n elif isgenerator(inputs):\n _inputs, num_inputs_sets = self._parse_generator(inputs)\n elif callable(inputs):\n _inputs, num_inputs_sets = self._parse_function(inputs)\n elif type(inputs) == list:\n _inputs, num_inputs_sets = self._parse_list(inputs)\n elif type(inputs) == dict:\n _inputs, num_inputs_sets = self._parse_dict(inputs)\n elif type(inputs) == str:\n _inputs, num_inputs_sets = self._parse_string(inputs)\n else:\n raise CompositionError(\n f\"Provided inputs {inputs} is in a disallowed format. Inputs must be provided in the form of \"\n f\"a dict, list, function, or generator. \"\n f\"See https://princetonuniversity.github.io/PsyNeuLink/Composition.html#composition-run for details and \"\n f\"formatting instructions for each input type.\"\n )\n return _inputs, num_inputs_sets", "def test_IResultInput(self):\n i = WorkInput('a', '1', 'xxxx', 'val', 'hash')\n r = IResultInput(i)\n self.assertEqual(r.name, 'a')\n self.assertEqual(r.version, '1')\n self.assertEqual(r.lineage, 'xxxx')\n self.assertEqual(r.hash, 'hash')", "def mapInputs2Intermediates():\n input_decs = []\n signal_decs = []\n signal_resets = []\n variable_decs = []\n variable_initializations = []\n variable_assignments = []\n intermediate_assignments = []\n signal_assignments = []\n\n one_assignment_batch = []\n input_list = list(range(NUM_INPUTS))\n intermediate_num = 0\n signal_decs.append(declareOneIntermediate(intermediate_num))\n variable_decs.append(declareOneVariable(intermediate_num))\n variable_initializations.append(initializeFirstStageVariable(intermediate_num))\n signal_assignments.append(assignIntermediate(intermediate_num))\n signal_num = 0\n while len(input_list) > 0:\n if (signal_num % INPUTS_PER_INTERMEDIATE == 0) and (signal_num > 0):\n intermediate_num += 1\n signal_decs.append(declareOneIntermediate(intermediate_num))\n variable_decs.append(declareOneVariable(intermediate_num))\n variable_initializations.append(initializeFirstStageVariable(intermediate_num))\n signal_assignments.append(assignIntermediate(intermediate_num))\n signal_resets.append(resetOneIntermediate(intermediate_num))\n n = input_list[0]\n input_decs.append(declareOneInput(n))\n one_assignment_batch.extend(makeOneIfStatement(n, intermediate_num))\n del(input_list[0])\n signal_num += 1\n intermediate_assignments.append(one_assignment_batch)\n\n return input_decs, signal_decs, signal_resets, variable_decs, variable_initializations, intermediate_assignments, signal_assignments", "def _inputs_to_list(self, inputs: InputsType) -> list:\n\n processed_inputs = []\n\n if not isinstance(inputs, (list, tuple)):\n inputs = [inputs]\n\n for single_input in inputs:\n if self.novisual:\n processed_input = copy.deepcopy(single_input)\n if 'img' not in single_input and \\\n 'img_shape' not in single_input:\n raise ValueError(\n 'KIEInferencer in no-visual mode '\n 'requires input has \"img\" or \"img_shape\", but both are'\n ' not found.')\n if 'img' in single_input:\n img = single_input['img']\n if isinstance(img, str):\n img_bytes = mmengine.fileio.get(img)\n img = mmcv.imfrombytes(img_bytes)\n processed_input['img'] = img\n processed_input['img_shape'] = img.shape[:2]\n processed_inputs.append(processed_input)\n else:\n if 'img' not in single_input:\n raise ValueError(\n 'This inferencer is constructed to '\n 'accept image inputs, but 
the input does not contain '\n '\"img\" key.')\n if isinstance(single_input['img'], str):\n processed_input = {\n k: v\n for k, v in single_input.items() if k != 'img'\n }\n processed_input['img_path'] = single_input['img']\n processed_inputs.append(processed_input)\n elif isinstance(single_input['img'], np.ndarray):\n processed_inputs.append(copy.deepcopy(single_input))\n else:\n atype = type(single_input['img'])\n raise ValueError(f'Unsupported input type: {atype}')\n\n return processed_inputs", "def _standardize_args(self, inputs, initial_state, constants, num_constants):\r\n print(\"INPUTS >>>> \", inputs, \" INIT_STATE \", initial_state, \" >> CONSTANTS >>\", constants)\r\n\r\n if isinstance(inputs, list) and len(inputs) > 2:\r\n assert initial_state is None and constants is None\r\n if num_constants is not None:\r\n constants = inputs[-num_constants:]\r\n inputs = inputs[:-num_constants]\r\n initial_state = inputs[2:]\r\n inputs = inputs[:2]\r\n\r\n def to_list_or_none(x):\r\n if x is None or isinstance(x, list):\r\n return x\r\n if isinstance(x, tuple):\r\n return list(x)\r\n return [x]\r\n\r\n initial_state = to_list_or_none(initial_state)\r\n constants = to_list_or_none(constants)\r\n print(\"INPUTS >>>> \", inputs, \" INIT_STATE \", initial_state, \" >> CONSTANTS >>\", constants)\r\n return inputs, initial_state, constants", "def _transfer_tensor_to_tuple(inputs):\n if isinstance(inputs, Tensor):\n return (inputs,)\n\n return inputs", "def initialize(self, runInfo, inputs, initDict):\n self._initializeLSpp(runInfo, inputs, initDict)\n self._initializeLSppROM(self.inputs[self.indexes])", "def __init__(self, id: ID, database: Database, inputs: Tuple[ID, ...], outputs: Tuple[ID, ...]):\r\n\t\tsuper(RawDataToEpochsData, self).__init__(\r\n\t\t\tid, database, inputs, outputs)\r\n\r\n\t\tif inputs[0].get_type() is RawData:\r\n\t\t\tself.set_params = self._set_params_raw\r\n\r\n\t\telse:\r\n\t\t\traise Exception('Input Data type is not RawData\\n'\r\n\t\t\t 'input type={}'.format(inputs[0].get_type()))", "def make_input(self, *args, **kwargs):\r\n self.add(input.Input(*args, **kwargs))", "def _assign_inputs_and_outputs(self, execution, execution_data, interface):\n with self.remote_context() as ctx:\n execution._inputs = TypeEngine.literal_map_to_kwargs(\n ctx=ctx,\n lm=self._get_input_literal_map(execution_data),\n python_types=TypeEngine.guess_python_types(interface.inputs),\n )\n if execution.is_complete and not execution.error:\n execution._outputs = TypeEngine.literal_map_to_kwargs(\n ctx=ctx,\n lm=self._get_output_literal_map(execution_data),\n python_types=TypeEngine.guess_python_types(interface.outputs),\n )\n return execution", "def _instantiate_input_states(self, context=None):\n num_values = len(self.monitored_values)\n values = [None] * num_values\n names = self.names or [None] * num_values\n\n # If default_input_value arg (assigned to variable in __init__) was used to specify the size of inputStates,\n # pass those values for use in instantiating inputStates\n if self.variable is not None:\n input_state_sizes = self.variable\n else:\n input_state_sizes = values\n for i, monitored_value, name in zip(range(num_values), self.monitored_values, names):\n values[i] = self._instantiate_input_state_for_monitored_value(input_state_sizes[i],\n monitored_value,\n name,\n context=context)\n\n # If self.variable was not specified, construct from values of inputStates\n if self.variable is None:\n # If all items of self.variable are numeric and of the same length, convert to ndarray\n 
dim_axis_0 = len(values)\n dim_axis_1 = len(values[0])\n if all((is_numeric(values[i]) and len(values[i])==dim_axis_1) for i in range(dim_axis_0)):\n self.variable = np.zeros((dim_axis_0,dim_axis_1), dtype=float)\n # Otherwise, just use list of values returned from instantiation above\n else:\n self.variable = values.copy()\n\n self.variableClassDefault = self.variable.copy()\n self.inputValue = list(self.variable)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update or create a contact address and location pair. If the location does not exist, it will be automatically created. If the server already has a location assigned with the same name, the contact address specified will be added if it doesn't already exist (Management and Log Server can have multiple addresses for a single location).
def update_or_create(
    self, location, contact_addresses, with_status=False, overwrite_existing=False, **kw
):
    updated, created = False, False
    location_ref = location_helper(location)
    if location_ref in self:
        for loc in self:
            if loc.location_ref == location_ref:
                if overwrite_existing:
                    loc["addresses"][:] = contact_addresses
                    updated = True
                else:
                    for ca in contact_addresses:
                        if ca not in loc.addresses:
                            loc["addresses"].append(ca)
                            updated = True
    else:
        self.data.setdefault("multi_contact_addresses", []).append(
            dict(addresses=contact_addresses, location_ref=location_ref)
        )
        created = True

    if updated or created:
        self.update()
    if with_status:
        return self, updated, created
    return self
[ "def set_device_location(self, name=None, location: str = None, lat: float = None, lng: float = None):\n loc = self.get_location(location)\n if loc:\n location_id = loc.id\n # location exist, check if data is modified\n data = AttrDict()\n if loc.lng != lng:\n data.lng = lng\n if loc.lat != lat:\n data.lat = lat\n if len(data):\n res = self.edit_location(location=location, lat=lat, lng=lng)\n if not res:\n raise KeyError(\"Location could not be updated\")\n loc = self.get_location(location)\n if loc is None:\n raise KeyError(\"Location could not be retrieved after update\")\n\n else:\n # location does not exist, create new location\n location_id = self.add_location(location=location, lat=lat, lng=lng)\n if not location_id:\n raise KeyError(\"Location could not be created\")\n\n data = AttrDict(location_id=location_id)\n r = self.update_device(name=name, data=data)\n return r.get(\"status\", \"\") == \"ok\"", "def create_location(self):\n return self.client().post('/api/organizations/1/locations/',\n data=self.location_data)", "def get_location_or_create(location_str, address=None):\n\t\t\n\t\tlocation_arr = LocationUtil.get_all_locations_array()\n\t\t\n\t\tfor location in location_arr:\n\t\t\tif(CrawlUtil.is_similar(location_str, location[1])):\n\t\t\t\treturn LocationUtil.get_location(location[0])\n\n\t\t#not found similar one in the database, therefore create\n\t\tnew_location = LocationUtil.create_location()\n\t\tnew_location.name = location_str\n\t\tif(address != None):\n\t\t\tnew_location.address = address\n\t\t\t\n\t\tLocationUtil.update_location(new_location)\n\t\t\n\t\treturn new_location", "def Location_get(street_address, city, state, zip, name) -> Tuple[Location, bool]:\n return Location.objects.get_or_create(\n street_address=street_address,\n city=city,\n state=state,\n zip=zip,\n defaults=dict(name=name),\n )", "def insert_address(self, contact_id, address_details):\n pass", "def create(cls, data, id_=None, **kwargs):\n data[\"location\"] = {\n \"$ref\": cls._location_resolver_path.format(\n scheme=current_app.config[\"JSONSCHEMAS_URL_SCHEME\"],\n host=current_app.config[\"JSONSCHEMAS_HOST\"],\n internal_location_pid=data[cls.pid_field],\n )\n }\n return super(InternalLocation, cls).create(data, id_=id_, **kwargs)", "def add_location(self, location: str=None, lat: float=None, lng: float=None):\n data = AttrDict(location=location)\n if lat:\n data.lat = lat\n else:\n data.lat = 0.001\n if lng:\n data.lng = lng\n else:\n data.lng = 0.001\n r = self.call_api(method=\"POST\", endpoint=f\"/locations\", data=data)\n res = r.json()\n print(\"res\", res)\n if res.get(\"status\", \"\") != \"ok\":\n return None\n location_id = int(res[\"message\"].split(\"#\")[1])\n\n # Librenms add_location requires lat, lng\n # edit_location accepts None, so adjust\n r = self.edit_location(location=location_id, lat=lat, lng=lng)\n\n self._load_locations(refresh=True)\n return location_id", "def create_location(id, name, abbr, subtitle=\"\", audio=\"\"):\r\n\r\n location = Location(id=id, \r\n name=name, \r\n abbr=abbr, \r\n default_subtitle = subtitle, \r\n default_audio = audio)\r\n\r\n db.session.add(location)\r\n db.session.commit() \r\n\r\n return location", "def set_contact_info(self, name, phone_number, email, address, office_hours=\"<not specified>\",\n office_number=\"<not specified>\"):\n if not self.contact_info:\n ci = ContactInfo.create(self.username, name, phone_number, email, address, office_hours,\n office_number).save()\n self.contact_info = ci\n else:\n ci = self.contact_info\n 
ci.name = name\n ci.phoneNumber = phone_number\n ci.email = email\n ci.address = address\n if office_hours != \"<not specified>\":\n ci.officeHours = office_hours\n if office_number != \"<not specified>\":\n ci.officeNumber = office_number\n self.save()", "def update_address():\n session = connect()\n try:\n user = load_user(current_user.id)\n address = get_address(user.address_id)\n except AttributeError:\n return 'Error getting user data'\n if address is None:\n address = Address()\n if request.method == 'POST':\n if request.form['street_1']:\n address.street_1 = request.form['street_1']\n if request.form['street_2']:\n address.street_2 = request.form['street_2']\n if request.form['city']:\n address.city = request.form['city']\n if request.form['state']:\n address.state = request.form['state']\n if request.form['zip_code']:\n address.zip_code = request.form['zip_code']\n address_string = get_address_string(address)\n if validate_address(address_string) is False:\n flash(\"Address is invalid or outside delivery radius!\")\n return redirect(url_for('cart_edit_address'))\n address = session.add(address)\n user.address_id = get_address_id(address)\n user = session.merge(user)\n flash(\"Address saved!\")\n session.commit()\n return redirect(url_for('show_cart'))", "def add_location(self, location):\n # TODO Test\n # TODO Update all employees' availability at the hours of the new place\n self.locations.append(location)\n self.schedule.shifts.update({location: {}})", "def test_view_can_update_a_location(self):\n self.create_org()\n\n new_data = {\n \"name\": \"Chicago Loop\"\n }\n # create location\n self.client().post('/api/organizations/1/locations/',\n data=self.location_data)\n # update the location\n res = self.client().put('/api/organizations/1/locations/1',\n data=new_data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"Chicago Loop\", str(res.data))", "def create_or_edit_contact(\n self,\n address_book_definition_id: Union[str, int],\n contact_fields: list,\n contact_id: Union[str, int] = None,\n submitter: Optional[str] = None,\n **kwargs,\n ) -> str:\n return self._requester.request(\n method_name=\"createOrEditContact\", params=cleanup_args(locals()), **kwargs\n )", "def test_view_can_update_a_physical_address(self):\n\n self.create_org()\n self.create_location()\n\n new_data = {\n \"postal_code\": \"60603\"\n }\n # create address\n self.client().post('/api/organizations/1/locations/1/addresses/',\n data=self.address_data)\n # update the address\n res = self.client().put('/api/organizations/1/locations/1/addresses/1',\n data=new_data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"60603\", str(res.data))", "def update_address(self, address_details):\n pass", "def save_location(data):\n location_id = data['id']\n latitude = data['latitude']\n longitude = data['longitude']\n db_session.query(Location).filter_by(id=location_id).update({\"latitude\": latitude, \"longitude\": longitude})\n db_session.commit()", "def put(address_type, address):", "def test_locations_correct(self):\n location = \"/var/www/foo/\"\n handle_servername = ServerName(**{\n \"domain\" : self.valid_domain,\n }\n )\n handle_location = Location(**{\n \"location\" : location\n }\n )\n handle_servername.locations = handle_location\n self.assertEqual(handle_servername.locations[location].location, location)\n del handle_location\n del handle_servername", "def add_user_location():\n\n form = LocationForm()\n\n if form.validate_on_submit():\n\n\n if form.env.data == \"alp\":\n location = 
Location(name=form.name.data,\n user_id = g.user.id,\n location = form.location.data,\n latitude=form.latitude.data,\n longitude=form.longitude.data,\n image_url=form.image_url.data,\n description=form.description.data,\n is_snowy=True,\n is_desert=False,\n )\n\n if form.env.data == \"sand\":\n location = Location(name=form.name.data,\n user_id = g.user.id,\n location = form.location.data,\n latitude=form.latitude.data,\n longitude=form.longitude.data,\n image_url=form.image_url.data,\n description=form.description.data,\n is_snowy=False,\n is_desert=True,\n )\n\n if form.env.data == \"none\":\n location = Location(name=form.name.data,\n user_id = g.user.id,\n location = form.location.data,\n latitude=form.latitude.data,\n longitude=form.longitude.data,\n image_url=form.image_url.data,\n description=form.description.data,\n is_snowy=False,\n is_desert=False,\n )\n \n db.session.add(location)\n db.session.commit()\n \n return redirect(\"/user_locations\")\n\n else:\n return render_template('location-add.html', form=form)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
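Illustrative note (not part of the records): a minimal usage sketch for the update_or_create method above, assuming it is reached through a server's contact_addresses reference (shown in the next record). The LogServer import path, the element lookup by name, and the location/address values are assumptions; only the signature and the with_status return shape come from the record itself.

# Hypothetical sketch: LogServer lookup and the location/address values are
# assumptions; only update_or_create's signature and with_status behaviour
# come from the record above.
from smc.elements.servers import LogServer  # assumed import path

server = LogServer("LogServer 10.0.0.1")  # assumed element lookup by name
collection, updated, created = server.contact_addresses.update_or_create(
    location="RemoteSite",               # illustrative location name
    contact_addresses=["203.0.113.10"],  # illustrative address list
    with_status=True,                    # return (collection, updated, created)
)
print(updated, created)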
Provides a reference to contact addresses used by this server. Obtain a reference to manipulate or iterate existing contact addresses.
def contact_addresses(self):
    return MultiContactAddress(
        href=self.get_relation("contact_addresses"), type=self.typeof, name=self.name
    )
[ "def accessContacts(self):\n\n self.db.execute(\"SELECT ALL id FROM contacts\")\n contacts = self.db.fetchall()\n\n for id in range(1, len(contacts)+1):\n self.info(id)\n\n return", "def get_contacts(self):\n logger.info(\"Retrieve Phonebook\")\n ready = yield WaitDBus(self.gsm_sim.GetSimReady)\n if ready == False:\n logger.info(\"ready false\")\n while 1:\n status = yield WaitDBusSignal(self.gsm_sim, 'ReadyStatus')\n if status == True:\n logger.debug(\"ready now true breaking\")\n break\n else:\n logger.debug(\"ready still flase not breaking\")\n continue\n \n entries = yield retry_on_sim_busy(self.gsm_sim.RetrievePhonebook,\n 'contacts')\n logger.info(\"Got %d contacts\" % len(entries))\n #logger.debug('get contacts : %s', entries)\n\n ret = []\n for entry in entries:\n index = int(entry[0])\n name = unicode(entry[1])\n tel = str(entry[2])\n contact = SIMContact(name=name, tel=tel, sim_index=index)\n self.indexes[index] = contact\n ret.append(contact)\n yield ret", "def add_contacts(self, contacts: List[FullContact]):\n for contact in contacts:\n # noinspection PyProtectedMember\n self.add_contact(**(contact._asdict()))", "def z_listaddresses(self):\n return self._call('z_listaddresses')", "def nextAddresses(self) -> List[ghidra.program.model.address.Address]:\n ...", "def update_contacts(self, ego_network, contact_type: Union[Hashable, Tuple[Hashable]] = \"__all__\"):\n contact_type = standardize_contact_type(contact_type)\n for ct in contact_type:\n for contact in ego_network._contacts_by_type[ct]:\n self.add_contact(**contact._asdict())", "def set_contacts(self, contact):\n self.contacts.append(contact)", "def get_contacts(self):\n try:\n contacts = []\n res = self.session.query(Contact).all()\n for contact in res:\n contacts.append(contact)\n return contacts\n except:\n print \"[-] No contacts found!\"\n return False", "def addresses(self) -> typing.List[str]:\n return typing.cast(\n typing.List[str],\n self._properties.get(\"addresses\"),\n )", "def get_contacts(self, resp):\n contacts_list = ContactList()\n response = resp['contacts']\n for value in resp['contacts']:\n contact = Contact()\n contact.set_contact_id(value['contact_id'])\n contact.set_contact_name(value['contact_name'])\n contact.set_company_name(value['company_name'])\n contact.set_contact_type(value['contact_type'])\n contact.set_status(value['status'])\n contact.set_payment_terms(value['payment_terms'])\n contact.set_payment_terms_label(value['payment_terms_label'])\n contact.set_currency_id(value['currency_id'])\n contact.set_currency_code(value['currency_code'])\n contact.set_outstanding_receivable_amount(value[\\\n 'outstanding_receivable_amount'])\n contact.set_outstanding_payable_amount(value[\\\n 'outstanding_payable_amount'])\n contact.set_unused_credits_receivable_amount(value[\\\n 'unused_credits_receivable_amount'])\n contact.set_unused_credits_payable_amount(value[\\\n 'unused_credits_payable_amount'])\n contact.set_first_name(value['first_name'])\n contact.set_last_name(value['last_name'])\n contact.set_email(value['email'])\n contact.set_phone(value['phone'])\n contact.set_mobile(value['mobile'])\n contact.set_created_time(value['created_time'])\n contact.set_last_modified_time(value['last_modified_time'])\n contacts_list.set_contacts(contact)\n page_context_object = PageContext()\n page_context = resp['page_context']\n page_context_object.set_page(page_context['page'])\n page_context_object.set_per_page(page_context['per_page'])\n page_context_object.set_has_more_page(page_context['has_more_page'])\n 
page_context_object.set_applied_filter(page_context['applied_filter'])\n page_context_object.set_sort_column(page_context['sort_column']) \n page_context_object.set_sort_order(page_context['sort_order']) \n \n contacts_list.set_page_context(page_context_object)\n return contacts_list", "def getVolatileAddresses(self) -> ghidra.program.model.address.AddressSetView:\n ...", "def getReferencesFromAddress(self,addr):\n return HopperLowLevel.getReferencesFromAddress(self.__internal_segment_addr__,addr)", "def getContactInfo(self):\n ci_email = ContactInfo(\"email\", \"johdoe@mycompany.com\")\n ci_phone = ContactInfo(\"phone\", \"+1 5551324567\")\n ci_sms = ContactInfo(\"sms\", \"+1 5557654321\")\n return [ci_email, ci_phone, ci_sms]", "def contact_ids(self):\n ret = self._get_attr(\"contactIds\")\n return ret", "def getReferencesOfAddress(self,addr):\n return HopperLowLevel.getReferencesOfAddress(self.__internal_segment_addr__,addr)", "def service_addresses(self):\n return tuple(map(lambda p: Address(dict(ip=p['ip'], port=p['port'])), self.cfg))", "def resolve_supplier_contacts(self, info, **kwargs):\n return self.get_supplier_contacts", "def get_address_contacts(self, access_levels = (constants.PUBLIC_ACCESS,),\n viewer_profile = None):\n \n query = UserAddress.objects.filter(profile = self)\n return filter_access_levels(query, \"access\", access_levels, \"profile\",\n viewer_profile)", "def address_get(self, adr_pref=None):\n adr_pref = set(adr_pref or [])\n if 'contact' not in adr_pref:\n adr_pref.add('contact')\n result = {}\n visited = set()\n for partner in self:\n current_partner = partner\n while current_partner:\n to_scan = [current_partner]\n # Scan descendants, DFS\n while to_scan:\n record = to_scan.pop(0)\n visited.add(record)\n if record.type in adr_pref and not result.get(record.type):\n result[record.type] = record.id\n if len(result) == len(adr_pref):\n return result\n to_scan = [c for c in record.child_ids\n if c not in visited] + to_scan\n #if not c.is_company] + to_scan\n\n # Continue scanning at ancestor if current_partner is not a commercial entity\n if current_partner.is_company or not current_partner.parent_id:\n break\n current_partner = current_partner.parent_id\n\n # default to type 'contact' or the partner itself\n default = result.get('contact', self.id or False)\n for adr_type in adr_pref:\n result[adr_type] = result.get(adr_type) or default\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove a contact address by location name. You can obtain all existing contact addresses through the contact_addresses reference.
def remove_contact_address(self, location):
    return self.contact_addresses.delete(location)
[ "def remove_contact(self, contact):\n self.contacts.remove(contact)", "def remove_contact(self, contact_id):\n pass", "def remove_address(self, address_id):\n pass", "def remove_street(estreet={}, estreet_name=[]):\n del estreet[estreet_name]", "def del_contact(self, contact_name):\n\n self.session.query(self.Contacts)\\\n .filter_by(user=contact_name)\\\n .delete()\n self.session.commit()", "def removeContact(self, contact): \n contactSelected = None\n for item in self.__agenda:\n if item.getPlayer() == contact:\n contactSelected = item\n break\n if contactSelected:\n self.__agenda.remove(contactSelected)\n self.save(\"player\")", "def delete(name):\n try:\n number = phonebook[name]\n del phonebook[name]\n return 'Delete contact {} with phone number {}. Successfully!'.format(name, number)\n except KeyError:\n raise PhoneBookError('Contact with the name {} is absent in our phone book.'.format(name))", "def remove_address(self, address: str):\n receiver = self.receivers.pop(address)\n receiver.close()", "def clear_address_from_mycity_object(mycity_object):\n if intent_constants.ZIP_CODE_KEY in mycity_object.session_attributes:\n del(mycity_object.session_attributes[intent_constants.ZIP_CODE_KEY])\n\n if intent_constants.CURRENT_ADDRESS_KEY in mycity_object.session_attributes:\n del(mycity_object.session_attributes[\n intent_constants.CURRENT_ADDRESS_KEY])\n\n return mycity_object", "def removeExternalLocation(self, externalAddr: ghidra.program.model.address.Address) -> bool:\n ...", "def delete_(self, name, phone_number):\r\n contact = self.check_contact(name, phone_number)\r\n if contact:\r\n self.contacts.remove(contact)\r\n self.save()\r\n return 'Delete contact {} with phone number {}. Successfully!'.\\\r\n format(name, phone_number)\r\n return PhoneBookError('No found contact with name={} and phone={}'.\r\n format(name, phone_number))", "def remove_location(self, location=None):\n raise NotImplementedError", "def remove_person(self, id, name, phone, address):\r\n p = Person(id, name, phone, address)\r\n self.__repo - p", "def remove_emergency_contact(self, id: int):\n raise NotImplementedError()", "def Delete_Contact(self, index):\n return self.__contactList.pop(index)", "def removeLdapContact(self, id, cursor, uid):\n conn = self.connectToLdap(cursor, uid, context={})\n to_delete = None\n try:\n to_delete = self.getLdapContact(conn, id)\n except ldap.NO_SUCH_OBJECT:\n logger.notifyChannel(\"Warning\", netsvc.LOG_INFO,\n _(\"'no object to delete in ldap' %s\") % (id))\n except Exception, e:\n raise e\n try:\n if to_delete:\n conn.connexion.delete_s(to_delete[0])\n conn.connexion.unbind_s()\n except Exception, e:\n raise e", "def remove_sync_map_entry():\n\tdat = json.loads(request.data)\n\tto_number = dat[\"phone_number\"]\n\n\toutput = trello.remove_customer_from_sync_map(to_number)\n\n\treturn \"SUCCESS\"", "def _clear_address(self):\n for part_addr in [\n \"street\",\n \"house\",\n \"slash\",\n \"letter\",\n \"corpus\",\n \"building\",\n \"room\",\n \"hotel\",\n \"num_address_type\",\n \"region\",\n \"area\",\n \"location\",\n \"place\",\n ]:\n setattr(self, part_addr, \"\")", "def delete_contact(self, contact):\n self.validate_id('Sorry, unable to delete contact from this address '\n 'book, as no ID value has been defined for the '\n 'address book.')\n\n contact.validate_id('Sorry, unable to delete this contact from the '\n 'address book, as the contact has no ID value.')\n\n connection.delete(\n '{}/{}/contacts/{}'.format(self.end_point, self.id, contact.id)\n )\n\n return 
True", "def delete_address(self, address):\n params = {'address': address}\n self._make_request('deleteAddress', **params)\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
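Illustrative note: a sketch combining the contact_addresses reference and remove_contact_address from the records above, reusing the same assumed LogServer lookup; the location name is illustrative.

# Hypothetical sketch; the server lookup is the same assumption as in the
# earlier update_or_create example.
from smc.elements.servers import LogServer  # assumed import path

server = LogServer("LogServer 10.0.0.1")     # assumed element lookup
for entry in server.contact_addresses:       # iterate existing location/address entries
    print(entry)
server.remove_contact_address("RemoteSite")  # delete the entry for this location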
Restart Web Access on Mgt Server.
def restart_web_access(self):
    session = _get_session(SMCRequest._session_manager)
    response = session.session.put(
        url=self.href+"/restart_web_access"
    )
    if response.status_code != 200:
        raise SMCOperationFailure(response)
    return 0
[ "def restart_appserver():\n\n require('hosts')\n \n run(\"invoke restart\")", "def restart_webserver(site='localhost'):\n with hide('output'), settings(host_string=site):\n status = sudo('supervisorctl restart %s' % APP_NAME)\n if 'RUNNING' in status:\n print 'Server restarted. Access the site at:'\n print 'http://%s/' % _get_ip(site)", "def restart_webservers():\n hr()\n print magenta('Restart Web Servers')\n hr()\n print magenta('Restart Green Unicorn..')\n sudo('stop atrend_shop_app; start atrend_shop_app')\n print magenta('Restart Nginx..')\n sudo('service nginx restart')\n hr()\n print magenta('[DONE] Web Servers is up.')", "def restart(self):\n\t\tbody = dict()\n\t\tbody[\"restart_server\"] = {\n\t\t\t\"stop_type\" : \"soft\",\n \t\t\t\"timeout\" : \"30\",\n \t\t\t\"timeout_action\" : \"destroy\"\n\t\t}\n\t\tself.cloud_manager.post_request(\"/server/\" + self.uuid + \"/restart\" , body)\n\t\tobject.__setattr__(self, \"state\", \"maintenance\") # post_request already handles any errors from API", "def restart_server():\n caput('13XRM:SCANDB:Shutdown', 1)", "def restart_webserver():\n require('service_name')\n sudo('service nginx reload')\n try:\n sudo('stop %(service_name)s' % env)\n except: # Might be already stopped\n pass\n try:\n sudo('start %(service_name)s' % env)\n except: # Might be already started\n pass", "def restart_webserver():\n sudo(\"stop joshgachnang\")\n # Give uwsgi time to shut down cleanly\n time.sleep(2)\n sudo(\"start joshgachnang\")\n \n \n sudo(\"/etc/init.d/nginx reload\")", "def restart(self):\n self.logger.debug('Server - td-agent-bit - restart call.')\n self.change_service_status(\"restart\")", "def restart():\n supervisor_run(\"restart welt2000\")\n run(\"sleep 1\")\n supervisor_run(\"tail welt2000\")", "def restart_apache(self):\n\n if self.distro == Dist.UBUNTU:\n self.run('sudo service apache2 restart')\n else:\n util.error('restart_apache has unknown platform')", "def server_reboot(self):\n return self._post(Endpoint.REBOOT_SERVER)", "def restart_route():\n # using run instead of sudo because sudo prompts for a password\n run('sudo /etc/init.d/mwana-route restart')\n # print out the top of the log file in case there are errors\n import time\n time.sleep(2)\n run('head -n 15 %s/route.log' % env.path)", "def ds_restart(self, servers):\n with log_output(self):\n tangoctl.starter_restart_servers(server_name=servers)", "def restart(self):\n\t\trun('/etc/init.d/puppet restart')", "def stop_webserver():\r\n _webserver_do('stop')", "def restart(self):\n logging.warning(\"Restarting openbts\")\n envoy.run(\"sudo supervisordctl restart openbts\")", "def restart():\n restart_uwsgi()\n clear_logs()\n restart_celeryd()", "def restart():\n print(\"Restarting watch\")\n python = sys.executable\n os.execl(python, python, * sys.argv)", "def restart(self):\n self.write(\"miner_restart\")\n response = self.read()\n\n if response:\n return True\n else:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
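Illustrative note: restart_web_access above raises SMCOperationFailure on any non-200 response, so a caller would normally wrap it. The ManagementServer class name and both import paths below are assumptions; only the method and exception names appear in the record.

# Hypothetical error-handling sketch around restart_web_access.
from smc.api.exceptions import SMCOperationFailure  # assumed import path
from smc.elements.servers import ManagementServer   # assumed class/import path

server = ManagementServer("Management Server")  # assumed element lookup
try:
    server.restart_web_access()                 # returns 0 when the PUT succeeds
except SMCOperationFailure as exc:
    print(f"Restart of Web Access failed: {exc}")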
The Port that is used for log forwarding. The default port used by IPFIX/NetFlow data collectors is 2055. Note! If you have to define an Access rule that allows traffic to the target host, make sure that the Port you select is also used as the Port in the Access rule.
def netflow_collector_port(self):
    return self.data["netflow_collector_port"]
[ "def port(self):\n return self._val.port or DEFAULT_PORTS.get(self._val.scheme)", "def port(self):\n\n return self.server_address[1]", "def get_fluentd_syslog_src_port():\n for port in range(25229, 25424):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('', port))\n s.close()\n return port\n except Exception as e:\n pass\n return -1", "def getPort(self):\n return int(self[SipViaHeader.PARAM_PORT]) if SipViaHeader.PARAM_PORT in self else None", "def udp_port(self):\n return self.get_port(protocol.SERVICE_UDP)", "def get_proxy_port(self):\n return self._proxy_port", "def target_port(self) -> Optional[pulumi.Input['ServiceMonitorSpecEndpointsTargetPortArgs']]:\n return pulumi.get(self, \"target_port\")", "def host_port(self):\n ret = self._get_attr(\"hostPort\")\n return ret", "def port(self) -> str:\n return self.email_config['port']", "def get_port(self) -> int:\n return self.settings['prometheus_port']", "def port_index(self):\n return self._port_index", "def get_tcp_receiver_port():\n tcp_receiver_port = 5502\n if _CONFIG_PARSER.has_option(\"TransportTCP\", \"TCPReceiverPort\"):\n tcp_receiver_port = _CONFIG_PARSER.getint(\"TransportTCP\", \"TCPReceiverPort\")\n return tcp_receiver_port", "def port_index(self):\n return self.__port_index", "def host_port(self):\n e = self.environ\n host = e.get('HTTP_HOST')\n if host is not None:\n if ':' in host:\n host, port = host.split(':', 1)\n else:\n url_scheme = e['wsgi.url_scheme']\n if url_scheme == 'https':\n port = '443'\n else:\n port = '80'\n else:\n port = e['SERVER_PORT']\n return port", "def __pget_wifi_port(self):\n try:\n return self.__cp.getint(SEC, KEY_WIFI_PORT)\n except (ValueError, AttributeError), e:\n log.warning(\"config '%s' malformed (%s)\" % (KEY_WIFI_PORT, e))\n return 34271", "def port_id(self):\n # type: () -> int\n return self._get_property('port_id')", "def port(n: str) -> int:\n\ttry:\n\t\tp = int(n)\n\texcept ValueError as exc:\n\t\traise argparse.ArgumentError('invalid value for port!') from exc\n\n\tif 0 < p < 65536:\n\t\treturn p\n\telse:\n\t\traise argparse.ArgumentError('port value out of range!')", "def teleporter_port(self):\n ret = self._get_attr(\"teleporterPort\")\n return ret", "def get_port(self) -> str:\n return self.__serial.port" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Field/value pair used to ensure server identity when connecting to the Syslog server using TLS. Optional. If not provided, server identity is not checked. This is ignored if the service is not tcp_with_tls.
def tlsIdentity(self):
    return self.data["tlsIdentity"]
[ "def tls(self) -> Optional['outputs.ClusterClientAuthenticationTls']:\n return pulumi.get(self, \"tls\")", "def tls_config(self) -> Optional[pulumi.Input['PrometheusSpecApiserverConfigTlsConfigArgs']]:\n return pulumi.get(self, \"tls_config\")", "def tls_config(self) -> Optional[pulumi.Input['PrometheusSpecRemoteReadTlsConfigArgs']]:\n return pulumi.get(self, \"tls_config\")", "def auth_server_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_server_id\")", "def tls_config(self) -> Optional[pulumi.Input['PrometheusSpecRemoteWriteTlsConfigArgs']]:\n return pulumi.get(self, \"tls_config\")", "def ssl_verify_server_cert(self):\n return \"\"\"--ssl-verify-server-cert\"\"\"", "def get_server_id(self):", "def get_server_id():\n return getattr(seaserv, 'SERVER_ID', '-')", "def login_handler(self, values, extra=None): \n if self.proto=='http':\n err_msg(\"This client only supports HTTPS (SSL) connections\")\n elif self.proto=='https':\n return self.https_login_handler(values,extra)", "def tls_config(self) -> Optional[pulumi.Input['PrometheusSpecAlertingAlertmanagersTlsConfigArgs']]:\n return pulumi.get(self, \"tls_config\")", "def try_configuration(self) -> None:\n with self.context():\n kerberos.getServerPrincipalDetails(self.service, self.hostname)", "def setSSLInfo(self, **kwargs):\n kwargs['server_side'] = True\n self.__ssl_info = {}\n self.__ssl_info.update(kwargs)", "def server_information(self):", "def auth_ldap_use_tls(self):\n return self.appbuilder.get_app.config[\"AUTH_LDAP_USE_TLS\"]", "def __init__(self, username, password, server = None, ssl = False):\n self.username = username\n if server is None:\n server = \"meta.meta01.rambla.be\"\n super(MetaService, self).__init__(username = username, password = password, server = server, ssl = ssl)", "def dns_over_tls(self) -> DNSOverTLSEnabled | None:\n return self.properties[DBUS_ATTR_DNS_OVER_TLS]", "def is_tls_enabled():\n global tls_enabled\n if tls_enabled is None:\n hadoop_conf_path = os.environ['HADOOP_CONF_DIR']\n xmldoc = minidom.parse(os.path.join(hadoop_conf_path,'core-site.xml'))\n itemlist = xmldoc.getElementsByTagName('property')\n for item in itemlist:\n name = item.getElementsByTagName(\"name\")[0]\n if name.firstChild.data == \"ipc.server.ssl.enabled\":\n tls_enabled = item.getElementsByTagName(\"value\")[0].firstChild.data == 'true'\n return tls_enabled", "def get_next_serveropt(self):\n val = None\n ret = 'NO'\n\n # Test output:\n # if self.records < 1\n # ret = 'OK'\n # val = Mailserver('imap_ssl', 'imap.example.com', 993,\n # 'smtp_ssl', 'smtp.example.com', 465,\n # 'username@example.com', 'qwerty', 'newusername@mymail.com', 1*(60*60*24*31),\n # 'smtp', 'smtp.posta.com', 587, 'otherusername@posta.com', 'qwe123')\n\n return ret, val", "def get_selenoid_info(self):\n host_url = '{}/host/{}'.format(self.server_url, self.session_id)\n try:\n selenoid_info = requests.get(host_url).json()\n except Exception:\n return None\n self.driver_wrapper.logger.info(f'Selenoid host info: {selenoid_info}')\n return selenoid_info" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A collection of NetflowCollector entries.
def netflow_collector(self):
    return [NetflowCollector(**nc) for nc in self.data.get("netflow_collector", [])]
[ "def add_netflow_collector(self, netflow_collectors):\n if \"netflow_collector\" not in self.data:\n self.data[\"netflow_collector\"] = {\"netflow_collector\": []}\n\n for p in netflow_collectors:\n self.data[\"netflow_collector\"].append(p.data)\n self.update()", "def __init__(self,\n first: 'FlowLogCollectorCollectionFirst',\n flow_log_collectors: List['FlowLogCollector'],\n limit: int,\n total_count: int,\n *,\n next: 'FlowLogCollectorCollectionNext' = None) -> None:\n self.first = first\n self.flow_log_collectors = flow_log_collectors\n self.limit = limit\n self.next = next\n self.total_count = total_count", "def _collect(self):\n if not self._check_alive_collectors():\n self.signal_stop()\n\n if not self._all_nodes_finished():\n logging.warning(\n \"New collect ordered before last one was finished, skipping.\")\n return\n\n logging.info(\"Triggering new collection for all nodes.\")\n self._collect_start_time = datetime.utcnow()\n for collector in self._node_collectors:\n collector.collect()", "def collector(self):\n\n\t\tlogger = getlogger(\"Collector\")\n\t\tlogger.info(\"Starting collector.\")\n\n\t\t# Set up new poll context and initialize it\n\t\tlogger.info(\"Setting up poll context\")\n\t\tconf = CONF['collector']\n\t\tcontext = PollContext((conf['c_addr'], conf.as_int('c_port')))\n\t\tcontext.initialize()\n\n\t\tsteps = self.collect_interval\n\n\t\t# Main loop\n\t\tlogger.info(\"Collector is running\")\n\t\twhile self.running:\n\n\t\t\t# Wait for next event\n\t\t\ttry:\n\t\t\t\tevents = context.wait(1000)\n\t\t\texcept Exception, e:\n\t\t\t\tlogger.error(str(e))\n\n\t\t\tfor event, data, sock in events:\n\n\t\t\t\t# No register event coming\n\t\t\t\tif event == \"TIMEOUT\":\n\t\t\t\t\tif steps > 0:\n\t\t\t\t\t\tsteps -= 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.debug(\"Send DATA event to each one of workers\")\n\t\t\t\t\t\tfor key, worker in self.worker_set.items():\n\t\t\t\t\t\t\t# send DATA to each worker\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tworker['fd'].send(\"DATA\")\n\t\t\t\t\t\t\t\tlogger.debug(\"send to %s\"%key)\n\n\t\t\t\t\t\t\t# remove disconnected worker from worker set\n\t\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\t\tif e.errno == errno.EBADF:\n\t\t\t\t\t\t\t\t\tdel self.worker_set[key]\n\t\t\t\t\t\t\t\t\tlogger.info(\"The connection to worker %s is closed.\" % key)\n\n\t\t\t\t\t\tsteps = self.collect_interval\n\n\t\t\t\t\tbreak\n\n\t\t\t\t# New worker coming\n\t\t\t\tif event == \"REGISTER\":\n\t\t\t\t\tip = sock.getpeername()[0]\n\n\t\t\t\t\t# initialize coming worker\n\t\t\t\t\tself.worker_set[ip] = {\n\t\t\t\t\t\t'fd': sock, \n\t\t\t\t\t\t'addr': eval(data),\n\t\t\t\t\t\t'agents': 0}\n\n\t\t\t\t\tlogger.info(\"Worker %s has registered, address on %s\" % (ip, data))\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Metric data coming\n\t\t\t\tif event == \"DATA\":\n\n\t\t\t\t\t# insert data into database\n\t\t\t\t\tcount = self.db.insert_metric(data)\n\t\t\t\t\tlogger.info(\"store metrics : %d\" % count)\n\n\t\tlogger.info(threading.currentThread().getName() + \" is closing\")", "def _get_collector(self):\n return self.__collector", "def create_collector(self):\n # TODO\n return LearningDataCollector()", "def collector():\n channels = Builder(\"channels\")\n\n with open(\"ids.pickle\", \"rb\") as fp:\n items = pickle.load(fp)\n\n ids = list(items[\"ids\"])\n start = 0\n while True:\n end = start + 50\n batch = ids[start:end]\n if batch:\n batch = \",\".join(batch)\n # If i haven´t reach the end of the list, request info\n response = channels.list(\n 
part=\"snippet,statistics\",\n id=batch,\n maxResults=50,\n fields=\"items(id,snippet(title,publishedAt,country),statistics(viewCount,commentCount,subscriberCount,videoCount))\",\n )\n print(response)\n break\n # start = end\n else:\n break", "def predict_collect(self, src, collector) -> None:\n ...", "def collect(self) -> core.Metric:\n results = self._tester.test()\n\n download_speed = core.GaugeMetricFamily('download_speed_bps',\n 'Download speed (bit/s)')\n download_speed.add_metric(labels=[], value=results.download)\n yield download_speed\n\n upload_speed = core.GaugeMetricFamily('upload_speed_bps',\n 'Upload speed (bit/s)')\n upload_speed.add_metric(labels=[], value=results.upload)\n yield upload_speed\n\n ping = core.GaugeMetricFamily('ping_ms', 'Latency (ms)')\n ping.add_metric(labels=[], value=results.ping)\n yield ping\n\n bytes_received = core.GaugeMetricFamily('bytes_received',\n 'Bytes received during test')\n bytes_received.add_metric(labels=[], value=results.bytes_received)\n yield bytes_received\n\n bytes_sent = core.GaugeMetricFamily('bytes_sent',\n 'Bytes sent during test')\n bytes_sent.add_metric(labels=[], value=results.bytes_sent)\n yield bytes_sent", "def collect(self):\n data = self.provider.collect()\n self.collect_done(data)", "def remove_netflow_collector(self, netflow_collector):\n _netflow_collector = []\n changed = False\n for nf in self.netflow_collector:\n if nf != netflow_collector:\n _netflow_collector.append(nf.data)\n else:\n changed = True\n\n if changed:\n self.data[\"netflow_collector\"] = _netflow_collector\n self.update()\n\n return changed", "def _add_collect(cls):\n cls._count_collect = cls._count_collect + 1", "def _fold_metrics(self, metrics):\n # Store the metrics as a dictionary by queue type\n metrics_by_type = {}\n for metric in metrics:\n key = type(metric)\n metrics_by_type.setdefault(key, [])\n metrics_by_type[key].append(metric)\n\n # Fold over the metrics\n data = []\n now = time.time()\n for cls,metrics in metrics_by_type.iteritems():\n data.extend(cls.fold(metrics, now, **self.metrics_settings.get(cls, {})))\n\n return data", "def aggregate(self):\n self.__log.call()\n\n for collector in self._collectors:\n self._merge_metadata(\n collector.metadata, self.metadata,\n keys=[\n \"album_title\",\n \"album_artist\",\n \"album_label\",\n \"album_genre\",\n \"album_year\",\n \"album_cover\",\n ])\n\n self._merge_metadata(\n collector.metadata[\"__custom\"], self.metadata[\"__custom\"],\n keys=[\n key for key in collector.metadata[\"__custom\"].keys()\n if key not in self.metadata[\"__custom\"]])\n\n # not terribly useful, but not sure what else could possibly be\n # done here if there are discrepancies; best to just leave it up to\n # the user to edit these fields appropriately\n for field in [\"album_discnumber\", \"album_disctotal\"]:\n if collector.metadata[field] > self.metadata[field]:\n self.metadata[field] = collector.metadata[field]\n\n t = 1\n for track_metadata in collector.metadata[\"__tracks\"][1:]:\n self._merge_metadata(\n track_metadata, self.metadata[\"__tracks\"][t],\n keys=[\n \"track_title\",\n \"track_artist\",\n \"track_genre\",\n \"track_year\",\n ])\n\n # it is possible for collectors to not find track artist and/or\n # genre, so make sure those fields have value(s)\n for field in [\"artist\", \"genre\"]:\n if not self.metadata[\"__tracks\"][t][\"track_\" + field]:\n self.metadata[\"__tracks\"][t][\"track_\" + field] = list(\n self.metadata[\"album_\" + field])\n\n self._merge_metadata(\n 
track_metadata[\"__custom\"],\n self.metadata[\"__tracks\"][t][\"__custom\"],\n keys=[\n key for key in track_metadata[\"__custom\"].keys()\n if key not in\n self.metadata[\"__tracks\"][t][\"__custom\"]])\n\n t += 1\n\n for (key, value) in self.metadata[\"__custom\"].items():\n for track_metadata in self.metadata[\"__tracks\"][1:]:\n if key not in track_metadata[\"__custom\"]:\n track_metadata[\"__custom\"][key] = value\n\n # add LAME genres to album and track metadata\n self.__add_lame_genres(self.metadata[\"album_genre\"])\n for track_metadata in self.metadata[\"__tracks\"][1:]:\n self.__add_lame_genres(track_metadata[\"track_genre\"])\n\n # currently, neither Gracenote nor MusicBrainz provide \"year\" metadata\n # on a per-track basis; so if \"track_year\" is empty after aggregation,\n # default it to the same options as \"album_year\"\n t = 1\n album_year = self.metadata[\"album_year\"]\n for track_metadata in self.metadata[\"__tracks\"][t:]:\n if not track_metadata[\"track_year\"]:\n track_metadata[\"track_year\"] = list(album_year) # use a copy\n\n # write album cover image data to temporary files\n self.__save_album_covers()\n\n # persisted metadata takes precedence and provides some values not\n # collected by regular collectors\n if self.persistence.restored:\n # I trust myself more than the music databases :)\n self.metadata[\"album_discnumber\"] = \\\n self.persistence.metadata[\"album_discnumber\"]\n self.metadata[\"album_disctotal\"] = \\\n self.persistence.metadata[\"album_disctotal\"]\n\n # regular collectors do not track the following fields\n self.metadata[\"album_compilation\"] = \\\n self.persistence.metadata[\"album_compilation\"]\n # issues/5\n for naming_field in [\n \"__flac_subroot_trie\",\n \"__flac_album_folder\",\n \"__flac_track_filename\",\n \"__mp3_subroot_trie\",\n \"__mp3_album_folder\",\n \"__mp3_track_filename\",\n ]:\n if naming_field in self.persistence.metadata:\n self.metadata[naming_field] = \\\n self.persistence.metadata[naming_field]\n\n t = 1\n for track_metadata in self.persistence.metadata[\"__tracks\"][t:]:\n # sanity check\n assert (\n track_metadata[\"track_number\"] ==\n self.metadata[\"__tracks\"][t][\"track_number\"] ==\n t)\n\n # regular collectors do not store the \"track_include\" flag\n self.metadata[\"__tracks\"][t][\"track_include\"] = \\\n track_metadata[\"track_include\"]\n\n t += 1", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n if \"queues\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"queues\"]:\n for metric in metric_rq():\n yield metric\n\n if \"reports\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"reports\"]:\n for metric in metric_reports():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = GaugeMetricFamily(\"netbox_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\")\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def consumers(self):\n ops_set = frozenset(self._ops)\n res = []\n for output in self._output_ts:\n consumers = [op for op in 
output.consumers() if op not in ops_set]\n util.concatenate_unique(res, consumers)\n return res", "def collect(self):\n with self._data_lock:\n for m in self._metrics:\n gauge = GaugeMetricFamily(m.name, m.description, labels=self._label_names + m.label_names)\n for (label_values, value) in self._data.get(m.name, []):\n gauge.add_metric(label_values, value)\n yield gauge", "def classify(gr,numSource,numSink):\n \n flows, cuts = maximum_flow(gr, 0, numSource)\n\n return cuts", "def create(services):\n metrics = []\n for service, actions in services.items():\n for action, outparams in actions.items():\n try:\n m = ActionGauge(service, action, outparams)\n metrics.append(m)\n except ValueError:\n # ValueError: Duplicated timeseries in CollectorRegistry:\n # {'tr64_ftpwanport'}\n pass\n return metrics" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add one or more netflow collectors to this log server.
def add_netflow_collector(self, netflow_collectors):
    if "netflow_collector" not in self.data:
        self.data["netflow_collector"] = {"netflow_collector": []}

    for p in netflow_collectors:
        self.data["netflow_collector"].append(p.data)
    self.update()
[ "def netflow_collector(self):\n return [NetflowCollector(**nc) for nc in self.data.get(\"netflow_collector\", [])]", "def remove_netflow_collector(self, netflow_collector):\n _netflow_collector = []\n changed = False\n for nf in self.netflow_collector:\n if nf != netflow_collector:\n _netflow_collector.append(nf.data)\n else:\n changed = True\n\n if changed:\n self.data[\"netflow_collector\"] = _netflow_collector\n self.update()\n\n return changed", "def _collect(self):\n if not self._check_alive_collectors():\n self.signal_stop()\n\n if not self._all_nodes_finished():\n logging.warning(\n \"New collect ordered before last one was finished, skipping.\")\n return\n\n logging.info(\"Triggering new collection for all nodes.\")\n self._collect_start_time = datetime.utcnow()\n for collector in self._node_collectors:\n collector.collect()", "def collector(self):\n\n\t\tlogger = getlogger(\"Collector\")\n\t\tlogger.info(\"Starting collector.\")\n\n\t\t# Set up new poll context and initialize it\n\t\tlogger.info(\"Setting up poll context\")\n\t\tconf = CONF['collector']\n\t\tcontext = PollContext((conf['c_addr'], conf.as_int('c_port')))\n\t\tcontext.initialize()\n\n\t\tsteps = self.collect_interval\n\n\t\t# Main loop\n\t\tlogger.info(\"Collector is running\")\n\t\twhile self.running:\n\n\t\t\t# Wait for next event\n\t\t\ttry:\n\t\t\t\tevents = context.wait(1000)\n\t\t\texcept Exception, e:\n\t\t\t\tlogger.error(str(e))\n\n\t\t\tfor event, data, sock in events:\n\n\t\t\t\t# No register event coming\n\t\t\t\tif event == \"TIMEOUT\":\n\t\t\t\t\tif steps > 0:\n\t\t\t\t\t\tsteps -= 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.debug(\"Send DATA event to each one of workers\")\n\t\t\t\t\t\tfor key, worker in self.worker_set.items():\n\t\t\t\t\t\t\t# send DATA to each worker\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tworker['fd'].send(\"DATA\")\n\t\t\t\t\t\t\t\tlogger.debug(\"send to %s\"%key)\n\n\t\t\t\t\t\t\t# remove disconnected worker from worker set\n\t\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\t\tif e.errno == errno.EBADF:\n\t\t\t\t\t\t\t\t\tdel self.worker_set[key]\n\t\t\t\t\t\t\t\t\tlogger.info(\"The connection to worker %s is closed.\" % key)\n\n\t\t\t\t\t\tsteps = self.collect_interval\n\n\t\t\t\t\tbreak\n\n\t\t\t\t# New worker coming\n\t\t\t\tif event == \"REGISTER\":\n\t\t\t\t\tip = sock.getpeername()[0]\n\n\t\t\t\t\t# initialize coming worker\n\t\t\t\t\tself.worker_set[ip] = {\n\t\t\t\t\t\t'fd': sock, \n\t\t\t\t\t\t'addr': eval(data),\n\t\t\t\t\t\t'agents': 0}\n\n\t\t\t\t\tlogger.info(\"Worker %s has registered, address on %s\" % (ip, data))\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Metric data coming\n\t\t\t\tif event == \"DATA\":\n\n\t\t\t\t\t# insert data into database\n\t\t\t\t\tcount = self.db.insert_metric(data)\n\t\t\t\t\tlogger.info(\"store metrics : %d\" % count)\n\n\t\tlogger.info(threading.currentThread().getName() + \" is closing\")", "def _set_collector(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"collector_ip_address collector_port_number use_vrf\",collector.collector, yang_name=\"collector\", rest_name=\"collector\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='collector-ip-address collector-port-number use-vrf', extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-suppress-mode': None, u'callpoint': u'SflowCollector', u'info': u'Sflow Collector Configuration'}}), is_container='list', yang_name=\"collector\", rest_name=\"collector\", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-suppress-mode': None, u'callpoint': u'SflowCollector', u'info': u'Sflow Collector Configuration'}}, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"collector must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"collector_ip_address collector_port_number use_vrf\",collector.collector, yang_name=\"collector\", rest_name=\"collector\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='collector-ip-address collector-port-number use-vrf', extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-suppress-mode': None, u'callpoint': u'SflowCollector', u'info': u'Sflow Collector Configuration'}}), is_container='list', yang_name=\"collector\", rest_name=\"collector\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-suppress-mode': None, u'callpoint': u'SflowCollector', u'info': u'Sflow Collector Configuration'}}, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__collector = t\n if hasattr(self, '_set'):\n self._set()", "def __init__(self,\n first: 'FlowLogCollectorCollectionFirst',\n flow_log_collectors: List['FlowLogCollector'],\n limit: int,\n total_count: int,\n *,\n next: 'FlowLogCollectorCollectionNext' = None) -> None:\n self.first = first\n self.flow_log_collectors = flow_log_collectors\n self.limit = limit\n self.next = next\n self.total_count = total_count", "def _add_collect(cls):\n cls._count_collect = cls._count_collect + 1", "def collect(self, app):\n with app.app_context():\n LOGGER.info(\"Start collecting metrics\")\n try:\n if db_util.is_port_open():\n with db_util.get_connection() as conn:\n for metric in self.metrics:\n LOGGER.debug(\"collect %s\", metric)\n if hasattr(metric, 'query'):\n result = db_util.get_query_result(conn, metric.query)\n metric.collect(app, result)\n else:\n metric.collect(app)\n else:\n for metric in self.metrics:\n if hasattr(metric, 'is_up_metric'):\n metric.collect(app)\n except InterfaceError as e:\n LOGGER.error(\"Exception when collecting metrics: %s\",\n str(e),\n exc_info=True)\n LOGGER.info(\"Finish collecting metrics\")", "def add_flowentry(fields, ip=DEV_VM_URL):\n url = \"http://%s:%d/stats/flowentry/add\" % (ip, OF_REST_PORT)\n data = json.dumps(fields)\n return _ovs_api_request('POST', url, data=data, return_json=False)", "def collect_packets(self):\n sniff(iface=self.interface_name, store=0, prn=self.filter_clients_and_access_points)", "def multiprocessing_start(self):\n debug_mode = False\n # For each collector\n for c in self.collectors:\n # If the collector is enabled I.e. should run an import with frequency higher than 0\n if c.seconds != 0:\n # If collector is running in debug mode, pass it the message queue\n if c.debug:\n c.set_q(self.q)\n debug_mode = True\n # Create an Api2Db instance passing the collector as parameter\n api2db = Api2Db(c)\n # Start the collector by calling the Api2Db wrap_start method\n api2db.wrap_start()\n if debug_mode:\n print(\"All collector processes started. 
Running in development mode.\")\n formatter = Formatter(fmt=\"Pid: %(process)-6d Tid: %(thread)-6d %(asctime)s %(levelname)-7s %(message)s\",\n datefmt=\"%Y-%m-%d %I:%H:%M\")\n handler = StreamHandler()\n handler.setFormatter(formatter)\n handler.setLevel(logging.DEBUG)\n listener = QueueListener(self.q, handler)\n listener.start()\n else:\n print(\"All collector processes started. Running in production mode.\")", "def collectd_init(self):\n self.server = KatcpServer(self.config['host'],\n int(self.config['port']))\n self.server.start()", "def _update_network(self, snapshot, ts):\n _n = snapshot['system']['netio']\n with self.lock:\n self._stats['network']['connections_total'] = \\\n len(snapshot['proc']['connections'])\n self._stats['network']['bytes_sent'].append(\n (_n.bytes_sent, ts)\n )\n self._stats['network']['bytes_recv'].append(\n (_n.bytes_recv, ts)\n )\n self._stats['network']['bytes_sent_s'].append(\n (snapshot['system']['netstats']['sent_s'], ts)\n )\n self._stats['network']['bytes_recv_s'].append(\n (snapshot['system']['netstats']['recv_s'], ts)\n )\n self._stats['network']['connections'] = \\\n snapshot['proc']['connections']", "def add_ovs_flows():\n check_output(split(\"ovs-ofctl del-flows s1\"))\n\n check_output(\n split(\n 'ovs-ofctl add-flow s1 \"{proto},in_port={in_port},actions=output={out_port}\"'.format(\n **{\n \"in_port\": get_ofport(\"s1-client\"),\n \"out_port\": get_ofport(\"s1-vnf\"),\n \"proto\": \"udp\",\n }\n )\n )\n )\n check_output(\n split(\n 'ovs-ofctl add-flow s1 \"{proto},in_port={in_port},actions=output={out_port}\"'.format(\n **{\n \"in_port\": get_ofport(\"s1-server\"),\n \"out_port\": get_ofport(\"s1-client\"),\n \"proto\": \"udp\",\n }\n )\n )\n )", "def register_for_new_logs(self):\n pass", "def _collect_from(self, filename):\n if not os.access(filename, os.R_OK):\n self.log.error('HadoopMetrics2Collector unable to read \"%s\"', filename)\n return\n\n self.config['hostname_method'] = 'uname_short'\n\n if str_to_bool(self.config['truncate']):\n # It is too dangerous to truncate a file that may be in the process\n # of being appended to; especially since most file systems do not\n # provide functionality for removing data from the start of a file.\n # As such we simply rename the file and delete the entire file when\n # we are finished.\n original_filename = filename\n filename = os.path.join(os.path.dirname(original_filename),\n self.__generate_unique_filename(RESERVED_NAME))\n os.rename(original_filename, filename)\n\n _file = open(filename, 'r')\n\n for line in _file:\n match = self.re_log.match(line)\n if not match:\n continue\n raw_data = match.groupdict()\n\n metrics = {}\n extra_data = {}\n for metric in raw_data['metrics'].split(','):\n metric = metric.strip()\n if '=' in metric:\n key, value = metric.split('=', 1)\n try:\n metrics[key] = round(float(value))\n except ValueError:\n extra_data[key] = value\n\n host = extra_data.get('Hostname', None) or self.get_hostname()\n partial_path = 'hadoop.{}.'.format(raw_data['name'])\n\n for key, val in metrics.items():\n full_path = partial_path + key\n self._publish(key, val, full_path, host, raw_data['timestamp'])\n\n _file.close()\n\n if str_to_bool(self.config['truncate']):\n os.remove(filename)", "def AddGatheringFunction(self, gf):\n if isinstance(gf, BLNlpClipsGatheringFunction):\n self._gatheringFunctions.append(gf)", "def add_firewall_rules(self):\n self.fw.append([\"\", \"front\",\n \"-A INPUT -i %s -p udp -m udp --dport 67 -j ACCEPT\" % self.dev\n ])\n\n if self.config.has_dns():\n 
self.fw.append([\n \"\", \"front\",\n \"-A INPUT -i %s -d %s/32 -p udp -m udp --dport 53 -j ACCEPT\" % (self.dev, self.ip)\n ])\n\n self.fw.append([\n \"\", \"front\",\n \"-A INPUT -i %s -d %s/32 -p tcp -m tcp --dport 53 -j ACCEPT\" % (self.dev, self.ip)\n ])", "def _add_gage_ids_natur_flow_to_network(self):\n print(\"Adding Gage Station and Natur Flow info from: {0}\".format(self.gage_ids_natur_flow_file))\n gage_id_natur_flow_table = csv_to_list(self.gage_ids_natur_flow_file)\n for stream_info in gage_id_natur_flow_table[1:]:\n if stream_info[0] != \"\":\n stream_index = self._find_stream_segment_index(int(float(stream_info[0])))\n if stream_index != None:\n #add natural flow\n self.stream_segments[stream_index].natural_flow = int(float(stream_info[1]))\n #add station id\n try:\n station_id = str(int(float(stream_info[2])))\n except Exception:\n continue\n pass\n if station_id != \"\":\n self.stream_undex_with_usgs_station.append(stream_index)\n self.stream_segments[stream_index].station = USGSStreamGage(station_id)\n #removed: don't add unless valid data aquired\n #self.stream_segments[stream_index].station_distance = 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove a netflow collector from this log server.
def remove_netflow_collector(self, netflow_collector):
    _netflow_collector = []
    changed = False
    for nf in self.netflow_collector:
        if nf != netflow_collector:
            _netflow_collector.append(nf.data)
        else:
            changed = True

    if changed:
        self.data["netflow_collector"] = _netflow_collector
        self.update()

    return changed
[ "def disconnect_env_collector(self, clname, exc=True):\r\n found = None\r\n foundi = None\r\n for i, co in enumerate(self._added_collectors):\r\n if clname == co.__class__.__name__:\r\n found = co\r\n foundi = i\r\n break\r\n if found is not None and not exc:\r\n return None\r\n if found is None:\r\n raise ValueError(\"Unable to find a collector '{0}' in \\n{1}\".format(\r\n clname, \"\\n\".join(map(lambda x: x.__class__.__name__, self._added_collectors))))\r\n for v in found.listener_ids.values():\r\n self.disconnect(v)\r\n del self._added_collectors[foundi]\r\n return found", "def delete_flow_log_collector(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_flow_log_collector')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/flow_log_collectors/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def req_remove_flow(self, msg):\n msg.__class__ = DR2DPMessageRemoveFlow\n try:\n msg.unpack()\n except:\n self.log.warn('invalid remove_flow message')\n return\n\n if self.click_interface != None:\n self.click_interface.send_msg_to_dr(msg.pack())", "def netflow_collector(self):\n return [NetflowCollector(**nc) for nc in self.data.get(\"netflow_collector\", [])]", "def add_netflow_collector(self, netflow_collectors):\n if \"netflow_collector\" not in self.data:\n self.data[\"netflow_collector\"] = {\"netflow_collector\": []}\n\n for p in netflow_collectors:\n self.data[\"netflow_collector\"].append(p.data)\n self.update()", "def remove(self, flows: typing.Sequence[mitmproxy.flow.Flow]) -> None:\n for f in flows:\n if f.id in self._store:\n if f.killable:\n f.kill()\n if f in self._view:\n # We manually pass the index here because multiple flows may have the same\n # sorting key, and we cannot reconstruct the index from that.\n idx = self._view.index(f)\n self._view.remove(f)\n self.sig_view_remove.send(self, flow=f, index=idx)\n del self._store[f.id]\n self.sig_store_remove.send(self, flow=f)\n if len(flows) > 1:\n ctx.log.alert(\"Removed %s flows\" % len(flows))", "def removes_channel(channel):", "def clear_heartbeat(self, collector, daemon):\n self._router_request(\n self._make_request_data(\n 'clear_heartbeat',\n dict(\n monitor=collector,\n daemon=daemon,\n )\n )\n )\n\n return True", "def remove_refuel_notif(self, id_refuel):\r\n self.main_widget.del_delivery(id_refuel)", "def remove(self):\n self.connections.remove(self)", "def removefsgroup(self, groupname):", "def detach_listener(self, route):\n pass", "def disconn_output_sink(self, output):\n # Delete timer handler\n self.remove_request_timer_handler()", "def remove_listener(self, event, f):\n self._events[event].pop(f)", "def remove(env, securitygroup_id, network_component, server, interface):\n _validate_args(network_component, server, interface)\n\n mgr = SoftLayer.NetworkManager(env.client)\n component_id = _get_component_id(env, network_component, server, interface)\n\n ret = mgr.detach_securitygroup_component(securitygroup_id,\n 
component_id)\n if not ret:\n raise exceptions.CLIAbort(\"Could not detach network component\")\n\n table = formatting.Table(REQUEST_COLUMNS)\n table.add_row([ret['requestId']])\n\n env.fout(table)", "def on_remove_menu(self, item):\n for row in Gtk_Main.Gtk_Main().lateral_pane.firewalls.model:\n if row[0].split('\\n')[0] == self.node.object.hostname:\n Gtk_Main.Gtk_Main().lateral_pane.firewalls.model.remove(row.iter)\n break\n Gtk_Main.Gtk_Main().lateral_pane.details.clear()\n Gtk_Main.Gtk_Main().lateral_pane.path.clear()\n Gtk_Main.Gtk_Main().notebook.close_all_closable()\n NetworkGraph.NetworkGraph().remove_firewall(self.node)\n Gtk_Main.Gtk_Main().draw()", "def remove_sink(self, ident):\n # If this is the last bin, we should add fakesink back first\n if (len(self._bins.keys()) == 1):\n self.add_sink('_fakesink', FakeAudioSink())\n\n if (ident in self._bins.keys()):\n sink = self._bins.pop(ident)\n if (sink is None):\n raise Exception('Unrecognized sink object ident = ' + ident)\n sink_obj = sink['sink_obj']\n srcpad = sink['srcpad']\n # Safely remove the element from the tee gst.Bin\n sinkpad = sink_obj.get_static_pad('sink')\n if (self._is_running()):\n sinkpad.set_blocked(True)\n srcpad.unlink(sinkpad)\n self.tee.release_request_pad(srcpad)\n self.remove(sink_obj)\n sink_obj.set_state(gst.STATE_NULL)", "def remove_server(self, server):\n assert(isinstance(server, MySQLServer))\n assert(server.group_id == self.__group_id)\n server.group_id = None", "def unpublish(self, cls):\r\n self.classes.pop(cls, None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Export the certificate request for the component when working with an External PKI. This can return None if the component does not have a certificate request.
def pki_export_certificate_request(self, filename=None):
    result = self.make_request(
        CertificateExportError, raw_result=True, resource="pki_export_certificate_request"
    )
    if filename is not None:
        save_to_file(filename, result.content)
        return

    return result.content
[ "def requestCertificate(self):\n # Get Cert from the request's environment\n if \"CLIENT_RAW_CERT\" in request.environ:\n return request.environ[\"CLIENT_RAW_CERT\"]\n if \"SSL_CLIENT_CERT\" in request.environ:\n return request.environ[\"SSL_CLIENT_CERT\"]\n return None", "def sp_cert(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sp_cert\")", "def sp_cert(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"sp_cert\")", "def intermediate_cert(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"intermediate_cert\")", "def cert(self):\n return self.just_get_me_a_certificate()", "def get_cert(self):\n return self.cert", "def get_local_cert_obj(self):\n cert_pem = self.get_pki_asset(\"cert\")\n cert = x509.load_pem_x509_certificate(cert_pem, backend=default_backend()) # NOQA\n return cert", "def IssueCertificate(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def access_cert(self) -> str:\n return pulumi.get(self, \"access_cert\")", "def get_request_certificate(req_id):\n cursor = _get_db().cursor()\n cursor.execute('SELECT cert FROM certs WHERE token=?', (req_id,))\n val = cursor.fetchone()\n if val is None:\n return None\n return val[0]", "def IssueCertificate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_read_certificate_signing_request(self):\n pass", "def certificate_store(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"certificate_store\")", "def pem_certificate(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"pem_certificate\")", "def tbs_certrequest_bytes(self):", "def peer_certificate(self, space, der=False):\n if not self.handshake_done:\n raise oefmt(space.w_ValueError, \"hanshake not done yet\")\n if not self.peer_cert:\n return space.w_None\n\n if der:\n # return cert in DER-encoded format\n return _certificate_to_der(space, self.peer_cert)\n else:\n verification = libssl_SSL_CTX_get_verify_mode(\n libssl_SSL_get_SSL_CTX(self.ssl))\n if not verification & SSL_VERIFY_PEER:\n return space.newdict()\n else:\n return _decode_certificate(space, self.peer_cert)", "def SpectrumInquiry(self, request, ssl_cert=None, ssl_key=None):\n pass", "def test_patch_certificate_signing_request(self):\n pass", "def import_certificate(cert): # pylint: disable=unused-argument\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import a valid certificate. The certificate can be either a file path or the certificate content as a string. If a string is given, it must include the BEGIN CERTIFICATE marker.
def pki_import_certificate(self, certificate):
    self.make_request(
        CertificateImportError,
        method="create",
        resource="pki_import_certificate",
        headers={"content-type": "multipart/form-data"},
        files={
            # decode certificate or use it as it is
            "signed_certificate": open(certificate, "rb")
            if not pem_as_string(certificate)
            else certificate
        },
    )
[ "def import_certificate(cert): # pylint: disable=unused-argument\n pass", "def test_import_cert(cert_path, thumbprint, certs, json_certs):\n kwargs = {\"name\": cert_path}\n mock_value = MagicMock(return_value=cert_path)\n with patch.dict(win_pki.__salt__, {\"cp.cache_file\": mock_value}), patch(\n \"salt.modules.win_pki._cmd_run\", MagicMock(return_value=json_certs)\n ), patch(\n \"salt.modules.win_pki._validate_cert_path\", MagicMock(return_value=None)\n ), patch(\n \"salt.modules.win_pki.get_cert_file\",\n MagicMock(return_value=certs[thumbprint]),\n ), patch(\n \"salt.modules.win_pki.get_certs\", MagicMock(return_value=certs)\n ):\n assert win_pki.import_cert(**kwargs)", "def load_certificate():\n params = demisto.params()\n cert = params.get(\"certificate\")\n cert = base64.b64decode(cert)\n passphrase = params.get('passphrase_creds', {}).get('password') or params.get(\"passphrase\", \"\")\n return cert, passphrase", "def add_certificate(certificate):\n new_certificate = Certificates(\n title=certificate['title'],\n description=certificate['description'],\n url=certificate['url'],\n image=certificate['image']\n )\n session.add(new_certificate)\n session.commit()\n return new_certificate", "def _tryload_certificatefile(filename):\n\n filename = sanatizefilename(filename)\n\n if syhelpers.tls.load_certificate(filename) and syhelpers.tls.load_privatekey(filename):\n return True\n else:\n return False", "def load(filename):\n\t\tbuffer = [];\n\t\tb64_contents = \"\";\n\t\ttry:\n\t\t\thandle = open(filename, \"r\");\n\t\t\traw_contents = handle.readlines();\n\t\t\tfor line in raw_contents:\n\t\t\t\tif line.startswith(\"----\"):\n\t\t\t\t\tcontinue\n\t\t\t\tb64_contents += line.strip();\n\t\texcept Exception as e:\n\t\t\traise Exception(\"Failed to read PEM file: \" + str(e));\n\t\tbuffer = b64decode(b64_contents);\n\t\treturn X509v3Certificate(buffer);", "def parse_certificate(body):\n if isinstance(body, str):\n body = body.encode('utf-8')\n\n return x509.load_pem_x509_certificate(body, default_backend())", "def import_and_upload_key_certificate(self, value_string=None):\n self.ss_keys_and_cert.click_import_cert()\n self.ss_keys_and_cert_dlg_import_cert.verify_title_file_dlg()\n self.ss_keys_and_cert_dlg_import_cert.click_browse_upload_button()\n self.make_cert_file_upload(value_string)\n sleep(7)\n self.ss_keys_and_cert_dlg_import_cert.file_upload_ok()\n sleep(7)", "def insertCertificate(self, certificate):\n certificate = IdentityCertificate(certificate)\n certName = certificate.getName()[:-1]\n self._cache[certName.toUri()] = certificate.wireEncode()", "def __init__(self, certificate_path=None, certificate_password=None):\n\t\tcert_path = './cert.p12' if certificate_path is None else certificate_path\n\t\tcert_password = environ.get('CERTIFICATE_PASSWORD') if certificate_password is None else certificate_password\n\t\tcert_password_bytes = cert_password.encode()\n\t\twith open(cert_path, 'rb') as cert_file:\n\t\t\tcertificate_file = cert_file.read()\n\t\t\tcert_file.close()\n\t\t\tself.p12 = crypto.load_pkcs12(certificate_file, cert_password_bytes)\n\t\t\tself.pkey = self.p12.get_privatekey()\n\t\t\tself.cert = self.p12.get_certificate()", "def install_certificate():\n stream = open(\"/bootflash/poap_device_recipe.yaml\", 'r')\n dictionary = yaml.load(stream)\n config_file_second = open(os.path.join(\"/bootflash\", options[\"split_config_second\"]), \"a+\")\n \n if (\"Trustpoint\" in dictionary):\n for ca in dictionary[\"Trustpoint\"].keys():\n ca_apply = 0\n for tp_cert, crypto_pass 
in dictionary[\"Trustpoint\"][ca].items():\n tp_cert = tp_cert.strip()\n file = tp_cert.split('/')[-1]\n if (file.endswith(\".p12\") or file.endswith(\".pfx\")):\n poap_log(\"Installing certificate file. %s\" % file)\n if (ca_apply == 0):\n config_file_second.write(\"crypto ca trustpoint %s\\n\" % ca)\n ca_apply = 1\n config_file_second.write(\"crypto ca import %s pkcs12 bootflash:poap_files/%s/%s %s\\n\" % (ca, ca, file, crypto_pass))\n poap_log(\"Installed certificate %s succesfully\" % file)", "def load_pfx(file_path, password):\n\n with open(file_path, 'rb') as fp:\n return pkcs12.load_key_and_certificates(fp.read(), password.encode(), backends.default_backend())", "def test_creating_cert(self):\n\n certificate = keyper.Certificate(AppleKeychainTests.TEST_CERT_PATH, password=AppleKeychainTests.TEST_CERT_PASSWORD)\n self.assertEqual(certificate.sha1, \"75:22:4C:AD:D6:A0:BD:0C:88:5F:B1:77:85:2F:83:A4:F6:80:69:70\")\n self.assertEqual(certificate.common_name, \"TestCertificate_CodeSign\")\n self.assertEqual(certificate.private_key_name, \"TestCertificate_CodeSign\")", "def load_from_url(url: str) -> Optional[dataclasses.Certificate]:\n parsed = urlparse(url)\n if parsed.scheme != \"https\":\n return None\n\n hostname = parsed.netloc\n port = parsed.port or 443\n try:\n cert = get_certification(hostname, port)\n\n text: str = crypto.dump_certificate(crypto.FILETYPE_TEXT, cert).decode()\n fingerprint: str = cert.digest(\"sha256\").decode().replace(\":\", \"\").lower()\n subject = cert.get_subject()\n issuer = cert.get_issuer()\n not_after = cert.get_notAfter()\n not_before = cert.get_notBefore()\n\n return dataclasses.Certificate(\n text=text,\n fingerprint=fingerprint,\n subject=components_to_string(subject.get_components()),\n issuer=components_to_string(issuer.get_components()),\n not_after=asn1time_to_datetime(not_after),\n not_before=asn1time_to_datetime(not_before),\n )\n except (ssl.SSLError, ValueError):\n return None", "def raw_certificate(self, raw_certificate):\n\n self._raw_certificate = raw_certificate", "def x509_load_certificate_from_data_str(pem_data) -> bytes:\n return x509.load_pem_x509_certificate(str(pem_data).encode(\"utf-8\"), default_backend())", "def validate_certificate(self, value):\n self._set_property('validate_certificate', value)", "def test_adding_cert(self):\n\n with keyper.TemporaryKeychain() as keychain:\n certificate = keyper.Certificate(AppleKeychainTests.TEST_CERT_PATH, password=AppleKeychainTests.TEST_CERT_PASSWORD)\n keychain.install_cert(certificate)", "def load_x509_certificate_pem(path):\n\n with open(path, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n return cert" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start the renewal process on the component when using external PKI mode. It generates a new private key and prepares a new certificate request.
def pki_renew_certificate(self):
    self.make_request(
        CertificateError,
        method="update",
        resource="pki_start_certificate_renewal",
    )
[ "def _initkeycertificate(self):\n\n if self.privatekey and self.fingerprint and self.certificate and self.publickeyxml:\n return # all set up\n\n filename = sanatizefilename(self.options['STAGECERTIFICATEFILE']['Value'])\n\n self.privatekey = syhelpers.tls.load_privatekey(filename)\n if not self.privatekey:\n print_error(\"Failed to load privatekey, please check STAGECERTIFICATEFILE\")\n return\n self.certificate = syhelpers.tls.load_certificate(filename)\n if not self.certificate:\n print_error(\"Failed to load certificate, please check STAGECERTIFICATEFILE\")\n return\n self.publickeyxml = self._getrsapublickeyxml()\n self.fingerprint = self._getfingerprint()", "def fetch_pubkey():\n \n certificate = arizonaconfig.get_option(\"certificate\")\n reppath = arizonaconfig.get_option(\"repositorypath\")\n useinsecure = arizonaconfig.get_option(\"insecure\")\n\n restartFlag = False\n \n #check to see if the certificate exists\n if not useinsecure and not os.path.isfile(certificate):\n arizonareport.send_error(1,\"could not open certificate: \"+certificate+\" while trying to initiate curl transfer to download public key file. Use --insecure to transfer without a certificate\")\n return False\n\n #build the file we want to download\n #default reppath should be:\n # https://nr06.cs.arizona.edu/user-upload/\n #so construct the rest of the path to the\n #file we want to download\n file = reppath + \"pubkeys/\" + arizonageneral.getusername() + \".publickey\"\n \n \n arizonareport.send_out(3,\"[INFO] Attempting to download your custom public key from: \"+file)\n\n pubkey = arizonaconfig.get_option(\"publickeyfile\")\n #ensure that the directory exists\n keydir = None\n lastslash = pubkey.rfind(\"/\")\n if lastslash > 0:\n try:\n keydir = pubkey[:lastslash]\n if not os.path.exists(keydir):\n os.makedirs(keydir)\n except (OSError, IndexError):\n pass\n\n # Generate a temp to hold the public key we'll d/l\n (tmppubkeyfd, tmppubkeyfile) = tempfile.mkstemp(suffix=\"arizonacurlpub.temp\")\n os.close(tmppubkeyfd)\n\n # Try finally block to remove the temp file\n try:\n if useinsecure:\n execstring = \"curl --insecure -w '%{http_code}' \" + file + \" -o \"+ tmppubkeyfile\n else:\n execstring = \"curl --cacert \"+certificate+\" -w '%{http_code}' \" + file + \" -o \"+tmppubkeyfile\n out, err, status = arizonageneral.popen5(execstring)\n\n if len(out) < 1 or out[0] != \"200\":\n arizonareport.send_error(3,\"[INFO]: I was unable to download your public key from: \"+file+\" . If you would like to upload one please go to http://quiver.cs.arizona.edu/testphp/upload.php. After you upload your public for your slice it will be automatically distributed when stork starts up. [USING DEFAULT PUBKEY]: \"+pubkey)\n else:\n # JRP: putting in code to get rid of any of the old conffiles\n if os.path.isfile(pubkey+\".old\"):\n os.system(\"rm -f \"+pubkey+\"*.old* 2>/dev/null\")\n\n # move the public key file to its new location\n if pubkey != \"\":\n # I dont think there is a option for key location: at least\n # not that I could find.. 
Maybe I need to look harder.\n # We really shouldn't be hardcoding paths in here.\n if not os.path.isdir(\"/usr/local/stork/var/keys\"):\n os.mkdir(\"/usr/local/stork/var/keys\")\n\n # check to see if the pub key has changed\n if os.path.isfile(pubkey):\n curhash = arizonacrypt.get_fn_hash(pubkey)\n newhash = arizonacrypt.get_fn_hash(tmppubkeyfile)\n if not curhash == newhash:\n restartFlag = True\n #back up the file if it has changed\n move_file(pubkey)\n shutil.copy(tmppubkeyfile, pubkey)\n else:\n # there was no existing pubkey file\n restartFlag = True\n shutil.copy(tmppubkeyfile, pubkey)\n\n finally:\n # clean up the downloaded file\n try:\n os.unlink(tmppubkeyfile)\n except OSError:\n arizonareport.send_error(3, \"[INFO] Could not remove temporary file `\" + str(tmppubkeyfile) + \"'\")\n\n return restartFlag", "def renew_sslcert():\n exec_cmd('letsencrypt renew ' + LETSENCRYPT_ARGS)", "def load_certificate():\n db_uuid = read_file_first_line('odoo-db-uuid.conf')\n enterprise_code = read_file_first_line('odoo-enterprise-code.conf')\n if db_uuid and enterprise_code:\n url = 'https://www.odoo.com/odoo-enterprise/iot/x509'\n data = {\n 'params': {\n 'db_uuid': db_uuid,\n 'enterprise_code': enterprise_code\n }\n }\n urllib3.disable_warnings()\n http = urllib3.PoolManager(cert_reqs='CERT_NONE')\n response = http.request(\n 'POST',\n url,\n body = json.dumps(data).encode('utf8'),\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n )\n result = json.loads(response.data.decode('utf8'))['result']\n if result:\n write_file('odoo-subject.conf', result['subject_cn'])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,rw\", \"/\"])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,rw\", \"/root_bypass_ramdisks/\"])\n Path('/etc/ssl/certs/nginx-cert.crt').write_text(result['x509_pem'])\n Path('/root_bypass_ramdisks/etc/ssl/certs/nginx-cert.crt').write_text(result['x509_pem'])\n Path('/etc/ssl/private/nginx-cert.key').write_text(result['private_key_pem'])\n Path('/root_bypass_ramdisks/etc/ssl/private/nginx-cert.key').write_text(result['private_key_pem'])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,ro\", \"/\"])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,ro\", \"/root_bypass_ramdisks/\"])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,rw\", \"/root_bypass_ramdisks/etc/cups\"])\n subprocess.check_call([\"sudo\", \"service\", \"nginx\", \"restart\"])", "def refresh_cert_and_key(self):\n d = None\n\n if \"post_body\" in self.config[\"cfssl\"]:\n d = self.config[\"cfssl\"][\"post_body\"]\n else:\n d = {\n \"request\": self.config[\"cfssl\"][\"request\"]\n }\n\n url = \"{}/api/v1/cfssl/newcert\".format(self.config[\"cfssl\"][\"url\"])\n\n kwargs = {}\n\n if \"auth\" in self.config[\"cfssl\"]:\n kwargs[\"auth\"] = (self.config[\"cfssl\"][\"auth\"][\"user\"],\n self.config[\"cfssl\"][\"auth\"][\"password\"])\n\n if \"ca_bundle\" in self.config[\"cfssl\"]:\n kwargs[\"verify\"] = self.config[\"cfssl\"][\"ca_bundle\"]\n\n try:\n resp = requests.post(url, json=d, **kwargs)\n resp.raise_for_status()\n except requests.exceptions.RequestException as e:\n print(\"cfssl refresh failed! 
{}\".format(e))\n\n if \"onfailure\" in self.config:\n if \"post_to_slack\" in self.config[\"onfailure\"]:\n\n msg_lines = [\n \"exception: `{}`\".format(e),\n \"request:\",\n \"```\",\n \"{}\".format(\n json.dumps(self.config[\"cfssl\"][\"request\"],\n indent=2)),\n \"```\"\n ]\n\n self._post_to_slack(\"cfssl refresh failed!\", msg_lines)\n\n return False\n\n r = resp.json()\n\n self._write_out_cert_files(r[\"result\"])\n\n if \"onsuccess\" in self.config:\n if \"execute_command\" in self.config[\"onsuccess\"]:\n args = shlex.split(\n self.config[\"onsuccess\"][\"execute_command\"]\n )\n\n child = subprocess.Popen(args, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = child.communicate()\n\n if child.returncode != 0:\n if \"onfailure\" in self.config:\n if \"post_to_slack\" in self.config[\"onfailure\"]:\n msg_lines = [\n \"args: `{}`\".format(args),\n \"rc: {}\".format(child.returncode),\n \"stdout: `{}`\".format(stdout.strip()),\n \"stderr: `{}`\".format(stderr.strip()),\n ]\n\n self._post_to_slack(\n \"post cfssl refresh execute command failed!\",\n msg_lines)\n\n return False\n\n return True", "def make_key(self):\n\t\tif self.key:\n\t\t\tif not os.path.isfile(os.path.join(self.root, self.key + \".biprivatekey\")):\n\t\t\t\tprint_green(\"\\nRequested key does not exist.\")\n\t\t\t\tret = subprocess.call([self.dscreatekey, self.key], stdout = subprocess.DEVNULL if self.quiet else None, stderr = subprocess.DEVNULL if self.quiet else None) # Created in root\n\t\t\t\tif ret == 0:\n\t\t\t\t\tprint_blue(\"Created: \" + os.path.join(self.root, self.key + \".biprivatekey\"))\n\t\t\t\telse:\n\t\t\t\t\tprint_error(\"Failed to create key!\")\n\n\t\t\t\ttry:\n\t\t\t\t\tprint_blue(\"Copying public key to release directory.\\n\")\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.makedirs(os.path.join(self.release_dir, \"Keys\"))\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\tpass\n\n\t\t\t\t\tshutil.copyfile(os.path.join(self.root, self.key + \".bikey\"), os.path.join(self.release_dir, \"Keys\", self.key + \".bikey\"))\n\n\t\t\t\texcept:\n\t\t\t\t\tprint_error(\"Could not copy key to release directory.\\n\")\n\t\t\t\t\traise\n\n\t\t\telse:\n\t\t\t\tprint_green(\"\\nNOTE: Using key \" + os.path.join(self.root, self.key + \".biprivatekey\\n\"))\n\n\t\t\tself.key = os.path.join(self.root, self.key + \".biprivatekey\")", "def generate_kubernetes_rootca_cert(self, context, subject, duration=None):\n\n # Step 1: Pre-checking\n # check actual procedure entry\n try:\n update = self.dbapi.kube_rootca_update_get_one()\n except exception.NotFound:\n msg = \"Kubernetes root CA update not started\"\n LOG.error(msg)\n return dict(success=\"\", error=msg)\n\n # check if procedure is in a state that allow us to generate new cert\n if update.state != kubernetes.KUBE_ROOTCA_UPDATE_STARTED:\n msg = \"A new root CA certificate already exists\"\n LOG.error(msg)\n return dict(success=\"\", error=msg)\n\n if update.to_rootca_cert:\n LOG.info(\"root CA target with serial number %s \"\n \"will be overwritten\" % update.to_rootca_cert)\n\n # extract current k8s rootca identifier\n current_cert = \\\n cutils.get_certificate_from_file(kubernetes.KUBERNETES_ROOTCA_CERT)\n if not current_cert:\n msg = \"Not able to get the current kube rootca\"\n return dict(success=\"\", error=msg)\n\n if duration is None:\n # extract validation period from current cert\n # the generated one will have the same period of validity\n validation_period = current_cert.not_valid_after - \\\n current_cert.not_valid_before\n\n # convert duration into 
hours to apply in resource spec\n duration = validation_period.days * 24\n\n # Step 2: Generating a self-signed issuer\n kube_operator = kubernetes.KubeOperator()\n selfsigned_issuer_name = constants.KUBE_SELFSIGNED_ISSUER\n api_version = \"%s/%s\" % (kubernetes.CERT_MANAGER_GROUP,\n kubernetes.CERT_MANAGER_VERSION)\n selfsigned_issuer = {\n 'apiVersion': api_version,\n 'kind': 'Issuer',\n 'metadata': {\n 'name': selfsigned_issuer_name,\n 'namespace': kubernetes.NAMESPACE_DEPLOYMENT\n },\n 'spec': {\n 'selfSigned': {}\n }\n }\n\n try:\n kube_operator.apply_custom_resource(kubernetes.CERT_MANAGER_GROUP,\n kubernetes.CERT_MANAGER_VERSION,\n kubernetes.NAMESPACE_DEPLOYMENT,\n 'issuers',\n selfsigned_issuer_name,\n selfsigned_issuer)\n except Exception:\n msg = \"Failed to generate self-signed issuer in cert-manager\"\n LOG.error(msg)\n return dict(success=\"\", error=msg)\n\n # Step 3: Generating a self-signed CA from issuer\n rootca_certificate_name = constants.KUBE_ROOTCA_SECRET\n spec = {\n 'isCA': True,\n 'duration': str(duration) + 'h',\n 'renewBefore': constants.K8S_CERTIFICATE_MINIMAL_DURATION,\n 'commonName': 'kubernetes',\n 'secretName': rootca_certificate_name,\n 'issuerRef': {\n 'name': selfsigned_issuer_name,\n 'kind': 'Issuer'\n },\n 'keyEncoding': 'pkcs8'\n }\n\n spec = cutils.add_certificate_subject(subject, spec)\n\n rootca_certificate = {\n 'apiVersion': api_version,\n 'kind': 'Certificate',\n 'metadata': {\n 'name': rootca_certificate_name,\n 'namespace': kubernetes.NAMESPACE_DEPLOYMENT\n },\n 'spec': spec\n }\n\n try:\n kube_operator.apply_custom_resource(kubernetes.CERT_MANAGER_GROUP,\n kubernetes.CERT_MANAGER_VERSION,\n kubernetes.NAMESPACE_DEPLOYMENT,\n 'certificates',\n rootca_certificate_name,\n rootca_certificate)\n except Exception:\n msg = (\"Failed to generate root CA certificate in cert-manager\")\n LOG.error(msg)\n return dict(success=\"\", error=msg)\n\n # Step 4: Generating issuer to sign certificates within newly\n # root CA certificate\n certificate_issuer_name = constants.KUBE_ROOTCA_ISSUER\n\n certificate_issuer = {\n 'apiVersion': api_version,\n 'kind': 'Issuer',\n 'metadata': {\n 'name': certificate_issuer_name,\n 'namespace': kubernetes.NAMESPACE_DEPLOYMENT\n },\n 'spec': {\n 'ca': {\n 'secretName': rootca_certificate_name\n }\n }\n }\n\n try:\n kube_operator.apply_custom_resource(kubernetes.CERT_MANAGER_GROUP,\n kubernetes.CERT_MANAGER_VERSION,\n kubernetes.NAMESPACE_DEPLOYMENT,\n 'issuers',\n certificate_issuer_name,\n certificate_issuer)\n except Exception as e:\n msg = (\"Failed to create root CA issuer in cert-manager: %s\" % e)\n LOG.error(msg)\n return dict(success=\"\", error=msg)\n\n # Step 5: Extracting information from current and new root CA\n # The new root CA will be stored in the secret\n # system-kube-rootca-certificate as indicated in Certificate\n # resource above\n secret = kube_operator.get_cert_secret(rootca_certificate_name,\n kubernetes.NAMESPACE_DEPLOYMENT)\n if secret is None:\n msg = (\"TLS Secret creation timeout\")\n LOG.error(msg)\n return dict(success=\"\", error=msg)\n\n data = secret.data\n tls_crt = base64.decode_as_bytes(data['tls.crt'])\n certs = cutils.extract_certs_from_pem(tls_crt)\n\n # extract information regarding the new rootca\n try:\n new_cert = cutils.build_cert_identifier(certs[0])\n except Exception:\n msg = \"Failed to extract issuer and serial number from new root CA\"\n LOG.error(msg)\n return dict(success=\"\", error=msg)\n\n # update db\n update_obj = {'state': 
kubernetes.KUBE_ROOTCA_UPDATE_CERT_GENERATED,\n 'to_rootca_cert': new_cert}\n\n r = self.dbapi.kube_rootca_update_update(update.id, update_obj)\n return dict(success=r.to_rootca_cert, error=\"\")", "def _config_selfsigned_certificate(self, context):\n\n mode = constants.CERT_MODE_SSL\n passphrase = None\n certificate_file = constants.SSL_PEM_SS_FILE\n\n # Generate a self-signed server certificate to enable https\n csr_config = \"\"\"\n [ req ]\n default_bits = 2048\n distinguished_name = req_distinguished_name\n prompt = no\n [ req_distinguished_name ]\n CN = StarlingX\n \"\"\"\n\n try:\n with open(os.devnull, \"w\") as fnull:\n openssl_cmd = \"(openssl req -new -x509 -sha256 \\\n -keyout {file} -out {file} -days 365 -nodes \\\n -config <(echo \\\"{config}\\\")) && sync\" \\\n .format(file=certificate_file, config=csr_config)\n subprocess.check_call(openssl_cmd, # pylint: disable=not-callable\n stdout=fnull, stderr=fnull,\n shell=True, executable='/usr/bin/bash')\n except subprocess.CalledProcessError as e:\n LOG.exception(e)\n msg = \"Fail to generate self-signed certificate to enable https.\"\n raise exception.SysinvException(_(msg))\n\n with open(certificate_file) as pemfile:\n pem_contents = pemfile.read()\n\n LOG.info(\"_config_selfsigned_certificate mode=%s file=%s\" % (mode, certificate_file))\n\n cert_list, private_key = \\\n self._extract_keys_from_pem(mode, pem_contents,\n serialization.PrivateFormat.PKCS8,\n passphrase)\n\n personalities = [constants.CONTROLLER]\n\n config_uuid = self._config_update_hosts(context, personalities)\n private_bytes = self._get_private_bytes_one(private_key)\n public_bytes = self._get_public_bytes(cert_list)\n file_content = private_bytes + public_bytes\n config_dict = {\n 'personalities': personalities,\n 'file_names': [constants.SSL_PEM_FILE],\n 'file_content': file_content,\n 'permissions': constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY,\n 'nobackup': True,\n }\n self._config_update_file(context, config_uuid, config_dict)\n\n # copy the certificate to shared directory\n with os.fdopen(os.open(constants.SSL_PEM_FILE_SHARED,\n os.O_CREAT | os.O_TRUNC | os.O_WRONLY,\n constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),\n 'wb') as f:\n f.write(file_content)\n\n # Inventory the self signed certificate.\n # In case the self signed cert is ICA signed,\n # skip these intermediate CA certs.\n for cert in cert_list:\n if not cert.get('is_ca', False):\n values = {\n 'certtype': mode,\n 'signature': cert.get('signature'),\n 'start_date': cert.get('cert').not_valid_before,\n 'expiry_date': cert.get('cert').not_valid_after,\n }\n self.dbapi.certificate_create(values)\n break\n else:\n msg = \"Fail to inventory the self signed certificate, \\\n no leaf cert found.\"\n raise exception.SysinvException(_(msg))", "def start_challenge(self):\r\n\t\tif self.state=='KEY_EXCHANGE':\r\n\r\n\t\t\tlogger.info(\"Starting Challenge\")\r\n\t\t\tnonce = os.urandom(16)\r\n\t\t\tself.challenge_nonce = nonce\r\n\t\t\tkey, salt = self.derive_key(self.shared_key)\r\n\t\t\tif self.session_id != None:\r\n\t\t\t\theaders = {\r\n\t\t\t\t\t'Content-Type': 'application/json',\r\n\t\t\t\t\t'session_id' : str(self.session_id)\r\n\t\t\t\t\t}\t\r\n\t\t\tmessage = json.dumps({\r\n\t\t\t\t'method': 'START_CHALLENGE',\r\n\t\t\t\t'nonce': nonce.decode('latin'), \r\n\t\t\t\t'cert': self.certificate.public_bytes(serialization.Encoding.PEM).decode('latin'),\r\n\t\t\t}).encode('latin')\t\t\r\n\t\t\tdata,iv = self.encrypt_message(message,key)\r\n\t\t\t\r\n\t\t\tlogger.info(\"Sucessfuly encrypted 
challenge and certificate\")\r\n\t\t\t\r\n\t\t\tmessage = {\r\n\t\t\t\t'data': base64.b64encode(data),\r\n\t\t\t\t'iv': base64.b64encode(iv),\r\n\t\t\t\t'hmac': base64.b64encode(self.add_hmac(data,key)),\r\n\t\t\t\t'salt': base64.b64encode(salt)\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tlogger.info(\"Sending POST Challenge and Client Certificate\")\r\n\t\t\trequest = requests.post(f'{SERVER_URL}/api',json=message, headers=headers)\r\n\t\t\t\r\n\t\t\tresponse = json.loads(request.text)\r\n\t\t\tmessage, key, iv, salt, hmac = self.receive_message(response)\r\n\t\t\t#iv = base64.b64decode(response['iv'])\r\n\t\t\t#hmac = base64.b64decode(response['hmac'])\r\n\t\t\t#salt = base64.b64decode(response['salt'])\r\n\t\t\t#msg = base64.b64decode(response['message'])\r\n\t\t\t\r\n\t\t\t#key, _ = self.derive_key(self.shared_key,salt)\r\n\t\t\tif not self.verify_hmac(hmac,message,key):\r\n\t\t\t\texit(0)\r\n\t\t\telse:\r\n\t\t\t\tlogger.info(\"HMAC OK\")\r\n\t\t\t\tmessage = self.decrypt_message(message,iv,key)\r\n\t\t\t\tmessage = json.loads(message)\r\n\t\t\t\tnonce = message['snonce'].encode('latin')\r\n\t\t\t\tnonce2 = message['nonce2'].encode('latin')\r\n\t\t\t\tself.state='START_CHALLENGE'\r\n\t\t\t\tif self.verify_challenge(nonce):\r\n\t\t\t\t\tself.accept_challenge(nonce2)\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn False\r\n\r\n\t\telse:\r\n\t\t\treturn False", "def install_certificate():\n stream = open(\"/bootflash/poap_device_recipe.yaml\", 'r')\n dictionary = yaml.load(stream)\n config_file_second = open(os.path.join(\"/bootflash\", options[\"split_config_second\"]), \"a+\")\n \n if (\"Trustpoint\" in dictionary):\n for ca in dictionary[\"Trustpoint\"].keys():\n ca_apply = 0\n for tp_cert, crypto_pass in dictionary[\"Trustpoint\"][ca].items():\n tp_cert = tp_cert.strip()\n file = tp_cert.split('/')[-1]\n if (file.endswith(\".p12\") or file.endswith(\".pfx\")):\n poap_log(\"Installing certificate file. 
%s\" % file)\n if (ca_apply == 0):\n config_file_second.write(\"crypto ca trustpoint %s\\n\" % ca)\n ca_apply = 1\n config_file_second.write(\"crypto ca import %s pkcs12 bootflash:poap_files/%s/%s %s\\n\" % (ca, ca, file, crypto_pass))\n poap_log(\"Installed certificate %s succesfully\" % file)", "def custom_cert(org_name):\n mchp_cert_info = isMCHP_cert()\n if mchp_cert_info['status'] == Status.ATCA_SUCCESS:\n status = mchp_cert_bckp(mchp_cert_info)\n\n #org_name = \"{:<24}\".format(org_name[:24]).replace(\" \", \"_\")\n #print('Adjusted Orgname for size and spaces:' + org_name)\n #assert len(org_name) < 25, \"Org name can be maximum of 24 characters\"\n\n root_key_path = Path('root_key.key')\n root_pub_key_path = Path('root_pub_key.pem')\n root_cert_path = Path('root_crt.crt')\n signer_id = 'FFFF'\n signer_key_path = Path('signer_'+signer_id+'.key')\n signer_cert_path = Path('signer_'+signer_id+'.crt')\n device_cert_path = Path('device_template.crt')\n\n certs_handler.set_root_user_data(org_name)\n certs_handler.set_signer_user_data(org_name)\n certs_handler.set_device_user_data(org_name)\n certs_handler.create_trust_chain(root_key_path, root_cert_path, signer_id, signer_key_path, signer_cert_path, device_cert_path)\n signer_cert_def, device_cert_def = certs_handler.generate_cert_def_files(root_cert_path, signer_id, signer_cert_path, device_cert_path)\n\n with open(root_pub_key_path, 'w') as f:\n f.write(common_helper.convert_ec_pub_to_pem(certs_handler.get_public_key(root_key_path)))\n f.close()\n\n print('Read device serial number...', end='')\n serial_num = bytearray(9)\n assert Status.ATCA_SUCCESS == atcab_read_serial_number(serial_num)\n print('OK (SN: {})'.format(serial_num.hex().upper()))\n\n print('Read device public key from slot {}...'.format(device_cert_def.private_key_slot), end='')\n public_key = bytearray(64)\n assert Status.ATCA_SUCCESS == atcab_get_pubkey(device_cert_def.private_key_slot, public_key)\n print('OK (Public Key: {})'.format(public_key.hex().upper()))\n\n print('Generating device certificate...'.format(device_cert_def.private_key_slot), end='')\n device_cert_path = Path('device_{}.crt'.format(base64.b16encode(serial_num).decode('ascii')))\n device_cert = certs_handler.build_device_cert(serial_num, public_key, signer_key_path, signer_cert_path, device_cert_path)\n print('OK (saved to {})'.format(device_cert_path))\n\n print('Saving signer certificate to device...', end='')\n signer_cert_der = read_cert(signer_cert_path).public_bytes(encoding=Encoding.DER)\n assert Status.ATCA_SUCCESS == atcacert_write_cert(signer_cert_def, signer_cert_der, len(signer_cert_der))\n print('OK')\n\n print('Saving device certificate to device...', end='')\n device_cert_der = device_cert.public_bytes(encoding=Encoding.DER)\n assert Status.ATCA_SUCCESS == atcacert_write_cert(device_cert_def, device_cert_der, len(device_cert_der))\n print('OK')\n\n for extension in device_cert.extensions:\n if extension.oid._name != 'subjectKeyIdentifier':\n continue # Not the extension we're looking for, skip\n thing_name = binascii.b2a_hex(extension.value.digest).decode('ascii')\n print('Thing ID {}'.format(thing_name))\n\n kit_info = certs_handler.read_kit_info()\n kit_info['thing_name'] = thing_name.lower()\n certs_handler.save_kit_info(kit_info)\n\n print('\\n\\r---------------------------------------------')\n print('Custom certificate generation and provisioning - SUCCESS')\n print('---------------------------------------------\\n\\r')\n\n # validate and print the certificate chain\n status = 
certs_handler.validate_and_print_certificate_chain(read_cert(root_cert_path),\n read_cert(signer_cert_path), read_cert(device_cert_path))\n if status == Status.ATCA_SUCCESS:\n return 'success'\n else:\n return 'danger'", "def _create_csr(cert, private_key):\n\n subject_public_key_info = decoder.decode(private_key.public_key().public_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n ), asn1Spec=rfc2314.SubjectPublicKeyInfo())[0]\n\n subject = cert[0]['tbsCertificate']['subject']\n\n # Microsoft OID: szOID_RENEWAL_CERTIFICATE\n renewal_certificate_type = rfc2314.AttributeType((1, 3, 6, 1, 4, 1, 311, 13, 1))\n renewal_certificate_value = rfc2314.univ.SetOf().setComponents(cert[0])\n\n renewal_certificate = rfc2314.Attribute()\n renewal_certificate.setComponentByName('type', renewal_certificate_type)\n renewal_certificate.setComponentByName('vals', renewal_certificate_value)\n\n attributes = rfc2314.Attributes().subtype(\n implicitTag=rfc2314.tag.Tag(rfc2314.tag.tagClassContext,\n rfc2314.tag.tagFormatConstructed, 0))\n attributes.setComponents(renewal_certificate)\n\n certification_request_info = rfc2314.CertificationRequestInfo()\n certification_request_info.setComponentByName('version', 0)\n certification_request_info.setComponentByName('subject', subject)\n certification_request_info.setComponentByName('subjectPublicKeyInfo', subject_public_key_info)\n certification_request_info.setComponentByName('attributes', attributes)\n\n raw_signature, signature_algorithm = _sign(private_key,\n encoder.encode(certification_request_info))\n\n signature = rfc2314.univ.BitString(hexValue=binascii.hexlify(raw_signature).decode('ascii'))\n\n certification_request = rfc2314.CertificationRequest()\n certification_request.setComponentByName('certificationRequestInfo', certification_request_info)\n certification_request.setComponentByName('signatureAlgorithm', signature_algorithm)\n certification_request.setComponentByName('signature', signature)\n\n return encoder.encode(certification_request)", "def test_patch_certificate_signing_request(self):\n pass", "def test_replace_certificate_signing_request_approval(self):\n pass", "def _generate_csr(order_model, repos):\n \"\"\"\n TODO(alee-3) Implement this method.\n\n * Get the public key from the container_ref\n * Generate a CSR from the public key.\n * Add the CSR to the order_metadata as the \"request\"\n \"\"\"\n pass", "def test_update_cloud_certificate(self):\n pass", "def test_create_certificate_signing_request(self):\n pass", "def __init__(self, path):\n\n self._base64Key = list()\n self._base16Key = list()\n\n if not os.path.isfile(path):\n sys.exit(\"Path \" + path + \" does not exist or is not a file!\")\n\n pkFile = open(path, 'r').readlines()\n base64Key = \"\"\n lineNo = 1\n certNo = 1\n inCert = False\n for line in pkFile:\n line = line.strip()\n # Are we starting the certificate?\n if line == \"-----BEGIN CERTIFICATE-----\":\n if inCert:\n sys.exit(\"Encountered another BEGIN CERTIFICATE without \" +\n \"END CERTIFICATE on line: \" + str(lineNo))\n\n inCert = True\n\n # Are we ending the ceritifcate?\n elif line == \"-----END CERTIFICATE-----\":\n if not inCert:\n sys.exit(\"Encountered END CERTIFICATE before \" +\n \"BEGIN CERTIFICATE on line: \" + str(lineNo))\n\n # If we ended the certificate trip the flag\n inCert = False\n\n # Sanity check the input\n if len(base64Key) == 0:\n sys.exit(\"Empty certficate , certificate \" + str(certNo) +\n \" found in file: \" + path)\n\n # ... 
and append the certificate to the list\n # Base 64 includes uppercase. DO NOT tolower()\n self._base64Key.append(base64Key)\n try:\n # Pkgmanager and setool see hex strings with lowercase,\n # lets be consistent\n self._base16Key.append(base64.b16encode(base64.b64decode(base64Key)).lower())\n except TypeError:\n sys.exit(\"Invalid certificate, certificate \" +\n str(certNo) + \" found in file: \" + path)\n\n # After adding the key, reset the accumulator as pem files\n # may have subsequent keys\n base64Key = \"\"\n\n # And increment your cert number\n certNo = certNo + 1\n\n # If we haven't started the certificate, then we should not record\n # any data\n elif not inCert:\n lineNo += 1\n continue\n\n # else we have started the certificate and need to append the data\n elif inCert:\n base64Key += line\n\n else:\n # We should never hit this assert, if we do then an unaccounted\n # for state was entered that was NOT addressed by the\n # if/elif statements above\n assert(False == True)\n\n # The last thing to do before looping up is to increment line number\n lineNo = lineNo + 1", "def test_patch_certificate_signing_request_approval(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete the certificate request if any is defined for this component.
def pki_delete_certificate_request(self):
    self.make_request(method="delete", resource="pki_delete_certificate_request")
[ "def test_delete_certificate_signing_request(self):\n pass", "def test_certificate_delete(self):\n response = self.client.open(\n '/api/v1.0/domain/{domainName}/certificate/{certificateId}'.format(domainName='domainName_example', certificateId='certificateId_example'),\n method='DELETE',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_delete_collection_certificate_signing_request(self):\n pass", "def test_delete_cloud_certificate(self):\n pass", "def deleteCertificate(self, certificateName):\n try:\n self._cache.pop(certificateName.toUri())\n except KeyError:\n pass", "def delete_certificate(self, context, mode, signature):\n LOG.info(\"delete_certificate mode=%s, signature=%s\" %\n (mode, signature))\n\n if mode == constants.CERT_MODE_SSL_CA:\n try:\n cert_file = \\\n os.path.join(constants.SSL_CERT_CA_LIST_SHARED_DIR,\n signature)\n os.remove(cert_file)\n except Exception as e:\n msg = \"Failed to delete cert file: %s\" % str(e)\n LOG.warn(msg)\n raise exception.SysinvException(_(msg))\n\n self._consolidate_cert_files()\n\n personalities = [constants.CONTROLLER,\n constants.WORKER,\n constants.STORAGE]\n config_uuid = self._config_update_hosts(context, personalities)\n config_dict = {\n \"personalities\": personalities,\n \"classes\": ['platform::config::runtime']\n }\n self._config_apply_runtime_manifest(context,\n config_uuid,\n config_dict,\n force=True)\n else:\n msg = \"delete_certificate unsupported mode=%s\" % mode\n LOG.error(msg)\n raise exception.SysinvException(_(msg))", "def delete_cert(self, context, cert_ref, resource_ref, service_name=None):\n # TODO(rm_work): We won't take any action on a delete in this driver,\n # but for now try the legacy driver's delete and ignore failure.\n try:\n legacy_mgr = barbican_legacy.BarbicanCertManager(auth=self.auth)\n legacy_mgr.delete_cert(\n context, cert_ref, resource_ref, service_name=service_name)\n except Exception:\n # If the delete failed, it was probably because it isn't legacy\n # (this will be fixed once Secrets have Consumer registration).\n pass", "def delete_request():", "def collaboration_request_delete(self, sso_session_id, request_key):\n return self.delete(\n url=url_collaboration_request_detail.format(request_key=request_key),\n authenticator=self.authenticator(sso_session_id),\n )", "def delete_certificate(self, certificate_arn):\n return self.client.delete_certificate(\n CertificateArn=certificate_arn\n )", "def delete(self):\n if hasattr(self, 'path'):\n os.unlink(self.path)\n # we should keep the key path around, but\n # we dont seem to, but it's consistent\n parts = self.path.split('.')\n key_path = \"%s-key.pem\" % parts[0]\n os.unlink(key_path)\n else:\n raise Exception('no path, not deleted')", "def revoke(self, name=None, api=None, headers=None):\n if api:\n headers = self.fusion_client._set_req_api_version(api=api)\n elif not headers:\n headers = self.fusion_client._headers.copy()\n uri = 'https://%s/rest/certificates/ca/%s' % (self.fusion_client._host, name)\n response = self.fusion_client.delete(uri=uri, headers=headers)\n return response", "def test_delete_ca_csr(self):\n ca_cn = 'Test Delete CA CSR'\n csr = SpokeCSR(ca_cn, self.ca_name, ca=True)\n csr.create()\n result = csr.delete(delete_key=True)\n expected_result = {'count': 0, 'type': 'Request', 'data': [], 'exit_code': 3,\n 'msg': 'Deleted Request:'}\n self.assertEqual(result, expected_result)", "def clear_request(self):\n self.request_data.clear()", "def 
delete_certificates_with_http_info(self, **kwargs):\n\n all_params = ['ids', 'names']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_certificates\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'ids' in params:\n query_params.append(('ids', params['ids']))\n collection_formats['ids'] = 'csv'\n if 'names' in params:\n query_params.append(('names', params['names']))\n collection_formats['names'] = 'csv'\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['AuthTokenHeader']\n\n return self.api_client.call_api('/1.10/certificates', 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def request_del(self, r: rule) -> None:\n\n if r in self._add_promises or r in self._del_promises:\n msg = \"Rule {} already registered for a promised update.\"\n raise ValueError(msg.format(r))\n elif r not in self:\n raise ValueError(\"Cannot delete non-existent rule.\")\n else:\n self._del_promises.add(r)", "def revoke_certificate(self, certificate, comments):\r\n base_url = current_app.config.get(\"EJBCA_URL\")\r\n\r\n authority = get_authority(certificate.authority_id)\r\n authority_const = authority.name.upper().replace('-', '_')\r\n\r\n cert_body = certificate.body\r\n\r\n x509 = load_certificate(FILETYPE_PEM, cert_body)\r\n\r\n issuer = x509.get_issuer()\r\n issuer_dn = get_subject_dn_string(issuer.get_components())\r\n\r\n # create certificate revocation request\r\n hex_serial = hex(int(certificate.serial))[2:]\r\n \r\n cert_serial_hex = str(hex_serial)\r\n \r\n create_url = \"{0}/ejbca/ejbca-rest-api/v1/certificate/{1}/{2}/revoke\".format(\r\n base_url, issuer_dn, cert_serial_hex\r\n )\r\n \r\n # print(create_url)\r\n session = requests.Session()\r\n session.mount('https://', HttpsAdapter())\r\n session.cert = current_app.config.get(f'EJBCA_PEM_PATH_{authority_const}')\r\n\r\n session.verify = current_app.config.get(\"EJBCA_TRUSTSTORE\")\r\n session.hooks = dict(response=log_status_code)\r\n\r\n metrics.send(\"ejbca_revoke_certificate\", \"counter\", 1)\r\n reason = comments.get('crl_reason') or comments.get('crlReason', 'unspecified')\r\n\r\n reason_dict = {\r\n 'unspecified': 'UNSPECIFIED',\r\n 'keyCompromise': 'KEY_COMPROMISE',\r\n 'cACompromise': 'CA_COMPROMISE',\r\n 'affiliationChanged': 'AFFILIATION_CHANGED',\r\n 'superseded': 'SUPERSEDED',\r\n 'cessationOfOperation': 'CESSATION_OF_OPERATION',\r\n 'certificateHold': 'CERTIFICATE_HOLD',\r\n 'removeFromCRL': 'REMOVE_FROM_CRL',\r\n 'privilegeWithdrawn': 'PRIVILEGES_WITHDRAWN',\r\n 'aACompromise': 'AA_COMPROMISE',\r\n }\r\n reason = reason_dict.get(reason, reason)\r\n response = session.put(create_url, 
params={'reason': reason})\r\n # print(response)\r\n extra_logger.info(f'{certificate} was revoked with reason {reason}')\r\n return handle_response(response)", "def DeleteReceiptRule(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete(self):\n self._assert_c8y()\n self.c8y.identity.delete(self.external_id, self.external_type)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new HTTP Proxy service. Proxy must define at least one primary address but can optionally also define a list of secondary addresses.
def create( cls, name, address, proxy_port=8080, username=None, password=None, secondary=None, comment=None, ): json = { "name": name, "address": address, "comment": comment, "http_proxy_port": proxy_port, "http_proxy_username": username if username else "", "http_proxy_password": password if password else "", "secondary": secondary if secondary else [], } return ElementCreator(cls, json)
[ "def create_proxy(self, addr='127.0.0.1', port=0, proxy_config=None, options=None):\n if self._proxy_mgr_addr is not None and self._proxy_mgr_port is not None:\n # TODO: ask the proxy manager to create a proxy and return that\n pass\n\n if options is None:\n options = {}\n\n custom_response_handler = options.get('custom_response_handler')\n if custom_response_handler is not None:\n self._capture_request_handler = create_custom_capture_request_handler(custom_response_handler)\n else:\n self._capture_request_handler = CaptureRequestHandler\n self._capture_request_handler.protocol_version = 'HTTP/1.1'\n self._capture_request_handler.timeout = options.get('connection_timeout', 5)\n self._proxy = ProxyHTTPServer((addr, port), self._capture_request_handler,\n proxy_config=proxy_config, options=options)\n\n t = threading.Thread(name='Selenium Wire Proxy Server', target=self._proxy.serve_forever)\n t.daemon = not options.get('standalone')\n t.start()\n\n socketname = self._proxy.socket.getsockname()\n self._proxy_addr = socketname[0]\n self._proxy_port = socketname[1]\n\n log.info('Created proxy listening on {}:{}'.format(self._proxy_addr, self._proxy_port))\n return self._proxy_addr, self._proxy_port", "def _create_proxy(self,\r\n url:str=None,\r\n hit_interval:int=None,\r\n interval_length:int=60,\r\n proxy_params:dict=None) -> dict:\r\n url = \"%s/sharing/rest/content/users/%s/items/%s/createProxies\" % (self._portal.url,\r\n self._user_id,\r\n self.id)\r\n params = {\r\n 'f' : 'json',\r\n 'proxies' : [],\r\n 'serviceProxyParams': {}\r\n }\r\n if url and hit_interval and interval_length:\r\n params['proxies'].append({\r\n \"sourceUrl\": url,\r\n \"hitPerInterval\" : hit_interval,\r\n \"intervalSeconds\" : interval_length\r\n })\r\n if proxy_params is not None:\r\n params['serviceProxyParams'] = proxy_params\r\n res = self._portal.con.post(url, params)\r\n return Item(gis=self._gis, itemid=res['id'])", "def create_ndp_proxy(self, **attrs):\n return self._create(_ndp_proxy.NDPProxy, **attrs)", "def allocate_public_service_instance(self, **kwargs):\n address_pool_name = kwargs.pop(\"address_pool_name\")\n\n am_service = AddressManagerService.objects.all() # TODO: Hardcoded dependency\n if not am_service:\n raise Exception(\"no addressing services\")\n am_service = am_service[0]\n\n ap = AddressPool.objects.filter(name=address_pool_name, service_id=am_service.id)\n if not ap:\n raise Exception(\"Addressing service unable to find addresspool %s\" % name)\n ap = ap[0]\n\n ip = ap.get_address()\n if not ip:\n raise Exception(\"AddressPool '%s' has run out of addresses.\" % ap.name)\n\n ap.save() # save the AddressPool to account for address being removed from it\n\n subscriber_service = None\n if \"subscriber_service\" in kwargs:\n subscriber_service = kwargs.pop(\"subscriber_service\")\n\n subscriber_service_instance = None\n if \"subscriber_tenant\" in kwargs:\n subscriber_service_instance = kwargs.pop(\"subscriber_tenant\")\n elif \"subscriber_service_instance\" in kwargs:\n subscriber_service_instance = kwargs.pop(\"subscriber_service_instance\")\n\n # TODO: potential partial failure -- AddressPool address is allocated and saved before addressing tenant\n\n t = None\n try:\n t = AddressManagerServiceInstance(owner=am_service, **kwargs) # TODO: Hardcoded dependency\n t.public_ip = ip\n t.public_mac = self.ip_to_mac(ip)\n t.address_pool_id = ap.id\n t.save()\n\n if subscriber_service:\n link = ServiceInstanceLink(subscriber_service = subscriber_service, provider_service_instance=t)\n 
link.save()\n\n if subscriber_service_instance:\n link = ServiceInstanceLink(subscriber_service_instance = subscriber_service_instance, provider_service_instance=t)\n link.save()\n except:\n # cleanup if anything went wrong\n ap.put_address(ip)\n ap.save() # save the AddressPool to account for address being added to it\n if (t and t.id):\n t.delete()\n raise\n\n return t", "def create_dbproxy_endpoint_address(\n self,\n request: rds_20140815_models.CreateDBProxyEndpointAddressRequest,\n ) -> rds_20140815_models.CreateDBProxyEndpointAddressResponse:\n runtime = util_models.RuntimeOptions()\n return self.create_dbproxy_endpoint_address_with_options(request, runtime)", "def test_create_cloud_proxy(self):\n pass", "def __init__(self,\n service_url=None,\n service_port=None,\n bzedge_conf_file=None,\n timeout=DEFAULT_HTTP_TIMEOUT,\n **kwargs):\n\n super(Proxy, self).__init__(service_url=service_url,\n service_port=service_port,\n bzedge_conf_file=bzedge_conf_file,\n timeout=timeout,\n **kwargs)", "def test_create_proxy(\n repo_format, strict, c_policy, remote_auth_type, faker, nexus_client,\n cli_runner):\n remote_url = faker.uri()\n repo_name = pytest.helpers.repo_name(\n 'proxy', repo_format, strict, c_policy,\n remote_auth_type)\n create_cmd = (\n f'repository create proxy {repo_format} {repo_name} {remote_url} '\n f'{strict} {c_policy} ')\n if remote_auth_type is not None:\n create_cmd += (\n f'--remote-auth-type={remote_auth_type} '\n f'--remote-username={faker.user_name()} '\n f'--remote-password={faker.password()}')\n\n result = cli_runner.invoke(nexus_cli, create_cmd)\n\n assert result.output == ''\n assert result.exit_code == exception.CliReturnCode.SUCCESS.value\n assert nexus_client.repositories.get_by_name(repo_name).name == repo_name", "def proxyPOSTService(self, **kwargs):\n\n allParams = ['name', 'namespaces']\n\n params = locals()\n for (key, val) in params['kwargs'].iteritems():\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method proxyPOSTService\" % key)\n params[key] = val\n del params['kwargs']\n\n resourcePath = '/api/v1beta3/proxy/namespaces/{namespaces}/services/{name}'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'POST'\n\n queryParams = {}\n headerParams = {}\n formParams = {}\n files = {}\n bodyParam = None\n\n headerParams['Accept'] = '*/*'\n headerParams['Content-Type'] = '*/*,'\n\n \n\n \n\n \n if ('name' in params):\n replacement = str(self.apiClient.toPathValue(params['name']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'name' + '}',\n replacement)\n \n if ('namespaces' in params):\n replacement = str(self.apiClient.toPathValue(params['namespaces']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'namespaces' + '}',\n replacement)\n \n\n \n\n \n\n postData = (formParams if formParams else bodyParam)\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, headerParams, files=files)", "def setdefaultproxy(proxytype=None,addr=None,port=None,rdns=True,username=None,password=None):\r\n global _defaultproxy\r\n _defaultproxy = (proxytype,addr,port,rdns,username,password)", "def proxyPOSTService(self, **kwargs):\n\n allParams = ['name', 'namespaces', 'path:*']\n\n params = locals()\n for (key, val) in params['kwargs'].iteritems():\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method proxyPOSTService\" % key)\n params[key] = val\n del 
params['kwargs']\n\n resourcePath = '/api/v1beta3/proxy/namespaces/{namespaces}/services/{name}/{path:*}'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'POST'\n\n queryParams = {}\n headerParams = {}\n formParams = {}\n files = {}\n bodyParam = None\n\n headerParams['Accept'] = '*/*'\n headerParams['Content-Type'] = '*/*,'\n\n \n\n \n\n \n if ('name' in params):\n replacement = str(self.apiClient.toPathValue(params['name']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'name' + '}',\n replacement)\n \n if ('namespaces' in params):\n replacement = str(self.apiClient.toPathValue(params['namespaces']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'namespaces' + '}',\n replacement)\n \n if ('path:*' in params):\n replacement = str(self.apiClient.toPathValue(params['path:*']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'path:*' + '}',\n replacement)\n \n\n \n\n \n\n postData = (formParams if formParams else bodyParam)\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, headerParams, files=files)", "def CreateProxy(xml_group, xml_name, session=None):\n global ActiveConnection\n if not session:\n session = ActiveConnection.Session\n if not session:\n raise RuntimeError (\"Cannot create objects without a session.\")\n pxm = ProxyManager(session)\n return pxm.NewProxy(xml_group, xml_name)", "def setproxy(self,proxytype=None,addr=None,port=None,rdns=True,username=None,password=None):\r\n self.__proxy = (proxytype,addr,port,rdns,username,password)", "def start_proxy(config):\n cleanflux = Cleanflux(config.backend_host, config.backend_port,\n config.backend_user, config.backend_password,\n config.rules,\n config.auto_retrieve_retention_policies, config.retention_policies,\n config.aggregation_properties,\n config.counter_overflows,\n config.max_nb_points_per_query,\n config.max_nb_points_per_series)\n http_proxy_daemon = HttpDaemon(config=config, cleanflux=cleanflux)\n\n daemon = daemonocle.Daemon(\n pidfile=config.pidfile,\n detach=(not config.foreground),\n shutdown_callback=shutdown,\n worker=http_proxy_daemon.run\n )\n daemon.do_action(config.command)", "def get_proxy(self):\n address = next(self._address_pool_cycle) # pick random address\n proxy = {\"http\": address, \"https\": address}\n return proxy", "def proxyClientFactoryClass(self, *args, **kwargs):\n client_factory = HTTPProxyClientFactory(*args, **kwargs)\n \n if self.__ssl_enabled:\n with open(server.config.ssl.certificate) as cert_file:\n cert = ssl.Certificate.loadPEM(cert_file.read())\n\n # TLSMemoryBIOFactory is the wrapper that takes TLS options and\n # the wrapped factory to add TLS to connections\n return TLSMemoryBIOFactory(\n ssl.optionsForClientTLS(self.host.decode('ascii'), cert),\n isClient=True, wrappedFactory=client_factory)\n else:\n return client_factory", "def NewProxy(self, group, name):\n if not self.SMProxyManager:\n return None\n aProxy = self.SMProxyManager.NewProxy(group, name)\n if not aProxy:\n return None\n aProxy.UnRegister(None)\n return aProxy", "def create_dbproxy_endpoint_address_with_options(\n self,\n request: rds_20140815_models.CreateDBProxyEndpointAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> rds_20140815_models.CreateDBProxyEndpointAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.connection_string_prefix):\n query['ConnectionStringPrefix'] = 
request.connection_string_prefix\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.dbproxy_connect_string_net_type):\n query['DBProxyConnectStringNetType'] = request.dbproxy_connect_string_net_type\n if not UtilClient.is_unset(request.dbproxy_endpoint_id):\n query['DBProxyEndpointId'] = request.dbproxy_endpoint_id\n if not UtilClient.is_unset(request.dbproxy_engine_type):\n query['DBProxyEngineType'] = request.dbproxy_engine_type\n if not UtilClient.is_unset(request.dbproxy_new_connect_string_port):\n query['DBProxyNewConnectStringPort'] = request.dbproxy_new_connect_string_port\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.vpcid):\n query['VPCId'] = request.vpcid\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='CreateDBProxyEndpointAddress',\n version='2014-08-15',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n rds_20140815_models.CreateDBProxyEndpointAddressResponse(),\n self.call_api(params, req, runtime)\n )", "def create_vpn_service(self, **attrs):\n return self._create(_vpn_service.VpnService, **attrs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The proxy service for this proxy server configuration
def proxy_service(self): return self.data.get("http_proxy")
[ "def getRemoteConfigServiceProxy(self):", "def get_proxy(self):\n address = next(self._address_pool_cycle) # pick random address\n proxy = {\"http\": address, \"https\": address}\n return proxy", "def proxy(self):\n if self._proxy is not None:\n if self._proxy[:7] == \"http://\":\n self._proxy = {'http://': self._proxy}\n Color.pl(\"{+} Proxy: %s\" % self._proxy['http://'])\n elif self._proxy[:8] == \"https://\":\n self._proxy = {'https://': self._proxy}\n Color.pl(\"{+} Proxy: %s\" % self._proxy['https://'])\n elif self._proxy[:3] == \"ftp\":\n self._proxy = {'ftp': self._proxy}\n Color.pl(\"{+} Proxy: %s\" % self._proxy['ftp'])\n else:\n self._proxy = \"\"\n return self._proxy", "def get_server_proxy():\n cp = get_configparser() # assuming it's already loaded at this point\n address = 'http://%s:%s' % (cp.get('DEFAULT', 'zdstack_rpc_hostname'),\n cp.get('DEFAULT', 'zdstack_port'))\n ZDSLOG.debug(\"%s(%s)\" % (RPC_PROXY_CLASS, address))\n return get_rpc_proxy_class()(address)", "def get_proxy (self):\n return self.proxy", "def proxy(self) -> \"Proxy\":\n if self._proxy is None:\n from twilio.rest.proxy import Proxy\n\n self._proxy = Proxy(self)\n return self._proxy", "def get_proxy_host(self):\n return self._proxy_host", "def getProxyServer(self):\n return proxy.CachingRepositoryServer(self.getNetServerConfig(),\n self.getURL(), self.getNetServer())", "def proxy_url(self) -> str:\n return pulumi.get(self, \"proxy_url\")", "def get_useproxyport(self):\n return self.options['useproxyport']", "def useProxy(self, config, logger=None):\n return self.use_proxy", "def useproxyport(self) :\n try :\n return self._useproxyport\n except Exception as e:\n raise e", "def __configure_proxy(self):\n\n if self._is_not_empty(\"proxy\", self.get_global_config):\n proxy_config = self.get_global_config[\"proxy\"]\n\n if self._is_not_empty_bool(\"enabled\", proxy_config):\n\n self.check_zap_result(\n result=self.get_zap.core.set_option_use_proxy_chain(\n boolean=str(proxy_config[\"enabled\"]).lower()\n ),\n method_name=\"set_option_use_proxy_chain\",\n )\n self.__configure_proxy_settings(proxy_config)\n self.__configure_proxy_authentication(proxy_config)\n self.__configure_socks(proxy_config)\n else:\n logging.debug(\n \"Proxy configuration is not enabled (global.proxy.enabled: true)\"\n )\n else:\n logging.debug(\"No proxy configuration defined (global.proxy: ...).\")", "def configure_opener(self):\n \n if \"proxyhost\" in self.service.config:\n proxy_support = urllib2.ProxyHandler({'http': self.service.config[\"proxyhost\"]})\n opener = urllib2.build_opener(proxy_support)\n else:\n opener = urllib2.build_opener()\n urllib2.install_opener(opener)", "def dns_proxy(self):\n ret = self._get_attr(\"DNSProxy\")\n return ret", "def __init__(self,\n service_url=None,\n service_port=None,\n bzedge_conf_file=None,\n timeout=DEFAULT_HTTP_TIMEOUT,\n **kwargs):\n\n super(Proxy, self).__init__(service_url=service_url,\n service_port=service_port,\n bzedge_conf_file=bzedge_conf_file,\n timeout=timeout,\n **kwargs)", "def set_proxy(self, host, port):\n self.proxy = {\n 'host': host,\n 'port': port\n }", "def get_xmlrpc_proxy(self):\n host = \"%s:%s/RPC2\" % (MOSES_HOST, MOSES_PORT)\n proxy = ServerProxy(host, transport=TimeoutTransport())\n return proxy", "def proxy_config(self) -> Optional['outputs.MustGatherSpecProxyConfig']:\n return pulumi.get(self, \"proxy_config\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The specified services for inspection. An inspected service is a reference to a protocol that can be forwarded for inspection, such as HTTP, HTTPS, FTP and SMTP.
def inspected_services(self): return [ InspectedService(**service) for service in self.make_request(resource="inspected_services") ]
[ "def get_servicesinfo(ns):\n tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})\n\n # Firewall\n try:\n fw = ''\n firewalld = get_service(ns, 'firewalld')\n if firewalld and firewalld.Status == 'OK':\n fw = 'on (firewalld)'\n else:\n iptables = get_service(ns, 'iptables')\n if iptables and iptables.Status == 'OK':\n fw = 'on (iptables)'\n if not fw:\n fw = 'off'\n except Exception:\n fw = 'N/A'\n tf.produce_output([('Firewall:', fw)])\n\n # Logging\n try:\n logging = ''\n journald = get_service(ns, 'systemd-journald')\n if journald and journald.Status == 'OK':\n logging = 'on (journald)'\n else:\n rsyslog = get_service(ns, 'rsyslog')\n if rsyslog and rsyslog.Status == 'OK':\n logging = 'on (rsyslog)'\n if not logging:\n logging = 'off'\n except Exception:\n logging = 'N/A'\n tf.produce_output([('Logging:', logging)])\n\n return []", "def list_services(ctx):\n\n ctx.respond(ctx._(\"I am running: {services}\").format(\n services=\", \".join(ctx.bot.services))\n )", "def get_services(self):\n xpath = [\"Services\", \"Service\"]\n return self.find_anywhere(xpath)", "def services(self):\n return self.__services", "def services(\n self,\n ) -> google.protobuf.internal.containers.MessageMap[\n builtins.str, global___GapicMetadata.ServiceForTransport\n ]:", "def deferrable_services():\n _svcs = services()\n _svcs.extend(['ovs-vswitchd', 'ovsdb-server',\n 'openvswitch-switch', 'ovs-record-hostname'])\n return list(set(_svcs))", "def parse_services(self):\n #Client\n for item in self.client_config.get(\"services\"):\n service = Service(item[\"name\"],type=item[\"type\"],parameters=item)\n self.client_services_list.append(service) \n\n #Server\n for item in self.server_config.get(\"services\"):\n service = Service(item[\"name\"],type=item[\"type\"],parameters=item)\n self.server_services_list.append(service)", "def getServicesInfo(self):\n res = self.serv.getServicesInfo()\n return res", "def service_names(self):\n return self.services.keys()", "def check_services():\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def _verify_services_syntax(services):\n num_errors = 0\n num_warnings = 0\n \n for key, value in services.items():\n if \"network-interfaces\" in value and not isinstance(value[\"network-interfaces\"], list):\n logging.error(\"Network interfaces must be a list for service %s\", key)\n num_errors += 1\n if \"template\" in value:\n pass\n elif \"image\" in value:\n pass\n elif \"compose-file\" in value:\n pass\n else:\n logging.error(\"Invalid service definition: %s\", key)\n num_errors += 1\n return num_errors, num_warnings", "def find_service_providers(self, service: ServiceDescriptor) -> list:\n return ['ALICE', ]", "def services(self):\n _log.debug('get service list')\n result = self._requestJSON('services', '')\n return self._getKey(result, 'name')", "def getStatefulServices(self):\n rv = []\n sf = self.sf\n services = sf.activeServices()\n for srv in services:\n try:\n prx = sf.getByName(srv)\n prx = omero.api.StatefulServiceInterfacePrx.checkedCast(prx)\n if prx is not None:\n rv.append(prx)\n except:\n self.__logger.warn(\"Error looking up proxy: %s\" % srv, exc_info=1)\n return rv", "def test_services_list(self):\n pass", "def _extract_services(plugins):\n services = []\n for plugin in filter(_has_services, plugins):\n services.extend(plugin.services)\n return services", "def get_all_services(limit=None, columns=None, extra_filter=None):\n return query(\"GET services\\n\", limit=limit, columns=columns, \n item_type=\"services\" , 
extra_filter=extra_filter)", "def check_service_all(management, verbosity):\n global logger\n\n logger.debug('Retrieving all hosted services')\n hosted_services = management.list_hosted_services()\n error_code_all = 0\n errors = []\n if not hosted_services:\n error_code_all = 1\n errors.append('No hosted services found')\n for service in hosted_services:\n logger.debug('Checking status of '+service.service_name)\n error_code, error = check_service(management, \n service.service_name, verbosity)\n errors.append(' '.join(('{0}:'.format(service.service_name), error)))\n error_code_all = max (error_code_all, error_code)\n return error_code_all, '; '.join(errors)", "def test_introspect_vrouter_services(self, k8s_tf_vrouter,\n tf_vrouter_services):\n service_name = k8s_tf_vrouter.name + \"-\" + tf_vrouter_services\n # Get endpoint addresses for service\n endpoint = k8s_tf_vrouter.read_namespaced_endpoints(service_name)\n env_nodes = []\n for address in endpoint.subsets[0].addresses:\n env_nodes.append(address.ip)\n # Get port for service\n service = k8s_tf_vrouter.read_namespaced_service(service_name)\n port = None\n for p in service.spec.ports:\n if p.name == \"introspect\":\n port = p.port\n\n msg = \"Node: {}, Module: {} Status: {}\"\n errors = []\n for node in env_nodes:\n ic = IntrospectClient(ip=node, port=port)\n ns = ic.get_NodeStatusUVEList()\n state = ns.NodeStatusUVE[0].NodeStatus[0].ProcessStatus[0].state\n module_id = \\\n ns.NodeStatusUVE[0].NodeStatus[0].ProcessStatus[0].module_id\n logger.info(msg.format(node, module_id, state))\n if state != \"Functional\":\n errors.append(msg.format(node, module_id, state))\n if len(errors) != 0:\n logger.error(errors)\n assert False, \"Service isn't functional\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The NTP address (Required)
def address(self): return self.data.get("ntp_host_name")
[ "def ntp_host_name(self):\n return self.data.get(\"ntp_host_name\")", "def getNTPSetup(self):\n return NTPSetup(self.__screen)", "def get_tilemill_server_address(self):\n return Common.prepare_config_address(self.tilemill_server_address)", "def get_ntp_cfg(self):\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_SYSTEM)\n\n if not self.s.is_element_present(self.info['loc_cfg_system_time_ntp_textbox']):\n raise Exception(\"Element %s not found\" % self.info['loc_cfg_system_time_ntp_textbox'])\n time.sleep(2)\n ntp_conf = self.s.get_value(self.info['loc_cfg_system_time_ntp_textbox'])\n\n return ntp_conf", "def _system_to_ntp_time(timestamp):\n return timestamp + _NTPBasePacket._NTP_DELTA", "def tweet_ip():\r\n if check_internet():\r\n tweet_ip_address()", "def get_ntp_enabled(self):\n return None", "def rtp_ip(self):\n return self.__rtp_ip", "def hub_address(self) -> str | None:\n return self.ip", "def getIpAddress():\n # type: () -> String\n return socket.gethostbyname(str(getHostName()))", "def ip_address(self) -> str | None:\n return self._device.ip_address", "def teleporter_address(self):\n ret = self._get_attr(\"teleporterAddress\")\n return ret", "def ntp_to_system_time(date):\n return date - _NTP_DELTA", "def get_actual_peer_addr(self):\n # Use the device instance, the global control interface doesn't have\n # station address\n h = self.get_instance()\n sta = h.get_sta(None)\n if sta is None or 'addr' not in sta:\n # Maybe station is not connected?\n addr = None\n else:\n addr=sta['addr']\n return addr", "def get_addr(self):\n return self._ip + ':' + str(self._port)", "def address_string(self):\n host = self.client_address[0]\n # original: return socket.getfqdn(host)\n return '%s (no getfqdn)' % host", "def get_time_server():\n ret = salt.utils.mac_utils.execute_return_result(\n \"systemsetup -getnetworktimeserver\"\n )\n return salt.utils.mac_utils.parse_return(ret)", "def ntp_auth_key(self):\n return self.data.get(\"ntp_auth_key\")", "def sync_time(self):\n os.system('sudo /usr/sbin/ntpd {}'.format(self.ntp_server))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The NTP Host Name (Not Required)
def ntp_host_name(self): return self.data.get("ntp_host_name")
[ "def address(self):\n return self.data.get(\"ntp_host_name\")", "def hostname(self):\n return \"host%d\" % (self.host_id)", "def host_name(self):\n return self.__host_name", "def hostname(self) -> str:\n return typing.cast(\n str,\n self._properties.get(\"hostname\"),\n )", "def hostname(self):\n return getattr(self, 'computer_name', None)", "def get_host_name(self):\n return self.__get_value(\"agentLevelParams/hostname\")", "def getHostName():\n # type: () -> String\n return socket.gethostname()", "def hostname(self):\n return self._domain_info.get('hostname')", "def host_name(self):\n if self._host_name:\n return self._host_name\n else:\n if self._compute_node_id:\n # TODO: 03 HostAssc: Need optional session on compute_node_get?\n compute_node = nova_db_api.compute_node_get(\n nova.context.get_admin_context(),\n self._compute_node_id)\n if compute_node:\n self._host_name = compute_node.service.host\n return self._host_name\n else:\n return None\n else:\n return None", "def dns_name(self):\n return getattr(self, 'computer_dns_name', None)", "def _get_hostname(self) -> str:\n hostname = identity.get_hostname()\n\n if not hostname:\n try:\n hostname = identity.set_hostname()\n except identity.Error as e:\n raise Error(e) from e\n\n return hostname", "def llmnr_hostname(self) -> str | None:\n return self.properties[DBUS_ATTR_LLMNR_HOSTNAME]", "def get_canonical_host(self):\n host = self.host.lower()\n if self.port is not None:\n host = \"%s:%s\" % (host, self.port)\n return host", "def instrument_host_name(self):\n return self.label['INSTRUMENT_HOST_NAME']", "def route_host_name(self) -> Optional[str]:\n return pulumi.get(self, \"route_host_name\")", "def ex_get_hypervisor_hostname(self):\r\n hostname = self.connection.getHostname()\r\n return hostname", "def get_hostname():\n\treturn os.uname()[1]", "def hostname(self) -> str | None:\n return self._device.name", "def _prompt_hostname(self):\n data = get_input(\"What is the server's hostname [%s]: \" %\n socket.getfqdn())\n if data != '':\n self.shostname = data\n else:\n self.shostname = socket.getfqdn()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The NTP Authentication Key Type (Required)
def ntp_auth_key_type(self): return self.data.get("ntp_auth_key_type")
[ "def ntp_auth_key(self):\n return self.data.get(\"ntp_auth_key\")", "def ntp_auth_key_id(self):\n return self.data.get(\"ntp_auth_key_id\")", "def type(self) -> Optional[pulumi.Input['PublicKeyType']]:\n return pulumi.get(self, \"type\")", "def keytype(authkey):\n if authkey is None:\n return '%s (inherited)' % keytype(current_process().authkey)\n else:\n return authkey if authkey == 'PublicKey' else 'AuthKey'", "def _ntp_packet(self, data):\n #mode = struct.unpack('B', data[0])[0] & 0x7\n mode = data[0] & 0x7\n if mode == 3:\n return NTPMode3Packet(data)\n elif mode == 6:\n return NTPMode6Packet(data)\n elif mode == 7:\n return NTPMode7Packet(data)\n else:\n raise NTPException(\n 'Unknown/unsupported NTP packet (mode %d) - %s' % (mode, base64.b64encode(data)))", "def grant_key(self,o,keytype):\n type_table = {\n 'manager': managerkey,\n 'auditor': managerkey,\n 'staff': managerkey,\n 'customer': customerkey,\n 'guest': guestkey,\n }\n# test for legal keytype\n try:\n k = type_table[keytype]\n except Exception:\n return None\n return k\n\n # call creator method for keytype\n def guestkey(self,o):\n k = AuthKey()\n k.operator = o\n k.type='guest'\n k.expiration = -1\n k.timestamp = 1 # get a real timestamp here\n return k\n\n def customerkey(self,o):\n k = self.guestkey(o)\n k.customerid = '' # get it from a legal place\n k.type = 'customer'\n return k\n\n def managerkey(self,o):\n k = self.guestkey(o)\n k.managerid = '' # get this from a legal source\n k.type = 'manager'\n return k", "def auth_type(self):\n ret = self._get_attr(\"authType\")\n return AuthType(ret)", "def security_encryption_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_encryption_type\")", "def get_ntp_enabled(self):\n return None", "def generate_authorization_key(self, health_authority_api_key, key_type):\n assert key_type in ['DIAGNOSED']\n auth_key = generate_authorization_key()\n with self.conn.cursor() as cur:\n cur.execute(\"INSERT INTO authorization_keys(authorization_key, \\\n api_key, key_type) VALUES (%s, %s, %s);\",\n (auth_key, health_authority_api_key, key_type))\n self.conn.commit()\n return auth_key", "def protocol(self):\n\t\tt = self.rr_type() \n\t\tif t == \"DNSKEY\":\n\t\t\treturn int(self[5])\n\t\telse:\n\t\t\treturn -1", "def getNTPSetup(self):\n return NTPSetup(self.__screen)", "def kty(self) -> str:\n return self._kty", "def AUTHORIZED_KEYS(self):\n\t\treturn \"{} {}\".format(self.keytype, base64.b64encode(self.pubkey).decode('ascii'))", "def key_creation_time(self) -> 'outputs.KeyCreationTimeResponse':\n return pulumi.get(self, \"key_creation_time\")", "def generate_otp_key(self, otp_key_size=None):\n if otp_key_size is None:\n if hasattr(self, 'otp_key_size'):\n otp_key_size = getattr(self, 'otp_key_size')\n else:\n otp_key_size = 32\n return generate_secret(otp_key_size * 8)", "def extType(self):\r\n return ExtensionType.cert_type", "def test_get_object_type(self):\n expected = enums.ObjectType.SYMMETRIC_KEY\n key = SymmetricKey(\n enums.CryptographicAlgorithm.AES, 128, self.bytes_128a)\n observed = key.object_type\n\n self.assertEqual(expected, observed)", "def ifttt_key():\n global IFTTT_SERVICE_KEY\n try:\n cookie = request.cookies.get('session')\n if cookie is None or cookie != get_session_key():\n return render_template(\"message.html\", msgtype=\"danger\", msg=\\\n \"Invalid request: session cookie not set or not valid\")\n\n keyvalue = request.form[\"iftttkey\"].strip()\n if len(keyvalue) == 64:\n entity = datastore.Entity(key=DSCLIENT.key(\"config\", 
\"ifttt_key\"))\n entity[\"value\"] = keyvalue\n DSCLIENT.put(entity)\n IFTTT_SERVICE_KEY = None\n return redirect(\"/\")\n\n return render_template(\"message.html\", msgtype=\"danger\", msg=\\\n 'Invalid IFTTT key: length is not 64. <br><br>'\\\n '<a href=\"/\">Click here to return home</a>')\n except:\n traceback.print_exc()\n return render_template(\"message.html\", msgtype=\"danger\", msg=\\\n 'Error while processing IFTTT key. See the logs. <br><br>'\\\n '<a href=\"/\">Click here to return home</a>')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The NTP Authentication Key ID (Not Required), value between 1 and 65534
def ntp_auth_key_id(self): return self.data.get("ntp_auth_key_id")
[ "def ntp_auth_key(self):\n return self.data.get(\"ntp_auth_key\")", "def ntp_auth_key_type(self):\n return self.data.get(\"ntp_auth_key_type\")", "def _make_private_key(self):\n\t\treturn int(binascii.hexlify(os.urandom(16)), 16)", "def _new_session_id(self):\n return os.urandom(32).encode('hex')", "def generate_session_id():\n return str(secrets.randbits(32))", "def generate_nonce() -> str:\n b64_str = base64.b64encode(os.urandom(NONCE_LENGTH))\n return b64_str.decode().rstrip('=')", "def generate_key(self):\n try:\n return self.proto.genuid()\n except ValueError:\n return uuid.uuid4()", "def _create_key(self):\n return uuid.uuid4().hex", "def generate_session_id(hostname):\r\n pass", "def get_unique_id(length=5):\n return str(int(time.time())) + base_token_factory(length)", "def dkim_id(data: bytes, lid: Optional[bytes] = None) -> str:\n hashable: bytes\n lid, hashable = rfc6376_rascal(data, lid)\n digest_256: bytes = hmac.digest(lid, hashable, \"sha256\")\n truncated_bits: int = 160\n return pibble32(digest_256[: truncated_bits // 8])", "def create_client_session_key(apikey):\n session_key = hashlib.sha256()\n session_key.update(str(random.getrandbits(255)).encode('utf-8'))\n session_key.update(str(time.time()).encode('utf-8'))\n session_key.update(apikey.encode('utf-8'))\n return session_key.hexdigest()", "def random_client_id():\r\n return 'py_%s' % base64.b64encode(str(random.randint(1, 0x40000000)))", "def get_key_id(self, code, state):\n return int(\"0x%s%s\"% (hex(code).replace('0x', ''),hex(state & 0xFE).replace('0x', '')),16)", "def gen_api_key():\n m = hashlib.sha256()\n m.update(get_random_word(12))\n return unicode(m.hexdigest()[:12])", "def make_totp_secret():\n return pyotp.random_base32()", "def new_id():\n bs = uuid4().bytes\n return urlsafe_b64encode(bs).strip().replace('=', '')", "def complete_hybi00(headers, challenge):\r\n\r\n key1 = headers[\"Sec-WebSocket-Key1\"]\r\n key2 = headers[\"Sec-WebSocket-Key2\"]\r\n\r\n first = int(\"\".join(i for i in key1 if i in digits)) / key1.count(\" \")\r\n second = int(\"\".join(i for i in key2 if i in digits)) / key2.count(\" \")\r\n\r\n nonce = pack(\">II8s\", first, second, challenge)\r\n\r\n return md5(nonce).digest()", "def wepKey(WEPKEY): \n KEYID = 0 \n tmp_key = \"\"\n if re.match('^([0-9a-fA-F]{2}){5}$', WEPKEY) or re.match ('^([0-9a-fA-F]{2}){13}$', WEPKEY):\n tmp_key = WEPKEY\n elif re.match('^([0-9a-fA-F]{2}[:]){4}[0-9a-fA-F]{2}$', WEPKEY) or re.match('^([0-9a-fA-F]{2}[:]){12}[0-9a-fA-F]{2}$', WEPKEY):\n tmp_key = re.sub(':', '', WEPKEY)\n elif re.match ('^([0-9a-fA-F]{4}[-]){2}[0-9a-fA-F]{2}$', WEPKEY) or re.match ('^([0-9a-fA-F]{4}[-]){6}[0-9a-fA-F]{2}$', WEPKEY):\n tmp_key = re.sub('-', '', WEPKEY)\n else:\n print \"Error! Wrong format for WEP key\"\n sys.exit(1)\n \n g = lambda x: chr(int(tmp_key[::2][x],16)*16+int(tmp_key[1::2][x],16))\n \n for i in range(len(tmp_key)/2):\n dot11.conf.wepkey += g(i)\n \n print \"WEP key: %s (%dbits)\" % (WEPKEY, len(tmp_key)*4)\n \n if KEYID > 3 or KEYID < 0:\n print \"Key id: %s (defaulted to 0 due to wrong -k argument)\" % KEYID\n KEYID = 0\n else:\n print \"Key id: %s\" % KEYID" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The NTP Authentication Key (Not Required)
def ntp_auth_key(self): return self.data.get("ntp_auth_key")
[ "def ntp_auth_key_id(self):\n return self.data.get(\"ntp_auth_key_id\")", "def ntp_auth_key_type(self):\n return self.data.get(\"ntp_auth_key_type\")", "def get_ntp_enabled(self):\n return None", "def getNTPSetup(self):\n return NTPSetup(self.__screen)", "def dns_api_key(self):\n return self.get('dns_api_key')", "def host_key(self) -> str:\n return pulumi.get(self, \"host_key\")", "def key(self):\n msg.message(xverify.steem.connect.auth_url())\n while True:\n key = input('Your Private Posting Key or'\n + ' SteemConnect Refresh Token: ')\n if len(key) < 16:\n msg.error_message('The private posting key you '\n + 'entered is too small.')\n elif xverify.steem.verify_key(acctname=\"\", tokenkey=key):\n self.privatekey = xverify.steem.privatekey\n self.refreshtoken = xverify.steem.refreshtoken\n self.accesstoken = xverify.steem.accesstoken\n self.username = xverify.steem.username\n msg.message(\"Welcome \" + self.username)\n break\n else:\n msg.error_message('Could not verify key or token.')\n return self.username", "def alt_api_key(self):\n return self.get_raw('alt_api_key')", "def get_ntp_cfg(self):\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_SYSTEM)\n\n if not self.s.is_element_present(self.info['loc_cfg_system_time_ntp_textbox']):\n raise Exception(\"Element %s not found\" % self.info['loc_cfg_system_time_ntp_textbox'])\n time.sleep(2)\n ntp_conf = self.s.get_value(self.info['loc_cfg_system_time_ntp_textbox'])\n\n return ntp_conf", "def ifttt_key():\n global IFTTT_SERVICE_KEY\n try:\n cookie = request.cookies.get('session')\n if cookie is None or cookie != get_session_key():\n return render_template(\"message.html\", msgtype=\"danger\", msg=\\\n \"Invalid request: session cookie not set or not valid\")\n\n keyvalue = request.form[\"iftttkey\"].strip()\n if len(keyvalue) == 64:\n entity = datastore.Entity(key=DSCLIENT.key(\"config\", \"ifttt_key\"))\n entity[\"value\"] = keyvalue\n DSCLIENT.put(entity)\n IFTTT_SERVICE_KEY = None\n return redirect(\"/\")\n\n return render_template(\"message.html\", msgtype=\"danger\", msg=\\\n 'Invalid IFTTT key: length is not 64. <br><br>'\\\n '<a href=\"/\">Click here to return home</a>')\n except:\n traceback.print_exc()\n return render_template(\"message.html\", msgtype=\"danger\", msg=\\\n 'Error while processing IFTTT key. See the logs. 
<br><br>'\\\n '<a href=\"/\">Click here to return home</a>')", "def ntp_host_name(self):\n return self.data.get(\"ntp_host_name\")", "def password_reset_key(self):\n mac = hmac.new(settings.SECRET_KEY)\n mac.update(str(self.pw_hash))\n mac.update(self.email.encode('utf-8'))\n if self.real_name:\n mac.update(self.real_name.encode('utf-8'))\n mac.update(str(self.last_login))\n return mac.hexdigest()", "def get_jwt_private_key() -> str:\n return ssm.get_parameter(\"/lumina/jwt/private\")", "def hmac_shared_key_ttl(self) -> ConfigNodePropertyInteger:\n return self._hmac_shared_key_ttl", "def hmac_key(self):\n return _ldns.ldns_key_hmac_key(self)\n #parameters: const ldns_key *,\n #retvals: unsigned char *", "def key_creation_time(self) -> 'outputs.KeyCreationTimeResponse':\n return pulumi.get(self, \"key_creation_time\")", "def address(self):\n return self.data.get(\"ntp_host_name\")", "def _ntp_packet(self, data):\n #mode = struct.unpack('B', data[0])[0] & 0x7\n mode = data[0] & 0x7\n if mode == 3:\n return NTPMode3Packet(data)\n elif mode == 6:\n return NTPMode6Packet(data)\n elif mode == 7:\n return NTPMode7Packet(data)\n else:\n raise NTPException(\n 'Unknown/unsupported NTP packet (mode %d) - %s' % (mode, base64.b64encode(data)))", "def GetKey():\n try:\n response = urllib.urlopen(\n 'http://metadata/computeMetadata/v1beta1/instance/attributes/rpckey')\n return response.read()\n except IOError:\n return ''" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Imports a log from a string
def __import_log_from_string(log_string, parameters=None, variant=DEFAULT_VARIANT): temp_file = string_to_file.import_string_to_temp_file(log_string, "xes") return apply(temp_file, parameters=parameters, variant=variant)
[ "def __init__(self, log_string: str):\n if (groups := balsa_log_regex.match(log_string)) is None:\n self.time_stamp = datetime.now()\n self.name = \"\"\n self.file_name = \"\"\n self.line_number = 0\n self.function_name = \"\"\n self.log_level = logging.NOTSET\n self.message = \"\"\n self.structured_record = {}\n else:\n self.time_stamp = dateutil.parser.parse(groups.group(1))\n self.name = groups.group(2)\n self.file_name = groups.group(3)\n self.line_number = int(groups.group(4))\n self.function_name = groups.group(5)\n self.log_level = getattr(logging, groups.group(6)) # log level as an integer value\n\n self.structured_record = {}\n structured_string = groups.group(7).strip()\n if structured_string.endswith(structured_sentinel) and (start_structured_string := structured_string.find(structured_sentinel)) >= 0:\n start_json = start_structured_string + len(structured_sentinel) + 1\n json_string = structured_string[start_json : -len(structured_sentinel)]\n self.message = structured_string[:start_json]\n try:\n self.structured_record = json.loads(json_string)\n except json.JSONDecodeError:\n log.warning(f\"could not JSON decode : {json_string}\")\n self.message += f\" {structured_sentinel} {json_string} {structured_sentinel}\" # fallback if we can't decode the JSON, at least have it as part of the message string\n else:\n self.message = structured_string # no JSON part", "def parse_log(cls, log_file):\n if isinstance(log_file, basestring):\n infile = open(log_file, 'r')\n else:\n infile = log_file\n\n try:\n listener, timestamp = cls._read_header(infile)\n return Log(listener, timestamp, infile)\n finally:\n infile.close()", "def parse_log_line(line: str) -> LogEntry:\n match = LOGPAT.match(line)\n if not match:\n # we could catch that error and skip the line\n raise ValueError(f'incorrect log format: {line}')\n\n entry = match.groups()\n parsed_time = parse(entry[3][:11] + ' ' + entry[3][12:])\n size = int(entry[8]) if entry[8] != '-' else 0\n return LogEntry(\n entry[0], entry[1], entry[2], parsed_time, entry[4], entry[5],\n entry[6], int(entry[7]), size\n )", "def parse_log(self, log_entry: str) -> Optional[dict]:\n match = self.log_grok.match(log_entry)\n\n if match is None:\n return None\n\n if \"timestamp\" in match:\n match[\"timestamp\"] = datetime.strptime(\n match[\"timestamp\"], self.strptime_pattern\n ).isoformat()\n\n # Rename for elasticsearch\n match[\"@timestamp\"] = match.pop(\"timestamp\")\n\n match[\"type\"] = self.type\n\n return match", "def from_bytes(cls, log_bytes: bytes):\n deserialized_log = cbor2.loads(log_bytes)\n if not deserialized_log:\n raise ValueError(f'empty deserialized log: {deserialized_log}')\n address, topics, data = deserialized_log\n return Log(address, topics, data)", "def load_log(log_file):\n file_lines = {}\n for line in open(log_file, 'r'):\n parts = line.split(\" \")\n log_time = datetime.strptime(parts[0] + \" \" + parts[1],\n '%Y-%m-%d %H:%M:%S,%f')\n # Assume that the last part of a log line is the data part\n log_query = parts[-1]\n file_lines[log_time] = log_query\n return file_lines", "def parse_line(cls, line, log):\n m = cls._LOG_LINE_RE.match(line)\n if m is None:\n return None\n entry_type = m.group('type')\n y, mo, d, h, mi, s = map(int, m.group('year', 'month', 'day',\n 'hour', 'min', 'sec'))\n timestamp = datetime.datetime(y, mo, d, h, mi, s, tzinfo = UTC())\n data = m.group('data')\n if entry_type == 'combat':\n return CombatLogEntry(timestamp, data, log)\n else:\n if entry_type == 'info':\n t = LogEntry.INFO\n elif entry_type 
== 'notify':\n t = LogEntry.NOTIFY\n elif entry_type == 'warning':\n t = LogEntry.WARNING\n elif entry_type == 'question':\n t = LogEntry.QUESTION\n elif entry_type == 'hint':\n t = LogEntry.HINT\n elif entry_type == 'None':\n t = LogEntry.NONE\n else:\n raise ValueError('Unknown log entry type \"%s\".' % entry_type)\n return LogEntry(timestamp, t, data)", "def parseApacheLogLine(logline):\n match = logline.split(\"::\")\n #if match is None:\n # return (logline, 0)\n\n return (Row(\n idPartido=int(match[0]),\n temporada=match[1],\n jornada=int(match[2]),\n equipoLocal=match[3],\n equipoVisitante=match[4],\n golesLocal=int(match[5]),\n golesVisitante=int(match[6]),\n fecha=match[7],\n timestamp=match[8]\n ))", "def start_new_log(self, log_path):", "def load_log(log_name):\n\n with open(log_name, 'rb+') as book_file:\n try:\n data = pickle.load(book_file) # loading data to look what it has inside\n except EOFError:\n add_live_log_entry(\"Plik logu jest pusty (\"+log_name+\")\")\n data = []\n except Exception:\n add_live_log_entry(\"Uwaga! Coś poszło nie tak! (\"+log_name+\")\")\n data = []\n return data", "def ParseLine(self, path, line):\n del path # We don't use the path of the log file.\n return datatypes.Event(json.loads(line.rstrip()))", "def _load_log(self, filename):\n assert(filename)\n _here = Path(__file__).parent\n\n # Try first if it is a default logger\n _logger = _here / f'loggers/{filename}.yaml'\n if _logger.exists():\n with open(_logger, 'r') as stream:\n dictConfig(yaml.load(stream, Loader=sf))\n return _logger\n\n # Otherwise trying it as a path\n _filename = Path(filename)\n\n if not _filename.exists():\n raise ValueError(f\"The file '{filename}' does not exist\")\n\n if _filename.suffix in ('.yaml', '.yml'):\n with open(_filename, 'r') as stream:\n dictConfig(yaml.load(stream, Loader=sf))\n return filename\n\n if _filename.suffix in ('.ini', '.INI'):\n fileConfig(filename)\n return filename\n\n # Otherwise, fail\n raise ValueError(f\"Unsupported log format for {filename}\")", "def import_log(filename, parameters=None):\r\n\r\n if parameters is None:\r\n parameters = {}\r\n\r\n timestamp_sort = False\r\n timestamp_key = \"time:timestamp\"\r\n reverse_sort = False\r\n insert_trace_indexes = False\r\n max_no_traces_to_import = 1000000000\r\n\r\n if \"timestamp_sort\" in parameters:\r\n timestamp_sort = parameters[\"timestamp_sort\"]\r\n if \"timestamp_key\" in parameters:\r\n timestamp_key = parameters[\"timestamp_key\"]\r\n if \"reverse_sort\" in parameters:\r\n reverse_sort = parameters[\"reverse_sort\"]\r\n if \"insert_trace_indexes\" in parameters:\r\n insert_trace_indexes = parameters[\"insert_trace_indexes\"]\r\n if \"max_no_traces_to_import\" in parameters:\r\n max_no_traces_to_import = parameters[\"max_no_traces_to_import\"]\r\n\r\n context = etree.iterparse(filename, events=['start', 'end'])\r\n\r\n log = None\r\n trace = None\r\n event = None\r\n\r\n tree = {}\r\n\r\n for tree_event, elem in context:\r\n if tree_event == EVENT_START: # starting to read\r\n parent = tree[elem.getparent()] if elem.getparent() in tree else None\r\n\r\n if elem.tag.endswith(log_lib.util.xes.TAG_STRING):\r\n if parent is not None:\r\n tree = __parse_attribute(elem, parent, elem.get(log_lib.util.xes.KEY_KEY),\r\n elem.get(log_lib.util.xes.KEY_VALUE), tree)\r\n continue\r\n\r\n elif elem.tag.endswith(log_lib.util.xes.TAG_DATE):\r\n try:\r\n dt = ciso8601.parse_datetime(elem.get(log_lib.util.xes.KEY_VALUE))\r\n tree = __parse_attribute(elem, parent, elem.get(log_lib.util.xes.KEY_KEY), dt, 
tree)\r\n except TypeError:\r\n logging.info(\"failed to parse date: \" + str(elem.get(log_lib.util.xes.KEY_VALUE)))\r\n except ValueError:\r\n logging.info(\"failed to parse date: \" + str(elem.get(log_lib.util.xes.KEY_VALUE)))\r\n continue\r\n\r\n elif elem.tag.endswith(log_lib.util.xes.TAG_EVENT):\r\n if event is not None:\r\n raise SyntaxError('file contains <event> in another <event> tag')\r\n event = log_lib.log.Event()\r\n tree[elem] = event\r\n continue\r\n\r\n elif elem.tag.endswith(log_lib.util.xes.TAG_TRACE):\r\n if len(log) >= max_no_traces_to_import:\r\n break\r\n if trace is not None:\r\n raise SyntaxError('file contains <trace> in another <trace> tag')\r\n trace = log_lib.log.Trace()\r\n tree[elem] = trace.attributes\r\n continue\r\n\r\n elif elem.tag.endswith(log_lib.util.xes.TAG_FLOAT):\r\n if parent is not None:\r\n try:\r\n val = float(elem.get(log_lib.util.xes.KEY_VALUE))\r\n tree = __parse_attribute(elem, parent, elem.get(log_lib.util.xes.KEY_KEY), val, tree)\r\n except ValueError:\r\n logging.info(\"failed to parse float: \" + str(elem.get(log_lib.util.xes.KEY_VALUE)))\r\n continue\r\n\r\n elif elem.tag.endswith(log_lib.util.xes.TAG_INT):\r\n if parent is not None:\r\n try:\r\n val = int(elem.get(log_lib.util.xes.KEY_VALUE))\r\n tree = __parse_attribute(elem, parent, elem.get(log_lib.util.xes.KEY_KEY), val, tree)\r\n except ValueError:\r\n logging.info(\"failed to parse int: \" + str(elem.get(log_lib.util.xes.KEY_VALUE)))\r\n continue\r\n\r\n elif elem.tag.endswith(log_lib.util.xes.TAG_BOOLEAN):\r\n if parent is not None:\r\n try:\r\n val = bool(elem.get(log_lib.util.xes.KEY_VALUE))\r\n tree = __parse_attribute(elem, parent, elem.get(log_lib.util.xes.KEY_KEY), val, tree)\r\n except ValueError:\r\n logging.info(\"failed to parse boolean: \" + str(elem.get(log_lib.util.xes.KEY_VALUE)))\r\n continue\r\n\r\n elif elem.tag.endswith(log_lib.util.xes.TAG_LIST):\r\n if parent is not None:\r\n # lists have no value, hence we put None as a value\r\n tree = __parse_attribute(elem, parent, elem.get(log_lib.util.xes.KEY_KEY), None, tree)\r\n continue\r\n\r\n elif elem.tag.endswith(log_lib.util.xes.TAG_ID):\r\n if parent is not None:\r\n tree = __parse_attribute(elem, parent, elem.get(log_lib.util.xes.KEY_KEY),\r\n elem.get(log_lib.util.xes.KEY_VALUE), tree)\r\n continue\r\n\r\n elif elem.tag.endswith(log_lib.util.xes.TAG_EXTENSION):\r\n if log is None:\r\n raise SyntaxError('extension found outside of <log> tag')\r\n if elem.get(log_lib.util.xes.KEY_NAME) is not None and elem.get(\r\n log_lib.util.xes.KEY_PREFIX) is not None and elem.get(log_lib.util.xes.KEY_URI) is not None:\r\n log.extensions[elem.get(log_lib.util.xes.KEY_NAME)] = {\r\n log_lib.util.xes.KEY_PREFIX: elem.get(log_lib.util.xes.KEY_PREFIX),\r\n log_lib.util.xes.KEY_URI: elem.get(log_lib.util.xes.KEY_URI)}\r\n continue\r\n\r\n elif elem.tag.endswith(log_lib.util.xes.TAG_GLOBAL):\r\n if log is None:\r\n raise SyntaxError('global found outside of <log> tag')\r\n if elem.get(log_lib.util.xes.KEY_SCOPE) is not None:\r\n log.omni_present[elem.get(log_lib.util.xes.KEY_SCOPE)] = {}\r\n tree[elem] = log.omni_present[elem.get(log_lib.util.xes.KEY_SCOPE)]\r\n continue\r\n\r\n elif elem.tag.endswith(log_lib.util.xes.TAG_CLASSIFIER):\r\n if log is None:\r\n raise SyntaxError('classifier found outside of <log> tag')\r\n if elem.get(log_lib.util.xes.KEY_KEYS) is not None:\r\n classifier_value = elem.get(log_lib.util.xes.KEY_KEYS)\r\n if \"'\" in classifier_value:\r\n log.classifiers[elem.get(log_lib.util.xes.KEY_NAME)] = [x for x 
in classifier_value.split(\"'\")\r\n if x.strip()]\r\n else:\r\n log.classifiers[elem.get(log_lib.util.xes.KEY_NAME)] = classifier_value.split()\r\n continue\r\n\r\n elif elem.tag.endswith(log_lib.util.xes.TAG_LOG):\r\n if log is not None:\r\n raise SyntaxError('file contains > 1 <log> tags')\r\n log = log_lib.log.TraceLog()\r\n tree[elem] = log.attributes\r\n continue\r\n\r\n elif tree_event == EVENT_END:\r\n if elem in tree:\r\n del tree[elem]\r\n elem.clear()\r\n if elem.getprevious() is not None:\r\n try:\r\n del elem.getparent()[0]\r\n except TypeError:\r\n pass\r\n\r\n if elem.tag.endswith(log_lib.util.xes.TAG_EVENT):\r\n if trace is not None:\r\n trace.append(event)\r\n event = None\r\n continue\r\n\r\n elif elem.tag.endswith(log_lib.util.xes.TAG_TRACE):\r\n log.append(trace)\r\n trace = None\r\n continue\r\n\r\n elif elem.tag.endswith(log_lib.util.xes.TAG_LOG):\r\n continue\r\n\r\n del context\r\n\r\n if timestamp_sort:\r\n log = sorting.sort_timestamp(log, timestamp_key=timestamp_key, reverse_sort=reverse_sort)\r\n if insert_trace_indexes:\r\n log.insert_trace_index_as_event_attribute()\r\n\r\n return log", "def RecordFromLine(line):\n try:\n created, level, unused_source_location, message = (\n _StrictParseLogEntry(line, clean_message=False))\n\n\n message = Stripnl(message)\n return LoggingRecord(level, created, message, None)\n except ValueError:\n return StderrRecord(line)", "def logline_to_dict(logline: str) -> dict:\n data = {}\n pairs = re.split('(?<!:):(?!:)', logline.strip().strip('\\0'))\n for p in pairs:\n p = p.replace('::',':')\n keyval = p.split('=')\n try:\n data[keyval[0]] = keyval[1]\n except IndexError as e:\n logging.error('error \"{}\" in keyval \"{}\", logline \"{}\"'.format(e,keyval,logline))\n data[\"v\"] = re.match(r\"(0.\\d+)\", data[\"v\"]).group()\n if \"god\" not in data:\n data[\"god\"] = \"GOD_NO_GOD\"\n\n if \"status\" not in data:\n data[\"status\"] = \"\"\n\n data[\"god\"] = const.GOD_NAME_FIXUPS.get(data[\"god\"],data[\"god\"])\n if \"end\" in data:\n data[\"time\"] = data[\"end\"]\n data[\"ktyp\"] = const.KTYP_FIXUPS.get(data[\"ktyp\"], data[\"ktyp\"])\n data[\"type\"] = \"death.final\"\n data[\"milestone\"] = data[\"tmsg\"]\n\n data[\"runes\"] = data.get(\"urune\", 0)\n # D:0 is D:$ in logfile so we came from D:1 in that case\n data[\"oplace\"] = data.get(\"oplace\",\n data[\"place\"].translate(str.maketrans(\"$\", \"1\")))\n\n return data", "def start_log(self, log_type):\n\n try:\n self.logd = Log()\n\n if log_type == 'smb':\n self.parser = SMB()\n\n elif log_type == 'nginx':\n self.parser = Nginx()\n\n elif log_type == 'dns':\n self.parser = DNS()\n\n self.logd.monitor_log(self.parser.parse,\n settings.log[log_type],\n self.state)\n\n except Exception as msg:\n self.logger.log_exception(msg)", "def import_vulcan_log(log_file_name, header=0):\n # check\n assert isinstance(log_file_name, str), 'Log file name %s must be a string but not of type %s.' \\\n '' % (str(log_file_name), type(log_file_name))\n assert os.path.exists(log_file_name), 'Log file %s does not exist.' % log_file_name\n # use pandas to load the file\n\n # import\n log_set = pd.read_csv(log_file_name, sep='\\t', header=header)\n\n # check\n assert len(log_set) > 1, 'Separation is not tab for VULCAN record file %s.' 
% log_file_name\n\n return log_set", "def parse_apache_log_line(log_line: str) -> dict:\n try:\n split_ws = log_line.split(\" \")\n parsed_dict = {\n \"IP\": split_ws[0],\n \"Time\": get_time_epoch(split_ws[3][1:], split_ws[4][:-1]),\n \"Request_Method\": split_ws[5][1:],\n \"Request_Resource\": split_ws[6],\n \"Request_Protocol\": split_ws[7][:-1],\n \"Status_Code\": int(split_ws[8]),\n \"Payload_Size\": int(split_ws[9]),\n \"Referer\": split_ws[10].replace(\"\\\"\", \"\"),\n \"User_Agent\": \" \".join(split_ws[11:]).replace(\"\\\"\", \"\")\n }\n return parsed_dict\n except ValueError:\n print(\"FOUND INCORRECT LOG TYPE\")\n return {}\n except IndexError:\n print(\"FOUND INCORRECT LOG STRING\")\n return {}\n except AttributeError:\n print(\"STRING IS TOO SHORT\")\n return {}", "def from_string(cls, data_str):\n help_str = (\"The string mut contain a timestamp (YYYY-mm-dd-HH:mm, \"\n \"e.g. '2021-01-29-23:43') and the number of pulses \"\n \"separated with whitespace.\")\n parts = data_str.split()\n if len(parts) != 2:\n raise ValueError(\"String '{}' isn't a valid data entry. {}\"\n \"\".format(data_str, help_str))\n return cls(timestamp=parts[0], pulses=parts[1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter the queryset to all instances matching the given value on the specified lookup field.
def filter_queryset(self, value, queryset, field_name): filter_kwargs = { "%s__%s" % (self.lookup_field or field_name, self.lookup): value } return qs_filter(queryset, **filter_kwargs)
[ "def filterRecsByField(self, field_name, value):\n assert 0, u'Empty method'", "def do_field_filtering(self, request, queryset):\n fields = set(self.get_api_fields(queryset.model)).union({'id'})\n\n for field_name, value in request.GET.items():\n if field_name in fields:\n field = getattr(queryset.model, field_name, None)\n\n if isinstance(field, _TaggableManager):\n for tag in value.split(','):\n queryset = queryset.filter(**{field_name + '__name': tag})\n\n # Stick a message on the queryset to indicate that tag filtering has been performed\n # This will let the do_search method know that it must raise an error as searching\n # and tag filtering at the same time is not supported\n queryset._filtered_by_tag = True\n else:\n queryset = queryset.filter(**{field_name: value})\n\n return queryset", "def queryset(self, request, queryset):\n # Compare the requested value to decide how to filter the queryset.\n if self.value():\n return queryset.filter(school=self.value())\n else:\n return queryset", "def _filter_by_multiple_fields(self, queryset, fields, value):\n\n query = Q()\n for field in fields:\n query |= Q(**{field + \"__icontains\": value})\n\n return queryset.filter(query).distinct()", "def by_attribute(self, schema_field, att_value, is_lookup=False):\n\n clone = self.prepare_attribute_qs()\n real_name = str(schema_field.real_name)\n if not isinstance(att_value, (list, tuple)):\n att_value = [att_value]\n if is_lookup:\n att_value = Lookup.objects.filter(schema_field__id=schema_field.id, code__in=att_value)\n if not att_value:\n # If the lookup values don't exist, then there aren't any\n # NewsItems with this attribute value. Note that we aren't\n # using QuerySet.none() here, because we want the result to\n # be a NewsItemQuerySet, and none() returns a normal QuerySet.\n clone = clone.extra(where=('1=0',))\n return clone\n att_value = [val.id for val in att_value]\n if schema_field.is_many_to_many_lookup():\n # We have to use a regular expression search to look for all rows\n # with the given att_value *somewhere* in the column. 
The [[:<:]]\n # thing is a word boundary.\n for value in att_value:\n if not str(value).isdigit():\n raise ValueError('Only integer strings allowed for att_value in many-to-many SchemaFields')\n clone = clone.extra(where=(\"db_attribute.%s ~ '[[:<:]]%s[[:>:]]'\" % (real_name, '|'.join([str(val) for val in att_value])),))\n elif None in att_value:\n if att_value != [None]:\n raise ValueError('by_attribute() att_value list cannot have more than one element if it includes None')\n clone = clone.extra(where=(\"db_attribute.%s IS NULL\" % real_name,))\n else:\n clone = clone.extra(where=(\"db_attribute.%s IN (%s)\" % (real_name, ','.join(['%s' for val in att_value])),),\n params=tuple(att_value))\n return clone", "def find_matching_objects(self, field, value):\n model = field.model\n name = get_name(model)\n field_name = field.field_name\n objects = [model_object for model_object in self.objects_tree[ \\\n name].values() if getattr(model_object, field_name) == value]\n return objects", "def set_custom_queryset(self, obj, kwargs, Model):\n pk = self.extra_lookup_kwargs.get(\n self.filter_field,\n self.root.parent_lookup_kwargs[self.filter_field])\n pk_attr = pk.split('__')\n if obj is None:\n kwargs = {}\n else:\n for attr in pk_attr:\n obj = getattr(obj, attr)\n kwargs = {self.parent_lookup_kwargs[self.filter_field]: obj, }\n qs = Model.objects.filter(**kwargs)\n return qs", "def make_query_filter(self, field_name, value):\n alias_map = self.get_aliased_field_map()\n if field_name in alias_map.keys():\n field_name = alias_map[field_name]\n\n prop = getattr(self.model_type, field_name, None)\n if prop:\n property_type = get_property_type(prop)\n filter_value = None\n operator = Operator.EQUALS\n if property_type == PropertyType.ENUM and value == UNSET:\n operator = Operator.EQUALS_OR_NONE\n # If we're dealing with a comparable property type, look for a prefix that indicates an\n # operator other than EQUALS and strip it off\n if property_type in _COMPARABLE_PROPERTY_TYPES:\n for prefix, op in list(_OPERATOR_PREFIX_MAP.items()):\n if isinstance(value, str) and value.startswith(prefix):\n operator = op\n value = value[len(prefix) :]\n break\n filter_value = self._parse_value(prop, property_type, value)\n return FieldFilter(field_name, operator, filter_value)\n else:\n return None", "def find_by_field_value(self, field_name, value, op='=', collate=False):\n query = self._select().where(self.where(field_name, value, op))\n entities = yield self._query_for_entities(query)\n if collate:\n entities = self.collate(entities)\n returnValue(entities)", "def set_must_match(self, field: str, value: str) -> None:\n terms = {\n \"term\": {\n field: value\n }\n }\n self.query[\"query\"][\"bool\"][\"filter\"].append(terms)", "def _filter_by_range_schema(self, qs, lookup, sublookup, value, schema):\n # This code was written with a single use case in mind. That use case\n # required searching for objects whose ranges *intersect* with given\n # one. I did not invest time in supporting other use cases (such as\n # checking whether the ranges are exactly the same or how they are\n # different). However, such lookups *can* be added later without\n # breaking existing client code. Patches are welcome.\n sublookup = sublookup or RANGE_INTERSECTION_LOOKUP\n if not sublookup == RANGE_INTERSECTION_LOOKUP:\n raise ValueError('Range schema only supports lookup \"%s\".' 
%\n RANGE_INTERSECTION_LOOKUP)\n try:\n _, _ = value\n except ValueError:\n raise ValueError('Range schema value must be a tuple of min and '\n 'max values; one of them may be None.')\n except TypeError:\n raise TypeError('Expected a two-tuple, got \"%s\"' % value)\n\n value_lookups = zip((\n 'attrs__value_range_max__gte',\n 'attrs__value_range_min__lte',\n ), value)\n conditions = dict((k,v) for k,v in value_lookups if v is not None)\n conditions.update({\n 'attrs__schema': schema,\n })\n return conditions", "def _filter_by_choice_schema(self, qs, lookup, sublookup, value, schema, model=None):\n model = model or self.model\n schemata = dict((s.name, s) for s in model.get_schemata_for_model()) # TODO cache this dict, see above too\n try:\n schema = schemata[lookup]\n except KeyError:\n # TODO: smarter error message, i.e. how could this happen and what to do\n raise ValueError(u'Could not find schema for lookup \"%s\"' % lookup)\n sublookup = '__%s'%sublookup if sublookup else ''\n return {\n 'attrs__schema': schema,\n 'attrs__choice%s'%sublookup: value, # TODO: can we filter by id, not name?\n }", "def _filter_commaseparated_field(self, field, values, queryset):\n field_query = \"%s__icontains\" % field\n filters = map(lambda v: Q(**{field_query: v}), values)\n filters = reduce(operator.or_, filters, Q())\n return queryset.filter(filters)", "def filter_by(self, data, field_name, value):\n return [note for note in data if note.get(field_name) == value]", "def find_by_fieldname(self, name, value):\n response = self.table.scan(\n FilterExpression=Attr(name).eq(value)\n )\n items = response.get(\"Items\", [])\n return items", "def do_filter(self, queryset=None):\n if not queryset:\n queryset = self.modelmap.get_queryset()\n queryset = self.filter(queryset)\n return queryset", "def filter(self, *args, **kwargs):\n return self.list().filter(*args, **kwargs)", "def top_lookups(self, schema_field, count):\n real_name = \"db_attribute.\" + str(schema_field.real_name)\n if schema_field.is_many_to_many_lookup():\n clone = self.prepare_attribute_qs().filter(schema__id=schema_field.schema_id)\n clone = clone.extra(where=[real_name + \" ~ ('[[:<:]]' || db_lookup.id || '[[:>:]]')\"])\n # We want to count the current queryset and get a single\n # row for injecting into the subsequent Lookup query, but\n # we don't want Django's aggregation support to\n # automatically group by fields that aren't relevant and\n # would cause multiple rows as a result. 
So we call\n # `values()' on a field that we're already filtering by,\n # in this case, schema, as essentially a harmless identify\n # function.\n clone = clone.values('schema').annotate(count=Count('schema'))\n qs = Lookup.objects.filter(schema_field__id=schema_field.id)\n qs = qs.extra(select={'lookup_id': 'id', 'item_count': clone.values('count').query})\n else:\n qs = self.prepare_attribute_qs().extra(select={'lookup_id': real_name})\n qs.query.group_by = [real_name]\n qs = qs.values('lookup_id').annotate(item_count=Count('id'))\n ids_and_counts = [(v['lookup_id'], v['item_count']) for v in qs.values('lookup_id', 'item_count').order_by('-item_count') if v['item_count']][:count]\n lookup_objs = Lookup.objects.in_bulk([i[0] for i in ids_and_counts])\n return [{'lookup': lookup_objs[i[0]], 'count': i[1]} for i in ids_and_counts]", "def apply_query_isnull(cls, queryset, options, value):\n _value_lower = value # TODO: clean up?\n q = q_params(\"exists\", options, query={\"field\": options[\"field\"]})\n\n if _value_lower in TRUE_VALUES:\n return cls.apply_query(\n queryset=queryset,\n options=options,\n args=[~q],\n )\n elif _value_lower in FALSE_VALUES:\n return cls.apply_query(\n queryset=queryset,\n options=options,\n args=[q],\n )\n return queryset" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The specified process is not running
def test_no_such_process(self): pass
[ "def _is_running(process):\n with hide('output'):\n s = run('ps auwx')\n for x in s.split('\\n'):\n if re.search(process, x):\n print '%s running' % process\n return True\n\n return False", "def _check_process_is_running(self, name: str):\n for proc in psutil.process_iter():\n try:\n if name.lower() in proc.name().lower():\n return True\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return False", "def check_existing_processes():\n pid = os.getpid()\n pipe = subprocess.Popen(\n 'ps aux | grep %s | grep -v grep | grep -v \"/dev/null\" | grep -v \"bash -c\" | grep -v %s ' % (\n 'start_hdmi_cec_monitor', pid), shell=True, stdout=subprocess.PIPE).stdout\n output = pipe.read().decode('utf-8')\n if output != '':\n logging.error('%s is already running, existing process: \\n %s' % (__file__, output))\n raise SystemExit()", "def assert_running():\n ok(is_running(), 'Process is not running.')", "def assert_not_running():\n ok(not is_running(), 'Process is running.')", "def isRunning (self):\n\t\tif not self.job.pid:\n\t\t\treturn False\n\t\treturn ps.exists(int(self.job.pid))", "def stop_if_already_running():\n\n script_name = path.basename(__file__)\n l = getstatusoutput(\"ps aux | grep -e '%s' | grep -v grep | grep -v vim | awk '{print $2}'| awk '{print $2}' \" % script_name)\n if l[1]:\n print \"Already running. Aborting\"\n print l[1]\n exit(0)", "def is_running(name):\n if _job_pid(name) is not None:\n return True\n\n return False", "def isRunning(name):\n\n\t# Import pidfile list\n\tglobal _lPidFiles\n\n\t# Generate the nameof the files\n\tsFile = '/tmp/%s.pid' % name\n\n\t# If the file already exists\n\tif os.path.isfile(sFile):\n\t\treturn True\n\n\t# Create the file, write to, and close the file\n\toFile = open(sFile, 'w')\n\toFile.write(str(os.getpid()))\n\toFile.close()\n\n\t# Add the file to the pidfiles\n\t_lPidFiles.append(sFile)\n\n\t# Return was not running\n\treturn False", "def checkForPicam():\n try:\n subprocess.check_output([\"pgrep\", \"picam\"])\n except:\n sys.exit(\"Picam isn't running?\")", "def if_running(self, process=None):\n self.writeCommand('if_running', process)\n return self", "def is_flume_process_live(pid_file):\n live = False\n\n try:\n check_process_status(pid_file)\n live = True\n except ComponentIsNotRunning:\n pass\n\n return live", "def kill_if_running(self):\r\n if self.process is not None:\r\n if self.process.state() == QProcess.Running:\r\n self.process.kill()\r\n self.process.waitForFinished()", "def is_program_running(self):\n return self.rob.secmon.is_program_running()", "def __init__(self, device_name, msg, details=None):\n super(ProcessNotRunningError, self).__init__(\n device_name, msg, details=details)", "def vlc_process_status(self):\n for p in psutil.process_iter():\n if self.vlc_process_name in p.name():\n print \"vlc.py: VLC Process exists, it has process id {}\".format(p.pid)\n return True\n print \"vlc.py: VLC didn't start, Please check command\"\n return False", "def pid_running_on(self, address):\n return self.state == ProcessStates.RUNNING and address in self.addresses", "def psOnID(pid=0,pgid=0):\n try:\n if(pid):\n p = psutil.Process(pid)\n else:#need to change this to look for all processes with this pgid\n p = psutil.Process(pgid)\n try:#for python 2.7.2 psutil.status is a method\n pstatus = p.status()\n except:#in python v2.7.5 or greater the psutil.status is a property, not a method\n pstatus = p.status\n psOn = p.is_running() and pstatus != psutil.STATUS_ZOMBIE\n return psOn\n except:\n return 
False\n # cmd = \"kill -0 \"+str(pid) #\"ps -A | pgrep \"+process+\" > /dev/null\"\n # #result of the cmd is 0 if it was successful (i.e. the process exists)\n # return os.system(cmd)==0", "def check_for_activation(self):\n\n def callback(_):\n file = open(self.pid_file, 'r')\n line = file.readline()\n file.close()\n read_pid = line.rstrip()\n if read_pid != self.pid:\n\n # other simulator tries to start running\n # write pid to pid_file to notify this simulator is already running\n pid_file = open(self.pid_file, 'w')\n pid_file.write(self.pid)\n pid_file.close()\n\n if platform.system().lower().startswith('win'):\n self.visualiser.windows_activate()\n else:\n self.visualiser.activate()\n\n clock.schedule_interval(callback, 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send messages to the status window (the only window)
def test_status_window(self): pass
[ "def _update_status_bar(self, message):\n self.window().status_bar = message", "def display_window(instrument, window_num=1, status='ON'):\n status.upper()\n if window_num != 1:\n command = ':DISPlay:WINDow%d:STATe %s' % (window_num, status)\n instrument.write(command)", "def OnStatusBarUpdate(self, message):\n #print message\n self.statusBar_main.SetStatusText(message.data)", "def show_message(self, msg):\n bar = self.statusBar()\n bar.showMessage(msg)", "def _writeToStatusBar(self, message : str, time=3000):\n self.statusbar = self.statusBar()\n self.statusbar.showMessage(message, time)", "def show_message(self, msg):\n self.statusbar.SetLabel(msg)", "def _createStatusBar(self):\n self.statusbar = self.statusBar()\n self.statusbar.showMessage(\"Ready\", 3000)", "def showStatus(msg):\n import ij.IJ\n ij.IJ.showStatus(msg)", "def win(self):\n message = GLabel(\"YOU WIN!!!\", x=self.window.width / 5, y=self.window.height / 2)\n message.font = \"-50\"\n self.window.add(message)", "def draw_win_message(cls):\n cls.blit_text(FontManager.get(\"wl\"), Message.WIN, Color.CYAN, 90, 50)", "def test_statusesChangedOnStatusMessage(self):\n q = []\n dispatcher = self.dispatcher\n dispatcher.statusWatcher = Watcher(q)\n message = \"whatever\"\n # Need to have a socket that will accept the descriptors.\n dispatcher.addSocket()\n subskt = dispatcher._subprocessSockets[0]\n dispatcher.statusMessage(subskt, message)\n dispatcher.statusMessage(subskt, message)\n self.assertEquals(q, [[-1], [-2]])", "def show_message(self, text):\n self.statusbar.showMessage(text, 2000)", "def update_status(self, msg):\n\n self.status_box.append(msg)\n # reference: https://stackoverflow.com/questions/7778726/autoscroll-pyqt-qtextwidget\n self.status_box.moveCursor(QtGui.QTextCursor.End)\n self.update()", "def update_status(self, txt, wait_time=0):\n try:\n self.ui.statusbar.showMessage(txt, wait_time)\n logging.info(txt)\n except Exception as e:\n logger.exception(str(e))", "def send_unsigned_presence(self):\n current_presence = self.core.get_status()\n self.core.command.status('%s %s' % (current_presence.show or 'available', current_presence.message or '',))", "def status(text):\n if SHOW_UI:\n pygame.display.set_caption(text)\n stdout.write('\\r%s' % text)\n stdout.flush()", "async def status(self, ctx: commands.Context, *, status: str):\n await self.bot.change_presence(activity=discord.Game(name=status))", "def redraw_status_window(vDict):\n player = vDict['gameLevel'].player\n\n windowDict = vDict['windowDict']\n statusWindow = windowDict['statusWindow']\n\n blankInvenText = vDict['blankInvenText']\n\n halfBlankInvenText = vDict['halfBlankInvenText']\n\n y = 1\n\n for i in ('Species: {0.species.name}', 'Health: {0.health.amount}/{0.maxHealth}',\n 'Stamna: {0.stamna.amount}/{0.maxStamna}', '{0.magic.amount}/{0.maxMagic}',\n '{0.describeHunger}'):\n statusWindow.draw_str(\n 1, y, halfBlankInvenText.format(\n i.format(player)\n ))\n\n y += 1\n\n y = 1\n\n halfWidth = statusWindow.width // 2\n\n for i, s in zip(('STR', 'END', 'AGI', 'DEX', 'MIN', 'WIL', 'PER', 'MAG'), ALL_STATS):\n statusWindow.draw_str(halfWidth, y, halfBlankInvenText.format('{}: {}'.format(i, player.getTotalStat(s))))\n\n y += 1\n\n tdl.flush()", "def display_status(self):\n\n if self.game.is_end:\n if self.game.status == \"win\":\n text = \"\\nYou won !\\nPress any key to continue...\"\n elif self.game.status == \"lose\":\n text = f\"\\nYou lost ! 
You only had {str(self.game.player.inventory)}/3 items.\\nPress any key to continue...\"\n\n print(text)\n self.game.is_running = False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the time left until the fort battle starts
def _getFortBattleTimer(self): if self.fortState.getStateID() == CLIENT_FORT_STATE.HAS_FORT: fortBattle = self.fortCtrl.getFort().getBattle(prb_getters.getBattleID()) if fortBattle is not None: return fortBattle.getRoundStartTimeLeft() return 0
[ "def get_timeAvailable(self):\r\n\r\n return self._timeAvailable", "def time_left(self):\r\n if self._timestamp is None:\r\n return 0\r\n else:\r\n return self.timeout-(time.time()-self._timestamp)", "def start_time(self):\n # if this hunt is configured for full coverage, then the starting time for the search\n # will be equal to the ending time of the last executed search\n if self.full_coverage:\n # have we not executed this search yet?\n if self.last_end_time is None:\n return local_time() - self.time_range\n else:\n return self.last_end_time\n else:\n # if we're not doing full coverage then we don't worry about the last end time\n return local_time() - self.time_range", "def get_time(self): # TEST\n return self._game.get_time()", "async def get_exposure_time_left(self, **kwargs: Any) -> float:\n\n # if we're not exposing, there is nothing left\n if self._exposure is None:\n return 0.0\n\n # calculate difference between start of exposure and now, and return in ms\n duration = datetime.timedelta(seconds=self._exposure.exposure_time)\n diff = self._exposure.start + duration - datetime.datetime.utcnow()\n return diff.total_seconds()", "def remainingTimeToWait(self) -> int:\n ...", "def registration_time_left_minutes(self):\n return self._registration_time_left_minutes", "def remaining_time(self):\n diff_seconds = (datetime.now() - self.date_start).total_seconds()\n diff_seconds = int(round(diff_seconds))\n\n duration_seconds = self.duration.total_seconds()\n # We have duration in seconds, and seconds of the difference between now and start of parking\n # If diff is less than duration, this will be positive, else negative.\n return int( (duration_seconds - diff_seconds) / 60)", "def remain_time(self):\n return self._camera_data_structure[\"remainTime\"]", "def elapsed_time(self):\n elapsed = time.time() - self.game_start\n return elapsed", "def wait_time(self):\r\n if self.cashier_arrival != None and self.line_arrival != None:\r\n return self.cashier_arrival - self.line_arrival", "def eligible_time(self):\n return self._job.get('eligible_time', 0)", "def totalTime(self):\n return time.time()-self.start", "def getRemainingTime(self, instance):\n return self.user_obj.getInstanceInfo(instance)[\"remaining_time\"]", "def get_remaining_time_in_millis(self):\n return self.time_limit_ms", "def time_wall(self):\n return time.time() - self.time_start", "def request_time(self) -> float:\n if self._finish_time is None:\n return time.time() - self._start_time\n else:\n return self._finish_time - self._start_time", "def _timeleft(stime, timeout):\n if timeout is not None:\n if timeout is 0:\n return 0\n return max(0, timeout - (time.time() - stime))", "def get_time_since_reset(self):\n return self._robot.GetTimeSinceReset()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shows fort battle unit window
def _showWindow(self): pInfo = self._entity.getPlayerInfo() if pInfo.isInSlot and not pInfo.isReady: g_eventDispatcher.showFortWindow()
[ "def show1(self,win):\n\n # display invader\n # -------------\n win.addstr(self.yPos, self.xPos,\"-o-\")\n\n win.refresh()", "def mgs_ur_show_launcher():\n window = pm.window(title='Launch Game of Ur')\n\n pm.columnLayout()\n pm.text(\"Click to launch the game,\\nor drag (with the middle mouse)\\nand drop the button to the shelf.\")\n shelf = pm.shelfLayout(height=64, width=64)\n\n pm.shelfButton(annotation=\"Launch the Royal Game of Ur\", image1=\"bulge.svg\", imageOverlayLabel=\"Ur\", command=COMMAND)\n\n pm.showWindow(window)", "def show2(self,win):\n\n # display invader\n # -------------\n win.addstr(self.yPos, self.xPos,\"-0-\")\n\n win.refresh()", "def open_stat(self):\r\n hp = f\"HP: {self.player.hp}/{self.player.max_hp}\".ljust(10)\r\n lv = f\"LV: {self.player.lv}\".ljust(10)\r\n name = self.player.name\r\n layout = [\r\n [sg.Text(f'\"{name}\"')],\r\n [sg.Text(lv)],\r\n [sg.Text(hp)],\r\n [sg.Button(\"Exit\", size=(10, 1), button_color=(\"#edf2ce\", \"#444444\"))]\r\n ]\r\n window = sg.Window(\"Stats\", layout, size=(250, 500), element_justification='c')\r\n window.read()\r\n window.close()", "def show(self,win):\n # display bullet\n # -------------\n if self.yPos > 0:\n win.addstr(self.yPos,self.xPos,\"+\")\n win.refresh()", "def test_display_weapon_location(self):\n self.board.display_weapon_location(\"Pistol\") # No location for weapon\n self.board.set_weapon_location(\"Pistol\",\"Study\")\n self.board.display_weapon_location(\"Pistol\")", "def ui():\n win = ControlWindow()\n win.show()", "def show(self,surface):\n f=self.getForm()\n for case in self.cases:\n case.show(surface,side_color=mycolors.BLACK)\n f.side_color=mycolors.BLACK\n f.side_width=3\n f.show(surface)\n f[0].showText(surface,\"Board\")", "def openMediumSolutionWindow():\n openMediumSolutionWindow = tk.Toplevel(window)\n openMediumSolutionWindow.title(\"Sudoku - Medium Solution\")\n openMediumSolutionWindow.configure(bg='black')\n displayBoardGUI(boardMedium, openMediumSolutionWindow)", "def show_game(self):\n self.G.show_board() # Call the graph's show_board.", "def on_buttonBox_accepted(self):\n #set up messaage box\n msgBox=QMessageBox()\n msgBox.setIcon(3)\n \n #close welcome screen\n self.close()\n \n #if unit 1 chosen, open unit 1\n if self.unit==1:\n openWizard=PopDevWizard()\n openWizard.show()\n result = openWizard.exec_()\n #if unit 2 chosen, open unit 2\n elif self.unit==2:\n openWizard = TourismWizard()\n openWizard.show()\n result = openWizard.exec_()\n #if unit 3 chosen, open unit 3\n else:\n openWizard=EarthquakesWizard()\n openWizard.show()\n result = openWizard.exec_()", "def display_window(instrument, window_num=1, status='ON'):\n status.upper()\n if window_num != 1:\n command = ':DISPlay:WINDow%d:STATe %s' % (window_num, status)\n instrument.write(command)", "def _createBuyWindow(self):\r\n self._buyWindow = Toplevel()\r\n\r\n name = self.game.getTile(self.game.getCurrentPlayer()[\"location\"])[\"name\"]\r\n\r\n Label(self._buyWindow, text=f\"Buy {name}?\").grid(row=0, column=0, columnspan=2)\r\n Button(self._buyWindow, text=\"Yes\", command=self._buy).grid(row=1, column=0)\r\n Button(self._buyWindow, text=\"No\", command=lambda: self._auction(name)).grid(row=1, column=1)", "def unitf_disp(form, db, param, conn, message = '', inp_unit = 0):\n nUnit = int(form.getfirst('unit_id', inp_unit))\n # Counter for current members, scouts and role members\n nCurr_memb = 0\n\n menu_itm = 992\n # If not form unit id, take the last one from the connection rec\n if nUnit == 0:\n # Take it from the home id 
rather than last id if possible\n if conn.home_level == 'U' and conn.home_id != 0:\n nUnit = conn.home_id\n menu_itm = 1\n else:\n nUnit = conn.last_level_id\n else:\n #check if this is the users home page\n if conn.home_id == nUnit and conn.home_level == 'U':\n menu_itm = 1\n\n unit = dbobj.unitrec(db, nUnit)\n if not unit.found:\n # go to top of browse tree\n app_error(form, param, conn, message = 'Invalid unit id')\n return \n\n group = dbobj.grouprec(db, unit.group_id)\n\n conn.last_level = 'U'\n conn.last_level_id = unit.unit_id\n conn.update()\n\n security = dbobj.security(db, conn.scout_id, 'U', unit.unit_id)\n\n # Define any input parameters\n cStatus = form.getfirst('disp_status', 'C')\n\n jw_header(param, conn, menu_item=menu_itm)\n if message != '':\n print webproc.tag('H2', message)\n\n\n #define outer table, hold unit details at the top, members on the left, & adult roles on the right\n outtable = webproc.table(width='100%', cellpadding = param.ot_cellpad, cellspacing = param.ot_cellspc, border = param.ot_brdr)\n\n # top row if for Unit details\n\n table = webproc.table(width='100%', cellpadding = param.it_cellpad, cellspacing = param.it_cellspc, border = param.it_brdr)\n table.add_row().add_item(webproc.tag('H2', unit.name))\n table.last_row().add_item('Group: ' + group.name, align='CENTRE')\n item = table.last_row().add_item(webproc.jbutton('Up to group - ' + group.name,\\\n \"scout.py?jw_action=groupf_disp&group_id=\" + str(unit.group_id), need_form=1), align = 'right')\n if security.edit_unit:\n item.data += '&nbsp' + webproc.jbutton('Collective awards', 'award.py?jw_action=unitf1_achieve&unit_id=%d' % unit.unit_id)\n \n table.add_row().add_item('<B>Meeting time :</B> ' + pr_str(unit.meet_time))\n table.last_row().add_item('Section: ' + unit.sect_name, align='CENTRE')\n item = table.last_row().add_item('', align='RIGHT')\n if security.edit_unit:\n item.data = webproc.jbutton('Extract member details', 'office.py?jw_action=extractf_unit&unit_id=%d' % unit.unit_id, need_form=0)\n if can_email(db, unit, group, conn):\n item.data += '&nbsp&nbsp&nbsp' + webproc.jbutton('Email unit', 'office.py?jw_action=emailf_unit&unit_id=%d' % unit.unit_id, need_form=0)\n if conn.superuser:\n item.data += '&nbsp&nbsp&nbsp' + webproc.jbutton('Edit unit details', 'heir_acd.py?jw_action=unitf_edit&unit_id=%d' % unit.unit_id, need_form=0)\n\n outtable.add_row().add_item(table.pr_table(), colspan = '2')\n\n # Scout unit members display\n unit.scout_list()\n\n table = webproc.table(cellpadding = '0', cellspacing = '0', width='100%', border = '0')\n\n # top row if for Unit details\n\n if cStatus == 'C':\n table.add_row().add_item(webproc.tag('H3', 'UNIT Members (current)'))\n table.last_row().add_item(webproc.tag('DIV', webproc.jbutton('(Display all including inactive)',\\\n 'scout.py?jw_action=unitf_disp&disp_status=A&unit_id=%d' % unit.unit_id, need_form=0),\\\n 'ALIGN=\"RIGHT\"'))\n else:\n table.add_row().add_item(webproc.tag('H3', 'UNIT Members (including inactive)'))\n table.last_row().add_item( webproc.tag('DIV', webproc.jbutton('(Display current only)',\\\n 'scout.py?jw_action=unitf_disp&disp_status=C&unit_id=%d' % unit.unit_id, need_form = 0),\\\n 'ALIGN=\"RIGHT\"'))\n\n if security.edit_unit:\n table.last_row().add_item(webproc.jbutton('Add new member', 'scout.py?jw_action=scoutf_add&unit_id=%d'\\\n % unit.unit_id, need_form=0), align = 'RIGHT')\n\n for s in unit.scoutlist:\n table.add_row().add_item(webproc.tag('A', string.strip(s.forename) + ' ' + string.strip(s.surname),\\\n 
'href=scout.py?jw_action=scoutf_disp&scout_id=%d' % s.scout_id), colspan = '2')\n item = table.last_row().add_item('%d years & %d months' % (s.years, s.months), align = 'right')\n if s.age > unit.end_age:\n item.styleclass = 'error'\n nCurr_memb += 1\n\n if cStatus == 'A':\n table.add_row().add_item(webproc.tag('H4', 'Inactive'))\n group = dbobj.grouprec(db, unit.group_id)\n grscouts = group.all_scout_list()\n for s in grscouts:\n if s.status == 'L':\n if (s.age + 0.5) > unit.start_age and (s.age - 0.5) < unit.end_age:\n table.add_row().add_item(webproc.tag('A', string.strip(s.forename) + ' ' + string.strip(s.surname),\\\n 'href=scout.py?jw_action=scoutf_disp&scout_id=' + str(s.scout_id)))\n table.last_row().add_item(webproc.tag('A', 'Renew membership',\\\n 'href=scout.py?jw_action=scoutf_renew&scout_id=%d&unit_id=%d' % (s.scout_id, unit.unit_id)))\n item = table.last_row().add_item('%d years & %d months' % (s.years, s.months), align = 'right')\n\n outtable.add_row().add_item(table.pr_table())\n\n # Placeholder for role information\n table = webproc.table(cellpadding = '0', cellspacing = '0', width='100%', border = '0')\n table.add_row().add_item(webproc.tag('H3', 'Adult roles'))\n if security.edit_unit:\n table.last_row().add_item(webproc.jbutton('Add new', 'office.py?jw_action=rolef_add1&type=U&type_id=%d'\\\n % unit.unit_id, need_form=0), align='RIGHT')\n unit.role_list()\n for r in unit.rolelist:\n name = r.forename + ' '\n if r.initials != '':\n name += r.initials + ' '\n name += r.surname\n if security.edit_unit:\n name = webproc.tag('A', name, 'href=office.py?jw_action=rolef_add2&type=U&type_id=' + str(unit.unit_id) + '&scout_id=' + str(r.scout_id))\n table.add_row().add_item(name)\n table.last_row().add_item(r.title)\n if security.edit_unit:\n table.last_row().add_item(webproc.tag('A', 'Del', 'href=office.py?jw_action=rolep_del&type=U&type_id=' + str(unit.unit_id) + '&scout_id=' + str(r.scout_id) + ' class=small_disp'))\n nCurr_memb += 1\n\n\n # Print the roles table\n outitem = outtable.last_row().add_item(table.pr_table())\n outitem.valign = 'TOP'\n\n # Now print the table\n print outtable.pr_table()\n\n webproc.form_footer()\n\n if nCurr_memb != unit.curr_memb:\n unit.curr_memb = nCurr_memb\n unit.update()", "def show_board(self) -> None:\n pygame.display.set_caption(\"Qwixx Board\")\n if self.is_turn_invalid:\n self.screen.fill(PyGameUi.red_vibrant)\n else:\n self.screen.fill(PyGameUi.white)\n\n font = pygame.font.SysFont('Comic Sans MS', PyGameUi.font_numbers_size, True, False)\n lock = pygame.font.SysFont('Comic Sans MS', PyGameUi.font_lock_size, True, False)\n\n self._render_colored_rows(font, lock)\n self._render_penalties(font)\n self._render_skip_button(font)\n self._render_dice(font)\n self._show_player_mode(font)\n\n clock = pygame.time.Clock()\n clock.tick(60)\n pygame.display.flip()", "def openHardSolutionWindow():\n openHardSolutionWindow = tk.Toplevel(window)\n openHardSolutionWindow.title(\"Sudoku - Hard Solution\")\n openHardSolutionWindow.configure(bg='black')\n displayBoardGUI(boardHard, openHardSolutionWindow)", "def showWindow(self, sender):", "def turn_display(self):\n myfont = pygame.font.SysFont(\"arial\", 48)\n turndisp = myfont.render(\"Player %s's Turn\"%(self.model.turn%len(self.model.teams)+1), 1, (0,0,0))\n self.screen.blit(turndisp,(10,10))", "def show_instructions(self, event):\n self.controller.show_frame(TkInstructions)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a class from a package.module.class string
def create_class(pkg_class: str): splits = pkg_class.split(".") clfclass = splits[-1] pkg_module = splits[:-1] class_ = getattr(import_module(".".join(pkg_module)), clfclass) return class_
[ "def get_class_from_string(class_name: str) -> Type[Any]:\n\n parts = class_name.split(\".\")\n module_name = \".\".join(parts[:-1])\n cls: Type[Any] = __import__(module_name)\n for comp in parts[1:]:\n cls = getattr(cls, comp)\n return cls", "def stringToClass(cls_str):\n import_stg1 = cls_str.split(\" \")[1]\n import_stg2 = import_stg1.replace(\"'\", \"\")\n import_stg3 = import_stg2.replace(\">\", \"\")\n import_parse = import_stg3.split(\".\")\n cls = import_parse[-1]\n import_path = '.'.join(import_parse[:-1])\n import_statement = \"from %s import %s\" % (import_path, cls)\n exec(import_statement)\n this_class = None\n assign_statement = \"this_class = %s\" % cls\n exec(assign_statement)\n return this_class", "def get_class(class_name):\n sub_mods = class_name.split(sep=\".\")\n module = __import__(\".\".join(sub_mods[:-1]), fromlist=sub_mods[-1])\n class_module = getattr(module, sub_mods[-1])\n return class_module", "def parse_class_name(string):\n\n class_parts = string.split(\".\")\n class_name = class_parts[-1]\n\n # TODO should we not assume that everything is from neuralmonkey?\n module_name = \".\".join([\"neuralmonkey\"] + class_parts[:-1])\n\n try:\n module = importlib.import_module(module_name)\n except ImportError as exc:\n # if the problem is really importing the module\n if exc.name == module_name:\n raise Exception((\"Interpretation '{}' as type name, module '{}' \"\n \"does not exist. Did you mean file './{}'? \\n{}\")\n .format(string, module_name, string, exc)) from None\n else:\n raise\n\n try:\n clazz = getattr(module, class_name)\n except AttributeError as exc:\n raise Exception((\"Interpretation '{}' as type name, class '{}' \"\n \"does not exist. Did you mean file './{}'? \\n{}\")\n .format(string, class_name, string, exc))\n return clazz", "def import_class(name: str) -> type:\n module_path, class_name = name.rsplit(\".\", 1)\n module = importlib.import_module(module_path)\n return getattr(module, class_name)", "def instantiate(classname, *args, **kwargs):\n matched = re.match('(.*)\\.(\\w+)', classname)\n if matched is None:\n raise Exception('can instantiate only class with packages: %s' % classname)\n module = importlib.import_module(matched.groups()[0])\n return getattr(module, matched.groups()[1])(*args, **kwargs)", "def get_module_class(name, module):\n try:\n name_module = import_module(module)\n name_class = getattr(name_module, name)\n except (ImportError, AttributeError) as error:\n raise DaosTestError(\n \"Invalid '{}' class name for {}: {}\".format(\n name, module, error)) from error\n return name_class", "def processClass(processname, path=None, prefix=None, extention=None):\n fileName = findCdtFile(processname, path=path, prefix=prefix, \n extention=extention)\n pycode = Cdt(fileName).parse2pycode()\n\n exec pycode\n # the pycode should contain the variable proc\n # witch is the newly created object\n # and cls for the class \n return cls", "def get_class(path):\n module, dot, cls = path.rpartition('.')\n m = importlib.import_module(module)\n return m.__getattribute__(cls)", "def make_class_ast(src):\n return python.AstTree(ast.parse(src)).classes()[0]", "def load_class(mod, name):\n mod = __import__(mod, globals(), locals(), [name], 0)\n return getattr(mod, name)", "def create_module(module_string):\n module_obj = None\n try:\n module_obj = __import__(module_string, globals(),\n locals(), fromlist=[''])\n except Exception as e:\n LOGGER.error(\"cannot create module from '%s'\" % module_string)\n raise e\n\n return module_obj", "def 
get_type_from_module_path(module_path: str):\n module_name, class_name = split_module_path(module_path=module_path)\n\n module = importlib.import_module(module_name)\n class_type = getattr(module, class_name)\n return class_type", "def uri_to_class(uri):\n return type(str(uri_to_classname(uri)), (), {'uri': uri})", "def class_name(type_str):\n return _CLASS_NAME.findall(type_str)[0]", "def instantiate_class(args: Union[Any, Tuple[Any, ...]], init: Dict[str, Any]) -> Any:\n kwargs = init.get(\"init_args\", {})\n if not isinstance(args, tuple):\n args = (args,)\n class_module, class_name = init[\"class_path\"].rsplit(\".\", 1)\n module = __import__(class_module, fromlist=[class_name])\n args_class = getattr(module, class_name)\n return args_class(*args, **kwargs)", "def import_class_by_fullname(fullname):\n split = fullname.split('.')\n module_name = '.'.join(split[:-1])\n class_name = split[-1]\n module = importlib.import_module(module_name)\n return getattr(module, class_name)", "def load_module(class_name: str) -> Type:\n # Get a lookup for all classes in `graphnet`\n import graphnet.data\n import graphnet.models\n import graphnet.training\n\n namespace_classes = get_all_grapnet_classes(\n graphnet.data, graphnet.models, graphnet.training\n )\n return namespace_classes[class_name]", "def _mod_name_to_cls_name(modname):\n return \"\".join(map(lambda x: x.capitalize(), modname.split(\"_\")))", "def import_class_by_path(path: str):\n paths = path.split('.')\n path = \".\".join(paths[:-1])\n class_name = paths[-1]\n mod = __import__(path, fromlist=[class_name])\n mod = getattr(mod, class_name)\n return mod" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a function from a package.module.function string
def create_function(pkg_func: list): splits = pkg_func.split(".") pkg_module = ".".join(splits[:-1]) cb_fname = splits[-1] pkg_module = __import__(pkg_module, fromlist=[cb_fname]) function_ = getattr(pkg_module, cb_fname) return function_
[ "def _get_function_from_str(path: str) -> Callable:\n module_name, _, function_name = path.rpartition(\".\")\n module = importlib.import_module(module_name)\n function = getattr(module, function_name)\n return function", "def get_callable_from_string(f_name):\n try:\n mod_name, func_name = get_mod_func(f_name)\n if not mod_name and not func_name:\n raise AttributeError(\n \"%s couldn't be converted to a module or function name\" % f_name)\n\n module = __import__(mod_name)\n\n if not func_name:\n func_name = mod_name # The common case is an eponymous class\n\n return getattr(module, func_name)\n\n except (ImportError, AttributeError) as exc:\n raise RuntimeError(\"Unable to create a callable object for '%s': %s\" %\n (f_name, exc))", "def import_function(import_str):\n mod_str, _sep, class_str = import_str.rpartition('.')\n try:\n __import__(mod_str)\n fn = getattr(sys.modules[mod_str], class_str)\n if not callable(fn):\n msg = '{0} is not callable'\n LOG.error(msg)\n raise TypeError(msg)\n except (ValueError, AttributeError):\n msg = 'Method or function {0} cannot be found.'.format(import_str)\n err_details = traceback.format_exception(*sys.exc_info())\n LOG.error(msg + ' Details: (%s)'.format(err_details))\n raise ImportError(msg)\n except ImportError:\n msg = 'Module {0} cannot be found.'.format(import_str)\n err_details = traceback.format_exception(*sys.exc_info())\n LOG.error(msg + ' Details: (%s)'.format(err_details))\n raise\n return fn", "def function_lookup(pymod_path):\n module_name, func_name = pymod_path.rsplit('.', 1)\n module = importlib.import_module(module_name)\n shell_function = getattr(module, func_name)\n assert callable(shell_function), shell_function\n return shell_function", "def load_function(func_module_path):\n module, function = func_module_path.rsplit(\".\", 1)\n try:\n return getattr(importlib.import_module(module), function)\n except AttributeError:\n raise Exception(\"Function could not be loaded from the module path {}, \"\n \"verify that it is '.' 
seperated\".format(func_module_path))", "def make_function_ast(src):\n return python.AstTree(ast.parse(src)).functions()[0]", "def _make_symbol_function(handle, name, func_name):\n code, doc_str = _generate_symbol_function_code(handle, name, func_name)\n\n local = {}\n exec(code, None, local) # pylint: disable=exec-used\n symbol_function = local[func_name]\n symbol_function.__name__ = func_name\n symbol_function.__doc__ = doc_str\n symbol_function.__module__ = 'mxnet.symbol'\n return symbol_function", "def function_from_source(source, globals_=None):\n\n module = ast.parse(unindent(source))\n ast.fix_missing_locations(module)\n\n for stmt in module.body:\n if type(stmt) == ast.FunctionDef:\n tree = stmt\n name = stmt.name\n break\n else:\n raise ValueError(\"No function definitions found in the provided source\")\n\n code_object = compile(module, '<nofile>', 'exec', dont_inherit=True)\n locals_ = {}\n eval(code_object, globals_, locals_)\n\n function_obj = locals_[name]\n function_obj._peval_source = astunparse.unparse(tree)\n\n return Function.from_object(function_obj)", "def make_function(text):\n\n try:\n exec 'f = lambda x: ' + text\n 1+f(2.0) ## test to see if there are any errors in the definition\n except ZeroDivisionError: ## ignore zero division errors\n pass\n except:\n raise FunctionError()\n return f", "def process_python_function(self):\r\n exec(self.python_text)\r\n self.func = locals()[self.function_name]\r\n return self.func", "def expand_functions(raw_str):\n def escape(a):\n \"\"\"Escape user input.\"\"\"\n if a[0] == \"'\" and a[-1] == \"'\":\n a = '\"' + a[1:-1] + '\"'\n\n if a[0] == '\"' and a[-1] == '\"' and len(a) > 1:\n r = a[1:-1].replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"')\n return a[0] + r + a[-1]\n\n elif re.match(\"\\d+(\\.\\d+)?\", a): # is number\n return a\n\n else:\n raise ValueError\n\n def callf(match):\n \"\"\"Call the matched function and inject its return value.\"\"\"\n fun_name = match.group('fun')\n args = match.group('args')\n args_separator = settings.get('funargs_separator')\n\n if args:\n argv = [escape(a.strip())\n for a in args.split(args_separator)]\n else:\n argv = []\n\n if fun_name:\n if '1' == vim.eval(\"exists('*{0}')\".format(fun_name)):\n try:\n raw = vim.eval(\"call('{0}',[{1}])\".format(\n fun_name, ','.join(argv)))\n return raw.replace('\\\\', '\\\\\\\\')\n\n except vim.error:\n pass\n else:\n raise NameError # unkwown function\n\n return re.sub('#{(?P<fun>\\w*)(\\((?P<args>.*)\\))?}', callf, raw_str)", "def conv_str_to_func(func_name):\n name_func_dict = {\"None\": None,\n \"str\": str,\n \"int\": int,\n \"float\": float,\n \"bool\": bool,\n }\n if func_name is None:\n return func_name\n elif func_name in name_func_dict:\n return name_func_dict[func_name]\n else:\n raise InvalidDataError(\"Invalid type entry '{}'. 
Valid options are \")", "def _load_function(self, func_path):\n splt = func_path.split(\".\")\n assert len(splt) == 2, \"Function needs to be given in format file.function\"\n fname, func_name = splt\n fpath, pname, desc = imp.find_module(fname)\n mod = imp.load_module(fname, fpath, pname, desc)\n func = getattr(mod, func_name)\n return func", "def func_ref_to_import(func):\n return f\"{getmodule(func).__name__}.{func.__name__}\"", "def get_function_module(f):\n module = f.__module__\n if module == '__main__':\n import __main__\n module = os.path.splitext(__main__.__file__)[0]\n return module", "def createFunction(self) -> ghidra.program.model.listing.Function:\n ...", "def visit_create_function(element, compiler, **kw):\n func = element.function\n opt_or_replace = 'OR REPLACE' if element.or_replace else None\n volatility = func.volatility.upper()\n strictness = \"STRICT\" if func.strict else None\n leakproof = \"LEAKPROOF\" if func.leakproof else None\n quoted_definition = \"${quote_tag}$\\n{definition}\\n${quote_tag}$\".format(\n quote_tag=func.quote_tag, definition=func.definition)\n\n function_name = func.build_quoted_identifier(quoter=compiler.preparer.quote)\n return _join_tokens(\n \"CREATE\", opt_or_replace, \"FUNCTION\", function_name, \"RETURNS\",\n func.rtype, volatility, strictness, leakproof, \"LANGUAGE\", func.language,\n \"AS\", quoted_definition,\n )", "def function_from_block(block):\n return Function(block.fields.get('Function', None),\n block.fields.get('Purpose', None), block.fields.get('Inputs', None),\n block.fields.get('Outputs', None))", "def _parse_func(self, tokens):\n\n params = tokens[0]\n func_name = params[0]\n if func_name in self._pvars and isinstance(\n self._pvars[func_name],\n types.FunctionType):\n func = self._pvars[func_name]\n else:\n func = str_to_attr(params[0])\n return func(*params[1])", "def handleFunction(functionOpening, line):\n\n functionEndIndex = line.find(FUNCTION_CLOSE, functionOpening)\n functionText = line[functionOpening + len(FUNCTION_OPEN):functionEndIndex]\n\n return line[:functionOpening] + \\\n eval(functionText) + \\\n line[functionEndIndex + len(FUNCTION_CLOSE):]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downloads the current mission and returns it in a list. It is used in save_mission() to get the file information to save.
def download_mission(): print(" Download mission from vehicle") missionlist = list() cmds = vehicle.commands cmds.download() cmds.wait_ready() for cmd in cmds: missionlist.append(cmd) return missionlist
[ "def download_mission():\n # print \" Download mission from vehicle\"\n missionlist=[]\n cmd_list = vehicle.commands\n cmd_list.download()\n cmd_list.wait_ready()\n for cmd in cmd_list:\n missionlist.append(cmd)\n return missionlist", "def get_latest_mission_from_github():\n if core.CTX.dcs_auto_mission:\n LOGGER.debug('getting latest mission from Github')\n commands.DCS.block_start('loading mission')\n if DCSConfig.DCS_AUTO_MISSION_GH_OWNER() and DCSConfig.DCS_AUTO_MISSION_GH_REPO():\n LOGGER.debug('looking for newer mission file')\n latest_version, asset_name, download_url = utils.get_latest_release(\n DCSConfig.DCS_AUTO_MISSION_GH_OWNER(), DCSConfig.DCS_AUTO_MISSION_GH_REPO()\n )\n LOGGER.debug('latest release: %s', latest_version)\n local_file = MissionPath(Path(_get_mission_folder(), asset_name))\n if not local_file:\n LOGGER.info('downloading new mission: %s', asset_name)\n req = requests.get(download_url)\n if req.ok:\n local_file.path.write_bytes(req.content)\n local_file.set_as_active()\n else:\n LOGGER.error('failed to download latest mission')\n else:\n LOGGER.warning('no config values given for [auto mission]')\n commands.DCS.unblock_start('loading mission')\n else:\n LOGGER.debug('skipping mission update')", "def getMission (self):\n return self.mission.getValue ()", "def readmission(aFileName):\r\n print(\"\\nReading mission from file: %s\" % aFileName)\r\n cmds = vehicle.commands\r\n missionlist=[]\r\n with open(aFileName) as f:\r\n for i, line in enumerate(f):\r\n if i==0:\r\n if not line.startswith('QGC WPL 110'):\r\n raise Exception('File is not supported WP version')\r\n else:\r\n linearray=line.split('\\t')\r\n ln_index=int(linearray[0])\r\n ln_currentwp=int(linearray[1])\r\n ln_frame=int(linearray[2])\r\n ln_command=int(linearray[3])\r\n ln_param1=float(linearray[4])\r\n ln_param2=float(linearray[5])\r\n ln_param3=float(linearray[6])\r\n ln_param4=float(linearray[7])\r\n ln_param5=float(linearray[8])\r\n ln_param6=float(linearray[9])\r\n ln_param7=float(linearray[10])\r\n ln_autocontinue=int(linearray[11].strip())\r\n cmd = Command( 0, 0, 0, ln_frame, ln_command, ln_currentwp, ln_autocontinue, ln_param1, ln_param2, ln_param3, ln_param4, ln_param5, ln_param6, ln_param7)\r\n missionlist.append(cmd)\r\n return missionlist", "def readmission(aFileName):\n print (\"\\nReading mission from file: %s\" % aFileName)\n cmds = vehicle.commands\n missionlist=[]\n with open(aFileName) as f:\n for i, line in enumerate(f):\n if i==0:\n if not line.startswith('QGC WPL 110'):\n raise Exception('File is not supported WP version')\n else:\n linearray=line.split('\\t')\n ln_index=int(linearray[0])\n ln_currentwp=int(linearray[1])\n ln_frame=int(linearray[2])\n ln_command=int(linearray[3])\n ln_param1=float(linearray[4])\n ln_param2=float(linearray[5])\n ln_param3=float(linearray[6])\n ln_param4=float(linearray[7])\n ln_param5=float(linearray[8])\n ln_param6=float(linearray[9])\n ln_param7=float(linearray[10])\n ln_autocontinue=int(linearray[11].strip())\n # pdb.set_trace()\n cmd = Command( 0, 0, 0, ln_frame, ln_command, ln_currentwp, ln_autocontinue, ln_param1, ln_param2, ln_param3, ln_param4, ln_param5, ln_param6, ln_param7)\n missionlist.append(cmd)\n return missionlist", "def download_list(self):\n\n students = []\n\n #############\n # COMPLETAR #\n #############\n\n return students", "def mission(self):\n try:\n return self.game.missions[self.player_id]\n except AttributeError:\n raise ValueError('Cannot access mission: player is unassigned.')", "def 
read_mission_info(mission=None):\n curdir = os.path.abspath(os.path.dirname(__file__))\n fname = os.path.join(curdir, \"datasets\", \"xselect.mdb\")\n\n # If HEADAS is defined, search for the most up-to-date version of the\n # mission database\n if os.getenv(\"HEADAS\"):\n hea_fname = os.path.join(os.getenv(\"HEADAS\"), \"bin\", \"xselect.mdb\")\n if os.path.exists(hea_fname):\n fname = hea_fname\n if mission is not None:\n mission = mission.lower()\n\n db = {}\n with open(fname) as fobj:\n for line in fobj.readlines():\n line = line.strip()\n if mission is not None and not line.lower().startswith(mission):\n continue\n if line.startswith(\"!\") or line == \"\":\n continue\n allvals = line.split()\n string = allvals[0]\n value = allvals[1:]\n if len(value) == 1:\n value = value[0]\n\n data = string.split(\":\")[:]\n if mission is None:\n if data[0] not in db:\n db[data[0]] = {}\n previous_db_step = db[data[0]]\n else:\n previous_db_step = db\n data = data[1:]\n for key in data[:-1]:\n if key not in previous_db_step:\n previous_db_step[key] = {}\n previous_db_step = previous_db_step[key]\n previous_db_step[data[-1]] = value\n return _patch_mission_info(db, mission)", "def save_mission(mission, aFileName):\n print(\"\\nSave mission from Vehicle to file: %s\" % aFileName) \n \"\"\"\n #Download mission from vehicle\n missionlist = download_mission()\n \"\"\"\n #Add file-format information\n output='QGC WPL 110\\n'\n\n # とりあえずhome_locatrionは保存しない。\n '''\n #Add home location as 0th waypoint\n home = vehicle.home_location\n output+=\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (0,1,0,16,0,0,0,0,home.lat,home.lon,home.alt,1)\n '''\n\n #Add commands\n for cmd in mission:\n commandline=\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (cmd.seq,cmd.current,cmd.frame,cmd.command,cmd.param1,cmd.param2,cmd.param3,cmd.param4,cmd.x,cmd.y,cmd.z,cmd.autocontinue)\n output+=commandline\n with open(aFileName, 'w') as file_:\n print(\" Write mission to file\")\n file_.write(output)", "def upload_mission(aFileName):\n #Read mission from file\n missionlist = readmission(aFileName)\n \n # print \"\\nUpload mission from a file: %s\" % import_mission_filename\n #Clear existing mission from vehicle\n # print ' Clear mission'\n cmds = vehicle.commands\n cmds.download()\n cmds.wait_ready()\n cmds.clear()\n #Add new mission to vehicle\n for command in missionlist:\n cmds.add(command)\n # print ' Upload mission'\n vehicle.commands.upload()\n\n return missionlist", "def blank_mission():\n return {\n \"@id\": None, # 'subject' of the RDF triple: in this doc CHRONOS_URL % (\"missions\", <slug>)\n \"skos:prefLabel\": None, # full name of mission: \"Apollo 11\"\n \"@type\": ONTOLOGY_URL % \"chronos\" + \"mission\", # type of the document, in this case chronos:mission\n \"rdf:type\": [], # rdf types as found in wikipedia\n \"chronos:slug\": None, # if mission in wikipedia, this is the wikipedia title. 
Else is None\n \"chronos:missionEra\": None, # string: present, apst, future, concept\n \"chronos:group\": \"missions\", # group it belongs in the database\n \"chronos:oldId\": None, # id it had in the old db, for legacy purpose\n \"chronos:payload\": [], # list of payloads (now strings, in future documents)\n \"owl:sameAs\": [], # references to other URI with the same meaning\n \"chronos:imageUrl\": { # url of an image of mission\n \"@value\": None,\n \"@type\": \"https://schema.org/URL\"\n },\n \"schema:url\": { # url of the official mission page\n \"@value\": None,\n \"@type\": \"https://schema.org/URL\"\n },\n \"chronos:relTarget\": [] # bodies the mission visited\n\n }", "def _load_mission(self, **kwargs):\n\n self.mission_spec = MissionSpec(self._spec_path)\n\n self.__draw_hallways()\n self.__draw_rooms()\n self.__draw_levers_and_doors()\n self.__draw_wires()\n self.__draw_goal()\n\n return MalmoPython.MissionSpec(str(self.mission_spec), True)", "def get_current_downloads(program):\n downloads = []\n\n stable_release, testing_release = get_latest_releases(program)\n\n stable_downloads = Download.objects.filter(release=stable_release)\n\n downloads.append((stable_release, stable_downloads))\n\n if testing_release is not None:\n testing_downloads = Download.objects.filter(release=testing_release)\n downloads.append((testing_release, testing_downloads))\n\n return downloads", "def list_available_missions():\n count = 1\n for file in _get_mission_folder().glob('*.miz'):\n yield count, file\n count += 1", "def get_mission(self, mission_id):\n\n return Mission(self.missions[mission_id], mission_id)", "def getCurrentMission(self) -> Optional[Mission]:\n argosMission = self.argosController.getCurrentMission()\n if argosMission is not None:\n return argosMission\n return self.crazyradioController.getCurrentMission()", "def download_mission_from_discord(discord_attachment,\n overwrite: bool = False,\n load: bool = False,\n force: bool = False):\n url = discord_attachment['url']\n size = discord_attachment['size']\n filename = discord_attachment['filename']\n local_file = MissionPath(Path(_get_mission_folder(), filename))\n\n overwriting = ''\n if local_file:\n if overwrite:\n overwriting = ' (replacing existing file)'\n else:\n LOGGER.warning('this mission already exists: %s\\nuse \"overwrite\" to replace it', local_file.path)\n return\n\n LOGGER.info('downloading: %s (%s) %s',\n filename,\n humanize.naturalsize(size),\n overwriting,\n )\n with requests.get(url) as response:\n local_file.path.write_bytes(response.content)\n\n if load:\n if commands.DCS.there_are_connected_players() and not force:\n LOGGER.error('there are connected players; cannot restart the server now (use \"force\" to kill anyway)')\n return\n LOGGER.info('restarting the server with this mission')\n local_file.set_as_active()\n commands.DCS.restart(force=force)\n else:\n LOGGER.info('download successful, mission is now available')", "def dl_sub(page):\n # start_time = time.time()\n soup = scrape_page(page)\n div = soup.find(\"div\", {\"class\": \"download\"})\n down_link = \"https://subscene.com\" + div.find(\"a\").get(\"href\")\n r = requests.get(down_link, stream=True)\n filelist = []\n for found_sub in re.findall(\n \"filename=(.+)\", r.headers[\"content-disposition\"]\n ):\n with open(found_sub.replace(\"-\", \" \"), \"wb\") as f:\n for chunk in r.iter_content(chunk_size=150):\n if chunk:\n f.write(chunk)\n filelist = zip_extractor(found_sub.replace(\"-\", \" \"))\n print(\n \"Subtitle ({}) - Downloaded\\nList of files 
zipped: {}\".format(\n found_sub.replace(\"-\", \" \").capitalize(), filelist\n )\n )\n return filelist\n # print(\"--- download_sub took %s seconds ---\" % (time.time() - start_time))", "def get_missions(where = '',\n\t\t\t\torderby = 'time_posted DESC',\n\t\t\t\tstart = 0,\n\t\t\t\tlimit = 0):\n\t\n\tmission_order = []\n\tmission_found = {}\n\t\n\tquery = \"SELECT * FROM missions\"\n\t\n\t# Where\n\tif where != '': query += \" WHERE %s\" % where\n\t\n\t# Order by\n\tif orderby != '': query += \" ORDER BY %s\" % orderby\n\t\n\t# Limit stuff\n\tif start > 0 and limit > 0: query += \" LIMIT %s, %s\" % (start, limit)\n\tif start > 0 and limit < 1: query += \" LIMIT 0, %s\" % (limit)\n\tif start < 1 and limit > 0: query += \" LIMIT %s\" % (limit)\n\t\n\ttry:\t database.cursor.execute(query)\n\texcept Exception as e:\n\t\traise Exception(\"Database error: %s\\nQuery: %s\" % (str(e.args[0]).replace(\"\\n\",\"\"), query))\n\tfor row in database.cursor:\n\t\tmission_order.append(row[\"id\"])\n\t\tmission_found[row[\"id\"]] = mission.Mission(row)\n\t\n\treturn mission_order, mission_found" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
append_teachyourai_format_example() is a method implemented in the ArtiSet class; it takes an example dict (which must contain a "phrase" and an "answer") and converts it to the BooleanQA format
def append_teachyourai_format_example(self, example, do_print=False, append_to_list=None): if 'context' not in example: example['context'] = '' if 'id' not in example: example['id'] = self.create_qid(example) if do_print: print('a:%s d1:%s d2:%s || Q:%s' % (example['phrase'], example['answer'])) if append_to_list is not None: append_to_list.append(example) else: self.artiset_data.append(example)
[ "def build_examples(to_predict):\n examples = []\n\n for row in to_predict:\n context = row[\"context\"]\n for qa in row[\"qas\"]:\n qa[\"answers\"] = [{\"text\": \" \", \"answer_start\": 0}]\n qa[\"is_impossible\"] = False\n example = {\"context\": context, \"qas\": row[\"qas\"]}\n examples.append(example)\n\n return examples", "def create_examples_albert(examples, tokenizer):\n example = T.valmap(T.get(0), examples)\n # substitute mask with <mask>\n\n # get option ids\n option_encodings = _get_option_encodings(example, tokenizer, True)\n option_input_ids = [o.input_ids[0] for o in option_encodings]\n\n # substitute [MASK]\n example['question'] = re.sub(r'( \\[MASK\\])|(\\[MASK\\])', tokenizer.mask_token,\n example['question'])\n\n # format text input\n text_input = '{context} {question}'.format_map(example)\n\n # get BatchEncoding\n instance = tokenizer(text=text_input, padding='do_not_pad',\n add_special_tokens=True)\n # locate pad token\n target_tok_idx = try_get_index(instance.tokens(), tokenizer.mask_token)\n\n # add example-general info\n instance['example_idx'] = example['example_idx']\n instance['option_idx'] = list(range(4))\n instance['target_tok_idx'] = target_tok_idx\n instance['option_input_ids'] = option_input_ids\n\n return {k: [instance[k]] for k in instance.keys()}", "def tqa_map(ex):\n return {\n 'inputs':\n tf.strings.join(\n [prefix, 'question:', ex['question']], separator=' '),\n 'targets': ex['answer']['value'],\n 'answers': ex['answer']['aliases'],\n }", "def convert_squad_examples(record, is_training):\n example_id = record[0]\n qas_id = record[1]\n question_text = record[2]\n paragraph_text = record[3]\n orig_answer_text = record[4][0] if record[4] else ''\n answer_offset = record[5][0] if record[5] else ''\n is_impossible = record[6] if len(record) == 7 else False\n\n answer_length = len(orig_answer_text)\n doc_tokens = []\n\n char_to_word_offset = []\n prev_is_whitespace = True\n\n for c in paragraph_text:\n if str.isspace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n if not is_training:\n start_position = -1\n end_position = -1\n else:\n start_position = char_to_word_offset[answer_offset] if not is_impossible else -1\n end_position = char_to_word_offset[answer_offset + answer_length -\n 1] if not is_impossible else -1\n answer_offset = -1 if is_impossible else answer_offset\n example = SquadExample(\n qas_id=qas_id, question_text=question_text, paragraph_text=paragraph_text,\n doc_tokens=doc_tokens, example_id=example_id, orig_answer_text=orig_answer_text,\n start_position=start_position, end_position=end_position, start_offset=answer_offset,\n end_offset=answer_offset + len(orig_answer_text) - 1, is_impossible=is_impossible)\n return example", "def format_question(question):\n correct = '' # check if this is the right way to handle a potential exception.\n answers = question[\"answers\"]\n random.shuffle(answers)\n human_formatted = f'{question[\"q\"]}\\n'\n machine_formatted = f'MC\\t{question[\"q\"]}\\t'\n for i, answer in enumerate(answers):\n machine_formatted += f'{answer[0]} {answer[1]}\\t'\n human_formatted += f' {string.ascii_lowercase[i]}. 
{answer[0]}\\n'\n if answer[1] == \"correct\":\n correct = string.ascii_lowercase[i]\n return machine_formatted, human_formatted, correct", "def trivia_qa_open(\n dataset,\n prefix='trivia_qa'\n ):\n def tqa_map(ex):\n \"\"\"Map TriviaQA example to text-to-text example.\"\"\"\n return {\n 'inputs':\n tf.strings.join(\n [prefix, 'question:', ex['question']], separator=' '),\n 'targets': ex['answer']['value'],\n 'answers': ex['answer']['aliases'],\n }\n\n return dataset.map(tqa_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)", "def _format_failing_examples(\n self,\n inputs: Tuple,\n pred: int,\n conf: Union[np.array, np.ndarray],\n label: Optional[int] = None,\n *args,\n **kwargs,\n ):\n labels = {\n self._entails: \"Entails\",\n self._contradicts: \"Contradicts\",\n self._neutral: \"Neutral\",\n }\n ret = \"Premise: %s\\nHypothesis: %s\" % (inputs[0], inputs[1])\n if label is not None:\n ret += \"\\nOriginal: %s\" % labels[label]\n ret += \"\\nPrediction: Entails (%.1f), Contradicts (%.1f), Neutral (%.1f)\" % (\n conf[self._entails],\n conf[self._contradicts],\n conf[self._neutral],\n )\n\n return ret", "def qa(question, captions):\n text = caption_sections_to_text(captions)\n \n tokenizer = BertTokenizer.from_pretrained(\n 'deepset/bert-large-uncased-whole-word-masking-squad2')\n\n encoding = tokenizer(question, text, return_tensors=\"pt\")\n\n if len(encoding['input_ids'][0]) < 512:\n model = BertForQuestionAnswering.from_pretrained(\n 'deepset/bert-large-uncased-whole-word-masking-squad2')\n print('Using BERT original')\n else:\n tokenizer = LongformerTokenizer.from_pretrained(\n \"allenai/longformer-large-4096-finetuned-triviaqa\")\n model = LongformerForQuestionAnswering.from_pretrained(\n \"allenai/longformer-large-4096-finetuned-triviaqa\")\n print('Using Longformer')\n\n input_ids = encoding[\"input_ids\"]\n input_dict = tokenizer(question, text, return_tensors='pt')\n outputs = model(**input_dict)\n start_logits = outputs.start_logits\n end_logits = outputs.end_logits\n\n all_tokens = tokenizer.convert_ids_to_tokens(\n input_dict[\"input_ids\"].numpy()[0])\n answer_tokens = all_tokens[torch.argmax(\n start_logits):torch.argmax(end_logits)+1]\n answer = tokenizer.decode(tokenizer.convert_tokens_to_ids(answer_tokens))\n\n if len(answer) == 0 or answer == '[CLS]':\n return 'No answer found.'\n return answer", "def anechoic_scenario_map_fn(\n example: dict,\n *,\n normalize_sources: bool = True,\n) -> dict:\n T = example[keys.NUM_SAMPLES][keys.OBSERVATION]\n original_source = example[keys.AUDIO_DATA][keys.ORIGINAL_SOURCE]\n offset = example[keys.OFFSET][keys.ORIGINAL_SOURCE]\n\n # In some databases (e.g., WSJ) the utterances are not mean normalized. This\n # leads to jumps when padding with zeros or concatenating recordings.\n # We mean-normalize here to eliminate these jumps\n if normalize_sources:\n original_source = [s_ - np.mean(s_) for s_ in original_source]\n\n # Scale the sources by log_weights. 
We have to determine the scale based on\n # the full signal (its standard deviation) and not just the cut out part\n scale = get_scale(example[keys.LOG_WEIGHTS], original_source)\n scaled_source = [s_ * scale_ for s_, scale_ in zip(original_source, scale)]\n\n # Move and pad speech source to the correct position, use sparse array\n speech_source = pad_sparse(original_source, offset, target_shape=(T,))\n speech_image = pad_sparse(scaled_source, offset, target_shape=(T,))\n\n # The mix is now simply the sum over the speech sources\n mix = sum(speech_image, np.zeros(T, dtype=speech_image[0].dtype))\n\n example[keys.AUDIO_DATA][keys.OBSERVATION] = mix\n example[keys.AUDIO_DATA][keys.SPEECH_SOURCE] = speech_source\n example[keys.AUDIO_DATA][keys.SPEECH_IMAGE] = speech_image\n\n return example", "def add_example(self, example):\n self.examples.append(example)", "def dummy_add_transcript():\n return {\n \"message\": \"AddTranscript\",\n \"format\": \"2.1\",\n \"metadata\": {\"start_time\": 0.0, \"end_time\": 2.0, \"transcript\": \"Foo\\nBar.\"},\n \"results\": [\n {\n \"type\": \"word\",\n \"start_time\": 0.0,\n \"end_time\": 1.0,\n \"alternatives\": [\n {\"content\": \"foo\", \"confidence\": 1.0, \"language\": \"en\"},\n ],\n },\n {\n \"type\": \"speaker_change\",\n \"start_time\": 1.0,\n \"end_time\": 1.0,\n \"score\": 0.8,\n },\n {\n \"type\": \"word\",\n \"start_time\": 1.0,\n \"end_time\": 2.0,\n \"alternatives\": [\n {\"content\": \"bar\", \"confidence\": 1.0, \"language\": \"en\"},\n ],\n },\n {\n \"type\": \"punctuation\",\n \"start_time\": 2.0,\n \"end_time\": 2.0,\n \"alternatives\": [{\"content\": \".\", \"confidence\": 1.0}],\n },\n ],\n }", "def test_answer_attributes() -> None:\n a = Answer(\"dogs\")\n b = Answer(True)\n c = Answer(10)\n d = Answer(['a', 'b', 'c', 'd'])\n assert a.content == \"dogs\"\n assert b.content == True\n assert c.content == 10\n assert d.content == ['a', 'b', 'c', 'd']", "def encode_answer(self, answer: str) -> Dict[str, str]:", "def add_answer(question,sas,shuffle_options=None):\n\tanswer = Answer(section_answer_sheet=sas)\n\tanswer.save()\n\t# create special answer which is bound to question's specialization\n\tspecial_question = question.get_special_question()\n\tqtype = special_question.get_qtype()\n\tif qtype==\"text\":\n\t\tspecial_answer = TextAnswer(special_question=special_question, answer=answer)\n\telif qtype==\"mcq\":\n\t\tspecial_answer = McqAnswer(special_question=special_question, answer=answer)\n\telse:\n\t\traise custom_exceptions.QuestionTypeNotImplemented\n\tspecial_answer.save()\n\treturn answer", "def test_matches_keywords(self):\n # Test case 1\n keywords = [{\"Not\": False, \"Keyword\" : \"hypothetical\"}]\n txt = \"hypothetical protein\"\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 2\n keywords = [{\"Not\": False, \"Keyword\" : \"hypothetical Protein\"}]\n txt = \"Hypothetical protein\"\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 3\n txt = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": False, \"Keyword\" : \"hypothetical\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 4\n txt = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": True, \"Keyword\" : \"hypothetical\"}]\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 5\n txt = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": True, \"Keyword\" : \"hypothetical\"},\n {\"Not\": False, \"Keyword\" : \"DNA\"},\n {\"Not\": False, 
\"Keyword\" : \"polymerase\"}]\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 6\n txt = \"hypothetical protein\"\n keywords = [{\"Not\": True, \"Keyword\" : \"hypothetical\"},\n {\"Not\": False, \"Keyword\" : \"DNA\"},\n {\"Not\": False, \"Keyword\" : \"polymerase\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n # Test case 7\n txt = \"hypothetical protein\"\n txt2 = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": False, \"Keyword\" : \"*\"}]\n self.assertTrue(Annot_Reader.matches_keywords(txt, keywords))\n self.assertTrue(Annot_Reader.matches_keywords(txt2, keywords))\n # Test case 8\n txt = \"hypothetical protein\"\n txt2 = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": False, \"Keyword\" : \"*\"},\n {\"Not\": True, \"Keyword\" : \"hypothetical\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n self.assertTrue(Annot_Reader.matches_keywords(txt2, keywords))\n # Test case 9\n txt = \"hypothetical protein\"\n txt2 = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": True, \"Keyword\" : \"*\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n self.assertFalse(Annot_Reader.matches_keywords(txt2, keywords))\n # Test case 10\n txt = \"hypothetical protein\"\n txt2 = \"DNA polymerase IV (EC 2.7.7.7)\"\n keywords = [{\"Not\": True, \"Keyword\" : \"*\"},\n {\"Not\": False, \"Keyword\" : \"hypothetical\"}]\n self.assertFalse(Annot_Reader.matches_keywords(txt, keywords))\n self.assertFalse(Annot_Reader.matches_keywords(txt2, keywords))", "def _convert_example_to_record(self, example, q_max_seq_length, p_max_seq_length, tokenizer):\n\n query = tokenization.convert_to_unicode(example.query)\n tokens_query = tokenizer.tokenize(query)\n self._truncate_seq_pair([], tokens_query, q_max_seq_length - 2)\n\n # title\n title = tokenization.convert_to_unicode(example.title)\n tokens_title = tokenizer.tokenize(title)\n # para\n para = tokenization.convert_to_unicode(example.para)\n tokens_para = tokenizer.tokenize(para)\n\n self._truncate_seq_pair(tokens_title, tokens_para, p_max_seq_length - 3)\n\n tokens_q = []\n text_type_ids_q = []\n tokens_q.append(\"[CLS]\")\n text_type_ids_q.append(0)\n for token in tokens_query:\n tokens_q.append(token)\n text_type_ids_q.append(0)\n tokens_q.append(\"[SEP]\")\n text_type_ids_q.append(0)\n\n token_ids_q = tokenizer.convert_tokens_to_ids(tokens_q)\n position_ids_q = list(range(len(token_ids_q)))\n #f = open('tid', 'a')\n #for tid in range(len(token_ids_q)):\n # f.write(str(token_ids_q[tid]) + '\\t' + tokens_q[tid] + '\\n')\n #f.write(str(token_ids_q[tid]) + ' ')\n #f.write('\\t')\n\n ### para\n tokens_p = []\n text_type_ids_p = []\n tokens_p.append(\"[CLS]\")\n text_type_ids_p.append(0)\n\n for token in tokens_title:\n tokens_p.append(token)\n text_type_ids_p.append(0)\n tokens_p.append(\"[SEP]\")\n text_type_ids_p.append(0)\n\n for token in tokens_para:\n tokens_p.append(token)\n text_type_ids_p.append(1)\n tokens_p.append(\"[SEP]\")\n text_type_ids_p.append(1)\n\n token_ids_p = tokenizer.convert_tokens_to_ids(tokens_p)\n position_ids_p = list(range(len(token_ids_p)))\n #for tid in range(len(token_ids_p)):\n # f.write(str(token_ids_p[tid]) + '\\t' + tokens_p[tid] + '\\n')\n #f.write(str(token_ids_p[tid]) + ' ')\n #f.write('\\n')\n #f.close()\n\n if self.is_inference:\n Record = namedtuple('Record',\n ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \\\n 'token_ids_p', 'text_type_ids_p', 'position_ids_p'])\n record = Record(\n 
token_ids_q=token_ids_q,\n text_type_ids_q=text_type_ids_q,\n position_ids_q=position_ids_q,\n token_ids_p=token_ids_p,\n text_type_ids_p=text_type_ids_p,\n position_ids_p=position_ids_p)\n else:\n if self.label_map:\n label_id = self.label_map[example.label]\n else:\n label_id = example.label\n\n Record = namedtuple('Record',\n ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \\\n 'token_ids_p', 'text_type_ids_p', 'position_ids_p', \\\n 'label_id', 'qid'\n ])\n\n qid = None\n if \"qid\" in example._fields:\n qid = example.qid\n\n record = Record(\n token_ids_q=token_ids_q,\n text_type_ids_q=text_type_ids_q,\n position_ids_q=position_ids_q,\n token_ids_p=token_ids_p,\n text_type_ids_p=text_type_ids_p,\n position_ids_p=position_ids_p,\n label_id=label_id,\n qid=qid)\n return record", "def alpha(self, irc, msg, args, options, question):\n apikey = self.registryValue('apikey')\n if not apikey or apikey == \"Not set\":\n irc.reply(\"API key not set. see 'config help supybot.plugins.Wolfram.apikey'.\")\n return\n\n maxoutput = 2\n for (key, value) in options:\n if key == 'lines':\n maxoutput = value\n\n u = \"http://api.wolframalpha.com/v2/query?\"\n q = urllib.parse.urlencode({'input': question, 'appid': apikey})\n xml = urllib.request.urlopen(u + q).read()\n tree = ElementTree.fromstring(xml)\n\n if tree.attrib['success'] == \"false\":\n for results in tree.findall('.//error'):\n for err in results.findall('.//msg'):\n irc.reply(\"Error: \" + err.text)\n return\n suggestion = False\n dyms = tree.findall('.//didyoumean')\n for dym in dyms:\n if dym.text:\n suggestion = True\n irc.reply(\"Did you mean: \" + str(dym.text) + \"?\")\n if not suggestion:\n irc.reply(\"huh, I dunno, I'm still a baby AI. Wait till the singularity I guess?\")\n return\n\n found = False\n outputcount = 0\n for pod in tree.findall('.//pod'):\n title = pod.attrib['title']\n for plaintext in pod.findall('.//plaintext'):\n if plaintext.text:\n found = True\n \"\"\"if(title == \"Input interpretation\" or \n title == \"Result\" or \n title == \"Input\" or \n title == \"Exact result\" or \n title == \"Decimal approximation\"):\n \"\"\"\n if outputcount < maxoutput:\n output = plaintext.text\n output = output.replace(' | ', ': ')\n output = output.replace('\\n', ', ')\n # Skip the input interpretation if only one line out.\n if maxoutput == 1 and outputcount == 0:\n maxoutput = 2 # hack :D\n outputcount += 1\n continue\n irc.reply((\"%s: %s\" % (title, output)))\n outputcount += 1\n if not found:\n irc.reply(\"huh, I dunno, I'm still a baby AI. 
Wait till the singularity I guess?\")", "def process(self, training_example: dict) -> None:", "def sample_answers(y, product_set, p_idk = 0.1, p_2a = 0.3, p_3a = 0.15):\n # Get set of possible questions available in the product catalog\n question_set = set(product_set[\"PropertyDefinitionId\"].values) # faster\n \n # Get dict of (true) answers available for the target product\n quest_answer_y = algo_utils.get_answers_y(y, product_set) \n result = {}\n \n # For each question sample additional answers \n # or replace true answer by idk if necessary.\n for question in question_set:\n # Sample random number b/w 0 and 1.\n u = random.random()\n # Sample if user says idk\n if u < p_idk:\n result[question] = ['idk'] \n # Else if it is possible sample if user give additional answers.\n elif quest_answer_y[question]=='none': #if none you can't have a 2nd answer\n result[question] = [quest_answer_y[question]]\n elif quest_answer_y[question]=='idk': #if none you can't have a 2nd answer\n result[question] = [quest_answer_y[question]] \n # Giving 2 answers?\n elif u < p_idk+p_2a:\n possible = get_all_answers(question, product_set)\n sample = np.random.choice(possible, size=1)\n # If the drawn 2nd answer is the same, redraw one\n while (str(quest_answer_y[question]) in sample.astype(str)): \n sample = np.random.choice(possible, size=1)\n result[question] = np.append([quest_answer_y[question]], sample) \n # Giving 3 answers?\n elif u < p_idk+p_2a+p_3a:\n possible = get_all_answers(question, product_set)\n sample = np.random.choice(possible, size=2, replace=False)\n # If the drawn 2nd or 3rd answer is the same, redraw 2 answers\n while (str(quest_answer_y[question]) in sample.astype(str)):\n sample = np.random.choice(possible, size=2)\n result[question] = np.append([quest_answer_y[question]], sample)\n # Else keep only the true answer \n else:\n result[question] = [quest_answer_y[question]] \n return(result)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
save_dataset() automatically saves the artiset. If the config output_file contains the string _sample.jsonl, it will be saved in a more readable format; otherwise it will split the examples in self.artiset_data into train, dev, and test and save them in s3
def save_dataset(self):
    # Move non-required columns to metadata:
    artiset_data_with_metadata = []
    for example in self.artiset_data:
        if 'metadata' not in example:
            new_example = {'metadata': {}}
        else:
            new_example = {'metadata': example['metadata']}
        new_example.update({k: example[k] for k in ['id', 'phrase', 'context', 'answer']})
        new_example['metadata'].update({k: example[k] for k in set(example.keys()) - {'id', 'phrase', 'context', 'answer', 'metadata'}})
        artiset_data_with_metadata.append(new_example)
    self.artiset_data = artiset_data_with_metadata

    # splitting
    if len(self._split) > 0:
        train_inds, dev_inds, test_inds = self.split_by_columns()
    elif 'split' in self.examples_meta:
        test_inds = list(self.examples_meta[self.examples_meta['split'] == 'test'].index)
        dev_inds = list(self.examples_meta[self.examples_meta['split'] == 'dev'].index)
        train_inds = list(self.examples_meta[self.examples_meta['split'] == 'train'].index)
        random.seed(17)
        random.shuffle(train_inds)
        #random.shuffle(test_inds)
        #random.shuffle(dev_inds)
        test_inds = test_inds[0: self._config['test_dev_size'][0]]
        dev_inds = dev_inds[0:self._config['test_dev_size'][1]]
        train_inds = train_inds[0:self._config['max_number_of_examples']]
    else:
        inds = [i for i in range(len(self.artiset_data))]
        random.seed(17)
        random.shuffle(inds)
        test_inds = inds[0:self._config['test_dev_size'][0]]
        dev_inds = inds[self._config['test_dev_size'][0]:sum(self._config['test_dev_size'])]
        train_inds = inds[sum(self._config['test_dev_size']):]

    if self._output_file.startswith('s3://'):
        save_func = upload_jsonl_to_s3
    elif is_path_creatable(self._output_file) and len(self._output_file) > 0:
        save_func = save_jsonl_to_local
    else:
        # Do nothing
        return

    if self._save_sample:
        if 'split' in self.examples_meta.columns:
            logger.info(f"size of each split:\n{self.examples_meta['split'].value_counts()}")
        random.seed(17)
        if len(self.artiset_data) > 100:
            self.artiset_data = random.sample(self.artiset_data, 100)
        save_func(self._output_file, self.artiset_data, sample_indent=self._save_sample)
    else:
        logger.info('uploading %d,%d,%d test,dev,train examples' % (len(test_inds), len(dev_inds), len(train_inds)))
        save_func(self._output_file.replace('.jsonl', '_test.jsonl'), [self.artiset_data[i] for i in test_inds])
        save_func(self._output_file.replace('.jsonl', '_dev.jsonl'), [self.artiset_data[i] for i in dev_inds])
        save_func(self._output_file.replace('.jsonl', '_train.jsonl'), [self.artiset_data[i] for i in train_inds])
        if len(self.examples_meta) > 0:
            save_func(self._output_file.replace('.jsonl', '_meta.jsonl'), self.examples_meta.to_dict(orient='rows'))

    return train_inds, dev_inds, test_inds
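A hedged sketch of the fallback split logic in the else branch above; the sizes, example count, and output path are invented stand-ins for what self._config and self._output_file would hold.

import random

test_dev_size = [100, 50]      # stands in for self._config['test_dev_size']
inds = list(range(1000))       # pretend the artiset holds 1000 examples
random.seed(17)
random.shuffle(inds)
test_inds = inds[0:test_dev_size[0]]
dev_inds = inds[test_dev_size[0]:sum(test_dev_size)]
train_inds = inds[sum(test_dev_size):]
# With a hypothetical output_file of 's3://bucket/artiset.jsonl', the three index
# lists would be written to artiset_test.jsonl, artiset_dev.jsonl and artiset_train.jsonl.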
[ "def save_datasets(self,fname='default',ftype=None):\n if not ftype: \n ftype = self.config['datasets_ftype']\n \n # Check if data are in dictionaries (sparse matrices)\n if (\n hasattr(self,'train') or \n hasattr(self,'val') or \n hasattr(self,'test')\n ): \n self.to_df()\n \n savepath = os.path.join(\n self.config['datasets_fpath'],\n f\"analysis_id={self.config['analysis_id']}\",\n \"datasets\",\n fname\n )\n \n os.makedirs(savepath, exist_ok = True)\n \n # Save datasets\n for element in [\n 'X_train',\n 'y_train',\n 'ids_train',\n 'X_val',\n 'y_val',\n 'ids_val',\n 'X_test',\n 'y_test',\n 'ids_test'\n ]:\n \n if hasattr(self,element): \n \n if ftype == 'parquet':\n pq.write_table(\n pa.Table.from_pandas(\n pd.DataFrame(getattr(self,element))\n ),\n f\"{savepath}/{element}.parquet\"\n ) \n \n if ftype == 'feather':\n feather.write_feather(\n pd.DataFrame(getattr(self,element)),\n f\"{savepath}/{element}.feather\"\n )", "def save_datasets(dataset: TrainDatasets, path_str: str, overwrite=True) -> None:\n path = Path(path_str)\n\n if overwrite:\n shutil.rmtree(path, ignore_errors=True)\n\n def dump_line(f, line):\n f.write(json.dumps(line).encode(\"utf-8\"))\n f.write(\"\\n\".encode(\"utf-8\"))\n\n (path / \"metadata\").mkdir(parents=True)\n with open(path / \"metadata/metadata.json\", \"wb\") as f:\n dump_line(f, dataset.metadata.dict())\n\n (path / \"train\").mkdir(parents=True)\n with open(path / \"train/data.json\", \"wb\") as f:\n for entry in dataset.train:\n dump_line(f, serialize_data_entry(entry))\n\n if dataset.test is not None:\n (path / \"test\").mkdir(parents=True)\n with open(path / \"test/data.json\", \"wb\") as f:\n for entry in dataset.test:\n dump_line(f, serialize_data_entry(entry))", "def _save_datasets(train, test, outdir: Path):\n out_train = outdir / 'train_cleaned.csv'\n out_test = outdir / 'test_cleaned.csv'\n flag = outdir / '.SUCCESS-clean'\n\n train.to_csv(str(out_train))\n test.to_csv(str(out_test))\n\n flag.touch()", "def save(self, filename):\n self.is_generated()\n print('Saving to', filename)\n train_set_input, train_set_label = self.train_set\n test_set_input, test_set_label = self.test_set\n np.savez_compressed(filename, \n train_set_input=train_set_input,\n train_set_label=train_set_label,\n test_set_label=test_set_label,\n test_set_input=test_set_input,\n input_interval = self.input_interval,\n prediction_interval = self.prediction_interval,\n categories = self.categories)", "def create_datasets_file(self):\n data_list = self.get_data()\n\n split_num = 80 * int(103 * 0.75)\n print(f\"split_num {split_num}\")\n # Save train and validation dataset\n filename = os.path.join(self.dataset_dir, \"train.datatxt\")\n save_dataset_list(filename, data_list[:split_num])\n print(\n f\"The dataset of the size {len(data_list[:split_num])} saved in {filename}.\"\n )\n filename = os.path.join(self.dataset_dir, \"val.datatxt\")\n save_dataset_list(filename, data_list[split_num:])\n print(\n f\"The dataset of the size {len(data_list[split_num:])} saved in {filename}.\"\n )", "def write_dataset(self, upload=False):\n scene_dataset_json_path = os.path.join(\n self.local_output_path,\n '{}_v{}.json'.format(self.scene_dataset_pb2.metadata.name, self.scene_dataset_pb2.metadata.version)\n )\n save_pbobject_as_json(self.scene_dataset_pb2, scene_dataset_json_path)\n\n # Printing SceneDataset scene counts per split (post-merging)\n logging.info('-' * 80)\n logging.info(\n 'Output SceneDataset {} has: {} train, {} val, {} test'.format(\n scene_dataset_json_path, 
len(self.scene_dataset_pb2.scene_splits[dataset_pb2.TRAIN].filenames),\n len(self.scene_dataset_pb2.scene_splits[dataset_pb2.VAL].filenames),\n len(self.scene_dataset_pb2.scene_splits[dataset_pb2.TEST].filenames)\n )\n )\n\n s3_path = os.path.join(\n self.scene_dataset_pb2.metadata.bucket_path.value, os.path.basename(scene_dataset_json_path)\n )\n if upload:\n s3_copy(scene_dataset_json_path, s3_path)\n\n else:\n logging.info(\n 'Upload the DGP-compliant scene dataset JSON to s3 via `aws s3 cp --acl bucket-owner-full-control {} {}`'\n .format(scene_dataset_json_path, s3_path)\n )\n return scene_dataset_json_path", "def write_dataset(datasets, output_dir, short_name, suffix=\"bio\"):\n for shard, dataset in zip(SHARDS, datasets):\n output_filename = os.path.join(output_dir, \"%s.%s.%s\" % (short_name, shard, suffix))\n write_sentences(output_filename, dataset)\n\n convert_bio_to_json(output_dir, output_dir, short_name, suffix)", "def save_pres_dat(self, dirname):\n arrs = self.train_data.to_numpy()\n ext = '.npy'\n for i in range(arrs.shape[0]):\n if i < 10:\n sample_num = \"0000\" + str(i)\n elif i < 100:\n sample_num = \"000\" + str(i)\n elif i < 1000:\n sample_num = \"00\" + str(i)\n elif i < 10000:\n sample_num = \"0\" + str(i)\n else:\n sample_num = str(i)\n fn = os.path.join(dirname, (sample_num+ext))\n np.save(fn, arrs[i])\n print(f\"{arrs.shape[0]} files saved to {dirname}\")", "def write_data_spec(self):\n with tf.io.gfile.GFile(self.dataset_spec_path, 'w') as f:\n # Use 2-space indentation (which also add newlines) for legibility.\n json.dump(self.dataset_spec.to_dict(), f, indent=2)", "def save_prediction_files(pred_train, pred_test, save_dir):\n train_path = os.path.join(save_dir, 'pred_train.lst')\n test_path = os.path.join(save_dir, 'pred_test.lst')\n with open(train_path, 'w') as pred_file:\n for elem in pred_train:\n pred_file.write(str(elem) + '\\n')\n with open(test_path, 'w') as file:\n for elem in pred_test:\n pred_file.write(str(elem) + '\\n')", "def create_datasets(per_train = 0.6, per_dev = 0.2, per_test = 0.2, datasets = 'datas.txt', should_filter = True, save_path = '../data_sets', verbos = False, keep = []):\n paths = []\n with open(datasets, 'r') as fid:\n while True:\n path = fid.readline()\n if path == '':\n break\n else:\n paths.append(path.rstrip())\n names = [path.split('/')[-1] + '_' for path in paths]\n\n should_load = was_created([save_path + '/' + name for name in names], per_train, per_dev, per_test)\n \n inds = []\n inds_initialized = False\n for name, path in zip(names, paths):\n if not should_load:\n print('Reading data from %s...' % path)\n data = read_data(path, should_filter, keep = keep)\n data = break_data(data)\n if not inds_initialized:\n for c in data:\n inds_temp = np.arange(c.shape[0])\n np.random.shuffle(inds_temp)\n inds.append(inds_temp)\n inds_initialized = True\n data = [c[inds[i]] for i, c in enumerate(data)]\n else:\n data = None # only because we need to send something to split_data()\n print('Splitting %s set...' % name)\n split_data(data, per_train = per_train, per_dev = per_dev, per_test = per_test, path = save_path, data_name = name, should_shuffle = False, should_load = should_load, verbos = verbos)", "def exportDataset(datasetName):\n\tf = open((datasetName+\".pkl\"), 'wb')\n\n\tif datasetName in DATA:\n\t\tpickle.dump(DATA[datasetName], f)\n\t\tprint(\"\\'\"+datasetName+\"\\' exported.\")\n\telse:\n\t\tprint(\"! 
\\'\"+datasetName+\"\\' does not exist.\")\n\tf.close()", "def save_block(self, dataset, dataset_block):\n record_file = self.output_directory / f\"images_{dataset_block}.tfrecords\"\n with tf.io.TFRecordWriter(str(record_file)) as writer:\n for item in dataset:\n writer.write(item.SerializeToString())", "def to_json(dataset: List[\"InputSample\"], output_file: Union[str, Path]):\n\n examples_json = [example.to_dict() for example in dataset]\n\n with open(\"{}\".format(output_file), \"w+\", encoding=\"utf-8\") as f:\n json.dump(examples_json, f, ensure_ascii=False, indent=4)", "def create_dataset(city='', savefile='dataset'):\n df = read_data(city)\n df_train, df_val, df_test = train_validate_test_split(df)\n (X_train, y_train), (X_val, y_val), (X_test, y_test) = input_output_split(df_train), input_output_split(df_val), input_output_split(df_test)\n\n # Not using to_pickle or np.savez in order to maintain DF structure\n save_path = 'data/' + savefile + '_' + city + '.pkl' if city else 'data/' + savefile + '.pkl'\n with open(save_path, 'wb') as f:\n pkl.dump( dict(X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, X_test=X_test, y_test=y_test), f)\n print('Saved generated dataset to: ' + save_path)", "def convert_dataset(self):\n self.create_dataset_specification_and_records()\n\n # Write the DatasetSpecification to the designated location.\n self.write_data_spec()", "def create_dataset(cls, **kwargs):\n data = {\n 'dataset_name': 'test_dataset',\n 'group_name': 'test_group',\n 'method': 'prebuilt',\n 'prebuilt_train_images': os.path.join(cls.imageset_folder, 'train_images'),\n 'prebuilt_train_labels': os.path.join(cls.imageset_folder, 'train_labels'),\n 'prebuilt_val_images': os.path.join(cls.imageset_folder, 'val_images'),\n 'prebuilt_val_labels': os.path.join(cls.imageset_folder, 'val_labels'),\n 'prebuilt_mean_file': os.path.join(cls.imageset_folder, 'train_mean.binaryproto'),\n }\n data.update(kwargs)\n\n request_json = data.pop('json', False)\n url = '/datasets/images/generic'\n if request_json:\n url += '.json'\n\n rv = cls.app.post(url, data=data)\n\n if request_json:\n if rv.status_code != 200:\n print json.loads(rv.data)\n raise RuntimeError('Model creation failed with %s' % rv.status_code)\n return json.loads(rv.data)['id']\n\n # expect a redirect\n if not 300 <= rv.status_code <= 310:\n s = BeautifulSoup(rv.data, 'html.parser')\n div = s.select('div.alert-danger')\n if div:\n print div[0]\n else:\n print rv.data\n raise RuntimeError('Failed to create dataset - status %s' % rv.status_code)\n\n job_id = cls.job_id_from_response(rv)\n\n assert cls.dataset_exists(job_id), 'dataset not found after successful creation'\n\n cls.created_datasets.append(job_id)\n return job_id", "def savePrediction(self):\n\n # transform predictions\n prediction = self.prediction * 48 + 48\n prediction = prediction.clip(0, 96)\n\n # read id list\n outputset = read_csv(os.path.expanduser(self.fIdList))\n\n # get needed predictions\n outputPrediction = []\n for i in range(len(outputset)):\n outputPrediction.append(prediction[outputset['ImageId'][i]-1,\n outputset['FeatureName'][i]-1])\n\n # read output list\n outputset = read_csv(os.path.expanduser(self.fOutputList))\n\n # fill output list with predictions\n outputset['Location'] = outputPrediction\n\n # write output list to disk\n outputset.to_csv(self.fOutFile, index=False)", "def test_to_datafile(self):\n\n expected_datafile = json.dumps(self.config_dict_with_features)\n\n opt_obj = optimizely.Optimizely(expected_datafile)\n project_config = 
opt_obj.config_manager.get_config()\n\n actual_datafile = project_config.to_datafile()\n\n self.assertEqual(expected_datafile, actual_datafile)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a user to the rooms associated with its groups
async def _set_user_in_group_rooms(
    app: web.Application, user_id: UserID, socket_id: SocketID
) -> None:
    primary_group, user_groups, all_group = await list_user_groups(app, user_id)
    groups = [primary_group] + user_groups + ([all_group] if bool(all_group) else [])
    sio = get_socket_server(app)
    for group in groups:
        sio.enter_room(socket_id, f"{group['gid']}")
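A small, self-contained illustration of the room-naming step above; the group dicts are made up, while in the real handler they come from list_user_groups.

primary_group = {'gid': 3}                      # hypothetical groups
user_groups = [{'gid': 7}, {'gid': 12}]
all_group = {'gid': 1}
groups = [primary_group] + user_groups + ([all_group] if bool(all_group) else [])
room_names = [f"{group['gid']}" for group in groups]
# -> ['3', '7', '12', '1']; the socket joins one room per group id.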
[ "def add_group_users():\n group_id = request.args.get('group_id', 0)\n group = (db_session.query(Group)\n .filter(Group.id == group_id)\n .first())\n return render_template('add_group_users.html',\n group=group)", "def add_player_to_room(room_id, user):\n # Check if the user is logged in\n if not user.is_authenticated:\n raise ClientError(\"USER_HAS_TO_LOGIN\")\n # Find the room they requested (by ID)\n try:\n user.game_room = Room.objects.get(pk=room_id)\n user.ready = False # default to not being ready\n user.save()\n except User.DoesNotExist:\n raise ClientError(\"USER_INVALID\")\n except Room.DoesNotExist:\n raise ClientError(\"ROOM_INVALID\")\n return True", "def add_user(self, group: str, user: User):\n self.groups[group].users.add(user)", "def add_to_room(room_id, client):\n if not room_id in Room._rooms:\n Room._rooms[room_id] = []\n\n if Room._rooms[room_id].count(client) == 0:\n Room._rooms[room_id].append(client)", "def addUser(self, user):\n if user.currentRoom() != None:\n logger.error(\n \"User %s is already in room %s\" %(user.userName, user.room)\n )\n return False\n\n back_self_user = self.activeUsers[:]\n back_self_count = self.activeUserCount\n\n try:\n self.activeUsers.append(user)\n self.activeUserCount += 1\n back_room = user.joinRoom(self)\n except Exception as err:\n logger.error(\n \"Error adding user %s to room %s\" % (user.userName, self.roomName)\n )\n self.activeUsers = back_self_user\n self.activeUserCount = back_self_count\n return False\n\n return True", "def join_rooms(self) -> None:\n if self.rooms:\n for room in self.rooms:\n # self.add_event_handler(f'muc::{room}::got_online', self.notify_user)\n self.plugin['xep_0045'].join_muc(room, self.nick, wait=True)", "def post(self):\n new_room_args = room_post_reqparser.parse_args(strict=True)\n\n name_record = db.session.query(RoomModel).filter_by(name=new_room_args['name']).first()\n if name_record:\n abort(409, error_code=409,\n error_msg='Cannot create a new room because a room with the given name already exists.'\n )\n room_admin = db.session.query(UserModel).filter_by(name=new_room_args['room_admin_name']).first()\n if not room_admin:\n abort(409, error_code=409,\n error_msg='Cannot create a new room because no user with the given name of the room admin exists.'\n )\n\n new_room = RoomModel(name=new_room_args['name'])\n room_admin.is_admin.append(new_room)\n db.session.add(new_room)\n db.session.commit()\n \n return new_room, 201", "def add_group(username, group_name, logger, client):\n client.users.add_to_group(username, group_name)\n logger.info('User `{0}` added successfully to group '\n '`{1}`'.format(username, group_name))", "def add_user(request, id):\n editor = request.user\n group = get_object_or_404(Group, id=id)\n \n if not (editor.is_superuser or editor.has_perm('admin', group)):\n return HttpResponseForbidden('You do not have sufficient privileges')\n \n if request.method == 'POST':\n form = AddUserForm(group, request.POST)\n if form.is_valid():\n user = form.cleaned_data['user']\n group.user_set.add(user)\n \n # signal\n view_add_user.send(sender=editor, user=user, obj=group)\n \n # return html for new user row\n url = reverse('usergroup-permissions', args=[id])\n return render_to_response( \\\n \"object_permissions/permissions/user_row.html\", \\\n {'user':user, 'object':group, 'url':url})\n \n # error in form return ajax response\n content = json.dumps(form.errors)\n return HttpResponse(content, mimetype='application/json')\n\n form = AddUserForm()\n return 
render_to_response(\"object_permissions/group/add_user.html\",\\\n {'form':form, 'group':group}, \\\n context_instance=RequestContext(request))", "def addgroupuser(self, username, gid):\n sql = \"INSERT INTO `%s` SET `%s`=%%s,`%s`=%%s;\" % (\n self.table,\n self.config.get('fields', 'username', fallback='username'),\n self.config.get('fields', 'gid', fallback='gid'))\n\n with self.dbs.cursor() as cur:\n cur.execute(sql, (username, gid))", "def add_room(self, room):\n for i in range(0, 2400, 25):\n timeslots = [(room.upper(), d, i, 0) for d in range(1, 8)]\n self.c.executemany('INSERT INTO rooms VALUES (?,?,?,?)', (timeslots))\n self.conn.commit()", "def add_users_to_groups_based_on_users_permissions(apps, schema_editor):\n User = apps.get_model(\"account\", \"User\")\n Group = apps.get_model(\"auth\", \"Group\")\n GroupData = namedtuple(\"GroupData\", [\"users\", \"group_name\"])\n\n groups = Group.objects.all().prefetch_related(\"permissions\")\n\n mapping = create_permissions_mapping(User, GroupData)\n for perms, group_data in mapping.items():\n group = get_group_with_given_permissions(perms, groups)\n users = group_data.users\n if group:\n group.user_set.add(*users)\n continue\n group = create_group_with_given_permissions(perms, group_data.group_name, Group)\n group.user_set.add(*users)", "def add_room():\n print(\"ADD A ROOM\".center(80))\n print(\"-\".center(80, '-'))\n room = str(_get_room_name())\n db.execute('INSERT into room (name) VALUES (?)', (room,))\n db.commit()\n display_menu()", "async def channel_group_user_add(self, channel, *users):\n channel_id = get_channel_id(channel, Channel.is_private_group)\n \n user_ids = set()\n for user in users:\n user_id = get_user_id(user)\n user_ids.add(user_id)\n \n for user_id in user_ids:\n await self.http.channel_group_user_add(channel_id, user_id)", "def save(self, *args, **kwargs):\n\t\tadmin_group = Group.objects.get_or_create(name='administrator')[0]\n\t\tself.user.groups.add(admin_group)\t\t\n\t\treturn super(AdminUser, self).save(*args, **kwargs)", "def groupIdJoin(groupId):\n group = db.Group.find_one({\"_id\": ObjectId(groupId)})\n user = db.users.find_one({\"_id\": ObjectId(current_user.id)})\n if group is not None:\n if not group['enrolledIds']:\n updatedGroup = db.Group.update_one({'_id': group['_id']}, {\"$set\": {\n \"enrolledIds\": [user['_id']]\n }})\n else:\n updatedGroup = db.Group.update_one({'_id': group['_id']}, {\"$set\": {\n \"enrolledIds\": group['enrolledIds'].append(user['_id'])\n }})\n if not user['enrolledGroups']:\n updatedUser = db.users.update_one({'_id': user['_id']}, {\n \"$set\": {\n \"enrolledGroups\": [group['_id']]\n }\n })\n else:\n updatedUser = db.users.update_one({'_id': group['_id']}, {\n \"$set\": {\n \"enrolledIds\": user['enrolledGroups'].append(group['_id'])\n }\n })\n return jsonify({\"msg\": \"Group successfully joined!\"}), 200\n elif group is None:\n return jsonify({\"msg\": \"Group Not Found\"}), 404\n return jsonify({\"msg\": \"something went wrong\"})", "def add_room(self, x, y, room):\n for (i, j), tile in room.items():\n self.set_tile(i + x, j + y, tile)", "def add_rooms(self) -> None:\n for i in range(self.num_room_tries):\n size = random.randint(1, 3 + self.room_extra_size) * 2 + 1\n rectangularity = random.randint(0, int(1 + size / 2)) * 2\n width = size\n height = size\n if random.randint(1, 3) == 1:\n width += rectangularity\n else:\n height += rectangularity\n\n x = random.randint(1, int((self.current_map_width - width - 1) / 2)) * 2 + 1\n y = random.randint(1, 
int((self.current_map_height - height - 1) / 2)) * 2 + 1\n\n room = pygame.Rect(x, y, width, height)\n\n overlaps = room.collidelist(self.rooms) != -1\n\n if not overlaps:\n self.rooms.append(room)\n self.start_region()\n self.carve(room, self.tile_texture)", "def add_user(self, user_id: int, group_name: str):\n self.cursor.execute(\n f\"INSERT INTO public.users (id, user_id, group_name) VALUES (DEFAULT, {user_id}, '{group_name}');\")\n self.conn.commit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Based on the counts and the input file, calculate the log likelihood. Write the trigram and its log likelihood to output_file.
def write_output(Count_trigram, Count_bigram, input_file, output_name):
    output_file = file(output_name, "w")
    input_file.seek(0)
    l = input_file.readline()
    while l:
        line = l.strip()
        fields = line.split(" ")
        assert len(fields)==3
        log_pr = cal_trigram_param(Count_trigram, Count_bigram, fields)  # Calculate using naive estimator.
        l = line + " " + str(log_pr) + "\n"
        output_file.write(l)
        l = input_file.readline()
    output_file.close()
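A sketch of the line format this function reads and writes; the trigram and the log value are invented, and cal_trigram_param stands for the estimator used above.

line = 'the quick fox'                 # one input line: exactly three tokens
fields = line.split(" ")               # -> ['the', 'quick', 'fox']
log_pr = -4.605170185988091            # stand-in for cal_trigram_param(...)
out_line = line + " " + str(log_pr) + "\n"
# Written out as: 'the quick fox -4.605170185988091' followed by a newline.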
[ "def process_files(args, logger):\n writer = get_open_function(args.output)\n logger.info(\"Writing outputs to {0}\".format(args.output))\n\n with writer(args.output, \"wt\") as o:\n # Write header row as comment\n o.write(\"#\" + \"\\t\".join(COLUMN_NAMES) + \"\\n\")\n if len(args.input) > 1:\n logger.info(\"Merging {0} STAR gene counts files.\".format(len(args.input)))\n # Load\n dic = dict()\n for fil in args.input:\n dic = load_junction_file(fil, dic)\n\n logger.info(\n \"Writing merged STAR junction counts to {0}.\".format(args.output)\n )\n # Merge and write\n for key in sorted(dic, key=itemgetter(0, 1, 2)):\n o.write(str(dic[key]) + \"\\n\")\n\n else:\n logger.info(\n \"Only 1 STAR junction counts file provided. \"\n + \"A new STAR junction counts file will be produced \"\n + \"with a header line.\"\n )\n logger.info(\n \"Writing formatted STAR junction \"\n + \"counts to {0}.\".format(args.output)\n )\n\n fil = args.input[0]\n reader = get_open_function(fil)\n with reader(fil, \"rt\") as fh:\n for line in fh:\n o.write(line)", "def add_logfrequency(inp='../data/vrex_1week_with_probability_unique.queries',\n outp='../data/vrex_1week_with_probability_plus_logfrequency.query'):\n with open(inp) as f:\n with open(outp, 'wb') as fout:\n for i, aline in enumerate(f):\n if i % 100000 == 0:\n print(i)\n aline = aline.strip()\n cols = aline.split('\\t')\n logprob = float(cols[-2])\n logfreq = np.log(float(cols[-1]))\n fout.write(aline + '\\t' + str(logprob + logfreq) + '\\n')\n fout.flush()", "def trigram_story(filename,number_of_words=500):\n text_list= create_filtered_list(filename)\n trigram_dict = create_trigram(text_list)\n trigram_text = generate_new_text(trigram_dict,number_of_words)\n write_text(filename,trigram_text)\n print(\"Please read the trigram text generated!\")", "def read_output_file():\n global TP_SPAM, FP_SPAM, TN_SPAM, FN_SPAM\n global TP_HAM, FP_HAM, TN_HAM, FN_HAM\n global path\n output_file = open(path+\"nboutput.txt\",\"r\", encoding=\"latin-1\")\n i = 0\n for line in output_file:\n i+=1\n arr = line.split()\n path = arr[1]\n label = arr[0]\n \n #calculating for spam\n if \"spam\" in path:\n if label == \"spam\":\n TP_SPAM+= 1\n else:\n FN_SPAM+= 1\n else:\n if label == \"ham\":\n TN_SPAM+= 1\n else:\n FP_SPAM+= 1\n \n #calculating for ham\n if \"ham\" in path:\n if label == \"ham\":\n TP_HAM+= 1\n else:\n FN_HAM+= 1\n else:\n if label == \"spam\":\n TN_HAM+= 1\n else:\n FP_HAM+= 1\n calculate_f1(TP_SPAM, TN_SPAM, FP_SPAM, FN_SPAM, \"SPAM\")\n calculate_f1(TP_HAM, TN_HAM, FP_HAM, FN_HAM, \"HAM\")", "def save_transition_probs(input_file):\n\n\t# read counts file\n\tcounter = Hmm(3)\n\tcounter.read_counts(file('ner_rare.counts'))\n\n\tout_lines_list = []\n\tl = input_file.readline()\n\twhile l:\n\t\tline = l.strip()\n\t\tif line: # Nonempty line\n\t\t\ttrigram = tuple(line.split())\n\t\t\t# get transition probability of trigram\n\t\t\tprob = compute_transition_prob(counter.ngram_counts[1][(trigram[0], trigram[1])], counter.ngram_counts[2][trigram])\n\t\t\t# get log probability\n\t\t\tlog_prob = math.log(prob)\n\t\t\tl = line + \" \" + str(log_prob)\n\n\t\tout_lines_list.append(l)\n\t\tl = input_file.readline()\n\tout_lines = \"\\n\".join(out_lines_list)\n\n\t# write trigrams and their log probs to file\n\twith open('5_1.txt','w') as out_file:\n\t\tout_file.write(out_lines)", "def kn_logprob(inp='../data/vrex_1week_long_text.queries',\n outp='../data/vrex_1week_with_probability.queries',\n fdfile='../data/fdist_kn.pickle',\n minlen=4,\n length_normalized=True):\n 
print('Loading Trigram Distribution')\n fdist = cp.load(open(fdfile))['fdist']\n print('Trigram Distribution Loaded')\n kn_pd = nltk.probability.KneserNeyProbDist(fdist)\n print('Kneser Ney Loaded')\n with open(inp) as f:\n with open(outp, 'wb') as fout:\n for i, aline in enumerate(f):\n jdat = json.loads(aline.strip())\n q = jdat['text'].lower().encode('ascii', 'ignore')\n tokens = ['<s>'] + nltk.word_tokenize(q) + ['<e>']\n if len(tokens) < minlen + 2:\n continue\n logplist = []\n for x, y, z in nltk.trigrams(tokens):\n lgp = kn_pd.logprob((x, y, z))\n # OOV cases\n if lgp == -1e300:\n logplist.append(-50)\n else:\n logplist.append(lgp)\n # Length Normalization: Add points for longer sentences\n if length_normalized:\n len_score = len(set(tokens)) * 8.5\n else:\n len_score = 0\n\n logpsum = sum(logplist) + len_score\n fout.write(q + '\\t' + str(logpsum) + '\\n')\n fout.flush()\n if i % 100000 == 0:\n print(i)", "def process(wav_dir, id_list, out_dir, calculate_normalisation, normalisation_of_deltas):\n file_ids = get_file_ids(wav_dir, id_list)\n\n make_dirs(os.path.join(out_dir, 'lf0'), file_ids)\n make_dirs(os.path.join(out_dir, 'vuv'), file_ids)\n\n for file_id in file_ids:\n wav_path = os.path.join(wav_dir, f'{file_id}.wav')\n wav, sample_rate = file_io.load_wav(wav_path)\n\n f0, vuv = analysis(wav, sample_rate)\n lf0 = np.log(f0)\n\n file_io.save_bin(lf0, os.path.join(out_dir, 'lf0', f'{file_id}.npy'))\n file_io.save_bin(vuv, os.path.join(out_dir, 'vuv', f'{file_id}.npy'))\n\n if calculate_normalisation:\n process_mvn(out_dir, 'lf0', id_list=id_list, deltas=normalisation_of_deltas)", "def write_ngram_output(ngram_dict, ngram_count_dict, output_file_dictionary, output_file_postings):\n with open(output_file_postings, 'w') as out_postings:\n # term_dict has term as key, doc_id_dict as value\n # doc_id_dict has doc id as key, term frequency corresponding to the doc id as value\n doc_norm = dict()\n for term, doc_id_dict in ngram_dict.iteritems():\n\n doc_id_list = doc_id_dict.keys()\n doc_id_list.sort()\n\n posting = []\n for doc_id in doc_id_list:\n values = [1 + math.log(i, 10) for i in doc_words[doc_id].values()]\n norm_val = math.sqrt(sum(i ** 2 for i in values))\n doc_norm[doc_id] = norm_val\n posting.append(str(doc_id) + '-' + str(doc_id_dict[doc_id]))\n # add a space at the end for distinguishing the current posting string from the next posting string\n posting_str = \" \".join(str(e) for e in posting) + \" \"\n\n head = out_postings.tell()\n out_postings.write(posting_str)\n freq = len(doc_id_list)\n tail = out_postings.tell()\n build_ngram_count_dict(ngram_count_dict, term, head, tail, freq)\n\n with open(output_file_dictionary, 'w') as out_dict:\n all_doc_ids.sort()\n ngram_count_dict['N'] = collection_size\n ngram_count_dict['DOC_NORM'] = doc_norm\n # TODO: Svilen: I don't know if we need 'ALL'. 
In the doc_norm, where we will store the length of a document, we will also have all docIDs\n ngram_count_dict['ALL'] = {'f': len(all_doc_ids), 'a': all_doc_ids}\n json.dump(ngram_count_dict, out_dict)", "def changing_ngram(input_file1, input_file2, output_file, n=1, alpha_numeric_flag=0, stop_words_flag=0):\n\n ngram_freq1 = count_ngram_frequency(input_file1, n, alpha_numeric_flag, stop_words_flag)\n ngram_freq1 = collections.OrderedDict(sorted(ngram_freq1.items()))\n ngram_freq2 = count_ngram_frequency(input_file2, n, alpha_numeric_flag, stop_words_flag)\n ngram_freq2 = collections.OrderedDict(sorted(ngram_freq2.items()))\n\n ngram_freq = {}\n\n for key in ngram_freq1.keys():\n if key in ngram_freq2:\n ngram_freq[key] = ngram_freq2[key] - ngram_freq1[key]\n else:\n ngram_freq[key] = -1*ngram_freq1[key]\n\n for key in ngram_freq2:\n if key not in ngram_freq:\n ngram_freq[key] = ngram_freq2[key]\n\n ngram_freq = sorted(ngram_freq.items(), key=lambda kv: kv[1], reverse=True)\n\n with open(output_file, \"w+\") as csvfile:\n writer = csv.writer(csvfile)\n for item in ngram_freq:\n writer.writerow(item)", "def binned_text():\n binned_pha_files,binned_MJD,binned_counts_soft1,binned_unc_soft1 = get_binned_data(soft1_dict,soft1_err_dict)\n binned_pha_files,binned_MJD,binned_counts_soft2,binned_unc_soft2 = get_binned_data(soft2_dict,soft2_err_dict)\n binned_pha_files,binned_MJD,binned_counts_A,binned_unc_A = get_binned_data(A_dict,A_err_dict)\n binned_pha_files,binned_MJD,binned_counts_B,binned_unc_B = get_binned_data(B_dict,B_err_dict)\n binned_pha_files,binned_MJD,binned_counts_C,binned_unc_C = get_binned_data(C_dict,C_err_dict)\n binned_pha_files,binned_MJD,binned_counts_D,binned_unc_D = get_binned_data(D_dict,D_err_dict)\n binned_pha_files,binned_MJD,binned_counts_inband,binned_unc_inband = get_binned_data(inband_dict,inband_err_dict)\n\n counts_file = Lv0_dirs.NGC300_2020 + 'n300_ulx.' + bgsub_type + '_cl50_g2020norm_' + bin_size + '.fffphot'\n output_file = open(counts_file,'w')\n\n ### get MJD (int), soft1, soft2, A, B, C, D, inband, all associated pha files\n for i in range(len(binned_MJD)):\n output_file.write(str(binned_MJD[i]) + ' ' + str(round(binned_counts_soft1[i],4)) + ' ' + str(round(binned_counts_soft2[i],4)) + ' ' + str(round(binned_counts_A[i],4)) + ' ' + str(round(binned_counts_B[i],4)) + ' ' + str(round(binned_counts_C[i],4)) + ' ' + str(round(binned_counts_D[i],4)) + ' ' + str(round(binned_counts_inband[i],4)) + ' ' + binned_pha_files[i] + '\\n')\n output_file.close()\n\n unc_file = Lv0_dirs.NGC300_2020 + 'n300_ulx.' 
+ bgsub_type + '_cl50_g2020err_norm_' + bin_size + '.fffphot'\n output_file = open(unc_file,'w')\n\n for i in range(len(binned_MJD)):\n output_file.write(str(binned_MJD[i]) + ' ' + str(round(binned_unc_soft1[i],4)) + ' ' + str(round(binned_unc_soft2[i],4)) + ' ' + str(round(binned_unc_A[i],4)) + ' ' + str(round(binned_unc_B[i],4)) + ' ' + str(round(binned_unc_C[i],4)) + ' ' + str(round(binned_unc_D[i],4)) + ' ' + str(round(binned_unc_inband[i],4)) + ' ' + binned_pha_files[i] + '\\n')\n output_file.close()", "def test_LM(in_file, out_file, LM):\r\n print \"testing language models...\"\r\n fout = open(out_file,'w+')\r\n\r\n with open(in_file, 'r') as testfile:\r\n for line in testfile:\r\n # probability of language(in log)\r\n p = [0.0, 0.0, 0.0]\r\n # number of matched 4-grams\r\n match_count = 0\r\n # sentence with punctuation removed and all characters converted to lowercase\r\n s = re.sub('[^a-zA-Z ]', '', line).lower()\r\n\r\n # count frequency of appearance for each 4-gram\r\n for i in range(-3,len(s)):\r\n # Add padding '0' if the end of a sentenece does not have enough characters to form a 4-gram\r\n # Use ^ to pad the beginning\r\n if i < 0:\r\n part = '^'*(0 - i) + s[0:4+i]\r\n # Use # to pad the end\r\n elif(i+4 > len(s)):\r\n part = s[i:len(s)] + '#'*(i+4-len(s))\r\n else:\r\n part = s[i:i+4]\r\n\r\n if part in LM:\r\n for j in range(0,3):\r\n p[j] += math.log(LM[part][j],10)\r\n # print(\"p[j]+\",p[j])\r\n # print(p)\r\n match_count += 1\r\n else:\r\n # ignore 4-grams that are not found in the LM\r\n pass\r\n\r\n # write result to output file\r\n # if less than 'LIMIT'% 4-grams are not in the LM, consider other language \r\n if((match_count*1.0/(len(s)+3)) < LIMIT):\r\n fout.write('other '+line)\r\n else:\r\n fout.write(INDEX_TO_LANG[p.index((max(p)))]+' '+line)\r\n \r\n # append newline at EOF\r\n fout.write('\\n')\r\n fout.close()", "def annotate_terms(text_file, output_file):\n init_data = read_data('lexicon.tsv')\n data = select_data(init_data)\n text_dataframe = lemma_posttag(text_file)\n annotate(data, text_dataframe)\n annotation = construct_annotated_text(text_dataframe)\n with open(output_file, 'w') as f:\n f.write(annotation)\n print(\"Your file has been annotated.\")", "def writelogq(self, output, trigram_file = None):\n t = self.q.keys()\n if trigram_file:\n t = trigram_iterator(trigram_file)\n for trigram in t:\n if self.q[trigram]:\n output.write(\"%s %s %s %f\\n\" % ( trigram + (math.log(self.q[trigram]),)))\n else:\n output.write(\"%s %s %s %s\\n\" % ( trigram + (\"-inf\",)))", "def countWordfrequencies(inpath):\n # First we create one dictionary for the files and counters\n docs_counts = {}\n \n # We import the texts from txt folder and for each...\n for doc in glob.glob(inpath+\"*.txt\"):\n \n # We split between the name of the file and its extension \n # filename,extesion = os.path.basename(doc).split(\".\")\n # print(doc)\n \n # We open the document and read it\n with open(doc, \"r\", encoding = \"utf-8\") as fin:\n content = fin.read()\n \n # We split it (or tokenize it) using a regular expression\n tokens_content = re.split(\"[\\W]+\",content)\n # print(type(tokens_content))\n \n # TODO, change this to only count a subset of the n-grams, see R output\n # We count how many times a word (or token) comes in the document\n doccounts = Counter(tokens_content) \n # print(doccounts)\n \n # We put that data in a dictionary with the name of the file together\n file = fix_name(doc[len(inpath):])[:-3]\n # 
\"D:\\\\Desktop\\\\Tesis-estilometria\\\\data\\\\corpus\\\\1940\"\n docs_counts[file] = doccounts\n # print(doccounts)\n # fin.close\n \n # print(len(docs_counts))\n \n # Now that we have all the information about the frecuency of each token, we create a matrix from the dictionary\n freqmatrix = pd.DataFrame.from_dict(docs_counts, orient = 'columns')\n # print(freqmatrix)\n # print(freqmatrix.shape)\n \n # We replace the NaN with zeros\n freqmatrix = freqmatrix.fillna(0)\n # print(freqmatrix)\n \n # We sum how many words are in each text and we put that in a Serie \n doclen = freqmatrix.sum(axis = vertical)\n # We put to this Serie a name: doclen\n doclen = pd.Series(doclen, name = \"doclen\")\n # print(doclen)\n \n # We rotate the table so the Serie doclen can fit in\n freqmatrix = freqmatrix.T\n \n # We sum how many times appear one word in the whole corpus to have the MFW of the corpus\n sumfrequencies = np.sum(freqmatrix, axis = vertical)\n sumfrequencies = pd.Series(sumfrequencies, name = \"sumfrequencies\")\n # print(sumfrequencies)\n\n # We order the token list of the corpus by frequency\n sumfrequencies.sort_values(ascending=False)\n \n \n # Now we divide the frequency through the length of the whole text in order to get relative frequencies\n freqmatrix = freqmatrix.apply(lambda x: x / doclen)\n # print(freqmatrix)\n \n # We add that to the table\n freqmatrix = freqmatrix.append(sumfrequencies)\n # print(freqmatrix)\n\n # We rotate it\n freqmatrix = freqmatrix.T\n\n #And we sort it by frequency\n freqmatrix.sort_values([\"sumfrequencies\"], ascending=False)\n # print(freqmatrix)\n\n # If you want, you can print the first 10 words of each document\n # print(freqmatrix.iloc[0:10,:])\n # print(freqmatrix[0:10])\n \n # We cut the table in case there are more than 5000 words in the corpus\n freqmatrix = freqmatrix.head(5000)\n # print(freqmatrix)\n\n # We drop (delete) the sumfrequencies!\n freqmatrix = freqmatrix.drop(\"sumfrequencies\", axis=horizontal)\n \n # We rotate it\n freqmatrix = freqmatrix.T\n\n # print(\"\\n\\n\\n\\nHere it is the frequency matrix!\")\n # print(freqmatrix)\n # print(freqmatrix.shape)\n\n return freqmatrix", "def processMotifs():\n\t\"\"\"\n\t\tInput file line format: node1 node2 node3 node4 motifType\n\t\"\"\"\n\tfin = open('reSampledMotifs','r')\n\tfout = open('outlierScores','w')\n\tneighbors=set()\n\n\tfor i, line in enumerate(fin):\n\t\ttempList=(line.rstrip().split(\" \"))\n\t\ttempList=map(int,tempList)\n\t\tneighbors.clear()\n\t\tmotifAuths=tempList[:4]\n\n\t\tfor nod in motifAuths:\n\t\t\tfor auth in edges[nod]:\n\t\t\t\tneighbors.add(auth)\n\n\t\t\"\"\" \n\t\t\tSince our dataset is too large and contains motifs having as many as n = 500 neigbhours.\n\t\t\tThere as many as O(n^4) constraints. 
Simplex cannot handle so many constraints.\n\t\t\tWe ignore such motifs because it is highly improbable that such motifs are outliers due to there connectedness.\n\t\t\"\"\"\n\t\tif len(neighbors)<=100:\n\t\t\tfout.write(str(i) + \" \" + str(len(neighbors))+\" \"+str(outlierScore(neighbors,belongingnessVectorDict,fields,edges,C, i))+\"\\n\")\n\n\t\t# if(i%10000==0):\n\t\t# \tprint str(i)\n\n\tfin.close()\n\tfout.close()", "def ngram_frequency_dist(input_file, output_file, n=1, alpha_numeric_flag=False, stop_words_flag=False):\n ngram_freq = count_ngram_frequency(input_file, n, alpha_numeric_flag, stop_words_flag)\n ngram_freq = ngram_freq.most_common()\n\n with open(output_file, \"w+\") as csvfile:\n writer = csv.writer(csvfile)\n for item in ngram_freq:\n writer.writerow(item)", "def main(args):\n logger = get_logger(\"merge_star_junction_counts\")\n logger.info(\n \"Merging/Formatting {0} STAR junction counts files.\".format(len(args.input))\n )\n\n process_files(args, logger)", "def daily_ngram_collector(input_file_folder_path, output_file, number_of_users, start_date, end_date, n=1, cutoff_freq=5,\n alpha_numeric_flag=0, stop_words_flag=0):\n\n curr_date = start_date\n\n end_date = dt.strptime(end_date, '%Y_%m_%d')\n end_date += datetime.timedelta(days=1)\n end_date = dt.strftime(end_date, '%Y_%m_%d')\n\n while curr_date != end_date:\n input_f = os.path.join(input_file_folder_path, curr_date + '_profiles_' + str(number_of_users) + '.zip')\n if os.path.exists(input_f):\n ngram_freq = count_ngram_frequency(input_f, n, alpha_numeric_flag, stop_words_flag)\n ngram_freq = ngram_freq.most_common()\n\n # Creating the new row to add to the daily collector file\n # new_row1 = {'Date': re.findall(r'[0-9]{4}_[0-9]{2}_[0-9]{2}', input_f)[0]}\n new_row1 = {'Date': curr_date}\n # Extracting the Date from the filename\n for item, val in ngram_freq:\n if n == 1:\n new_row1[item[0]] = [val]\n else:\n new_row1[item] = [val]\n new_row = pd.DataFrame(new_row1)\n\n new_row1 = pd.DataFrame()\n for col in list(new_row.columns):\n if col == 'Date':\n new_row1[str(col)] = new_row[col]\n continue\n if new_row[col][0] > cutoff_freq:\n new_row1[str(col)] = new_row[col]\n # Checking the file exist or not\n # If not then generate a new one or append the line at the end of the file\n if not os.path.exists(output_file):\n ngram_combined = new_row1\n else:\n ngram_original = pd.read_csv(output_file, index_col=0)\n ngram_combined = pd.concat([ngram_original, new_row1], sort=False, ignore_index=True, axis=0)\n ngram_combined.replace(np.nan, 0, inplace=True)\n ngram_combined.to_csv(output_file)\n\n curr_date = dt.strptime(curr_date, '%Y_%m_%d')\n curr_date += datetime.timedelta(days=1)\n curr_date = dt.strftime(curr_date, '%Y_%m_%d')", "def learn_distribution(self,filename):\n\n\n if self._ipa_tokens is None:\n self.load_ipa_tokens(filename)\n\n \n print(\"About to learn from\",filename)\n print(\"For each word, each chunk of\",self.window_size,\"sounds will be considered.\")\n print(\"Some of the words that could not be processed will be printed below; just check that nothing too bad is happening.\")\n \n s_msg = SuppressedMessenger(name=\"unprocessed words\",max_messages = 1000)\n\n epi = epitran.Epitran(self.lang_code)\n \n def extract_words(line):\n for word in line.split():\n word_ipa = epi.trans_list(word.strip(' .()!?:;,\\n\\\"'))\n if word_ipa and all(c in self.get_ipa_tokens() for c in word_ipa):\n yield map(self.token_to_int,word_ipa)\n else:\n bad_chars = [c for c in word_ipa if c not in 
self.get_ipa_tokens()]\n if bad_chars and any(c not in '1234567890-()[]{}=@' for c in bad_chars): # not useful to see this warning in most cases\n s_msg.print(\"\\\"\"+word+\"\\\" was not processed due to: \"+str(bad_chars))\n \n num_tokens = len(self.get_ipa_tokens())\n start_token = self.token_to_int('WORD_START')\n end_token = self.token_to_int('WORD_END')\n \n counts = np.zeros((num_tokens,)*self.window_size,dtype=np.dtype('u8')) # TODO use sparse array instead and be smarter about dtype.\n \n with open(filename) as f:\n for line_num,line in enumerate(f.readlines()):\n for word in extract_words(line):\n previous = [start_token]*(self.window_size-1)\n for t in word:\n counts[tuple(previous)+(t,)] += 1\n previous = previous[1:]+[t]\n counts[tuple(previous)+(end_token,)] += 1\n sys.stdout.write(\"> \"+str(line_num+1)+\" lines processed \\r\")\n sys.stdout.flush()\n print()\n \n totals = counts.sum(axis=self.window_size-1)\n self._distribution = counts / (np.vectorize(lambda x : x if x!=0 else 1)(totals.reshape(totals.shape+(1,))))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes an instance of the ReservadorDeVuelos class
def __init__(self): self.__vuelos = {}
[ "def __init__(self, nome, vagas=0):\n self.nome = nome\n self.vagas = vagas\n self.alunos = [] # self.alunos é um atributo global criado automaticamente", "def regice_init(self):\n for peripheral_name in self.svd.peripherals:\n peripheral = self.svd.peripherals[peripheral_name]\n peripheral_obj = RegicePeripheral(peripheral, self.client)\n setattr(self, peripheral_name, peripheral_obj)", "def __init__(self, nombre_comercial, id_monodroga, cantidad_monodroga):\r\n ObjetoBase.__init__(self)\r\n self.nombre_comercial=nombre_comercial\r\n self.id_monodroga=id_monodroga\r\n self.cantidad_monodroga=cantidad_monodroga", "def __init__(self):\n self.__socios = self.__listar_socios()", "def __init__(self, vm_scheduler, preparation_scheduler, slottable, accounting):\n \n # Logger\n self.logger = logging.getLogger(\"LSCHED\")\n \n # Assign schedulers and slottable\n self.vm_scheduler = vm_scheduler\n \"\"\"\n VM Scheduler\n @type: VMScheduler\n \"\"\"\n self.preparation_scheduler = preparation_scheduler\n self.slottable = slottable\n self.accounting = accounting\n\n # Create other data structures\n self.queue = Queue()\n self.leases = LeaseTable()\n self.completed_leases = LeaseTable()\n\n # Handlers are callback functions that get called whenever a type of\n # resource reservation starts or ends. Each scheduler publishes the\n # handlers it supports through its \"handlers\" attributes. For example,\n # the VMScheduler provides _handle_start_vm and _handle_end_vm that\n # must be called when a VMResourceReservation start or end is encountered\n # in the slot table.\n #\n # Handlers are called from the process_reservations method of this class\n self.handlers = {}\n for (type, handler) in self.vm_scheduler.handlers.items():\n self.handlers[type] = handler\n\n for (type, handler) in self.preparation_scheduler.handlers.items():\n self.handlers[type] = handler", "def inicializar(self):\n\t\tLabel(self, text=\"Ingrese datos de la solicitud\", ).grid(\n\t\t\trow=1, column=2)\n\t\tLabel(self, text=\"Cliente*: \").grid(row=2, column=1)\n\t\tLabel(self, text=\"Asesor*: \").grid(row=3, column=1)\n\t\tLabel(self, text=\"Vehiculo*: \").grid(row=4, column=1)\n\t\tLabel(self, text=\"Chapa*: \").grid(row=5, column=2)\n\t\tLabel(self, text=\"Marca*: \").grid(row=6, column=2)\n\t\tLabel(self, text=\"Modelo*: \").grid(row=7, column=2)\n\t\tLabel(self, text=\"Repuesto: \").grid(row=9, column=1)\n\t\tLabel(self, text=\"Tipo: \").grid(row=10, column=2)\n\t\tLabel(self, text=\"Marca: \").grid(row=11, column=2)\n\t\tLabel(self, text=\"Costo: \").grid(row=12, column=2)\n\t\tButton(self, text=\"GUARDAR\", command=self.agregar_solicitud).grid(\n\t\t\trow=14, column=1)\n\n\t\tself.get_cliente_entry()\n\t\tself.get_asesor_entry()\n\t\tself.get_chapa_entry()\n\t\tself.get_marcaV_entry()\n\t\tself.get_modelo_entry()\t\t\n\t\tself.get_tipo_entry()\n\t\tself.get_marcaR_entry()\n\t\tself.get_costo_entry()", "def __init__(self): \t \n\t\tsuper(Fenetre, self).__init__()\n\t\tself.resizable(width=False, height=False)\n\t\tself.grid_columnconfigure(0, weight=2)\n\t\tself.grid_rowconfigure(0, weight=2)\n\t\tself.lesRegles = ListeRegle().charger()\n\t\tself.fenetreListerRegle()", "def __init__(self, **kwargs):\n\n super(NUVMIPReservation, self).__init__()\n\n # Read/Write Attributes\n \n self._ip_type = None\n self._ipv4_address = None\n self._ipv6_address = None\n self._ipv6_allocation_pools = None\n self._last_updated_by = None\n self._last_updated_date = None\n self._allocation_pools = None\n self._embedded_metadata = None\n 
self._entity_scope = None\n self._creation_date = None\n self._state = None\n self._owner = None\n self._external_id = None\n \n self.expose_attribute(local_name=\"ip_type\", remote_name=\"IPType\", attribute_type=str, is_required=False, is_unique=False, choices=[u'DUALSTACK', u'IPV4', u'IPV6'])\n self.expose_attribute(local_name=\"ipv4_address\", remote_name=\"IPV4Address\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"ipv6_address\", remote_name=\"IPV6Address\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"ipv6_allocation_pools\", remote_name=\"IPV6AllocationPools\", attribute_type=list, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"last_updated_by\", remote_name=\"lastUpdatedBy\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"last_updated_date\", remote_name=\"lastUpdatedDate\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"allocation_pools\", remote_name=\"allocationPools\", attribute_type=list, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"embedded_metadata\", remote_name=\"embeddedMetadata\", attribute_type=list, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"entity_scope\", remote_name=\"entityScope\", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])\n self.expose_attribute(local_name=\"creation_date\", remote_name=\"creationDate\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"state\", remote_name=\"state\", attribute_type=str, is_required=False, is_unique=False, choices=[u'ASSIGNED', u'ASSIGNED_DELETE_PENDING', u'UNASSIGNED'])\n self.expose_attribute(local_name=\"owner\", remote_name=\"owner\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"external_id\", remote_name=\"externalID\", attribute_type=str, is_required=False, is_unique=True)\n \n\n # Fetchers\n \n \n self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship=\"child\")\n \n \n self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship=\"child\")\n \n \n self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship=\"child\")\n \n\n self._compute_args(**kwargs)", "def __init__(__self__, *,\n reservation_id: pulumi.Input[str],\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n throughput_capacity: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"reservation_id\", reservation_id)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if project is not None:\n pulumi.set(__self__, \"project\", project)\n if throughput_capacity is not None:\n pulumi.set(__self__, \"throughput_capacity\", throughput_capacity)", "def __init__(self):\n self._events = self._create_event_objects()", "def _create_fleet(self):\n\t\t# create a virus and find the number of viruses in a row\n\t\t# spacing between each virus is equal to one virus width\n\t\tvirus = Virus(self)\n\t\tvirus_width, virus_height = virus.rect.size\n\t\tavailable_space_x = self.settings.screen_width - (2 * virus_width)\n\t\tnumber_viruses_x = available_space_x // (2 * virus_width)\n\n\t\t# 
determine the number of rows of viruses that fit on the screen\n\t\tnurse_height = self.nurse.rect.height\n\t\tavailable_space_y = (self.settings.screen_height - (3 * virus_height) - nurse_height)\n\t\tnumber_rows = available_space_y // (2 * virus_height)\n\n\t\t# create full fleet of viruses\n\t\tfor row_number in range(number_rows):\n\t\t\tfor virus_number in range(number_viruses_x):\n\t\t\t\tself._create_virus(virus_number, row_number)", "def __init__(self, nombre):\n self.nombre = nombre\n self.edad = 0\n self.vivo = True", "def __init__(self,nombre,droga,obraSocial,plan,importe):\n self.nombreMedicamento = nombre\n self.droga = droga\n self.obraSocial = obraSocial\n self.plan = plan\n self.importe = importe\n self.fechaVenta = datetime.now().date()\n self.horaVenta = datetime.now().time()", "def __init__(self, *args , **kwargs):\n\t\t#RRT Planner Service \n\t\tself.planning_srv_rrt = rospy.Service('rrt_planner_service', Planner, self.plan)", "def __init__(self):\n\n self.name = self.get_param(\"name\", \"rosweld\")\n rospy.init_node(self.name)\n\n self.publishers = {}", "def __init__(self, vehicles: List[Vehicle]):\n self.orders = []\n self.vehicles = vehicles", "def __init__(self, **kwargs):\n\n self._car_factory = CarFactory(type=kwargs.get(\"type\"), car_type=kwargs.get(\"car_type\"),doors=kwargs.get(\"doors\"),liters=kwargs.get(\"liters\"),fuel_type=kwargs.get(\"fuel_type\"))\n self.inventory.append(self._car_factory.get_car())", "def make_viso_instance(self):\n pass", "def __init__(self, nombre, cocina, clientes):\r\n self.nombre=nombre\r\n self.cocina=cocina\r\n self.clientes=clientes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If this is an Airflow run, generate a name to reflect that it still awaits a sync. Otherwise, generate a human-friendly name for the run.
def _generate_run_name(self, af_context: Optional[AirflowTaskContext]) -> str: if af_context is not None: return f"Airflow-run-await-sync_{self.run_uid}" return get_random_name(seed=self.run_uid)
[ "def _get_task_name(task):\n\n if task.is_generate_resmoke_task:\n return task.generated_task_name\n\n return task.name", "def taskName(self, name):\n\n return self.uniqueName(name)", "def get_name(self):\n try:\n return self.task.split('.')[-1]\n except NotImplementedError:\n return '%s: No task specified.' % self.__class__.__name__", "def taskname(task):\n task_dict = all_tasks.get(task)\n if task_dict:\n return task_dict.get('display_name')\n task = task.replace('_', ' ')\n task_splitted = task.split(':')\n if len(task_splitted) == 2:\n return task_splitted[0] + ' (' + task_splitted[1] + ')'\n return task.replace(':', ': ')", "def name(self):\n if self.ready():\n return self._result['task']\n raise AttributeError", "async def name_thread(history: str, personality: str, current_name: str = None) -> str:", "def gen_task_name(app, name, module_name):\n module_name = module_name or '__main__'\n try:\n module = sys.modules[module_name]\n except KeyError:\n # Fix for manage.py shell_plus (Issue #366)\n module = None\n\n if module is not None:\n module_name = module.__name__\n # - If the task module is used as the __main__ script\n # - we need to rewrite the module part of the task name\n # - to match App.main.\n if MP_MAIN_FILE and module.__file__ == MP_MAIN_FILE:\n # - see comment about :envvar:`MP_MAIN_FILE` above.\n module_name = '__main__'\n if module_name == '__main__' and app.main:\n return '.'.join([app.main, name])\n return '.'.join(p for p in (module_name, name) if p)", "def _get_condensed_name(self) -> str:\n return f\"{self.get_datetime()}_{self.constellation.name}_{self.product_type.name}_{self.band_combi.name}_{self._job_id}\"", "def name(self) -> str:\n return f\"{self._schedule_name} Schedule\"", "def _generateName(self):\n return \"PoolThread-{}-{}\".format(self.name or id(self), self.workers)", "def get_activity_name(activityType):\n\treturn \"activity_\" + activityType", "def get_build_dir_name(self) -> str:\n return f\"{self.launch_time}-{self.name}\"", "def get_task_name(self):\n\t\ttask_name = input(\"Task Name: \")\n\t\tif len(task_name) == 0:\n\t\t\tinput(\"Task Name should be at least one character long. Press enter to continue.\")\n\t\t\tself.get_task_name()\n\t\telse:\n\t\t\tself.task_name = task_name", "def get_output_basename(self):\n cumf_base_name = self.options[\"full_task_name\"]\n cumf_base_name = re.sub(r\"[() ]\", r\"_\", cumf_base_name)\n if cumf_base_name.endswith(\"_\"):\n cumf_base_name = cumf_base_name[:-1]\n return \"ana.\" + cumf_base_name", "def generate_name(self):\n return self._generate_name", "def update_experiment_name(self):\n n_list = [task['n'] for task in self.preprocessed_task_list]\n if any([n > 1 for n in n_list]):\n # Rabi measurement with more than one pulse\n self.experiment_name += f'-n'\n if len(np.unique(n_list)) == 1:\n # all tasks have the same n; add the value of n to the\n # experiment name\n self.experiment_name += f'{n_list[0]}'", "def generate_default_name():\n return \"{}{}\".format(os.getpid(), str(time.time()).replace(\".\", \"\"))", "def factory_name():\r\n return 'base-task'", "def generate_unique_job_name(self, name='no_name_job'):\n name = os.path.basename(name)\n return \"_\".join([os.path.split(name)[1], self.get_jobname_hash()])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads the result of the run by name; by default, the default result name is used.
def load_from_result(self, name=RESULT_PARAM, value_type=None): # type: (Optional[str], Optional[str]) -> Any return self.run_executor.result.load(name, value_type)
[ "def load_by_name(self, name):\n return self.load(self.names.get(name, 0))", "def get_result(self, output_name):\n return self._results[output_name]", "def load(self):\n results_fn = os.path.join(self.full_path, self.output_filename)\n self.results = rlpy.Tools.results.load_single(results_fn)\n return self.results", "def load_result_from_experiment(fbase):\n filename = fbase + \"results_dict.pkl\"\n with open(filename, \"rb\") as fp:\n return pickle.load(fp)", "def get_result(self, response, result_name):\n jr = json.loads(response.text)\n if 'success' in jr and jr['success'] == True:\n if 'result' in jr and result_name in jr['result']:\n return(jr['result'][result_name])\n return(0)", "def load_all_results():\n directory = os.getcwd() + \"\\\\result\"\n\n return __load_results(directory)", "def __getitem__(self, name):\n return self.job(name=name)", "def test_results(self, testname):\n return self._tests[testname]", "def get_outcome(self, name):\n return self.all_outcomes.get(name)", "def persist_result(result_data, analysis_result_id, result_name):\n result = ReadStatsResult(**result_data)\n persist_result_helper(result, analysis_result_id, result_name)", "def security_result(name):\n filename = Path(__file__).parent / \"security\" / f\"{name}.out\"\n with filename.open() as f:\n return f.read()", "def test_pipeline_processor_get_eval_result_by_ref_name(self):\n\n nlp = Pipeline[DataPack](enforce_consistency=True)\n reader = DummySentenceReaderOne()\n nlp.set_reader(reader)\n dummy = DummyEvaluatorFour()\n nlp.add(dummy, ref_name=\"ref_dummy\")\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n pack = nlp.process(data_path)\n self.assertEqual(\n nlp.get_component(\"ref_dummy\").get_result(),\n \"Reference name of DummyEvaluatorFour is ref_dummy\",\n )", "def load_name(self) -> str:\n return self._definition.parameters.loadName", "def get_value(self, resultpath, default=None):\n fname = os.path.join(resultpath, self.filename)\n with open(fname) as f:\n for line in f:\n m = re.search(self.regex, line)\n if m:\n return self.parser(m.group('value'))\n return default", "def read_single_result():\n # TODO: your code here\n # example return values\n return \"some_table1\", \"some_table2\", \"p1\", \"p2\", \"runtime\"", "def load_by_name(cls, name):\n return cls.load(os.path.join(\n os.path.dirname(__file__), \"dictionaries\", name + \".json\"))", "def GetValue(self, name, subresult_name=''):\n\n\t\t# If we specify a subresults\n\t\tif len(subresult_name) > 0:\n\n\t\t\tif not self._subresult:\n\t\t\t\terror(\"No subresult from which \\\n\t\t\t\t\t\tI could get the {} one...\".format(subresult_name))\n\t\t\t\treturn None\n\n\t\t\tsub = self._subresult[subresult_name]\n\n\t\t\tif not sub:\n\t\t\t\terror(\"No subresult from which \\\n\t\t\t\t\t\tI could get the {} one...\".format(subresult_name))\n\t\t\t\treturn None\n\n\t\t\treturn sub.GetValue(name)\n\n\t\t# self._map existes only for AnnaResults w/o subresults\n\t\tif self._map is not None:\n\t\t\ttry:\n\t\t\t\tvalue = self._map[name][self._index.kValue]\n\t\t\texcept KeyError:\n\t\t\t\treturn None\n\t\t\treturn value\n\n\t\t# Mean method (by default)\n\t\tif self._resultMergingMethod == self._mergingMethod.kMean:\n\n\t\t\tmean, sm = 0., 0.\n\n\t\t\tfor rname in self.subresults:\n\t\t\t\tr = self.subresults[rname]\n\t\t\t\tif self.IsIncluded(r.GetName()) is True and r.HasValue(name) > 0:\n\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tThe weight for each subresult is the same (=1.), since the data sample\n\t\t\t\t\tis always the 
same and just the fit function changes among subresults.\n\t\t\t\t\tWe can also weight subresults with :\n\t\t\t\t\t\te = r.GetErrorStat(name)/math.sqrt(r.GetValue(name))\n\n\t\t\t\t\tThe math.sqrt(r>GetValue(name)) was not there before and was introduced\n\t\t\t\t\tto remove the dependence of the error with the number of particule\n\t\t\t\t\textracted (valid only for counts results with different data samples\n\t\t\t\t\tand not for <pt>...)\n\t\t\t\t\t\"\"\"\n\n\t\t\t\t\t# Check fit status\n\t\t\t\t\tfitStatus = r.GetValue(\"FitResult\") \\\n\t\t\t\t\t\tif r.GetValue(\"FitResult\") is not None else 0\n\t\t\t\t\tcovStatus = r.GetValue(\"CovMatrixStatus\") \\\n\t\t\t\t\t\tif r.GetValue(\"CovMatrixStatus\") is not None else 3\n\t\t\t\t\tchi2 = r.GetValue(\"FitChi2PerNDF\") \\\n\t\t\t\t\t\tif r.GetValue(\"FitChi2PerNDF\") is not None else 1\n\n\t\t\t\t\t# Select only Fit that converge\n\t\t\t\t\tif (fitStatus != 0 and fitStatus != 4000) or chi2 > 2.5:\n\t\t\t\t\t\tdebug(\"Fit {} excluded (FitResult = {} | Cov. Mat. = {})\\n\".format(\n\t\t\t\t\t\t\tr.GetName(), fitStatus, covStatus)\n\t\t\t\t\t\t)\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t# weight and error\n\t\t\t\t\tw = r.weigth\n\t\t\t\t\tdebug(\" --- Weight for subResults {} = {} \\n\".format(r.GetName(), w))\n\t\t\t\t\t# If the error is not correct we skip the subresult\n\t\t\t\t\tif r.GetErrorStat(name) is None:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif r.GetErrorStat(name) < 0.0 or fitStatus != 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tmean += w * r.GetValue(name)\n\t\t\t\t\tsm += w\n\n\t\t\t# Case something went wrong\n\t\t\ttry:\n\t\t\t\tassert sm != 0.\n\t\t\texcept AssertionError:\n\t\t\t\treturn None\n\n\t\t\treturn mean / sm\n\n\t\telse:\n\t\t\tsm = 0.\n\t\t\tfor rname in self.subresults:\n\t\t\t\tr = self.subresults[rname]\n\t\t\t\tif self.IsIncluded(r.GetName()) is True and r.HasValue(name) > 0:\n\t\t\t\t\tsm += r.GetValue(name)\n\n\t\t\t# Case something went wrong\n\t\t\ttry:\n\t\t\t\tassert sm != 0.\n\t\t\texcept AssertionError:\n\t\t\t\treturn None\n\n\t\t\treturn sm", "def result(self):\n return self.instance.get_task_result(self.name)", "def LoadResultContainer(resultPath, resultId, user, videoId):\n pathToIndividualStatistic = os.path.join(PATH_TO_STATISTIC_RESULTS,\n 'individual',\n resultId)\n resultContainerDumpPath = \\\n '{}.dump'.format(pathToIndividualStatistic)\n rc = Load(resultContainerDumpPath)\n if rc is not None:\n rc.isNew = False\n else:\n rc = ResultContainer(resultPath, resultId, user, videoId)\n rc.isNew = True\n return rc" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add object to end of the array.
def append(self, obj): if self._n == self._capacity: # not enough room self._resize(2 * self._capacity) # so double capacity self._A[self._n] = obj self._n += 1
[ "def append(self, obj):\n if self._n == self._capacity: # Not enough room, double capacity\n self._resize(2 * self._capacity)\n self._A[self._n] = obj\n self._n += 1", "def append(self, object) :\n self.objects.append(object)", "def append(self, value: object) -> None:\n # check to see if there is room\n if self.size == self.capacity: # if full\n self.resize(self.capacity * 2) # double the capacity \n # place the value at the end of the size\n self.data[self.size] = value\n # add to the size of the array\n self.size += 1", "def append(self, value: object) -> None:\n if self.size == self.capacity:\n self.resize(self.capacity * 2)\n self.data[self.size] = value\n self.size += 1\n return", "def append(self, value):\n # TODO: Don't allow multiple values as parameter\n\n if (self.index + 1) == self.size:\n # Current array is full -> extend\n self.extend()\n self.array[self.index + 1] = value\n self.index += 1", "def append(self, item):\n if len(self) < self.maxSize:\n self.array[len(self)] = item\n else:\n raise IndexError(\"List is full\")", "def add_element(self, value):\n self.idx = (self.idx + 1) % self.buffer_size\n self.array[self.idx] = value", "def _insert(self, obj, index):\n\n if self._n == self._capacity:\n self._resize(2 * self._capacity)\n self._n += 1\n for k in reversed(xrange(index, self._n)): # Goes backwards through the list to move the values along in the array\n self._A[k] = self._A[k-1]\n\n self._A[index] = obj", "def addLastArray(*args, **kwargs):\n \n pass", "def addElement(self, element):\n self.rawArray_.append(element)", "def _AddObject(self, _id, obj):\n if self._vbo is None:\n return\n index = -1\n if len(self._empty_indices) > 0:\n index = self._empty_indices.pop()\n elif self._max_index < self._data_size - 1:\n self._max_index += 1\n index = self._max_index\n if index > 0:\n values = self.__descToArray(obj)\n num_values = len(values)\n self._vbo[\n index * num_values:(index + 1) * num_values] = narray(values, \"f\")\n self._indices[_id] = index\n else:\n self._vbo = None\n # trigger _BuildData for next draw", "def append(self, value):\n self.data.insert(self.tail() + 1, value)", "def append(self, cell_obj):\r\n\r\n assert isinstance(cell_obj, Cell)\r\n self.cell_list = np.append(self.cell_list, cell_obj)", "def add(self, value):\r\n self.data.append(value)\r\n if len(self.data) > self.limit:\r\n self.data = self.data[1:]", "def append(self, elem):\n if self._max == self._size:\n raise Exception(\"Full list\")\n self._list[self._size] = elem\n self._size += 1", "def append(self, elem):\n self.vec.append(elem)", "def append(self, item):\n if len(self) == 0:\n # Special case, we make this the current item\n self.index = 0\n\n self.items.append(item)", "def add_last(self, e):\n if self._size == len(self._data): # storage is full\n self._resize(2 * len(self._data)) # double the capacity\n back = (self._front + self._size) % len(self._data) # get index of end of deuqe\n self._data[back] = e\n self._size += 1", "def append(self,obj):\n if obj.getName() is not None:\n if obj.getName() not in self.__objCatalog:\n # self.__objNameList is keeping track of object order here -- \n self.__objNameList.append(obj.getName())\n self.__objCatalog[obj.getName()]=obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get access to optimizer object
def optimizer(self) -> OptimizerBase: return self._optimizer
[ "def optimizer(self):\n return self.optimizers[0]", "def optimizer(self):\n return self._optimizer.__class__.__name__", "def get_optimizer(optimizer, **kwargs):\n return optimizers[optimizer](**kwargs)", "def make_optimizer(self):\n raise NotImplementedError", "def get_optimizer(self, optimizer=None):\n from tframe.optimizers.optimizer import Optimizer\n\n if optimizer is None:\n if any([self.optimizer is None, self.learning_rate is None]):\n tfr.console.show_status('Optimizer not defined.', '!!')\n return None\n\n optimizer = self.optimizer\n tfr.console.show_status(\n 'Optimizer defined in trainer hub is used.', '++')\n\n return Optimizer.get_optimizer(optimizer)", "def get_optimizer(optimizer_name='Adam'):\n optimizer_name = optimizer_name.capitalize()\n return getattr(torch.optim, optimizer_name)", "def get_optimizer(model: any) -> str:\n pass", "def test_optimizer(self):\n from torch.optim import Adagrad, Adam\n\n from class_resolver.contrib.torch import optimizer_resolver\n\n self.assertEqual(Adagrad, optimizer_resolver.lookup(\"adagrad\"))\n self.assertEqual(Adam, optimizer_resolver.lookup(\"adam\"))\n self.assertEqual(Adam, optimizer_resolver.lookup(None))", "def __init_optimization(self, optimizer):\n self.build_loss()\n self.optimizer = optimizer\n self.train_op = self.optimizer.minimize(\n self.loss, name='optimization')", "def _optimize(self):\n return self", "def _init_optimizer(self, params=None):\n pass", "def setOptimizer(engine,optimizer):\n engine.optimizer = optimizer", "def parallel_optimizer(self):\n return self.param_info.parallel_optimizer", "def get_optimizer_by_id(self, optimizer_id):\r\n optimizer_handle = OptimizerHandle(Id=optimizer_id)\r\n optimizer_info = self._optimizer_monitoring_stub.GetOptimizerInfo(optimizer_handle)\r\n optimizer_proxy = self._optimizer_factory.connect_to_existing_remote_optimizer(optimizer_info)\r\n return optimizer_proxy", "def get_execution_plan(self):", "def test_build_optimizer_adam(self):\n opt_config = {\"name\": \"Adam\", \"learning_rate\": 1.0e-5}\n opt_get = optimizer.build_optimizer(opt_config)\n assert isinstance(opt_get, tf.keras.optimizers.Adam)", "def get_optimizer(hparams):\n if hparams['optimizer'] == \"Adam\":\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=hparams[\"learning_rate\"])\n elif hparams['optimizer'] == \"Adadelta\":\n assert(hparams[\"learning_rate\"] == 1.0), \"Set learning_rate to 1.0\"\n optimizer = tf.keras.optimizers.Adadelta(\n learning_rate=hparams['learning_rate'])\n else:\n raise ValueError(\"Supported Optimizer is either Adam or Adagrad\")\n \n if hparams[\"mixed_precision\"]:\n return tf.train.experimental.enable_mixed_precision_graph_rewrite(\n optimizer, \"dynamic\")\n else:\n return optimizer", "def get_optimizer(optimizer_config, learning_rate):\n if optimizer_config.TYPE == 'AdamOptimizer':\n opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n elif optimizer_config.TYPE == 'MomentumOptimizer':\n opt = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)\n else:\n raise ValueError('Optimizer %s not supported. 
Only support the following'\n 'optimizers: AdamOptimizer, MomentumOptimizer .')\n return opt", "def make_optimizer(self):\n # parameters = [self.encoder.parameters(), self.decoder.parameters(), self.spec_enc.parameters()]\n if self.flags.optim == 'Adam':\n op = torch.optim.Adam(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\n elif self.flags.optim == 'RMSprop':\n op = torch.optim.RMSprop(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\n elif self.flags.optim == 'SGD':\n op = torch.optim.SGD(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\n else:\n raise Exception(\"Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben\")\n return op" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the output encoder decoder.
def output_encoder_decoder(self) -> Optional[LabelEncoderDecoder]: return self._output_encoder_decoder
[ "def _create_decoder(self):\n params = self.params['decoder_params']\n return self.params['decoder'](params=params, mode=self.mode, model=self)", "def getRightEncoder(self) -> wpilib.Encoder:\n return self.right_encoder", "def _get_conv_decoder(self, decoder_phase):\n if decoder_phase == \"past\":\n conv_decoder = self.conv_decoder\n elif decoder_phase == \"future\":\n conv_decoder = self.conv_decoder_future\n else:\n raise ValueError(\"Unknown decoder.\")\n\n return conv_decoder", "def GetDecoder(cls, encoding_method):\n encoding_method = encoding_method.lower()\n decoder = cls._decoders.get(encoding_method, None)\n if not decoder:\n return None\n\n return decoder()", "def with_outputenc(self, outputenc):\n return OutputEncodingProxy(self, outputenc)", "def get_decoder(content_type):\n return decoders.get(content_type, loads)", "def getRaw(self):\n return hal.getEncoderRaw(self.encoder)", "def decode(self, input, final=False):\n if (self._decoder is None or\n self._decoder._encoding\n != self.encoding(incoming=True)):\n try:\n self._decoder = codecs.getincrementaldecoder(\n self.encoding(incoming=True))(\n errors=self.encoding_errors)\n self._decoder._encoding = self.encoding(incoming=True)\n except LookupError as err:\n assert (self.encoding(incoming=True)\n != self._default_encoding), err\n self.log.info(err)\n self._env_update({'CHARSET': self._default_encoding})\n self._decoder = codecs.getincrementaldecoder(\n self.encoding(incoming=True))(\n errors=self.encoding_errors)\n self._decoder._encoding = self.encoding(incoming=True)\n # interupt client session to notify change of encoding,\n self._display_charset_err(err)\n self.display_prompt()\n return self._decoder.decode(input, final)", "def build_decoder_output(self, X):\n decoder_outlayer = layers.Conv2DTranspose(filters=1,\n kernel_size=self.num_kernel[0],\n strides=self.num_strides[0],\n padding=\"same\",\n name=\"Decoder_output\")\n X = decoder_outlayer(X)\n #decoder_final = layers.Activation(\"sigmoid\", name=\"Decoder_activation\")\n return X # decoder_final(X)", "def get_decoder(cls, config: DecoderConfig, prefix: str) -> 'Decoder':\n config_type = type(config)\n if config_type not in cls.__registry:\n raise ValueError('Unsupported decoder configuration %s' % config_type.__name__)\n decoder_cls, suffix = cls.__registry[config_type]\n # TODO: move final suffix/prefix construction logic into config builder\n return decoder_cls(config=config, prefix=prefix + suffix)", "def get_resnet_decoder(output_shape: Tuple[int, int, int], # output image shape [B, H, W]\n decoder_base_channels: int = 1024, # For conv models\n decoder_channel_multiplier: int = 0.5, # Decoding shrinks channels\n dense_normalization: str = 'none',\n conv_normalization: str = 'none',\n layer_modifier: str = 'spectralnorm', # 'gated', 'spectralnorm' or 'coordconv'\n norm_first_layer: bool = True,\n norm_last_layer: bool = False,\n activation: str = 'relu',\n name: str = 'decoder',\n **unused_kwargs):\n resnet_size_dict = {\n 1024: Resnet1024Decoder,\n 512: Resnet512Decoder,\n 256: Resnet256Decoder,\n 128: Resnet128Decoder,\n 64: Resnet64Decoder,\n 32: Resnet32Decoder,\n 28: Resnet28Decoder,\n }\n image_size = output_shape[-1]\n\n # Mega-dict that curried the appropriate decoder.\n # The returned decoder still needs the CTOR, eg: dec(input_size)\n net_map = {\n 'resnet': {\n # True for gated, False for non-gated\n 'gated': functools.partial(resnet_size_dict[image_size],\n output_chans=output_shape[0],\n base_channels=decoder_base_channels,\n 
channel_multiplier=decoder_channel_multiplier,\n conv_normalization_str=conv_normalization,\n dense_normalization_str=dense_normalization,\n norm_first_layer=norm_first_layer,\n norm_last_layer=norm_last_layer,\n activation_str=activation,\n dense_layer_fn=GatedDense,\n conv_layer_fn=functools.partial(GatedConv2d, layer_type=nn.Conv2d)),\n 'spectralnorm': functools.partial(resnet_size_dict[image_size],\n output_chans=output_shape[0],\n base_channels=decoder_base_channels,\n channel_multiplier=decoder_channel_multiplier,\n conv_normalization_str=conv_normalization,\n dense_normalization_str=dense_normalization,\n norm_first_layer=norm_first_layer,\n norm_last_layer=norm_last_layer,\n activation_str=activation,\n dense_layer_fn=SNLinear,\n conv_layer_fn=SNConv2d),\n 'sine': functools.partial(resnet_size_dict[image_size],\n output_chans=output_shape[0],\n base_channels=decoder_base_channels,\n channel_multiplier=decoder_channel_multiplier,\n conv_normalization_str=conv_normalization,\n dense_normalization_str=dense_normalization,\n norm_first_layer=norm_first_layer,\n norm_last_layer=norm_last_layer,\n activation_str=activation,\n dense_layer_fn=SineLinear,\n conv_layer_fn=SineConv2d),\n 'coordconv': functools.partial(resnet_size_dict[image_size],\n output_chans=output_shape[0],\n base_channels=decoder_base_channels,\n channel_multiplier=decoder_channel_multiplier,\n conv_normalization_str=conv_normalization,\n dense_normalization_str=dense_normalization,\n norm_first_layer=norm_first_layer,\n norm_last_layer=norm_last_layer,\n activation_str=activation,\n dense_layer_fn=nn.Linear, # dense doesn't need coords\n conv_layer_fn=CoordConv),\n 'none': functools.partial(resnet_size_dict[image_size],\n output_chans=output_shape[0],\n base_channels=decoder_base_channels,\n channel_multiplier=decoder_channel_multiplier,\n conv_normalization_str=conv_normalization,\n dense_normalization_str=dense_normalization,\n norm_first_layer=norm_first_layer,\n norm_last_layer=norm_last_layer,\n activation_str=activation,\n dense_layer_fn=nn.Linear,\n conv_layer_fn=nn.Conv2d)\n },\n }\n fn = net_map[\"resnet\"][layer_modifier]\n print(\"using resnet with {} modifier for {}\".format(layer_modifier, name))\n return fn", "def build_decoder(self):\n\n dec_input = self.build_decoder_input()\n dec_dense = self.build_decoder_dense(dec_input)\n dec_reshape = self.build_decoder_reshape(dec_dense)\n dec_conv = self.build_decoder_convs(dec_reshape)\n dec_output = self.build_decoder_output(dec_conv)\n\n self.decoder = Model(dec_input, dec_output,\n name='Decoder')", "def get_decoder(encoding, *args, **kwargs):\n def _get_decoder_class():\n if encoding == AMF0:\n try:\n from cpyamf import amf0\n except ImportError:\n from pyamf import amf0\n\n return amf0.Decoder\n elif encoding == AMF3:\n try:\n from cpyamf import amf3\n except ImportError:\n from pyamf import amf3\n\n return amf3.Decoder\n\n raise ValueError(\"Unknown encoding %r\" % (encoding,))\n\n return _get_decoder_class()(*args, **kwargs)", "def _get_seq2seq_encoder(self, name=\"default\", fallback_behavior: str=None):\n if fallback_behavior is None:\n fallback_behavior = self.seq2seq_encoder_fallback_behavior\n if name in self.seq2seq_encoder_layers:\n # If we've already created this encoder, we can just return it.\n return self.seq2seq_encoder_layers[name]\n if name not in self.seq2seq_encoder_params:\n # If we haven't, we need to check that we _can_ create it, and decide _how_ to create\n # it.\n if fallback_behavior == \"crash\":\n raise ConfigurationError(\"You 
asked for a named seq2seq encoder (\" + name + \"), \"\n \"but did not provide parameters for that encoder\")\n elif fallback_behavior == \"use default encoder\":\n name = \"default\"\n params = deepcopy(self.seq2seq_encoder_params[name])\n elif fallback_behavior == \"use default params\":\n params = deepcopy(self.seq2seq_encoder_params[\"default\"])\n else:\n raise ConfigurationError(\"Unrecognized fallback behavior: \" + fallback_behavior)\n else:\n params = deepcopy(self.seq2seq_encoder_params[name])\n if name not in self.seq2seq_encoder_layers:\n # We need to check if we've already created this again, because in some cases we change\n # the name in the logic above.\n encoder_layer_name = name + \"_encoder\"\n new_encoder = self.__get_new_seq2seq_encoder(params, encoder_layer_name)\n self.seq2seq_encoder_layers[name] = new_encoder\n return self.seq2seq_encoder_layers[name]", "def get_encoder(encoding, *args, **kwargs):\n def _get_encoder_class():\n if encoding == AMF0:\n try:\n from cpyamf import amf0\n except ImportError:\n from pyamf import amf0\n\n return amf0.Encoder\n elif encoding == AMF3:\n try:\n from cpyamf import amf3\n except ImportError:\n from pyamf import amf3\n\n return amf3.Encoder\n\n raise ValueError(\"Unknown encoding %r\" % (encoding,))\n\n return _get_encoder_class()(*args, **kwargs)", "def encoder_inference(self, encoder_inputs, encoder_states):\n return tf.keras.models.Model(encoder_inputs, encoder_states)", "def get_mux_out_wire(self):\r\n return self.out.get_wire(0)", "def build_decoder_input(self):\n decoder_input_layer = layers.Input(\n shape=(self.latent_dim,),\n name=\"decoder_input\")\n\n return decoder_input_layer", "def decoder(self, decoder):\n\n self._decoder = decoder" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A context manager to instruct the network that it is entering training mode. While in training mode, the network enables regularization.
def training_mode(self) -> ContextManager: if self._verbose_logging: logger.debug(f"Entering training mode, enabling regularization") self._enabled_regularization = True yield self._enabled_regularization = False if self._verbose_logging: logger.debug(f"Leaving training mode, disabling regularization")
[ "def train(self) -> None:\r\n\r\n self.training = True", "def set_train(self):\n BaseModule.train_flag = True", "def set_train(self):\n # global setting in dygraph\n # NOTE(chenweihang): nn.Layer also can be used in static mode,\n # but _dygraph_tracer() can not be called in static mode\n if in_dygraph_mode():\n framework._dygraph_tracer().train_mode()\n # Layer-level setting\n self.training = True\n for layer in self.sublayers():\n layer.training = True", "def train(self):\n self.train_state = True", "def do_training():\n train_cls = Train()\n train_cls.run()", "def train(self):\n TM = TrainingMode()\n\n \"\"\"\n Training Arguments\n \"\"\"\n train_args = {'use_global_valid': False,\n 'use_custom_obj': False,\n 'show_importance': False,\n 'save_final_pred': True,\n 'save_final_pred_train': False,\n 'save_cv_pred': True,\n 'save_cv_pred_train': False,\n 'save_csv_log': True,\n 'loss_fuc': self.rmse,\n 'append_info': 'Yuanan Bike'}\n\n \"\"\"\n Cross Validation Arguments\n \"\"\"\n cv_args = {'n_cv': 10}\n\n \"\"\"\n Base Parameters\n \"\"\"\n base_parameters = self.get_base_params('dnn')\n\n \"\"\"\n Auto Train with Logs of Boost Round\n \"\"\"\n pg_list = [\n [['learning_rate', [0.05]]]\n ]\n train_seed_list = [68]\n cv_seed_list = [95]\n TM.auto_train_boost_round('dnn', num_boost_round=10, n_epoch=1, full_grid_search=True,\n train_seed_list=train_seed_list, cv_seed_list=cv_seed_list,\n base_parameters=base_parameters, parameter_grid_list=pg_list,\n save_final_pred=True, train_args=train_args, cv_args=cv_args)\n\n \"\"\"Train Different Rounds\"\"\"\n # num_boost_round_list = [83, 85, 87]\n # self.train_diff_round('xgb', TM, num_boost_round_list=num_boost_round_list, n_epoch=1, full_grid_search=True,\n # train_seed_list=train_seed_list, cv_seed_list=cv_seed_list,\n # base_parameters=base_parameters, parameter_grid_list=pg_list, save_final_pred=True,\n # train_args=train_args, cv_args=cv_args)", "def train(self):\n mse = train_and_score(self.network)\n self.accuracy = 1/ mse\n self.mse = mse", "def set_eval(self):\n # global setting in dygraph\n # NOTE(chenweihang): nn.Layer also can be used in static mode,\n # but _dygraph_tracer() can not be called in static mode\n if in_dygraph_mode():\n framework._dygraph_tracer().eval_mode()\n # Layer-level setting\n self.training = False\n for layer in self.sublayers():\n layer.training = False", "def training_pool(self):", "def set_training_mode(self, mode: bool) -> None:\n self.actor.set_training_mode(mode)\n self.critic.set_training_mode(mode)\n self.training = mode", "def _prepare_for_train(self, lGraph, lGraph_vld):\n traceln('ECN Training ', self.sName)\n traceln(\"\\t- computing features on training set\")\n traceln(\"\\t\\t #nodes=%d #edges=%d \" % Graph.getNodeEdgeTotalNumber(lGraph))\n chronoOn()\n \n lX, lY = self.get_lX_lY(lGraph)\n self._computeModelCaracteristics(lX) # we discover here dynamically the number of features of nodes and edges\n # self._tNF_EF contains the number of node features and edge features\n traceln(\"\\t\\t %s\" % self._getNbFeatureAsText())\n traceln(\"\\t [%.1fs] done\\n\" % chronoOff())\n nb_class = len(lGraph[0].getLabelNameList()) #Is it better to do Y.shape ?\n traceln(\"\\t- %d classes\" % nb_class)\n \n traceln(\"\\t- retrieving or creating model...\")\n\n self.model_config['node_dim'] = self._tNF_EF[0]\n self.model_config['edge_dim'] = self._tNF_EF[1]\n self.model_config['nb_class'] = nb_class\n\n if False: \n with open ('linear_reg', 'wb') as save_file:\n pickle.dump((lX,lY), save_file, 
pickle.HIGHEST_PROTOCOL)\n \n #This converts the lX,lY in the format necessary for GCN Models\n gcn_graph = self.convert_lX_lY_to_GCNDataset(lX,lY,training=True)\n\n #Save the label Binarizer for prediction usage\n fd_lb =open(self.getlabelBinarizerFilename(),'wb')\n pickle.dump(self.labelBinarizer,fd_lb)\n fd_lb.close()\n\n #TODO Save the validation set too to reproduce experiments\n random.shuffle(gcn_graph)\n \n if lGraph_vld:\n gcn_graph_train = gcn_graph\n lX_vld, lY_vld = self.get_lX_lY(lGraph_vld)\n gcn_graph_val = self.convert_lX_lY_to_GCNDataset(lX_vld, lY_vld, test=True)\n del lX_vld, lY_vld\n else:\n #Get a validation set from the training set\n split_idx = max(1, int(self.model_config['ratio_train_val'] * len(gcn_graph)))\n traceln(\" - using %d train graphs as validation graphs\" % split_idx)\n gcn_graph_train = []\n gcn_graph_val = []\n gcn_graph_val.extend(gcn_graph[:split_idx])\n gcn_graph_train.extend(gcn_graph[split_idx:])\n traceln(\"%d training graphs -- %d validation graphs\"%(len(gcn_graph_train), len(gcn_graph_val)))\n self._cleanTmpCheckpointFiles()\n\n return gcn_graph_train, gcn_graph_val", "def _check_network_mode(self, network, is_train):\n if self._backbone_is_train != is_train:\n network.set_train(is_train)\n self._backbone_is_train = is_train\n return network", "def _train(self):\n self._model.learn(total_timesteps=self._num_timesteps)", "def before_epoch(self):\n self.model.train()", "def train(self, mode: bool) -> 'NoTrainInceptionV3':\n return super().train(False)", "def enable_base_model_training(self):\n for layer in self.model.layers[:self.n_base_model_layers]:\n layer.trainable = True\n return None", "def train_model(self):\n retrieved_planner_type, retrieved_path, final_planner_type, final_path, num_paths, num_NN_paths = self.retrieved_and_final_path\n # record stats\n self.total_num_paths.append(num_paths)\n self.total_num_paths_NN.append(num_NN_paths)\n self.total_new_nodes.append(self.total_new_node)\n self.total_new_nodes_NN.append(self.total_new_node_NN)\n self.plan_times.append(self.plan_time)\n if retrieved_planner_type is None:\n self.plan_mode.append(0) # 0 for pfs, 1 for rr\n else:\n self.plan_mode.append(1)\n # depending on retrieved_planner_type and final_planner, train the network\n if (retrieved_planner_type is None and final_planner_type == PlannerType.NEURAL) \\\n or (retrieved_planner_type == PlannerType.NEURAL and final_planner_type == PlannerType.NEURAL):\n self.train_sample.append(0) # no path trained\n to_save = {}\n to_save['loss'] = self.losses\n to_save['total_num_paths'] = self.total_num_paths\n to_save['total_num_paths_NN'] = self.total_num_paths_NN\n to_save['plan_time'] = self.plan_times\n to_save['plan_mode'] = self.plan_mode\n to_save['total_new_node'] = self.total_new_nodes\n to_save['total_new_node_NN'] = self.total_new_nodes_NN\n to_save['train_sample'] = self.train_sample\n utility.save_info(to_save, self.model_path+'lightning_res.pkl')\n return\n rospy.loginfo('Lightning: Training Neural Network...')\n # receive obstacle information\n obs = rospy.wait_for_message('obstacles/obs', Float64Array)\n obs = obs.values\n obs_i = rospy.wait_for_message('obstacles/obs_i', Int32)\n obs_i = obs_i.data\n # if it is a new obs, add to the obs list\n if len(self.obs_i) == 0 or obs_i != self.obs_i[-1]:\n self.obs_i.append(obs_i)\n self.obs.append(obs)\n\n obs = torch.FloatTensor(obs)\n\n dataset, targets, env_indices = plan_general.transformToTrain(final_path, len(final_path), obs, obs_i)\n self.data_all += list(zip(dataset, 
targets, env_indices))\n self.num_trained_samples += len(targets)\n added_data = list(zip(dataset,targets,env_indices))\n bi = np.concatenate( (obs.numpy().reshape(1,-1).repeat(len(dataset),axis=0), dataset), axis=1).astype(np.float32)\n targets = np.array(targets)\n bi = self.normalize_func(bi)\n targets = self.normalize_func(targets)\n bi = torch.FloatTensor(bi)\n bt = torch.FloatTensor(targets)\n self.model.zero_grad()\n bi=utility.to_var(bi, self.device)\n bt=utility.to_var(bt, self.device)\n self.model.observe(bi, 0, bt)\n self.num_path_trained += 1\n # record the number of samples trained\n train_sample = len(dataset)\n self.train_sample.append(train_sample)\n # rehersal\n if self.num_path_trained % self.freq_rehersal == 0 and len(self.data_all) > self.batch_rehersal:\n rospy.loginfo('Lightning: Rehersal...')\n print('rehersal...')\n sample = random.sample(self.data_all, self.batch_rehersal)\n dataset, targets, env_indices = list(zip(*sample))\n dataset, targets, env_indices = list(dataset), list(targets), list(env_indices)\n obs = np.array(self.obs)\n bi = np.concatenate( (obs[env_indices], dataset), axis=1).astype(np.float32)\n bt = targets\n bi = torch.FloatTensor(bi)\n bt = torch.FloatTensor(bt)\n bi, bt = self.normalize_func(bi), self.normalize_func(bt)\n self.model.zero_grad()\n bi=utility.to_var(bi, self.device)\n bt=utility.to_var(bt, self.device)\n self.model.observe(bi, 0, bt, False) # train but don't remember\n # obtain the loss after training:\n loss = self.model.loss(self.model.forward(bi), bt)\n loss = loss.data.cpu()\n print(\"loss: %f\" % (loss))\n print('planner type: %d' % (final_planner_type))\n self.losses.append(loss.data.cpu().item())\n\n if self.num_path_trained % self.freq_save == 0:\n # save loss and planner type\n to_save = {}\n to_save['loss'] = self.losses\n to_save['total_num_paths'] = self.total_num_paths\n to_save['total_num_paths_NN'] = self.total_num_paths_NN\n to_save['plan_time'] = self.plan_times\n to_save['plan_mode'] = self.plan_mode\n to_save['total_new_node'] = self.total_new_nodes\n to_save['total_new_node_NN'] = self.total_new_nodes_NN\n to_save['train_sample'] = self.train_sample\n utility.save_info(to_save, self.model_path+'lightning_res.pkl')\n\n # write trained model to file\n utility.save_state(self.model, self.torch_seed, self.np_seed, self.py_seed, self.model_path+self.model_name)\n # notify planners to update the model\n msg = UInt8(0)\n rospy.loginfo('Lightning: Notify planner to update network...')\n if self.use_pfs:\n self._notify_update('pfs')\n if self.use_rr:\n self._notify_update('rr')", "def start_validation_mode(self, session):\n if self.do_infinite_training:\n LOGGER.error('Dataset is currently in \"infinite training\" mode. Only the training set can be accessed.')\n raise RuntimeError('Invalid training mode specified.')\n self.training_mode = TrainingMode.VALIDATION\n self.steps_in_current_mode = 0\n self.initialize_iterator(session)", "def train(self) -> None:\n torch.autograd.set_grad_enabled(True)\n self.model.train()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method preprocesses raw tweets to strip them of unnecessary info and applies some aggregation operations
def preprocess_raw_tweets(self, raw_tweets): def wait_for_awhile(): wait = 10 time.sleep(wait) twts = list() for user_data in raw_tweets: try: recent_tweets = [twt for twt in user_data['tweets']] # Aggregate the tweets to create the document text = ' '.join([tw['text'] for tw in recent_tweets]) item = { 'raw_text': text, 'user_id': user_data['id'], 'len_text': len(text), 'n_tweets': len(recent_tweets), 'screen_name': user_data['screen_name'], 'lang': user_data['lang'], 'parent': self.account_name, } # do we already have this account in the db? # twt = db.tweets.find({'user_id': id, 'parent': screen_name}) # if we do, update the data else create a new entry # if twt.count() == 0: # store document print("New account:", user_data['screen_name'], user_data['id'], len(recent_tweets), user_data['lang']) twts.append(item) # else: # # update the existing account record # res = db.tweets.replace_one( # {'user_id': id, 'parent': screen_name}, item # ) # # result of the update # if res.matched_count == 0: # print("no match for id: ", id) # elif res.modified_count == 0: # print("no modification for id: ", id) # else: # print("replaced ", timeline[0]['user']['screen_name'], # id, len(recent_tweets), timeline[0]['lang']) except TwythonRateLimitError as e: wait_for_awhile() except TwythonAuthError as e: print(e) except: # Keep track of the ID that errored out print(" FAILED:", id) print("Unexpected error:", sys.exc_info()[0]) pass return twts
[ "def cleaner(tweet):\n\n cleaned_tweet = []\n cleaned_text = process(tweet.text)\n\n cleaned_tweet.append(tweet.id)\n cleaned_tweet.append(tweet.date)\n cleaned_tweet.append(tweet.text)\n cleaned_tweet.append(cleaned_text)\n cleaned_tweet.append(tweet.retweets)\n\n\n # Use hashtags and add them to the list\n hashtags = \"\".join([hashtag_item for hashtag_item in tweet.hashtags])\n hashtags = hashtags if hashtags != '' else '<UNK>'\n cleaned_tweet.append(hashtags.strip())\n\n # Use mentions .Will be needed later\n mentions = \"\".join([mention for mention in tweet.mentions])\n mentions = mentions if mentions != '' else '<UNK>'\n cleaned_tweet.append(mentions)\n\n\n cleaned_tweet.append(tweet.username)\n\n return cleaned_tweet", "def process_tweets(tweets, classify_tweet_type=True, extract_tweet_entities=True):\n\n for line in tweets:\n\n if classify_tweet_type is True:\n # classify tweet as retweet/mention/tweet\n if \"retweeted_status\" in line:\n line[\"TWEET_TYPE\"] = \"retweet\"\n elif len(line[\"entities\"][\"user_mentions\"]) > 0:\n line[\"TWEET_TYPE\"] = \"mention\"\n else:\n line[\"TWEET_TYPE\"] = \"tweet\"\n\n if extract_tweet_entities is True:\n # check if line contains a menetion, and if so, extract all users mentione\n tweeties = []\n line[\"TWEETIES\"] = \"\"\n if len(line[\"entities\"][\"user_mentions\"]) > 0:\n tweeties.extend(line[\"entities\"][\"user_mentions\"])\n line[\"TWEETIES\"] = \" \".join([user[\"screen_name\"] for user in tweeties])\n\n # check if line contains a hashtag, and if so, extact all hashtags\n hashtags = []\n line[\"HASHTAGS\"] = \"\"\n if len(line[\"entities\"][\"hashtags\"]) > 0:\n hashtags.extend(line[\"entities\"][\"hashtags\"])\n line[\"HASHTAGS\"] = \" \".join([tag[\"text\"] for tag in hashtags])\n\n # check if line contains a URL, and if so, extract all expanded URLS\n expanded_urls = []\n line[\"EXPANDED_URLS\"] = \"\"\n if len(line[\"entities\"][\"urls\"]) > 0:\n expanded_urls.extend(line[\"entities\"][\"urls\"])\n line[\"EXPANDED_URLS\"] = \" \".join(\n [url[\"expanded_url\"] for url in expanded_urls]\n )\n\n # check if line has lat/long, and if so, extract lat/long\n line[\"LATITUDE\"] = \"\"\n line[\"LONGITUDE\"] = \"\"\n if line[\"geo\"] is not None:\n line[\"LATITUDE\"] = line[\"geo\"][\"coordinates\"][0]\n line[\"LONGITUDE\"] = line[\"geo\"][\"coordinates\"][1]\n\n return tweets", "def _filter_tweet(self, tweet):\n if \"extended_tweet\" in tweet.keys():\n tweet[\"text\"] = tweet[\"extended_tweet\"][\"full_text\"]\n elif \"retweeted_status\" in tweet.keys() and \"full_text\" in tweet[\"retweeted_status\"].keys():\n tweet[\"text\"] = \"RT \" + tweet[\"retweeted_status\"][\"full_text\"]\n\n filtered_data = self._extract(tweet, TwitterFetcher.tweet_fields)\n filtered_data[\"user\"] = self._extract(tweet[\"user\"], TwitterFetcher.user_fields)\n filtered_data[\"CC\"] = self._get_location(tweet[\"user\"][\"location\"])\n filtered_data[\"social\"] = {\"topic\": self.topic, \"topic_id\": self.topic_id, \"user_id\": self.user_id}\n filtered_data[\"source\"] = self._get_source(tweet[\"source\"])\n self.redis.publish(f'twitter:stream', json.dumps(filtered_data))\n self._initialize_results(filtered_data)\n return filtered_data", "def clean_tweets(\n tweets, clean_tweet_text=True, clean_user_description=True, convert_date=True\n):\n\n for line in tweets:\n\n if clean_tweet_text is True:\n # remove tabs in tweet text\n if \"\\t\" in line[\"text\"]:\n line[\"text\"] = line[\"text\"].replace(\"\\t\", \"\")\n\n # remove new line in tweet text\n if \"\\n\" 
in line[\"text\"]:\n line[\"text\"] = line[\"text\"].replace(\"\\n\", \"\")\n\n # remove /r in tweet text\n if \"\\n\" in line[\"text\"]:\n line[\"text\"] = line[\"text\"].replace(\"\\n\", \"\")\n\n if clean_user_description is True:\n # check if each a user description is not blank\n if line[\"user\"][\"description\"] != None:\n\n # remove tabs in user description\n if \"\\t\" in line[\"user\"][\"description\"]:\n line[\"user\"][\"description\"] = line[\"user\"][\"description\"].replace(\n \"\\t\", \"\"\n )\n\n # remove /r in user description\n if \"\\n\" in line[\"user\"][\"description\"]:\n line[\"user\"][\"description\"] = line[\"user\"][\"description\"].replace(\n \"\\n\", \"\"\n )\n\n # remove new line in user description\n if \"\\r\" in line[\"user\"][\"description\"]:\n line[\"user\"][\"description\"] = line[\"user\"][\"description\"].replace(\n \"\\r\", \"\"\n )\n\n if convert_date is True:\n # format date/time\n line[\"CREATED_AT\"] = time.strftime(\n \"%Y-%m-%d %H:%M:%S\",\n time.strptime(line[\"created_at\"], \"%a %b %d %H:%M:%S +0000 %Y\"),\n )\n dt = line[\"CREATED_AT\"].split(\" \")\n d = dt[0].split(\"-\")\n t = dt[1].split(\":\")\n line[\"CREATED_AT_DATE_TIME\"] = datetime.datetime(\n int(d[0]), int(d[1]), int(d[2]), int(t[0]), int(t[1]), int(t[2])\n )\n\n return tweets", "def analyse_tweet(self,tweet):\n flag=False\n try:\n #extract date\n dat=date_extract(tweet)\n #find type of tweet\n tw_typ=tweet_type(tweet)\n #extract text of tweet\n text=tweet_text(tweet,tw_typ)\n\n #find the keys of tweet:\n kys=self.whichkeys(text)\n\n \n #remove blank tweets and tweets that have lost their date:\n flag= (dat!=None) and (text!=None)\n except:\n return None\n #for a tweet satisfying the above\n if flag:\n #new date considered\n if dat not in self.dates.keys():\n self.new_date(dat)\n #update information\n for key in kys:\n try:\n #sentiment of tweet\n s=self.sent(text)\n self.update_stats(dat,key,s)\n except:\n pass", "def preprocess_twitter(post):\n # TODO\n return post", "def analyze_tweet(tweet, results):\n\n ######################################\n # fields that are relevant for user-level and tweet-level analysis\n # count the number of valid Tweets here\n # if it doesn't have at least a body and an actor, it's not a tweet\n try: \n body = tweet[\"body\"]\n userid = tweet[\"actor\"][\"id\"].split(\":\")[-1]\n results[\"tweet_count\"] += 1\n except (ValueError, KeyError):\n if \"non-tweet_lines\" in results:\n results[\"non-tweet_lines\"] += 1\n return\n\n # count the number of tweets from each user\n if \"tweets_per_user\" in results:\n results[\"tweets_per_user\"][tweet[\"actor\"][\"id\"][15:]] += 1\n \n #######################################\n # fields that are relevant for the tweet-level analysis\n # ------------------> term counts\n # Tweet body term count\n if \"body_term_count\" in results:\n results[\"body_term_count\"].add(tweet[\"body\"])\n\n # count the occurences of different hashtags\n if \"hashtags\" in results:\n if \"hashtags\" in tweet[\"twitter_entities\"]:\n for h in tweet[\"twitter_entities\"][\"hashtags\"]:\n results[\"hashtags\"][h[\"text\"].lower()] += 1\n \n try:\n # count the occurences of different top-level domains\n if (\"urls\" in results) and (\"urls\" in tweet[\"gnip\"]):\n for url in tweet[\"gnip\"][\"urls\"]:\n try:\n results[\"urls\"][url[\"expanded_url\"].split(\"/\")[2]] += 1\n except (KeyError,IndexError,AttributeError):\n pass\n # and the number of links total\n if (\"number_of_links\" in results) and (\"urls\" in tweet[\"gnip\"]):\n 
results[\"number_of_links\"] += len(tweet[\"gnip\"][\"urls\"])\n except KeyError:\n pass\n \n # -----------> timelines\n # make a timeline of UTC day of Tweets posted\n if \"utc_timeline\" in results:\n date = tweet[\"postedTime\"][0:10]\n results[\"utc_timeline\"][date] += 1\n\n # make a timeline in normalized local time (poster's time) of all of the Tweets\n if \"local_timeline\" in results:\n utcOffset = tweet[\"actor\"][\"utcOffset\"]\n if utcOffset is not None:\n posted = tweet[\"postedTime\"]\n hour_and_minute = (datetime.datetime.strptime(posted[0:16], \"%Y-%m-%dT%H:%M\") + \n datetime.timedelta(seconds = int(utcOffset))).time().strftime(\"%H:%M\")\n results[\"local_timeline\"][hour_and_minute] += 1\n \n # ------------> mention results\n # which users are @mentioned in the Tweet\n if \"at_mentions\" in results:\n for u in tweet[\"twitter_entities\"][\"user_mentions\"]:\n # update the mentions with weight + 1 and \n # list all of the screennames (in case a name changes)\n if u[\"id_str\"] is not None:\n results[\"at_mentions\"][u[\"id_str\"]][\"weight\"] += 1 \n results[\"at_mentions\"][u[\"id_str\"]][\"screennames\"].update([u[\"screen_name\"].lower()])\n \n # count the number of times each user gets replies\n if (\"in_reply_to\" in results) and (\"inReplyTo\" in tweet):\n results[\"in_reply_to\"][tweet[\"inReplyTo\"][\"link\"].split(\"/\")[3].lower()] += 1\n\n # --------------> RTs and quote Tweet\n # count share actions (RTs and quote-Tweets)\n # don't count self-quotes or self-RTs, because that's allowed now\n if ((\"quote_of_user\" in results) or (\"RT_of_user\" in results)) and (tweet[\"verb\"] == \"share\"):\n # if it's a quote tweet\n if (\"quote_of_user\" in results) and (\"twitter_quoted_status\" in tweet[\"object\"]):\n quoted_id = tweet[\"object\"][\"twitter_quoted_status\"][\"actor\"][\"id\"][15:]\n quoted_name = tweet[\"object\"][\"twitter_quoted_status\"][\"actor\"][\"preferredUsername\"]\n if quoted_id != tweet[\"actor\"][\"id\"]:\n results[\"quote_of_user\"][quoted_id][\"weight\"] += 1 \n results[\"quote_of_user\"][quoted_id][\"screennames\"].update([quoted_name])\n # if it's a RT\n elif (\"RT_of_user\" in results):\n rt_of_name = tweet[\"object\"][\"actor\"][\"preferredUsername\"].lower()\n rt_of_id = tweet[\"object\"][\"actor\"][\"id\"][15:]\n if rt_of_id != tweet[\"actor\"][\"id\"]:\n results[\"RT_of_user\"][rt_of_id][\"weight\"] += 1 \n results[\"RT_of_user\"][rt_of_id][\"screennames\"].update([rt_of_name])\n\n # Tweet expended url content term count\n if \"url_content\" in results:\n try:\n urls = tweet[\"gnip\"][\"urls\"]\n except KeyError:\n urls = []\n url_content = \"\"\n for url in urls:\n try:\n expanded_url_title = url[\"expanded_url_title\"]\n if expanded_url_title is None:\n expanded_url_title = \"\"\n except KeyError:\n expanded_url_title = \"\"\n try:\n expanded_url_description = url[\"expanded_url_description\"]\n if expanded_url_description is None:\n expanded_url_description = \"\"\n except KeyError:\n expanded_url_description = \"\"\n url_content = url_content + \" \" + expanded_url_title + \" \" + expanded_url_description\n results[\"url_content\"].add(url_content)\n \n ############################################\n # actor-property qualities\n # ------------> bio terms\n if \"bio_term_count\" in results:\n if tweet[\"actor\"][\"id\"][:15] not in results[\"tweets_per_user\"]:\n try:\n if tweet[\"actor\"][\"summary\"] is not None:\n results[\"bio_term_count\"].add(tweet[\"actor\"][\"summary\"])\n except KeyError:\n pass\n \n # ---------> profile 
locations\n if \"profile_locations_regions\" in results:\n # if possible, get the user's address\n try:\n address = tweet[\"gnip\"][\"profileLocations\"][0][\"address\"]\n country_key = address.get(\"country\", \"no country available\")\n region_key = address.get(\"region\", \"no region available\")\n except KeyError:\n country_key = \"no country available\"\n region_key = \"no region available\"\n results[\"profile_locations_regions\"][country_key + \" , \" + region_key] += 1", "def preprocess_data(self):\n # Read the Input data \n self.read_data()\n\n # Initial preprocessing\n self.tweets = map(lambda x: x.split(\"\\r\\n\"), self.tweets) # Splitting the tweets into individual tweets\n self.tweets = self.tweets[0][1:]\n self.tweets = map(lambda x: x.split(\";\")[2], self.tweets) # Splitting each tweet with colon and extract the tweet message\n \n # Removing punctuation \n exclude = set(string.punctuation) # Make a set of punctuation to remove it from tweet\n self.tweets = map(lambda x:''.join(ch for ch in x if ch not in exclude).lower(), self.tweets) # Remove the punctuation from each tweet \n self.tweets = map(lambda x:' '.join(x.split()), self.tweets) # Remove extra spaces \n \n # Removing Stop Words \n stopWords = set(stopwords.words(\"english\")) # Removing Stop words \n self.tweets = map(lambda x:' '.join(word for word in x.split() if word not in stopWords), self.tweets)\n\n # Applying Stemming \n stemmer = PorterStemmer()\n self.tweets = map(lambda x: self.stem_message(x), self.tweets) # Convert each message to its base form after stemming \n\n print \"\\nIntermediate Data After Initial Preprocessing\"\n print self.tweets[:5]\n\n # Convert the tweets to bag of words representation\n self.countVectorizer = CountVectorizer(decode_error='ignore', \\\n stop_words='english', \\\n min_df=5, ngram_range=(1, 2)) # Extract uni-gram, bi-grams and tri-grams from the tweets \n self.bag_of_words = self.countVectorizer.fit_transform(self.tweets) # Convert each tweet into a vector \n\n print \"\\nTop 20 uni grams in the vocabulary_\"\n print sorted(dict((key,value) for key, value in self.countVectorizer.vocabulary_.iteritems() if key.count(' ')==0).items(), key=operator.itemgetter(1), reverse=True)[:20]\n\n print \"\\nTop 20 bi-grams in vocbulary \"\n print sorted(dict((key,value) for key, value in self.countVectorizer.vocabulary_.iteritems() if key.count(' ')==1).items(), key=operator.itemgetter(1), reverse=True)[:20]\n\n # Convert the Tweets to TF - IDF Representation for understanding importance of individual words \n self.tfidf_vectorizer = TfidfVectorizer(decode_error='ignore',\\\n stop_words='english', \\\n min_df=10, ngram_range=(1, 3)) # Convert the tweets message to tf idf representation \n self.tf_idf_scores = self.tfidf_vectorizer.fit_transform(self.tweets) \n \n # Convert the tf - idf to pandas dataframe \n print \"\\nTf - Idf for each tweet in the dataset\"\n self.df = pd.DataFrame(self.tf_idf_scores.toarray(), columns=self.tfidf_vectorizer.get_feature_names()) # Convert the td idf values for each tweet into a DataFrame\n self.df[\"Input Tweets\"] = self.tweets\n print self.df.sample(n=5) \n\n # Adding Proportion of positive words as a feature\n self.df['Positive Words'] = map(lambda x: self.positive_word(x), self.df['Input Tweets']) # Adding proportion of positive words as a feature\n print self.df.sample(n=5)\n \n # Adding Proportion of negative words as a feature \n self.df['Negative Words'] = map(lambda x: self.negative_word(x), self.df['Input Tweets']) # Adding proportion of 
negative words as a feature\n print self.df.sample(n=5)\n\n # Adding part of speech tag features to the dataframe \n pos_feat_ = map(lambda x: self.pos_features(x), self.df['Input Tweets']) # Adding number of parts of speech like Noun, Pronoun, Adjective as a feature\n self.df['Nouns'] = map(lambda x: x['NN'], pos_feat_)\n self.df['Verbs'] = map(lambda x: x['VBP'], pos_feat_)\n self.df['Pronoun'] = map(lambda x: x['PRP'], pos_feat_)\n self.df['Adverb'] = map(lambda x: x['RB'], pos_feat_)\n self.df['Adjective'] = map(lambda x: x['JJ'], pos_feat_)\n print self.df.sample(n=5)\n\n # Let's build a brown classifier \n self.build_brown_classifier()\n \n # Adding another features which classifies the tweet into four categories news, reviews, humor and government. \n self.df['Category'] = map(lambda x: self.classifying_with_brown(x), self.df['Input Tweets'])\n print self.df.sample(n=100)\n print self.df['Category'].value_counts()", "def preprocessing(company, lang):\n\n # get tweets\n tweets = np.array(execute(\"SELECT * FROM tweet WHERE searchterm = '@\" + company + \"'\"))\n tweets = tweets[:,2]\n\n # count retweets\n pattern = re.compile(\"^RT \")\n rt_tweets = [ tweet for tweet in tweets if pattern.match(tweet) ]\n\n # only lang tweets\n lang_tweets = []\n for tweet in rt_tweets:\n try:\n if detect(tweet) == lang:\n lang_tweets.append(tweet)\n except:\n continue\n\n # no urls\n url = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n no_url_tweets = [ re.sub(url, '', tweet) for tweet in lang_tweets ]\n\n # remove @ words\n no_arobas_tweets = [ re.sub(r\"([@?]\\w+)\\b\", '', text) for text in no_url_tweets ]\n\n # remove non-alphanumerical characters\n only_alphanum_tweets = [ re.sub(r'[^\\w]', ' ', text) for text in no_arobas_tweets ]\n\n # tokenizing\n tokenized_tweets = [ tweet.split(\" \") for tweet in only_alphanum_tweets ]\n\n # lower tweets and remove one char words\n lowered_tweets = [ [ word.lower() for word in text if len(word) > 1 ] for text in tokenized_tweets ]\n \n # remove stopwords\n stopwords = open(\"./stopwords\").read().split(\"\\n\")\n stopwords += [\"mon\", \"tue\", \"wed\", \"thu\", \"fri\", \"sat\", \"sun\", \n \"jan\", \"feb\", \"mar\", \"apr\", \"may\", \"jun\", \"jul\", \"aug\", \"sep\", \"oct\", \"nov\", \"dec\",\n \"amp\", \"rt\", \"https\"]\n filtered_tweets = [ [ word for word in text if word not in stopwords ] for text in lowered_tweets ]\n\n # isolate bigrams\n bigrams = mark_bigrams(filtered_tweets)\n\n # reduce to one list of words\n flat_text_bigrams = [ word for tweet in bigrams for word in tweet ]\n flat_text = [ word for tweet in filtered_tweets for word in tweet ]\n\n # get frequency dictionary\n frequ = collections.Counter(flat_text_bigrams).most_common()\n\n # return format\n # * name company\n # * number tweets\n # * nb retweet\n # * language chosen\n # * nb tweet in chosen language\n # * nb words\n # * nb unique words\n data = (company, len(tweets), len(rt_tweets), lang, len(lang_tweets), len(flat_text_bigrams), len(frequ), filtered_tweets)\n\n return data", "def clean(tweet):\n \n\n # Replace emoticons\n tweet = replace_emoticons(tweet)\n # Replace emojis\n tweet = re.sub(r'[^\\x00-\\x7F]+','', tweet)\n # Remove HTML special entities\n tweet = re.sub(r\"\\&\\w*;\",\" \", tweet)\n # Remove hyperlinks\n tweet = re.sub(r\"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))\\S+\",\\\n \"\", tweet)\n # Remove twitter usernames\n tweet = re.sub(r\"@[^\\s]+\",\"\", tweet)\n # Remove numbers\n 
tweet = re.sub(\"\\d+\", \"\", tweet)\n # Remove special characters\n tweet = re.sub(r\"[^\\w\\s]\", \" \", tweet)\n tweet = re.sub(r\"\\_\", \" \", tweet)\n # Remove 1 letter words\n tweet = re.sub(r\"\\W*\\b\\w\\b\", \"\", tweet)\n # Remove leftover whitespace\n if tweet:\n tweet = \" \".join(tweet.split())\n # Make lowercase\n tweet = tweet.lower()\n \n return(tweet)", "def standardize_tweet(self, tweet):\n original_tweet = tweet\n # Convert to lowercase\n tweet = tweet.lower()\n\n # remove rt\n if tweet[:2] == \"rt\":\n tweet = tweet[3:]\n\n # replace # with word\n tweet = re.sub(r'#([^\\s]+)', r'\\1', tweet)\n\n # replace @ with word\n tweet = re.sub(r'@[^\\s]+', 'USER', tweet)\n\n # remove url\n tweet = re.sub(r'((www\\.[^\\s]+)|(https?://[^\\s]+))', 'URL', tweet)\n\n # remove emoji\n try:\n # UCS-4\n emoji_pattern = re.compile(u'([\\U00002600-\\U000027BF])|([\\U0001f300-\\U0001f64F])|([\\U0001f680-\\U0001f6FF])')\n except re.error: # pragma: no cover\n # UCS-2\n emoji_pattern = re.compile(u'([\\u2600-\\u27BF])|([\\uD83C][\\uDF00-\\uDFFF])|([\\uD83D][\\uDC00-\\uDE4F])|([\\uD83D][\\uDE80-\\uDEFF])')\n tweet = emoji_pattern.sub('', tweet)\n\n # remove non-letter and space\n tweet = re.sub('[^a-zA-Z ]+', '', tweet)\n\n # remove remove repeated chars\n tweet = re.sub(r'(.)\\1+', r'\\1\\1', tweet)\n\n # remove extra whitespaces\n tweet = re.sub(r'[\\s]+', ' ', tweet.strip())\n\n # remove stop words\n words = nltk.word_tokenize(tweet)\n tweet = \" \".join([x for x in words if x not in STOPWORDS])\n\n # remove appeneded url or user\n tweet = tweet.replace('URL','')\n tweet = tweet.replace('USER','')\n\n return tweet", "def _enrich_tweet(self, tweet):\n enriched_data = dict()\n\n if self._message_is_limit_message(tweet):\n # Send DD the limit message value\n limit_count = tweet.get('limit').get(self.traptor_type, None)\n dd_monitoring.gauge('limit_message_count', limit_count, [])\n # Store the limit count in Redis\n self._increment_limit_message_counter(limit_count=limit_count)\n # Log limit message\n self.logger.info('limit_message_received', extra=logExtra({'limit_count': limit_count}))\n elif self._message_is_tweet(tweet):\n try:\n # Add the initial traptor fields\n tweet = self._create_traptor_obj(tweet)\n\n # Add the created_at_iso field\n tweet = self._add_iso_created_at(tweet)\n\n # Add the rule information\n enriched_data = self._find_rule_matches(tweet)\n\n # Update the matched rule stats\n if self.traptor_type != 'locations' \\\n and self.enable_stats_collection:\n self._increment_rule_counter(enriched_data)\n except Exception as e:\n theLogMsg = \"Failed to enrich tweet, skipping enhancement\"\n self.logger.error(theLogMsg, extra=logExtra(e, {\n \"tweet\": json.dumps(tweet)\n }))\n\n # an error occurred while processing the tweet. 
If some information was\n # set in the dictionary when calling _find_rule_matches, clear it out\n # because it is likely invalid...\n enriched_data = {}\n\n else:\n theLogMsg = \"Twitter message is not a tweet\"\n self.logger.info(theLogMsg, extra=logExtra({\n 'twitter_message': tweet\n }))\n\n dd_monitoring.increment('tweet_process_success')\n\n if enriched_data:\n return enriched_data\n else:\n return tweet", "def initWithRawData(self, tweet):\n\n for attr in self.desired_features['tweet']:\n if attr in tweet.keys():\n setattr(self, attr, tweet[attr])\n\n if 'preprocessed_text' in self.desired_features['tweet']:\n self.preprocessText(tweet['text'])", "def clean_tweet(tweet):\n\ttweet = re.sub(r\"\\bhttps?:\\/\\/\\S+\\b\", '<LINK>', tweet) # Replace links with <LINK> tag\n\ttweet = re.sub(r\"@\\w+\", '<USER> ', tweet) # Replace @user with <USER> tag\n\ttweet = re.sub(r\"😺✏ — ((?s).*?)<LINK>\", r\"<CATASK> \\1\", tweet) # Add a tag to CuriousCat answers\n\ttweet = re.sub(r\"\\[ID(.*?)\\]\", '<DESCRIPTION>', tweet, flags=re.I) \n\ttweet = re.sub(r\"\\[alt(.*?)\\]\", '<DESCRIPTION>', tweet, flags=re.I)\n\ttweet = re.sub(r\"\\[desc(.*?)\\]\", '<DESCRIPTION>', tweet, flags=re.I)\n\n\t# Replace automatically generated text and short tweets with None\n\tto_be_removed = ['My week on Twitter', 'My fitbit #Fitstats', 'biggest fans this week',\n\t'via @YouTube', 'automatically checked by', '#MyTwitterAnniversary']\n\tif any(n in tweet for n in to_be_removed) or len(tweet.split(' '))<10: \n\t\ttweet = None\n\n\treturn tweet", "def preProcessing(tweetData, conflate, lower, negate, hashtag):\n\n newData = {}\n newData['tweets'] = {}\n tweetIDs = tweetData['tweets'].keys()\n for tweetID in tweetIDs:\n wfeatures = tweetData['tweets'][tweetID].get('weightFeatures')\n answers = tweetData['tweets'][tweetID]['answers']\n words = tweetData['tweets'][tweetID]['words']\n tags = tweetData['tweets'][tweetID].get('tags') # None otherwise\n\n # construct dictionary of tweet data after preprocessing\n newData['tweets'][tweetID] = {}\n if conflate:\n answers = conflateAnswers(answers)\n if lower:\n words = lowerWords(words)\n if negate:\n words = negateWords(words, negate)\n if hashtag:\n words = hashtagWords(words)\n\n newData['tweets'][tweetID]['words'] = words\n newData['tweets'][tweetID]['answers'] = answers\n newData['tweets'][tweetID]['tags'] = tags\n newData['tweets'][tweetID]['weightFeatures'] = wfeatures\n\n return newData", "def process_tweet(tweet, stopwords, punctuations):\n retweetRT_removed = re.sub(r'^RT[\\s]','',tweet)\n tweet_url_removed = re.sub(r'https?:\\/\\/.*[\\r\\n]*','',retweetRT_removed)\n tweet_hashtag_removed = re.sub(r'#', '', tweet_url_removed) \n \n # instantiate tweettokenizer class\n tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True)\n # tokenize the tweet\n tweet_tokens = tokenizer.tokenize(tweet_hashtag_removed) \n \n #remove stop word and punctuations\n tweet_token_processed = [item for item in tweet_tokens if ((item not in stopwords) and (item not in punctuations))]\n\n #lemmatizing words using wordnet lemmatizer \n lmtzr = WordNetLemmatizer()\n tweet_tokens_lmtzd = [lmtzr.lemmatize(item) for item in tweet_token_processed]\n \n return tweet_tokens_lmtzd", "def process_tweets(dataset, group_by = 'author', filter_language = 'English', extract_hashtags = True, filtersize = 3):\n import pandas as pd\n import re\n from nltk.corpus import wordnet as wn\n from collections import Counter\n\n pd.options.mode.chained_assignment = None # default='warn', 
suppress the setting with copy warning\n\n # Filter for languages if true \n if filter_language:\n # selecting content columns for subject categorization by language\n dataset = dataset[dataset.language == filter_language]\n cont = dataset.content\n else:\n cont = dataset.content\n \n content_filtered = cont.apply(lambda x: re.sub(r'http\\S+', '', x)).apply(lambda x: re.sub(r\"'|\\\"|`|:|\\?|~|,|\\.\", '', x))\\\n .apply(lambda x: remove_stopwords(x))\n\n\n # redefine content column for dataset\n dataset['content'] = content_filtered.values\n # Drop NaN values in content \n dataset.dropna(axis=0,subset=['content'], inplace=True)\n # Get list of words that are stop words \n en_stop = set(nltk.corpus.stopwords.words('english'))\n tokens = []\n \n ##### GROUP BY AUTHOR ######\n \n if group_by == 'author':\n tweets_concatenated = dataset.groupby('author')['content'].apply(lambda x : x.sum()\n if x.dtype=='float64' else ' '.join(x))\n content = tweets_concatenated.copy()\n if extract_hashtags == True:\n # Count the hashtag frequency for each user\n hashtag_count = tweets_concatenated.apply(lambda x: hashtag_counter(x, do='count'))\n # Extract words that are in hashtags\n hashtagged = tweets_concatenated.apply(lambda x: hashtagger(x))\n # Concatenate the words to the entire tweets\n hashtags_gone = hashtagged + tweets_concatenated\n # Remove hashtags since they are no longer needed and make all words lower case\n hashtags_gone = hashtags_gone.apply(lambda x: re.sub(r\"#\\w+\", '', x)).apply(lambda x: x.lower())\n # Convert to NumPy array\n content = hashtags_gone.values\n content_tokens = [nltk.word_tokenize(x) for x in content]\n for sublist in content_tokens:\n tokens.append([get_lemma(token) for token in sublist if token not in en_stop and len(token) > 3])\n\n return tokens, hashtag_count\n \n ##### GROUP BY HASHTAG ######\n \n if group_by == 'hashtag':\n hashtag_column = dataset['content'].apply(lambda x: hashtag_counter(x))\n df_hashtags = pd.concat([dataset['content'], hashtag_column], axis=1)\n df_hashtags.columns = ['content', 'hashtags']\n \n \n # make the series that has as the index values the hashtag and the column that has the concatenated \n # tweets.\n tweets_concatenated = df_hashtags.groupby('hashtags')['content'].apply(lambda x : x.sum()\n if x.dtype=='float64' else ' '.join(x))\n # remove the hashtag shit\n hashtags_gone = tweets_concatenated.apply(lambda x: re.sub(r\"#\\w+\", '', x)).apply(lambda x: x.lower())\n content = hashtags_gone.values\n\n\n content_tokens = [nltk.word_tokenize(x) for x in content]\n for sublist in content_tokens:\n tokens.append([get_lemma(token) for token in sublist if token not in en_stop and len(token) > 3])\n \n return tweets_concatenated, tokens, hashtag_column", "def analyze_tweet(tweet,results):\n \n # tweet body information\n if \"body_term_count\" not in results:\n results[\"body_term_count\"] = SimpleNGrams(\n char_lower_cutoff=3\n ,n_grams=1\n ,tokenizer=\"twitter\"\n )\n results[\"body_term_count\"].add(tweet[\"body\"])\n\n # which users are involved\n if \"at_mentions\" not in results:\n results[\"at_mentions\"] = defaultdict(constant_factory)\n #if \"mention_edges\" not in results:\n # results[\"mention_edges\"] = {}\n for u in [x for x in tweet[\"twitter_entities\"][\"user_mentions\"]]:\n \tresults[\"at_mentions\"][u[\"id_str\"]] = (results[\"at_mentions\"][u[\"id_str\"]][0] + 1, \n results[\"at_mentions\"][u[\"id_str\"]][1] | set([u[\"screen_name\"].lower()]))\n #if u not in results[\"mention_edges\"]:\n # 
results[\"mention_edges\"][u[\"id_str\"]] = {tweet[\"actor\"][\"id\"][15:]: 1}\n #else:\n # actor_id = tweet[\"actor\"][\"id\"][15:]\n # if actor_id not in results[\"mention_edges\"][u[\"id_str\"]]:\n # results[\"mention_edges\"][u[\"id_str\"]][actor_id] = 1\n # else:\n # results[\"mention_edges\"][u[\"id_str\"]][actor_id] += 1\n \n if \"inReplyTo\" in tweet:\n if \"in_reply_to\" not in results:\n results[\"in_reply_to\"] = defaultdict(int)\n #print tweet[\"inReplyTo\"][\"link\"].split(\"/\")[3].lower()\n results[\"in_reply_to\"][tweet[\"inReplyTo\"][\"link\"].split(\"/\")[3].lower()] += 1\n\n if tweet[\"verb\"] == \"share\":\n if \"RT_of_user\" not in results:\n results[\"RT_of_user\"] = defaultdict(constant_factory)\n rt_of_name = tweet[\"object\"][\"actor\"][\"preferredUsername\"].lower()\n rt_of_id = tweet[\"object\"][\"actor\"][\"id\"][15:]\n results[\"RT_of_user\"][rt_of_id] = (results[\"RT_of_user\"][rt_of_id][0] + 1, \n results[\"RT_of_user\"][rt_of_id][1] | set([rt_of_name]))\n\n if \"hashtags\" not in results:\n results[\"hashtags\"] = defaultdict(int)\n if \"hashtags\" in tweet[\"twitter_entities\"]:\n for h in [x[\"text\"].lower() for x in tweet[\"twitter_entities\"][\"hashtags\"]]:\n results[\"hashtags\"][h] += 1\n\n if \"local_timeline\" not in results:\n results[\"local_timeline\"] = defaultdict(int)\n utcOffset = tweet[\"actor\"][\"utcOffset\"]\n if utcOffset is not None:\n posted = tweet[\"postedTime\"]\n hour_and_minute = (datetime.datetime.strptime(posted[0:16], \"%Y-%m-%dT%H:%M\") + \n datetime.timedelta(seconds = int(utcOffset))).time().strftime(\"%H:%M\")\n results[\"local_timeline\"][hour_and_minute] += 1\n\n if \"urls\" not in results:\n results[\"urls\"] = defaultdict(int)\n if \"urls\" in tweet[\"gnip\"]:\n try:\n for url in [x[\"expanded_url\"] for x in tweet[\"gnip\"][\"urls\"]]:\n results[\"urls\"][url.split(\"/\")[2]] += 1\n except KeyError:\n pass\n\n if \"user_ids_user_freq\" not in results:\n results[\"user_ids_user_freq\"] = defaultdict(int)\n results[\"user_ids_user_freq\"][tweet[\"actor\"][\"id\"][15:]] += 1", "def filter(self, tweet: dict) -> dict:\n\n # Filter required fields\n filtered_tweet = {\"user_id\": tweet.user.id_str,\n \"name\": tweet.user.name,\n \"nickname\": tweet.user.screen_name,\n \"description\": tweet.user.description,\n \"user_location\": tweet.user.location,\n \"followers_count\": tweet.user.followers_count,\n \"tweets_count\": tweet.user.statuses_count,\n \"user_date\": tweet.user.created_at.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"verified\": tweet.user.verified,\n \"tweet_id\": tweet.id_str,\n \"text\": tweet.full_text,\n \"favs\": tweet.favorite_count,\n \"retweets\": tweet.retweet_count,\n \"tweet_date\": tweet.created_at.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"tweet_location\": tweet.place.full_name if tweet.place else None,\n \"source\": tweet.source,\n \"sentiment\": self.detect_sentiment(tweet.full_text, tweet.lang)}\n\n return filtered_tweet" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a spike train of a marked inhomogeneous Poisson process
def generate_spikes(model, parameter, trajectory_data):
    n_neurons = len(model['spike_rates'])
    spike_rates = model['spike_rates']
    position_centers = model['position_centers']
    position_std = model['position_std']
    mark_centers = model['mark_centers']
    mark_std = model['mark_std']

    times = trajectory_data['times']
    time_steps = trajectory_data['time_steps']
    trajectory = trajectory_data['positions']

    unit_data = {}
    for n in range(n_neurons):
        # generate spikes by approximating an inhomogeneous Poisson process with a Bernoulli process
        pos_rv = sp.stats.norm(loc=position_centers[n], scale=position_std[n])
        spike_rate = pos_rv.pdf(trajectory) / pos_rv.pdf(position_centers[n])
        spike_rate = spike_rate / np.sum(spike_rate) * len(spike_rate) * np.max(times) / np.max(time_steps) * spike_rates[n]
        spike_rate[spike_rate > 1] = 1
        spikes = sp.stats.bernoulli(p=spike_rate).rvs() > 0
        n_spikes = np.sum(spikes)

        # generate marks - assumes mark probability is uniformly distributed over position
        mark_rv = sp.stats.multivariate_normal(mean=mark_centers[n], cov=np.diag(mark_std[n]))
        marks = mark_rv.rvs(n_spikes)
        if marks.ndim == 1:
            marks = marks[:, None]

        # generate unit spike data
        unit_data[n] = dict(positions=trajectory[spikes], marks=marks,
                            time_steps=time_steps[spikes], times=times[spikes])

    # generate full data
    data = dict()
    data['unit'] = np.hstack([np.ones(len(unit_data[n]['times'])) * n for n in range(n_neurons)])
    for q in ['positions', 'marks', 'times', 'time_steps']:
        if q == 'marks':
            data[q] = np.vstack([unit_data[n][q] for n in range(n_neurons)])
        else:
            data[q] = np.hstack([unit_data[n][q] for n in range(n_neurons)])

    sort = np.argsort(data['time_steps'])
    for q in data.keys():
        data[q] = data[q][sort]

    return data
[ "def poisson_train(ISI, time):\r\n# -----------------------------------------------------------------------------\r\n train_length = 0\r\n while train_length < time:\r\n candidate = np.random.poisson(ISI, (time/ISI)*2)\r\n train_length = sum(candidate)\r\n i = 0; train_length = 0\r\n while train_length < time:\r\n train_length += candidate[i]\r\n i += 1\r\n assert sum(candidate[:(i-1)]) < time, \"Spike train is longer then the total time.\"\r\n return make_array_train(candidate[:(i-1)], time)", "def poissonFiring(n_cells, n_trials, p):\n\n\tspikes = numpy.random.rand(n_cells, n_trials)\n\tspikes[spikes < p] = 1\n\tspikes[spikes < 1] = 0\n\n\treturn spikes", "def spiking(x,dt):\r\n# N = len(x)\r\n# spike = np.random.rand(N) < x*dt*0.1\r\n x[x<0] = 0\r\n #x[x>100] = 100\r\n spike = np.random.poisson(x*dt) #Poisson process\r\n# spike = x*dt #rate model\r\n return spike", "def __generateSpikes(self):\n\t\ti = 0\n\t\twhile i < self.spikeNumber:\n\t\t\tcolisionDetected = False\n\t\t\tx = randrange(10 - self.gameWidth/2, -10 + self.gameWidth/2)\n\t\t\ty = randrange(10 - self.gameHeight/2, -10 + self.gameHeight/2)\n\t\t\tspikePoint = Point3(x, y, 0)\n\n\t\t\tfor spike in Spike.spikeNormalList:\n\t\t\t\tpoint = spike.spikeHandle.getPos(self.render)\n\t\t\t\tdist = (point.getXy() - spikePoint.getXy()).lengthSquared()\n\t\t\t\tif dist < 100:\n\t\t\t\t\tcolisionDetected = True\n\t\t\t\t\tbreak\n\n\t\t\tif colisionDetected:\n\t\t\t\tcontinue\n\n\t\t\tSpike(self, x, y)\n\t\t\ti += 1", "def generate_spike_train(self):\n\n # Container for spike train.\n spike_train = np.zeros(self.duration)\n\n # Set initial rate of change.\n s = np.random.uniform(self.s_min, self.s_max)\n r = np.random.uniform(self.r_min, self.r_max)\n\n for i in range(0, self.duration):\n\n # Calculate probability of giving a spike at given time step.\n p = r * self.dt\n\n # Ensure that all afferent spikes at\n # least once every given pattern length.\n if i >= self.pattern_duration:\n spike_sum = np.sum(spike_train[i - self.pattern_duration: i])\n else:\n spike_sum = 1\n\n if spike_sum < 1:\n spike_train[i] = 1\n\n # Fire if p is > random number between 0 and 1.\n elif p > np.random.uniform(0, 1):\n spike_train[i] = 1\n\n # Calculate change in r, apply and clip.\n dr = s * self.dt\n r += dr\n r = min(self.r_max, max(r, self.r_min))\n\n # Calculate rate of change and clip.\n ds = np.random.uniform(self.ds_min, self.ds_max)\n s += ds\n s = min(self.s_max, max(self.s_min, s))\n\n return spike_train", "def generateInhomPoisson_Thinning(rate, deltaT, T):\n # generate homPois with rate rmax for each trial ( we used bernoulli approximation of Pois)\n r_max = np.max(rate, axis=1)\n SF = 1/deltaT\n numSamples = np.shape(rate)[1]\n numTrials = np.shape(rate)[0]\n repeated_rmax = np.transpose(npmt.repmat(r_max, numSamples, 1))\n probThrslds = repeated_rmax/SF\n spikeTrain_hom = (np.random.rand(numTrials,numSamples)<probThrslds).astype(int)\n \n # create rejection matrix\n rejectMat = ((rate/repeated_rmax) > np.random.rand(numTrials,numSamples)).astype(int)\n \n #create inhom pois\n spikeTrain_inhom = rejectMat * spikeTrain_hom \n return spikeTrain_inhom", "def generate_data_oscilatory(nTrials, N, T,freq_coinc, amp_coinc, offset_coinc,freq_bg, amp_bg,offset_bg,RateJitter = 10*pq.Hz):\n# from stocmod import poisson_nonstat as pn\n import neo\n h = 1*pq.ms\n # modulatory coincidence rate\n tc = numpy.arange(0,T.rescale('ms').magnitude,h.rescale('ms').magnitude)*pq.ms\n bbc = (2*numpy.pi*freq_coinc*tc).simplified\n coincrate = offset_coinc+ 
amp_coinc*numpy.sin(bbc)*offset_coinc.units\n coincrate[coincrate <0*coincrate.units]=0*coincrate.units\n\n # background rate\n tb = numpy.arange(0,T.rescale('ms').magnitude,h.rescale('ms').magnitude)*pq.ms\n bbb = (2*numpy.pi*freq_bg*tb).simplified\n backgroundrate = offset_bg+ amp_bg*numpy.sin(bbb)*offset_bg.units\n backgroundrate[backgroundrate <0*backgroundrate.units]=0*backgroundrate.units\n\n # inhomogenious rate across trials\n rndRateJitter = (numpy.random.rand(nTrials)-0.5)*RateJitter\n spiketrain = []\n for i in range(nTrials):\n rate_signal_bg = neo.AnalogSignal((backgroundrate.rescale('Hz')+rndRateJitter[i]).magnitude,sampling_period=h, units=pq.Hz,t_start=0*pq.ms)\n rate_signal_coinc = neo.AnalogSignal(coincrate.rescale('Hz').magnitude,sampling_period=h, units=pq.Hz,t_start=0*pq.ms)\n sts_bg = poisson_nonstat(rate_signal_bg,N=N)\n # inserting coincidences\n sts_coinc = poisson_nonstat(rate_signal_coinc,N=1)\n sts_bg_coinc = []\n for j in sts_bg:\n sts_bg_coinc.append(\n neo.SpikeTrain(numpy.sort(numpy.append(j.magnitude, sts_coinc[0].magnitude))*j.units,\n t_start=j.t_start,t_stop = j.t_stop))\n spiketrain.append(sts_bg_coinc)\n return {'st':spiketrain, 'backgroundrate':backgroundrate, 'coincrate':coincrate}", "def simulatePoissonProcess(max=200000, size=200):\n #for non-linearity\n from support import VISinstrumentModel\n\n size = int(size)\n\n fluxlevels = np.linspace(1000, max, 50)\n\n #readnoise\n readnoise = np.random.normal(loc=0, scale=4.5, size=(size, size))\n #PRNU\n prnu = np.random.normal(loc=1.0, scale=0.02, size=(size, size))\n\n fig = plt.figure(1)\n plt.title(r'Simulation: $%i \\times %s$ region' % (size, size))\n plt.subplots_adjust(left=0.14)\n\n ax = fig.add_subplot(111)\n\n for flux in fluxlevels:\n d1 = np.random.poisson(flux, (size, size))*prnu + readnoise\n d2 = np.random.poisson(flux, (size, size))*prnu + readnoise\n fx = (np.average(d1) + np.average(d2)) / 2.\n ax.plot(fx, np.var(d1-d2)/2., 'bo')\n\n d1 = np.random.poisson(flux, (size, size))*prnu + readnoise\n d2 = np.random.poisson(flux, (size, size))*prnu + readnoise\n #d1nonlin = VISinstrumentModel.CCDnonLinearityModelSinusoidal(d1, 0.1, phase=0.5, multi=1.5)\n #d2nonlin = VISinstrumentModel.CCDnonLinearityModelSinusoidal(d2, 0.1, phase=0.5, multi=1.5)\n d1nonlin = VISinstrumentModel.CCDnonLinearityModel(d1)\n d2nonlin = VISinstrumentModel.CCDnonLinearityModel(d2)\n fx = (np.average(d1) + np.average(d2)) / 2.\n ax.plot(fx, np.var(d1nonlin-d2nonlin)/2., 'rs')\n\n d1 = np.random.poisson(flux, (size, size))*prnu*1.05 + readnoise #5% gain change\n d2 = np.random.poisson(flux, (size, size))*prnu + readnoise\n fx = (np.average(d1) + np.average(d2)) / 2.\n ax.plot(fx, np.var(d1 - d2) / 2., 'mD')\n\n ax.plot([-1, ], [-1, ], 'bo', label='data (linear)')\n ax.plot([-1, ], [-1, ], 'rs', label='data (non-linear)')\n ax.plot([-1, ], [-1, ], 'mD', label='data (gain change)')\n\n ax.plot([0, max], [0, max], 'k-', lw=1.5, label='shot noise')\n\n ax.set_xlim(0, max)\n ax.set_ylim(0, max)\n\n ax.set_xlabel(r'$ \\left < \\mathrm{Signal}_{%i \\times %i} \\right > \\quad [e^{-}]$' % (size, size))\n ax.set_ylabel(r'$\\frac{1}{2}\\sigma^{2}(\\Delta \\mathrm{Signal}) \\quad [(e^{-})^{2}]$')\n\n plt.legend(shadow=True, fancybox=True, loc='upper left', numpoints=1)\n plt.savefig('Simulation.pdf')\n plt.close()", "def _sample_poisson(lam=None, shape=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs):\n return (0,)", "def p_thin(self,n=2):\n \n if any('Background Subtracted' in s for s in self.ProcessingStatus):\n 
print('Warning: poisson thinning (self.p_thin) called on %s \\n %s has been background subtracted so it is \\n not strictly a poisson random number \\n applying anyway.' %(self.label,self.label))\n \n proflist = []\n p = 1.0/n\n for ai in range(n):\n copy = self.copy()\n copy = np.random.binomial(self.profile,p)\n copy.profile_variance = copy.profile_variance*p\n copy.label = copy.label + ' copy %d'%ai\n proflist.extend([copy])\n \n return proflist", "def probgen(self, i,j):\n\n pmat=self.clustertrain(i, j) # returns spike trains for a given cluster j at RG step i\n calc=(1.-np.mean(pmat)) # calculate probability of silence within cluster\n return calc", "def generate_spike_trains(self):\n\n # Container for spike trains.\n spike_trains = np.zeros((self.num_neurons, self.duration))\n\n for i in range(0, self.num_neurons):\n spike_train = self.generate_spike_train()\n spike_trains[i, :] = spike_train\n\n # Track progress\n progress = (i / float(self.num_neurons)) * 100\n sys.stdout.write(\"Generating spike trains: %d%% \\r\" % progress)\n sys.stdout.flush()\n\n self.spike_trains = spike_trains", "def poissonOnStrip(n, xMin=0, xMax=10):\n def initRoutine(causalSet):\n for k in range(0,n):\n causalSet.events.append(Event(\n t=0,\n x=random.random() * (xMax - xMin) + xMin))\n return causalSet\n return initRoutine", "def __init__(self, atoms, contraints=None, label=\"SpikeSourcePoisson\",\n rate = 1, start = 0, duration = 10000, seed=None):\n super( SpikeSourcePoisson, self ).__init__(\n n_neurons = atoms,\n constraints = contraints,\n label = label\n )\n \n self.rate = rate\n self.start = start\n self.duration = duration\n self.seed = seed", "def _generate_inhom_poisson(self, time, rate):\n # calculate cumulative rate\n deltaT = time[1:] - time[:-1]\n r = np.cumsum(rate[0:-1] * deltaT)\n r = np.insert(r, 0, 0)\n deltaR = r[1:] - r[:-1]\n\n # generate 1.5 as many spikes as expected on average for exponential distribution with rate 1\n numX = math.ceil(1.5 * r[-1])\n\n # generate exponential distributed spikes with the average rate 1\n notEnough = True\n x = np.empty(0)\n xend = 0.0\n while notEnough:\n x = np.append(x, xend + np.cumsum(np.random.exponential(1.0, numX)))\n # check that we generated enough spikes\n if (not len(x) == 0):\n xend = x[-1]\n notEnough = xend < r[-1]\n\n # trim extra spikes\n x = x[x <= r[-1]]\n\n if len(x) == 0:\n spikes = np.empty(0)\n else:\n # for each x find index of the last rate which is smaller than x\n indJ = [np.where(r <= x[iSpike])[0][-1] for iSpike in range(len(x))]\n\n # compute rescaled spike times\n spikes = time[indJ] + (x - r[indJ]) * deltaT[indJ] / deltaR[indJ]\n\n return spikes", "def gen_Poisson_n(self, metodo='SOR', omega=1, a=0, c=0, linea_xi=0,\n aa=0, cc=0, linea_eta=0):\n\n # aproximacion inicial\n self.gen_TFI()\n\n # asiganicion de variable para metodo\n Xn = self.X\n Yn = self.Y\n Xo = Xn.copy()\n Yo = Yn.copy()\n\n m = self.M\n n = self.N\n\n d_eta = self.d_eta\n d_xi = self.d_xi\n\n P_ = np.arange(1, m)\n Q_ = np.arange(1, n)\n P_ = -a * (P_ / (m-1) - linea_xi)\\\n / np.abs(P_ / (m-1) - linea_xi)\\\n * np.exp(-c * np.abs(P_ /\n (m-1) - linea_xi))\n Q_ = -aa * (Q_ / (n-1) - linea_eta)\\\n / np.abs(Q_ / (n-1) - linea_eta)\\\n * np.exp(-cc\n * np.abs(Q_ / (n-1) - linea_eta))\n\n mask = np.isnan(P_)\n P_[mask] = 0\n mask = np.isnan(Q_)\n Q_[mask] = 0\n\n # obteniendo el indice de la union de los perfiles\n if not self.airfoil_alone:\n union_start = 0\n while self.airfoil_boundary[union_start] != 0:\n union_start += 1\n\n mesh.it_max = 750000\n 
mesh.err_max = 1e-6\n # inicio del metodo iterativo, separa el metodo para perfil con\n # y sin flap\n print(f\"Generando malla tipo O.\\nDimensiones M: {self.M}\"\n + f\" N: {self.N}\")\n if self.airfoil_alone:\n print(\"Perfil\")\n print(\"Poisson numba:\")\n for it in range(mesh.it_max):\n if (it % 150e3 == 0):\n self.X = np.copy(Xn)\n self.Y = np.copy(Yn)\n self.plot()\n print()\n\n # imprime informacion\n print('it = ' + str(it) + ' aa = ' + str(aa) + ' cc = '\n + str(cc)\n + ' err_x = ' + '{:.3e}'.format(abs(Xn - Xo).max())\n + ' err_y = ' + '{:.3e}'.format(abs(Yn - Yo).max())\n + '\\t\\t', end=\"\\r\")\n\n Xo = Xn.copy()\n Yo = Yn.copy()\n # si el metodo iterativo es Jacobi\n if metodo == 'J':\n X = Xo\n Y = Yo\n else: # si el metodo es Gauss-Seidel o SOR\n X = Xn\n Y = Yn\n\n (Xn, Yn) = _gen_Poisson_n(X, Y, self.M, self.N, P_, Q_)\n\n # se aplica sobre-relajacion si el metodo es SOR\n if metodo == 'SOR':\n Xn = omega * Xn + (1 - omega) * Xo\n Yn = omega * Yn + (1 - omega) * Yo\n\n if abs(Xn -Xo).max() < mesh.err_max\\\n and abs(Yn - Yo).max() < mesh.err_max and it > 10:\n print('Poisson: ' + metodo + ': saliendo...')\n print('it=', it)\n break\n else:\n print(\"Perfil con flap\")\n print(\"Poisson numba:\")\n for it in range(mesh.it_max):\n if (it % 650e3 == 0):\n self.X = np.copy(Xn)\n self.Y = np.copy(Yn)\n self.plot()\n print()\n\n # imprime informacion\n print('it = ' + str(it) + ' aa = ' + str(aa) + ' cc = '\n + str(cc)\n + ' err_x = ' + '{:.3e}'.format(abs(Xn - Xo).max())\n + ' err_y = ' + '{:.3e}'.format(abs(Yn - Yo).max())\n + '\\t\\t', end=\"\\r\")\n\n Xo = Xn.copy()\n Yo = Yn.copy()\n # si el metodo iterativo es Jacobi\n if metodo == 'J':\n X = Xo\n Y = Yo\n else: # si el metodo es Gauss-Seidel o SOR\n X = Xn\n Y = Yn\n\n (Xn, Yn) = _gen_Poisson_n_flap(X, Y, self.M, self.N, P_,\n Q_, self.airfoil_boundary,\n union_start)\n\n # se aplica sobre-relajacion si el metodo es SOR\n if metodo == 'SOR':\n Xn = omega * Xn + (1 - omega) * Xo\n Yn = omega * Yn + (1 - omega) * Yo\n\n\n if abs(Xn -Xo).max() < mesh.err_max\\\n and abs(Yn - Yo).max() < mesh.err_max:\n print('Poisson: ' + metodo + ': saliendo...')\n print('it=', it)\n break\n\n self.X = Xn\n self.Y = Yn\n\n return (self.X, self.Y)", "def process_particles(n: int, l: int, t: int, r: float, v: float, nu: float, kappa: float) -> \\\n Generator[Tuple[Tensor, Tensor], None, None]:\n von_mises = VonMises(tensor(0, torch.float), tensor(kappa, torch.float))\n\n dt = 0.01 / nu\n max_iter = int(t / dt) * 5\n scaled_velocity = l * v\n rr = l / int(l / r)\n map_size = int(l / rr)\n pos = torch.mul(torch.rand(n, 3, device=gpu_cuda), l)\n vel = torch.cat((torch.mul(torch.rand(n, 1, device=gpu_cuda), 2 * math.pi),\n torch.mul(torch.rand(n, 1, device=gpu_cuda), math.pi)), 1)\n\n print(f\"\"\"Calculated Parameters:-\n Time Discretisation Step: {dt}\n Max Iteration: {max_iter}\n Scaled Velocity of Particles: {scaled_velocity}\n Adjusted Interaction Radius: {rr}\n Particle Map Size: {map_size}\"\"\")\n\n index = index_map(pos, rr)\n particle_map = fill_map(map_size, index)\n\n for t in range(max_iter + 1):\n jump = torch.rand(n, 1, device=gpu_cuda)\n who = torch.where(torch.gt(jump, torch.exp(tensor(-nu * dt, torch.float))),\n tensor(1, torch.int64),\n tensor(0, torch.int64))\n condition = torch.where(torch.eq(who[:, 0], 1))\n\n target = deepcopy(vel)\n target[condition] = average_orientation(pos, target, index[condition], particle_map, r)\n vel[condition] = target[condition] + von_mises.sample((who.sum(), 2))\n vel[:, 0][condition] = 
torch.remainder(vel[:, 0][condition], 2 * math.pi)\n vel[:, 1][condition] = torch.remainder(vel[:, 1][condition], math.pi)\n\n x = torch.sin(vel[:, 1]) * torch.cos(vel[:, 0])\n y = torch.sin(vel[:, 1]) * torch.sin(vel[:, 0])\n z = torch.cos(vel[:, 1])\n pos = torch.remainder(pos + torch.mul(torch.cat((x.reshape(x.size()[0], 1),\n y.reshape(y.size()[0], 1),\n z.reshape(z.size()[0], 1)), 1), dt * scaled_velocity), l)\n\n if t % 10 == 0:\n print(f\"Iteration number: {t} (out of {max_iter} iterations) [{(100 * t) // max_iter}% complete]\")\n yield pos, vel\n\n index = index_map(pos, rr)\n particle_map = fill_map(map_size, index)", "def poisson_series( T, r, amp=None, label=None ):\n t = 0\n ans = TrigList()\n while t < T:\n t += poisson_dt( r )\n if t > T: \n break\n ans.append( Trigger( t, amp=amp, label=label ) )\n return ans", "def poisson_create(rate: float, max_possible: float) -> float:\n n = np.arange(0, max_possible)\n n2 = np.arange(0, max_possible)\n y = stats.poisson.pmf(n, rate)\n y2 = n2 * y\n event_pred = y2.sum()\n return event_pred" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes (smoothed) variance within groups of tensor x.
def compute_grouped_variance(x, grouped_dim, smoothing_delta=0.0):
    grouped_shape = (x.shape[0], grouped_dim, -1)
    x_centered = center_data(x.view(grouped_shape))
    return ((torch.norm(x_centered, dim=-1, p=2.0)) ** 2 + smoothing_delta) / x_centered.shape[-1]
[ "def calculate_variance(X):\n return np.var(X,axis=0)", "def ts_var(self, x):\r\n x.name = 'variance'\r\n return x.rolling(self.window).var()", "def prod_variance(X, varX, Y, varY):\n z = X**2.0 * varY + varX * varY + varX * Y**2.0\n return z", "def variance_filter(data, windowsize):\n half = windowsize//2\n res = np.zeros(data.shape[0]-windowsize)\n for i in range(half,len(data)-half):\n res[i-half] = np.std(data[i-half:i+half])\n return res", "def variance_extractor(data, num_samples):\n var_data = smooth(variance_filter(data,windowsize=200), windowsize=5, std=0.8)\n vstd = np.std(var_data)\n if vstd == 0:\n vstd = 0.00001\n var_data = (var_data - np.mean(var_data))/vstd\n return sample(var_data, num_samples)", "def posterior_mean_and_variance(self, x: ndarray) -> Tuple[ndarray, ndarray]:\n return self._gp.posterior_mean_and_variance(x)", "def optimal_variance_nll(self, x):\n \n sigma = ((x - self.mu) ** 2).mean().sqrt()\n return Gaussian(mu=self.mu, sigma=sigma).nll(x)", "def group_neurons_with_variance(x, grouped_dim=64, group_conv=False, group_fc=False,\n grouping_power=0.5, smoothing_delta=0.0):\n if group_conv and x.dim() == 4:\n assert x.shape[1] > 3, 'tried to group the input to the network'\n assert np.prod(list(x.shape[1:])) // grouped_dim > 1, 'grouping dim too high, will zero out the activity'\n return compute_grouped_variance(x, grouped_dim, smoothing_delta) ** grouping_power\n if group_fc and x.dim() == 2:\n assert np.prod(list(x.shape[1:])) // grouped_dim > 1, 'grouping dim too high, will zero out the activity'\n return compute_grouped_variance(x, grouped_dim, smoothing_delta) ** grouping_power\n return x", "def ml_variance(values, mean):\n\n # return the equation for variance\n return sum((value-mean)**2 for value in values)/len(values)", "def scaled_variance(mean: Tensor, values: Tensor) -> Tensor:\n return torch.sum(torch.pow((mean - values), 2) / values, dim=1).reshape(-1, 1)", "def q_mean_variance(self, x_0, x_t, t):\n assert x_0.shape == x_t.shape\n posterior_mean = extract(self.posterior_mean_coef1, t, x_t.shape) * x_0 + extract(self.posterior_mean_coef2, t, x_t.shape) * x_t\n posterior_log_var_clipped = extract(self.posterior_log_var_clipped, t, x_t.shape)\n return posterior_mean, posterior_log_var_clipped", "def variance(data_set):\n deviations = mean_deviation(data_set)\n return round(sum(map(lambda x: x * x, deviations)) / (len(data_set) - 1), 3)", "def estimate_var(neighborhood: Neighborhood, k: int) -> float:\n upper = 0\n lower = len(neighborhood.neighbors) - 1\n est_residual = estimate_residual(neighborhood, k)\n for neighbor in neighborhood.neighbors:\n residual = calculate_score(neighbor.feat_vector[k:])\n upper += math.pow(residual - est_residual, 2)\n\n return upper/lower", "def compute_variance(vec):\n return sum(vec-vec.mean()**2)/float(vec.size)", "def variance(vals):\n mean=float(sum(vals))/len(vals)\n s=sum([(v-mean)**2 for v in vals])\n return s/len(vals)", "def variance_components(self):\n return [x.sigma for x in self.random_effects]", "def retained_variance(singular_values, dim):\r\n return np.sum(singular_values[0:dim]) / np.sum(singular_values)", "def blocks_to_variance(blocks):\n var = np.var(blocks, axis=(-1, -2))\n return var", "def gaussian_process_pointwise_variance(kernel, pred_samples, train_samples,\n nugget=0):\n K_train = kernel(train_samples.T)\n # add small number to diagonal to ensure covariance matrix is\n # positive definite\n ntrain_samples = train_samples.shape[1]\n K_train[np.arange(ntrain_samples), 
np.arange(ntrain_samples)] += nugget\n k_pred = kernel(train_samples.T, pred_samples.T)\n L = np.linalg.cholesky(K_train)\n tmp = solve_triangular(L, k_pred, lower=True)\n variance = kernel.diag(pred_samples.T) - np.sum(tmp*tmp, axis=0)\n return variance" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Groups layer activation with some power of smoothed variance.
def group_neurons_with_variance(x, grouped_dim=64, group_conv=False, group_fc=False,
                                grouping_power=0.5, smoothing_delta=0.0):
    if group_conv and x.dim() == 4:
        assert x.shape[1] > 3, 'tried to group the input to the network'
        assert np.prod(list(x.shape[1:])) // grouped_dim > 1, 'grouping dim too high, will zero out the activity'
        return compute_grouped_variance(x, grouped_dim, smoothing_delta) ** grouping_power
    if group_fc and x.dim() == 2:
        assert np.prod(list(x.shape[1:])) // grouped_dim > 1, 'grouping dim too high, will zero out the activity'
        return compute_grouped_variance(x, grouped_dim, smoothing_delta) ** grouping_power
    return x
[ "def _smoothing_update(self):\n gain = cho_solve(cho_factor(self.x_cov_pr), self.xx_cov).T\n self.x_mean_sm = self.x_mean_fi + gain.dot(self.x_mean_sm - self.x_mean_pr)\n self.x_cov_sm = self.x_cov_fi + gain.dot(self.x_cov_sm - self.x_cov_pr).dot(gain.T)", "def __init__(self, layer: MultiLayer, smoothness: bool = False):\n super().__init__(InnerMultiplyAggregateLayer(layer, smoothness))", "def get_function_mean_var(model, layer_names):\n outputs = []\n for layer_name in layer_names:\n outputs += [\n model[layer_name].mean(0).mean(1).mean(1),\n (model[layer_name]**2).mean(0).mean(1).mean(1),\n ]\n return theano.function([model['data']], outputs)", "def create_layer(prev, n, activation):\n he = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n layer = tf.layers.Dense(units=n, activation=activation, name='layer',\n kernel_initializer=he)\n return layer(prev)", "def compute_grouped_variance(x, grouped_dim, smoothing_delta=0.0):\n grouped_shape = (x.shape[0], grouped_dim, -1)\n x_centered = center_data(x.view(grouped_shape))\n return ((torch.norm(x_centered, dim=-1, p=2.0)) ** 2 + smoothing_delta) / x_centered.shape[-1]", "def smooth(self, window: int = 1000):\n weights = np.ones(window)\n weights /= weights.sum()\n self.data[\"smoothed\"] = np.convolve(\n self.data[\"intensity\"], weights, mode=\"same\"\n )", "def total_variation_loss(x):\n a = K.square(x[:, :img_height - 1, :img_width - 1, :] - x[:, 1:, :img_width-1, :])\n b = K.square(x[:, :img_height - 1, :img_width - 1, :] - x[:, :img_height-1, 1:, :])\n return K.sum(K.pow(a + b, 1.25))", "def add_mean(self):\n with tf.variable_scope(self._get_layer_str()):\n prev_shape = self.get_output().get_shape()\n reduction_indices = list(range(len(prev_shape)))\n assert len(reduction_indices) > 2 and \"Can't average a (batch, activation) tensor\"\n reduction_indices = reduction_indices[1:-1]\n out = tf.reduce_mean(self.get_output(), reduction_indices=reduction_indices)\n self.outputs.append(out)\n return self", "def smooth(processed):\n smoothed = savgol_filter(processed, 45, 6)\n # For future this could be a window that you type the order and the\n # number of points into, and then it will plot it to show you the\n #smooth before moving on\n return smoothed", "def create_layer(prev, n, activation):\n weights_initializer = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n layer = tf.layers.Dense(n, activation=activation, name=\"layer\", kernel_initializer=weights_initializer)\n return (layer(prev))", "def layer_up(self, activation=sigmoid): #add option to change the activator, should these be self. ? 
should this be done for both FP and BP?\n\n last_completed_layer = len(self.nodes) - 1\n theta_l = self.theta[last_completed_layer]\n last_completed_values = self.nodes[last_completed_layer]\n next_layer_values = []\n for i in range(len(theta_l)):\n z_value = sum([theta_i * a_i for theta_i, a_i in zip(theta_l[i], [1] + list(last_completed_values))])\n #next_layer_values.append(sigmoid(z_value))\n next_layer_values.append(activation(z_value)) # change\n self.nodes.append(next_layer_values)", "def get_average_act(self, input, output) :\n if self.mode[0] == 'Baseline' :\n self.register_buffer('average_activation', output.mean(0).mean(1))", "def get_activation(self):\n return self.out_spike_counts*(self.out_bins[1]-self.out_bins[0])", "def batch_backprop(self, alpha, lamb, batch_size):\n # init derivated function\n if self.activation_type==1:\n derivative = lambda a: 1-ny.square(a)\n else:\n derivative = lambda a: a*(1.0-a)\n\n # init deltas\n delta_W = []\n delta_b = []\n\n z = self.data_amount if batch_size > self.data_amount else self.data_amount / batch_size\n for k in range(z):\n\n for i in range(self.number_hidden_layers+1):\n delta_W.append(0)\n delta_b.append(0)\n\n for j in range(batch_size):\n\n i = j + batch_size*k\n\n inp = self.inputs[i]\n target = self.targets[i]\n\n self.calc_activation(inp)\n\n target_rightform = ny.matrix( target ).T\n tmp = self.a[-1] - target_rightform\n\n tmp = ny.multiply(tmp, derivative(self.a[-1]))\n\n self.delta = [tmp]\n\n for i in range(self.number_hidden_layers):\n tmp = (ny.dot(self.weights_layer[-1-i].T, self.delta[i]))\n tmp = ny.multiply(tmp, derivative(self.a[-1-1-i]))\n\n self.delta.append(tmp)\n\n for i in range(len(self.weights_layer)):\n delta_W[i] += (ny.dot(self.delta[-1-i], self.a[i].T))\n delta_b[i] += self.delta[-1-i]\n \n\n for i in range(len(self.weights_layer)):\n self.weights_layer[i] -= alpha*(delta_W[i]/self.data_amount + lamb*self.weights_layer[i])\n self.bias[i] -= alpha * delta_b[i] / self.data_amount", "def gradient(self, data_state, model_state):\n grad = gu.Gradient(\n [None for l in self.layers],\n [None for w in self.weights]\n )\n\n # POSITIVE PHASE (using observed)\n\n # compute the conditional mean of the hidden layers\n new_data_state = self.mean_field_iteration(1, data_state, clamped=[0])\n\n # compute the postive phase of the gradients of the layer parameters\n for i in range(self.num_layers):\n grad.layers[i] = self.layers[i].derivatives(\n new_data_state.units[i],\n self._connected_rescaled_units(i, new_data_state),\n self._connected_weights(i)\n )\n\n # compute the positive phase of the gradients of the weights\n for i in range(self.num_layers - 1):\n grad.weights[i] = self.weights[i].derivatives(\n self.layers[i].rescale(new_data_state.units[i]),\n self.layers[i+1].rescale(new_data_state.units[i+1]),\n )\n\n # NEGATIVE PHASE (using sampled)\n\n # compute the conditional mean of the hidden layers\n new_model_state = self.mean_field_iteration(1, model_state, clamped=[0])\n\n # update the gradients of the layer parameters with the negative phase\n for i in range(self.num_layers):\n grad.layers[i] = be.mapzip(be.subtract,\n self.layers[i].derivatives(\n new_model_state.units[i],\n self._connected_rescaled_units(i, new_model_state),\n self._connected_weights(i)\n ),\n grad.layers[i])\n\n # update the gradients of the weight parameters with the negative phase\n for i in range(self.num_layers - 1):\n grad.weights[i] = be.mapzip(be.subtract,\n self.weights[i].derivatives(\n 
self.layers[i].rescale(new_model_state.units[i]),\n self.layers[i+1].rescale(new_model_state.units[i+1]),\n ),\n grad.weights[i])\n\n return grad", "def avgpoolflatten(): #%t\n return nn.Sequential(Reduce(\"b c h w -> b c\", \"mean\")) # combine avg pool + view", "def dropout_create_layer(prev, n, activation, keep_prob):\n i = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n new_layer = tf.layers.Dense(units=n, activation=activation,\n kernel_initializer=i)(prev)\n dropout = tf.layers.Dropout(rate=keep_prob)(new_layer)\n return dropout", "def smooth(self, \n x=0,y=0, \n window='kaiser'): #smoothes via adjacent averaging\n # n is the seed of the odd numbers: n is how many nearest neighbors \n # in each direction\n # make sure n is integer and n < grid dimension\n # account for interpolation using grid factor\n nx = x*self.grid_factor\n ny = y*self.grid_factor\n # create the window function\n if window == 'kaiser':\n # beta, a real number, is a form parameter of the kaiser window\n # beta = 5 makes this look approximately gaussian in weighting \n # beta = 5 similar to Hamming window, according to numpy\n # over window (about 0 at end of window)\n beta=5.0\n wx = np.kaiser(2*nx+1, beta)\n wy = np.kaiser(2*ny+1, beta)\n # for a 2D array, y is the first index listed\n w = np.zeros((len(wy),len(wx)))\n for i in range(len(wy)):\n for j in range(len(wx)):\n w[i,j] = wy[i]*wx[j]\n # create a padded array of zi\n # numpy 1.7.x required for this to work\n temp_zi = np.pad(self.zi, ((ny,ny), \n (nx,nx)), \n mode='edge')\n from scipy.signal import convolve\n out = convolve(temp_zi, w/w.sum(), mode='valid')\n if self.debug:\n plt.figure()\n sp1 = plt.subplot(131)\n plt.contourf(self.zi, 100)\n plt.subplot(132, sharex=sp1, sharey=sp1)\n plt.contourf(w,100)\n plt.subplot(133)\n plt.contourf(out,100)\n self.zi=out\n # reset zmax\n self.zmax = self.zi.max()\n self.zmin = self.zi.min()", "def get_bins_layers(activations, num_bins, act):\n bins = []\n for epoch in activations:\n epoch_bins=[]\n for layer in epoch:\n if act == \"linear\":\n lb_val = layer.min()\n lb = [lb_val] # min value possible \n elif act in [\"tanh\", \"elu\"]:\n lb=[-1.000000000001] # min value possible\n else:\n lb=[0] # min value possible \n unique_act_vals=np.unique(layer.flatten()) # layer.flatten() is of shape (num_samples, size_layer)\n sorted_ua = np.sort(np.setdiff1d(unique_act_vals,lb))\n if len(sorted_ua)>0: \n last_idx = np.floor((((num_bins-1)*(len(sorted_ua))) / num_bins))\n inds = list(map(int, np.linspace(0, last_idx, num_bins)))\n borders = list(map(lambda x: sorted_ua[x], inds))\n lb.extend(borders)\n lb.append(sorted_ua[-1])\n epoch_bins.append(np.array(lb))\n bins.append(epoch_bins) \n return np.array(bins)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses explore selectors with the format 'model_name/explore_name'.
def parse_selectors(selectors: List[str]) -> DefaultDict[str, set]:
    selection: DefaultDict = defaultdict(set)
    for selector in selectors:
        try:
            model, explore = selector.split("/")
        except ValueError:
            raise SpectaclesException(
                f"Explore selector '{selector}' is not valid.\n"
                "Instead, use the format 'model_name/explore_name'. "
                f"Use 'model_name/*' to select all explores in a model."
            )
        else:
            selection[model].add(explore)
    return selection
[ "async def _query_explore(\n self, session: aiohttp.ClientSession, model: Model, explore: Explore\n ) -> str:\n dimensions = [dimension.name for dimension in explore.dimensions]\n query_task_id = await self._run_query(\n session, model.name, explore.name, dimensions\n )\n self.query_tasks[query_task_id] = explore\n return query_task_id", "async def build_explore_dimensions(\n client: LookerClient,\n explore: Explore,\n ignore_hidden_fields: bool = False,\n) -> None:\n dimensions_json = await client.get_lookml_dimensions(\n explore.model_name, explore.name\n )\n\n dimensions: List[Dimension] = []\n for dimension_json in dimensions_json:\n dimension: Dimension = Dimension.from_json(\n dimension_json, explore.model_name, explore.name\n )\n if dimension.url is not None:\n dimension.url = client.base_url + dimension.url\n if not dimension.ignore and not (dimension.is_hidden and ignore_hidden_fields):\n dimensions.append(dimension)\n\n explore.dimensions = dimensions\n if len(explore.dimensions) == 0:\n logger.warning(\n f\"Warning: Explore '{explore.name}' does not have any non-ignored \"\n \"dimensions and will not be validated.\"\n )\n explore.skipped = SkipReason.NO_DIMENSIONS", "def get_explore_url(self, filters=None):\n\turl = urlresolvers.reverse('explore_stories')\n\tqs_params = []\n\tfor filter, values in filters.items():\n\t if values:\n\t qs_params.append(\"%s=%s\" % (filter, \",\".join([str(value) for value in values])))\n\n url += \"?\" + \"&\".join(qs_params) \n\treturn url", "def do_explore(self, args):\n args = parse(args)\n if not self.check_length(args, 2):\n return\n\n dest = Player.current().place\n if args[0] == 'item':\n item = Item.get_or_none(Item.name == args[1])\n if not self.set_inputs(action=T.explore, args=[dest, '', item]):\n return\n found = terminals.explore(area_location=dest, item=item)\n else:\n npc = NPC.get_or_none(NPC.name == args[1])\n\n if not self.set_inputs(action=T.explore, args=[dest, npc, '']):\n return\n found = terminals.explore(area_location=dest, npc=npc)\n\n if not found:\n print(\"failed!\")\n return\n\n self.last_action_doable = True", "def parse(path: str) -> List[QuoteModel]:\n ingestors = {\n 'csv': CSVIngestor,\n 'docx': DocxIngestor,\n 'pdf': PDFIngestor,\n 'txt': TextIngestor\n }\n file_extension = path.split('.')[-1]\n\n try:\n for ingestor in ingestors.items():\n if ingestor[0] == file_extension:\n return ingestor[1].parse(path)\n except:\n print('Issue selecting Ingestor helper')", "def ChooseScraper(self, url):", "def extract_jsonpath(models: typing.List[Model], expr: str):\n jsonpath_expr = parse(expr)\n\n result = [match.value for match in jsonpath_expr.find([md.to_dict() for md in models])]\n\n click.echo(result[0] if len(result) == 1 else json.dumps(result))", "def _ParseSelector(selector):\n if not selector:\n return None, None\n selectors = selector.split(',')\n selector_map = {}\n for s in selectors:\n items = s.split('=')\n if len(items) != 2:\n return None, '--selector should have the format key1=value1,key2=value2'\n selector_map[items[0]] = items[1]\n return selector_map, None", "def exploration_features_url(exp_id: str) -> str:\n return '%s/%s' % (feconf.EXPLORATION_FEATURES_PREFIX, exp_id)", "def splitgophertype(selector):\r\n if selector[:1] == '/' and selector[1:2]:\r\n return selector[1], selector[2:]\r\n return None, selector", "def parse_selector(cls, selector):\n def get_value(val_list):\n if len(val_list) == 1:\n return { 'type': 'literal', 'name': None, 'field_id': None, 'value': val_list[0] }\n elif val_list[0] 
== '[':\n return { 'type': 'entity', 'name': None, 'field_id': val_list[1], 'value': None }\n elif val_list[1] == '[':\n return { 'type': 'context', 'name': val_list[0], 'field_id': val_list[2], 'value': None }\n else:\n return { 'type': 'unknown', 'name': None, 'field_id': None, 'value': None }\n p_name = Word(alphas+\"_\", alphanums+\"_\")\n p_id = Word(alphas+\"_@\", alphanums+\"_-.~:/?#@!$&'()*+,;=)\")\n p_val = ( Group( Literal(\"[\") + p_id + Literal(\"]\") )\n | Group( p_name + Literal(\"[\") + p_id + Literal(\"]\") )\n | Group( QuotedString('\"', \"\\\\\") )\n | Group( QuotedString(\"'\", \"\\\\\") )\n | Group( p_id )\n )\n p_comp = ( Literal(\"==\") | Literal(\"in\") | p_name )\n p_selector = ( p_val + p_comp + p_val + StringEnd() )\n try:\n resultlist = p_selector.parseString(selector).asList()\n except ParseException:\n return None\n resultdict = {}\n if resultlist:\n resultdict['val1'] = get_value(resultlist[0])\n resultdict['comp'] = resultlist[1]\n resultdict['val2'] = get_value(resultlist[2])\n return resultdict", "def path_to_selector(path):\r\n if path==\"/\":\r\n return \"/\"\r\n else:\r\n return path[2:] # Cuts initial slash and data type identifier\r", "def parse_review_listings(self, sel):\n # Add item URLs to crawl queue.\n count = 0\n for url in sel.xpath(self.review_url_xpath).extract():\n self.add_url(self.clean_review_url(url))\n count += 1\n self.logger.info('Parsed {} review listings'.format(count))", "def parse_select(connectome, sources, targets, method_str):\n assert isinstance(method_str, (str, list))\n if isinstance(method_str, str):\n method_str = [method_str]\n candidate_paths = []\n for entry in method_str:\n method, args = entry.split(\",\")\n if method == \"length\":\n tolerance, max_hops = None, None\n tol_regex = re.compile(\"sp\\+([1-9][0-9]*|0)\")\n result = tol_regex.search(args)\n if result is not None:\n tolerance = int(result.group(1))\n print(tolerance)\n max_hops_regex = re.compile(\"p([1-9][0-9]*|0)\")\n result = max_hops_regex.search(args)\n if result is not None:\n max_hops = int(result.group(1))\n candidate_paths.append(\n length_based_selection(\n connectome, sources, targets, tolerance, max_hops\n )\n )\n else:\n raise NotImplementedError\n\n return set.intersection(*candidate_paths)", "def extract_links(self, selector='', *args, **kwargs):\n\t\ttry:\n\t\t\tlinks = self.get_tree_tag(selector=selector)\n\t\t\tfor link in links:\n\t\t\t\tnext_url = urljoin(self.url, link.get('href'))\n\t\t\t\tyield type(self)(next_url)\n\t\texcept XPathError:\n\t\t\traise Exception(\"Invalid %s selector - %s\" % (self.__selector_type__, selector))\n\t\texcept Exception:\n\t\t\traise Exception(\"Invalid %s selector - %s\" % (self.__selector_type__, selector))", "def parse_index(html):\n pattern = re.compile('<a.*?href=\"(.*?)\".*?class=\"name\">')\n items = re.findall(pattern, html)\n if not items:\n return []\n for item in items:\n detail_url = urljoin(BASE_URL, item)\n logging.info('get detail url %s', detail_url)\n yield detail_url", "def get_selector_from_split(raw_selector):\n return get_selector_from_sym(raw_selector)", "def init_subparsers(self) -> _SubParsersAction:\r\n subparsers = self.parser.add_subparsers(dest=\"which\")\r\n\r\n \"\"\"Parser for list command.\"\"\"\r\n parser_list = subparsers.add_parser(\"list\")\r\n parser_list.set_defaults(which=\"list\",\r\n help=\"Display a list of all projects.\")\r\n\r\n \"\"\"Parser for view command.\"\"\"\r\n parser_view = subparsers.add_parser(\"view\")\r\n parser_view.set_defaults(which=\"view\",\r\n 
help=\"Displays details of project.\")\r\n parser_view.add_argument(\"project_id\", metavar=\"project-id\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify project to view.\")\r\n\r\n \"\"\"Parser for create command.\"\"\"\r\n parser_create = subparsers.add_parser(\"create\")\r\n parser_create.set_defaults(which=\"create\",\r\n help=\"(Team Lead and Admin only) Create \"\r\n \"a new project from a given repo.\")\r\n parser_create.add_argument(\"gh_repo\", metavar=\"gh-repo\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify link to \"\r\n \"GitHub repository.\")\r\n parser_create.add_argument(\"github_team_name\",\r\n metavar=\"github-team-name\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify GitHub team to \"\r\n \"assign project to.\")\r\n parser_create.add_argument(\"--name\", metavar=\"DISPLAY-NAME\",\r\n type=str, action=\"store\",\r\n help=\"Add to set the displayed \"\r\n \"name of the project.\")\r\n\r\n \"\"\"Parser for unassign command.\"\"\"\r\n parser_unassign = subparsers.add_parser(\"unassign\")\r\n parser_unassign.set_defaults(which=\"unassign\",\r\n help=\"Unassign a given project.\")\r\n parser_unassign.add_argument(\"project_id\", metavar=\"project-id\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify project \"\r\n \"to unassign.\")\r\n\r\n \"\"\"Parser for edit command.\"\"\"\r\n parser_edit = subparsers.add_parser(\"edit\")\r\n parser_edit.set_defaults(which=\"edit\",\r\n help=\"Edit the given project.\")\r\n parser_edit.add_argument(\"project_id\", metavar=\"project-id\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify project to edit.\")\r\n parser_edit.add_argument(\"--name\", metavar=\"DISPLAY-NAME\",\r\n type=str, action=\"store\",\r\n help=\"Add to change the displayed \"\r\n \"name of the project.\")\r\n\r\n \"\"\"Parser for assign command.\"\"\"\r\n parser_assign = subparsers.add_parser(\"assign\")\r\n parser_assign.set_defaults(which=\"assign\",\r\n help=\"Assigns a project to a team.\")\r\n parser_assign.add_argument(\"project_id\", metavar=\"project-id\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify project to assign.\")\r\n parser_assign.add_argument(\"github_team_name\",\r\n metavar=\"github-team-name\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify GitHub team to \"\r\n \"assign project to.\")\r\n parser_assign.add_argument(\"-f\", \"--force\", action=\"store_true\",\r\n help=\"Set to assign project even if \"\r\n \"another team is already \"\r\n \"assigned to it.\")\r\n\r\n \"\"\"Parser for delete command.\"\"\"\r\n parser_delete = subparsers.add_parser(\"delete\")\r\n parser_delete.set_defaults(which=\"delete\",\r\n help=\"Delete the project from database.\")\r\n parser_delete.add_argument(\"project_id\", metavar=\"project-id\",\r\n type=str, action=\"store\",\r\n help=\"Use to specify project to delete.\")\r\n parser_delete.add_argument(\"-f\", \"--force\", action=\"store_true\",\r\n help=\"Set to delete project even if \"\r\n \"a team is already assigned to it.\")\r\n\r\n return subparsers", "def parse_reviews_url(self, html):\n sel = Selector(html)\n url = sel.xpath(self.reviews_listing_url_xpath).extract()[0]\n return url", "def scrape(self, selector, cleaner=None, processor=None):\n # Apply CSS or XPath expression to the selector\n selected = selector.xpath(self.selection) if self.xpath else selector.css(self.selection)\n # Extract the value and apply regular expression if specified\n value = selected.re(self.re) if self.re else selected.extract(raw=self.raw, 
cleaner=cleaner)\n return self._post_scrape(value, processor=processor)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Queries selected explores and returns any errors.
def validate(self, mode: str = "batch") -> List[SqlError]:
    explore_count = self._count_explores()
    printer.print_header(
        f"Testing {explore_count} "
        f"{'explore' if explore_count == 1 else 'explores'} "
        f"[{mode} mode]"
    )
    loop = asyncio.get_event_loop()
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for s in signals:
        loop.add_signal_handler(
            s, lambda s=s: asyncio.create_task(self.shutdown(s, loop))
        )
    errors = list(loop.run_until_complete(self._query(mode)))
    if mode == "hybrid" and self.project.errored:
        errors = list(loop.run_until_complete(self._query(mode)))
    for model in sorted(self.project.models, key=lambda x: x.name):
        for explore in sorted(model.explores, key=lambda x: x.name):
            message = f"{model.name}.{explore.name}"
            if explore.errored:
                printer.print_validation_result("error", message)
            else:
                printer.print_validation_result("success", message)
    return errors
[ "def test_query_grants_fail(cb):\n query = cb.select(Grant)\n with pytest.raises(ApiError):\n list(query)", "def do_query(self):\n print(\"[*] Beginning HackerTarget Query\")\n try:\n res = requests.get(self.ENDPOINT, verify=False, proxies=self.proxies)\n self._print(f\"Making request to url {self.ENDPOINT}\" + \n f\"with proxies {self.proxies}\")\n lines = res.content.splitlines()\n if len(lines) < 2: #Assuming anything greater than 1 is a valid domain for our purposes\n print(\"Domain not found on hackertarget\")\n return\n for line in res.content.split():\n unused_hostname, ip = str(line, 'utf-8').split(',')\n self.results += [ip.strip()]\n self._write_results()\n except requests.exceptions.ConnectionError as er:\n logger.error(f\"[!] Connection Error check network configuration {er}\")\n print(f\"[!] Connection Error check network configuration {er}\")\n except requests.exceptions.RequestException as er:\n logger.error(f\"[!] Request failed {er}\")\n print(f\"[!] Request failed {er}\")\n except OSError as er:\n logger.exception(\"OSError in HackerTarget\")\n print(f\"[!] Writing to file failed {er}\")\n print(\"[*] End HackerTarget Query\")", "def runQuery(self, query, *args, **kwargs):\n\t\tpool = self.getPoolFor(query)\n\t\twhile(pool):\n\t\t\ttry:\n\t\t\t\treturn pool.runQuery(query, *args, **kwargs)\n\t\t\texcept adbapi.ConnectionLost, e:\n\t\t\t\tif(pool == self.master):\n\t\t\t\t\traise e\n\t\t\t\telse:\n\t\t\t\t\tprint >>sys.stderr, \"Expired slave %s during query because of %s\" % (pool.connkw['host'], str(e))\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.slaves.remove(pool)\n\t\t\t\t\t\tpool.close()\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\t\tpool = self.getPoolFor(query)", "def query_check(tables, query):\n (valid_from, from_error) = from_check(tables, query) \n (valid_select, select_error) = check_select(tables, query)\n (valid_group, group_error) = check_group_by(tables, query)", "def query_days_with_errors():\n query = \"\"\"select time::date as error_date,\n (count(*)*100.0/requests) as error_average\n from log, (select time::date as request_date, count(*) as requests\n from log group by request_date) as all_requests\n where status similar to '4__%|5__%' and time::date = request_date\n group by error_date, requests having (count(*)*100.0/requests) > 1;\n \"\"\"\n result = run_query(query)\n print(\"\\r\\n==== Days where more than 1% of requests lead to errors ====\")\n for item in result:\n print(' ' + item[0].strftime('%B %d, %Y') +\n ' -- ' + str(round(item[1], 2)) + '% errors')", "def getErrors(filters, limit, offset):\n for key in filters:\n if key in INSTANCE_FILTERS:\n return None, getInstances(filters, limit=limit, offset=offset)\n\n errors = LoggedError.all().filter('active =', True)\n for key, value in filters.items():\n if key == 'maxAgeHours':\n errors = errors.filter('firstOccurrence >', datetime.now() - timedelta(hours = int(value)))\n elif key == 'project':\n errors = errors.filter('project =', getProject(value))\n else:\n errors = errors.filter(key, value)\n if 'maxAgeHours' in filters:\n errors = errors.order('-firstOccurrence')\n else:\n errors = errors.order('-lastOccurrence')\n\n return errors.fetch(limit, offset), None", "def test_query_fail(self):\n with _mocked_session('get', 401):\n cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')\n cli.query('select column_one from foo;')", "def get_errors(self):\n assert self._resolved, 'you must execute the commands before fetching results'\n\n return self._errors", "def 
_find_bad_connections(connections):\r\n bad = []\r\n for conn in connections:\r\n try:\r\n _, _, _ = gearman.util.select([conn], [], [], timeout=0)\r\n except (select.error, gearman.errors.ConnectionError):\r\n bad.append(conn)\r\n return bad", "def select_query(self, queries):\n\n raise NotImplementedError", "def check_results(self):\n try:\n while True:\n item = self._pop_completed() # will throw Empty\n if not item.get_results().wasSuccessful():\n raise ExecutionError(\"Error Executing Command: \", item)\n except Empty:\n return", "def queries_long_running():\n query_queries_long_running(current_app.extensions['sqlalchemy'].db)", "def poll_on_queries(self):\n queries_in_progress = set(self.query_ids)\n statement_success_status = {}\n statement_error_status = {}\n for query_id in self.query_ids:\n if not len(queries_in_progress):\n break\n self.log.info(\"checking : %s\", query_id)\n try:\n statement_status = self._hook.get_sql_api_query_status(query_id)\n except Exception as e:\n raise ValueError({\"status\": \"error\", \"message\": str(e)})\n if statement_status.get(\"status\") == \"error\":\n queries_in_progress.remove(query_id)\n statement_error_status[query_id] = statement_status\n if statement_status.get(\"status\") == \"success\":\n statement_success_status[query_id] = statement_status\n queries_in_progress.remove(query_id)\n time.sleep(self.poll_interval)\n return {\"success\": statement_success_status, \"error\": statement_error_status}", "def select_newest_errors(self):\r\n\r\n if self.__errors_locked:\r\n return []\r\n\r\n result = []\r\n check_addr = []\r\n for error in self.__errors:\r\n if error['addr'] not in check_addr:\r\n check_addr.append(error['addr'])\r\n result.append(error)\r\n return result", "def query_exception_count(self):\n return len(re.findall('EXCEPTION',\n self.impalad_test_service.read_debug_webpage('queries')))", "def _get_query_edge_rx_errors(self):\n return self.__query_edge_rx_errors", "def get_query_expiry(self, dasquery):\n err_return = time.time() + (2*self.preempt)\n try:\n if not self.das.rawcache.incache(dasquery):\n try:\n self.das.call(dasquery, add_to_analytics=False)\n except Exception as err:\n print \"\\n### FAIL input query=%s, err=%s\" \\\n % (dasquery, str(err))\n raise err\n expiries = [result.get('apicall', {}).get('expire', 0) for result in \\\n self.das.analytics.list_apicalls(qhash=dasquery.qhash)]\n if not expiries:\n return err_return\n return min(expiries)\n except:\n return err_return", "def getAllFailsafes(self):\n\t\tquery = ''\n\t\tconn = self.get_connection()\n\t\theaders = { 'Content-type' : 'application/json', 'Authorization' : 'A10 %s' %self.sessionid}\n\t\tconn.request('GET', self.get_path() + '/' + query, headers=headers)\n\t\tresponse = conn.getresponse()\n\t\texpected_status = 200\n\t\terrors = {500: 'An unexpected runtime exception'}\n\t\tpayload = self.get_output(response, expected_status, errors)\n\t\tconn.close()\n\t\tif self.debug:\n\t\t\tprint 'payload:', payload\n\t\tif payload == '':\n\t\t\tpayload = None\n\t\tif payload is not None:\n\t\t\tdata = json.loads(payload)\n\t\t\tpayload= data.get('fail-safeList')\n\t\treturn deserialize_list_Fail_safe_json(payload)", "def test_bad_query_raises(self):\n dt = datetime(2015, 6, 15, 12, 0, 0)\n client = RadarServer(self.server + '/nexrad/level3/IDD')\n q = client.query().stations('FTG').time(dt)\n with pytest.raises(BadQueryError):\n client.get_catalog(q)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates and executes a query with a single explore.
async def _query_explore(
    self, session: aiohttp.ClientSession, model: Model, explore: Explore
) -> str:
    dimensions = [dimension.name for dimension in explore.dimensions]
    query_task_id = await self._run_query(
        session, model.name, explore.name, dimensions
    )
    self.query_tasks[query_task_id] = explore
    return query_task_id
[ "def query(self, sql):", "def _run_query (self, query):\n self._login()\n return self.api_obj.query(query)", "def query(self, **kwargs):\n return self.iterate('query', **kwargs)", "def do(self, **kwargs):\n self._check_query_input(**kwargs)\n return Query(dag=self.dag, given=self.given_dict, do={**self.do_dict, **kwargs})", "def execute(self, *args, **kwargs):\n return graphql(self, *args, **kwargs)", "def elastic_query(self, query: str):\n self.description = None\n # Sanitize query\n query = self.sanitize_query(query)\n payload = {\"query\": query, \"fetch_size\": self.fetch_size}\n path = f\"/{self.sql_path}/\"\n try:\n resp = self.es.transport.perform_request(\"POST\", path, body=payload)\n except es_exceptions.ConnectionError as e:\n raise exceptions.OperationalError(\n f\"Error connecting to {self.url}: {e.info}\"\n )\n except es_exceptions.RequestError as e:\n raise exceptions.ProgrammingError(\n f\"Error ({e.error}): {e.info['error']['reason']}\"\n )\n return resp", "def test_simple_query():\n query = \"select * from (VALUES(1,2,3))\"\n connect_to_dremio_flight_server_endpoint(\"localhost\",\n \"32010\", \"dremio\", \"dremio123\", query, False, False, False)", "def suppose_do(self, **kwargs):\n self._check_query_input(**kwargs)\n return SupposeQuery(dag=self.dag, when=self.orig_query, suppose_do={**self.suppose_do_dict, **kwargs},\n suppose_given=self.suppose_given_dict)", "def select_query(self, queries):\n\n raise NotImplementedError", "def test_test_query(self):\n pass", "def query(\n self,\n body: Optional[Dict] = None,\n query: Optional[str] = None,\n query_model: Optional[Query] = None,\n debug_request: bool = False,\n recall: Optional[Tuple] = None,\n **kwargs\n ) -> VespaResult:\n\n if body is None:\n assert query is not None, \"No 'query' specified.\"\n assert query_model is not None, \"No 'query_model' specified.\"\n body = query_model.create_body(query=query)\n if recall is not None:\n body.update(\n {\n \"recall\": \"+(\"\n + \" \".join(\n [\"{}:{}\".format(recall[0], str(doc)) for doc in recall[1]]\n )\n + \")\"\n }\n )\n\n body.update(kwargs)\n\n if debug_request:\n return VespaResult(vespa_result={}, request_body=body)\n else:\n r = post(self.search_end_point, json=body)\n return VespaResult(vespa_result=r.json())", "def query():\n return render_template(\"dashboard/query.html\", tagname = 'query', form = QueryForm())", "def apply_query():\n s = Search(using=ES_OBJECT, index=SEARCHING_INDEX)\n if INDEXATION_MODE == \"autocomplete\":\n logging.info(\"Applying autocomplete search\")\n s.update_from_dict(\n autocomplete_query(QUERY, FIELDS_TO_SEARCH, popularity_field=POPULARITY_FIELD)\n )\n elif INDEXATION_MODE in [\"basic_english\", \"french\"]:\n logging.info(\"Applying multi match search with fuzziness if set in yaml\")\n s.update_from_dict(\n multi_match_query(QUERY, FIELDS_TO_SEARCH, fuzziness=FUZZINESS)\n )\n else:\n raise NotImplementedError(\"Mode d'indexation choisi pas setup\")\n return s", "def sql_query(dbname, query):\n ...", "def capture_query(self, query, params=(), engine=None, **kwargs):\n return self.capture('Query', query=query, params=params, engine=engine,\n **kwargs)", "def run(self, query, data=None, log_results=False):\n with self.driver.session(database=self.database) as session:\n if data:\n results = session.write_transaction(self.unwind_transaction, query=query, data=data)\n else:\n results = session.run(query)\n if log_results:\n logging.info(f\"Neo4j plugin - Query results: {results.consume().counters}\")", "def 
create_from_ado_query(self):\n self.visible_element_click(locators.SuiteManagerPageLocators.FROM_ADO_QUERY_OPT)", "def exec():\n # Recupera la consulta a ejecutar\n body = request.get_json()\n query = body.get('query')\n try:\n # Ejecuta el query (con el interpreter)\n result = interpreter.execution(query)\n result = json.loads(json.dumps(result, ignore_nan = True))\n return {\"result\": result, \"ok\": True}, 200\n except Exception as e:\n # Retorna un mensaje de error en el servidor\n print(e)\n return {\"ok\": False}, 400", "def dbGenerateSaveQuery(self, env):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates and executes a query with a single dimension.
async def _query_dimension(
    self,
    session: aiohttp.ClientSession,
    model: Model,
    explore: Explore,
    dimension: Dimension,
) -> str:
    query_task_id = await self._run_query(
        session, model.name, explore.name, [dimension.name]
    )
    self.query_tasks[query_task_id] = dimension
    return query_task_id
[ "def query(session, Dim, **kwargs):\n return session.query(Dim).filter_by(**kwargs).first()", "def run(self):\n return self.cdb.db.query(\"dataset\", self.query)", "def _Dynamic_RunQuery(self, request, response):\n runquery_response = datastore_pb.QueryResult()\n self.__call('datastore_v3', 'RunQuery', request, runquery_response)\n if runquery_response.result_size() > 0:\n response.CopyFrom(runquery_response)\n return\n\n next_request = datastore_pb.NextRequest()\n next_request.mutable_cursor().CopyFrom(runquery_response.cursor())\n next_request.set_count(request.limit())\n self.__call('datastore_v3', 'Next', next_request, response)", "def execute(self, *args, **kwargs):\n return graphql(self, *args, **kwargs)", "def query(self, sql):", "def _run_query (self, query):\n self._login()\n return self.api_obj.query(query)", "def do(self, **kwargs):\n self._check_query_input(**kwargs)\n return Query(dag=self.dag, given=self.given_dict, do={**self.do_dict, **kwargs})", "def sql_query(dbname, query):\n ...", "def query(entity) -> OrmQueryBuilder[OrmQueryBuilder, E]:", "def soql_query(self, query, *args):\n query = \" \".join((query,) + args)\n self.builtin.log(\"Running SOQL Query: {}\".format(query))\n return self.cumulusci.sf.query_all(query)", "def execute(self, context):\n self.log.info('Loading the data into {}'.format(self.table))\n redshift_hook = PostgresHook(self.redshift_conn_id)\n sql_statement = LoadDimensionOperator.load_dim_table.format(self.table, \n self.sql)\n redshift_hook.run(sql_statement)\n self.log.info(\"Done Loading..\")", "def executeQuery(conn, query):\n cur = conn.cursor()\n cur.execute(query)\n return cur", "def create_query(self, query_json):\n\n set_name = \"set-none\"\n if 'predicates' in query_json:\n set_name = self.add_set(query_json['predicates'])\n\n is_polar = query_json.get('isPolar', None)\n if is_polar is None:\n tag = \"NLQuery\"\n self.statement_xml = \"<NLQueryStatement>%s</NLQueryStatement>\" % query_json.get(\n 'nlQuery', '')\n\n # Add additional time intervals\n for time_obj in query_json.get('times', []):\n self.add_time(time_obj)\n\n else:\n if is_polar:\n tag = \"Query\"\n else:\n tag = \"NonePolarQuery\"\n self.add_statement(set_name, query_json['isPolar'])\n\n return self.compile_query(tag)", "def execute(graph_db, query, params=None, row_handler=None, metadata_handler=None, error_handler=None):\n return Query(graph_db, query).execute(\n params, row_handler=row_handler, metadata_handler=metadata_handler, error_handler=error_handler\n )", "def display_query(query, index): # pragma: no cover\n QueryPlot(query, index)", "def execute(self) :\n\n # If we've already executed, report as such with an Exception. This may\n # be an important warning. Perhaps we should implement a reExecute for\n # cases where we want the Query to read from the database again rather than\n # simply create a new Query object.\n if self.isExecuted :\n raise QueryException, \"Error: This query has already been executed. \" + \\\n \"Try reExecute().\"\n\n c = self.conn.cursor()\n\n if 'material' == self.qType :\n \n # Get the array dimensions. We don't know if we've filtered or collapsed\n # away some of\n # the potential result space, so we need to assume the array has the\n # following dimensions (and size).\n # time (tf - t0) X from (numFrom) X to (numTo) X iso (numIsos)\n # or time X from X to \n\n # get the list of actors\n actList = self.getActList()\n numActs = len(actlist)\n\n # Get the list of isotopes from the hard-coded list in getIsoList. 
Count\n # them up and make a dictionary for mapping them into the iso dimension\n # of the Query's data array.\n numIsos = len(self.indToIso)\n\n # Initialize the array.\n try :\n self.data = zeros( (self.tf - self.t0, numActs, numActs, numIsos) )\n except ValueError :\n raise QueryException, \"Error: you've executed a Query whose array \" + \\\n \"representation would be \" + str(self.tf - self.t0) + \" x \" + \\\n str(numActs) + \" x \" + str(numActs) + \" x \" + str(numIsos) + \\\n \". That's too large.\"\n\n # Perform the SQL query.\n c.execute(str(self.qStmt))\n\n # Load the results into the array.\n fromInd = -1\n toInd = -1\n for row in c :\n time = row[0] - self.t0\n fFac = row[1]\n tFac = row[2]\n nIso = row[3]\n mIso = row[4]\n\n # Get the indexes for the 'from' and 'to' dimensions.\n d = self.conn.cursor()\n d.execute(\"SELECT Agents.ID FROM Agents WHERE Agents.ID = ? \", (fFac,))\n \n for roe in d :\n fromInd = actList.index(roe[0])\n\n d.execute(\"SELECT Agents.ID FROM Agents \" + \\\n \"WHERE Agents.ID = ? \", (tFac,))\n \n for roe in d :\n toInd = actList.index(roe[0])\n\n self.data[time][fromInd][toInd][self.isoToInd[nIso]] += mIso\n\n # Store the labels.\n self.dataLabels[0] = range(self.t0, self.tf)\n self.dataLabels[1] = actList\n self.dataLabels[2] = actList\n self.dataLabels[3] = self.indToIso.values()\n \n elif 'resource' == self.qType :\n \n # Get the array dimensions. We don't know if we've filtered or collapsed\n # away some of\n # the potential result space, so we need to assume the array has the\n # following dimensions (and size).\n # time (tf - t0) X from (numFrom) X to (numTo) X iso (numIsos)\n # or time X from X to \n\n # get the list of actors\n actList = self.getActList()\n numActs = len(actList)\n\n # Initialize the array.\n try :\n self.data = zeros( (self.tf - self.t0, numActs, numActs) )\n except ValueError :\n raise QueryException, \"Error: you've executed a Query whose array \" + \\\n \"representation would be \" + str(self.tf - self.t0) + \" x \" + \\\n str(numActs) + \" x \" + str(numActs) + \\\n \". That's too large.\"\n\n # Perform the SQL query.\n c.execute(str(self.qStmt))\n\n # Load the results into the array.\n fromInd = -1\n toInd = -1\n for row in c :\n time = row[0] - self.t0\n fFac = row[1]\n tFac = row[2]\n rsrc = row[3]\n\n # Get the indexes for the 'from' and 'to' dimensions.\n d = self.conn.cursor()\n d.execute(\"SELECT Agents.ID FROM Agents WHERE Agents.ID = ? \", (fFac,))\n \n for roe in d :\n fromInd = actList.index(roe[0])\n\n d.execute(\"SELECT Agents.ID FROM Agents \" + \\\n \"WHERE Agents.ID = ? 
\", (tFac,))\n \n for roe in d :\n toInd = actList.index(roe[0])\n\n self.data[time][fromInd][toInd] += rsrc\n\n # Store the labels.\n self.dataLabels[0] = range(self.t0, self.tf)\n self.dataLabels[1] = actList\n self.dataLabels[2] = actList\n\n \n self.isExecuted = True", "def _execute_query(sql_raw, params, qry_type):\n conn = psycopg2.connect(config.DATABASE_STRING)\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n cur.execute(sql_raw, params)\n\n if qry_type == 'sel_single':\n results = cur.fetchone()\n elif qry_type == 'sel_multi':\n results = cur.fetchall()\n elif qry_type == 'insert':\n results = cur.fetchone()\n conn.commit()\n elif qry_type == 'update':\n results = cur.fetchone()\n conn.commit()\n else:\n raise Exception('Invalid query type defined.')\n\n conn.close()\n return results", "def elastic_query(self, query: str):\n self.description = None\n # Sanitize query\n query = self.sanitize_query(query)\n payload = {\"query\": query, \"fetch_size\": self.fetch_size}\n path = f\"/{self.sql_path}/\"\n try:\n resp = self.es.transport.perform_request(\"POST\", path, body=payload)\n except es_exceptions.ConnectionError as e:\n raise exceptions.OperationalError(\n f\"Error connecting to {self.url}: {e.info}\"\n )\n except es_exceptions.RequestError as e:\n raise exceptions.ProgrammingError(\n f\"Error ({e.error}): {e.info['error']['reason']}\"\n )\n return resp", "def query(self, query):\n self.operation += query.operation\n self.parameters += query.parameters" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Counts the explores in the LookML project hierarchy.
def _count_explores(self) -> int:
    explore_count = 0
    for model in self.project.models:
        explore_count += len(model.explores)
    return explore_count
[ "def count_explores(self) -> int:\n return len([explore for explore in self.iter_explores() if not explore.skipped])", "def _num_root(self) -> int:\n return sum(\n int(lineage.root.item.status != \"broken\") for lineage in self.lineages\n )", "def n_experiences(self):\n\n return len(self.heap.track)", "def getNumberOfHeuristics(self) -> None:", "def _get_count(self) -> \"size_t\" :\n return _core.DataProjects__get_count(self)", "def calories(self) -> int:\n raise NotImplementedError(\"Hey, you need to override this\")", "def depth(self):\n try:\n stack.depth()\n except:\n if settings.verbose:\n click.echo(f'depth() filed.')", "def _windowlevel(self):\n prev = self\n count = 0\n while not prev._istoplevel:\n count = count + 1\n prev = prev._parent\n return count", "def complexity(cx):\n\n cx.run(\"mkdir -p metrics/code_quality\")\n\n cx.run(f\"lizard -o metrics/code_quality/lizard.csv src/{project_slug()}\")\n cx.run(f\"lizard -o metrics/code_quality/lizard.html src/{project_slug()}\")\n\n # SNIPPET: annoyingly opens the browser\n\n # make a cute word cloud of the things used\n # cx.run(\"(cd metrics/code_quality; lizard -EWordCount src/project_slug() > /dev/null)\")", "def get_level_count(wsi) -> int:\n return wsi.resolutions[\"level_count\"] # type: ignore", "def depth(self) -> float:", "def get_jumps_count(parse_tree):\n raw_log = parse_tree\n acc_entries = raw_log.getElementsByTagName(JUMPS)\n return len(acc_entries)", "def getBackStackEntryCount(self):\n pass", "def compute_depth(self):\n\t\t\n\t\tself.depth = self.url.count(\"/\") - startURL.count(\"/\")", "def get_num_extracts(self):\n if self._extracts == None:\n return -1\n return len(self._extracts[\"labels\"]) # not counting aliases as these are a subset of the extracted entities", "def count_levels(self):\r\n lcount = 0\r\n rcount = 0\r\n if self.left:\r\n lcount = self.left.count_levels()\r\n if self.right:\r\n rcount = self.right.count_levels()\r\n return 1 + max(lcount, rcount)", "def get_number_of_sites(self, tree): # draft\n return tree.xpath(\"//p[@class='descriptor left']/text()\").split(\"or\",1)[1].strip() # We should get number of elements as str (la", "def count (self):\n total = 1\n for dep in self.deps:\n total += dep.count()\n return total", "def tackle(self):\n self.total_tackles += 1", "def number_of_reflections_of_full_support(self):\n n = self.rank()\n h = self.coxeter_number()\n l = self.cardinality()\n codegrees = self.codegrees()[:-1]\n return (n * h * prod(codegrees)) // l" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Conform a dictionary of variables onto a new set of variables reindexed with dimension positional indexers and possibly filled with missing values. Not public API.
def reindex_variables(
    variables: Mapping[Any, Variable],
    dim_pos_indexers: Mapping[Any, Any],
    copy: bool = True,
    fill_value: Any = dtypes.NA,
    sparse: bool = False,
) -> dict[Hashable, Variable]:
    new_variables = {}
    dim_sizes = calculate_dimensions(variables)
    masked_dims = set()
    unchanged_dims = set()
    for dim, indxr in dim_pos_indexers.items():
        # Negative values in dim_pos_indexers mean values missing in the new index
        # See ``Index.reindex_like``.
        if (indxr < 0).any():
            masked_dims.add(dim)
        elif np.array_equal(indxr, np.arange(dim_sizes.get(dim, 0))):
            unchanged_dims.add(dim)
    for name, var in variables.items():
        if isinstance(fill_value, dict):
            fill_value_ = fill_value.get(name, dtypes.NA)
        else:
            fill_value_ = fill_value
        if sparse:
            var = var._as_sparse(fill_value=fill_value_)
        indxr = tuple(
            slice(None)
            if d in unchanged_dims
            else dim_pos_indexers.get(d, slice(None))
            for d in var.dims
        )
        needs_masking = any(d in masked_dims for d in var.dims)
        if needs_masking:
            new_var = var._getitem_with_mask(indxr, fill_value=fill_value_)
        elif all(is_full_slice(k) for k in indxr):
            # no reindexing necessary
            # here we need to manually deal with copying data, since
            # we neither created a new ndarray nor used fancy indexing
            new_var = var.copy(deep=copy)
        else:
            new_var = var[indxr]
        new_variables[name] = new_var
    return new_variables
[ "def _normalize_indexes(\n self,\n indexes: Mapping[Any, Any],\n ) -> tuple[NormalizedIndexes, NormalizedIndexVars]:\n if isinstance(indexes, Indexes):\n xr_variables = dict(indexes.variables)\n else:\n xr_variables = {}\n\n xr_indexes: dict[Hashable, Index] = {}\n for k, idx in indexes.items():\n if not isinstance(idx, Index):\n if getattr(idx, \"dims\", (k,)) != (k,):\n raise ValueError(\n f\"Indexer has dimensions {idx.dims} that are different \"\n f\"from that to be indexed along '{k}'\"\n )\n data = as_compatible_data(idx)\n pd_idx = safe_cast_to_index(data)\n pd_idx.name = k\n if isinstance(pd_idx, pd.MultiIndex):\n idx = PandasMultiIndex(pd_idx, k)\n else:\n idx = PandasIndex(pd_idx, k, coord_dtype=data.dtype)\n xr_variables.update(idx.create_variables())\n xr_indexes[k] = idx\n\n normalized_indexes = {}\n normalized_index_vars = {}\n for idx, index_vars in Indexes(xr_indexes, xr_variables).group_by_index():\n coord_names_and_dims = []\n all_dims: set[Hashable] = set()\n\n for name, var in index_vars.items():\n dims = var.dims\n coord_names_and_dims.append((name, dims))\n all_dims.update(dims)\n\n exclude_dims = all_dims & self.exclude_dims\n if exclude_dims == all_dims:\n continue\n elif exclude_dims:\n excl_dims_str = \", \".join(str(d) for d in exclude_dims)\n incl_dims_str = \", \".join(str(d) for d in all_dims - exclude_dims)\n raise ValueError(\n f\"cannot exclude dimension(s) {excl_dims_str} from alignment because \"\n \"these are used by an index together with non-excluded dimensions \"\n f\"{incl_dims_str}\"\n )\n\n key = (tuple(coord_names_and_dims), type(idx))\n normalized_indexes[key] = idx\n normalized_index_vars[key] = index_vars\n\n return normalized_indexes, normalized_index_vars", "def dict_to_coords(metadata: Dict[str, Any], dim_name: str) -> Dict[str, xr.Variable]:\n coords = {}\n for field, props in metadata.items():\n while isinstance(props, list) and not isinstance(props, _ourlist):\n # a list scalar (like `instruments = ['OLI', 'TIRS']`).\n\n # first, unpack (arbitrarily-nested) 1-element lists.\n # keep re-checking if it's still a list\n if len(props) == 1:\n props = props[0]\n continue\n\n # for now, treat multi-item lists as a set so xarray can interpret them as 0D variables.\n # (numpy very much does not like to create object arrays containing python lists;\n # `set` is basically a hack to make a 0D ndarray containing a Python object with multiple items.)\n try:\n props = set(props)\n except TypeError:\n # if it's not set-able, just give up\n break\n\n props_arr = np.squeeze(np.array(props))\n if (\n props_arr.ndim > 1\n or props_arr.ndim == 1\n and not isinstance(props, _ourlist)\n ):\n # probably a list-of-lists situation. the other dims likely don't correspond to\n # our \"bands\", \"y\", and \"x\" dimensions, and xarray won't let us use unrelated\n # dimensions. 
so just skip it for now.\n continue\n\n coords[field] = xr.Variable(\n (dim_name,) if props_arr.ndim == 1 else (),\n props_arr,\n )\n\n return coords", "def _build_variable_replacements(self, variables, values):\n # initialise\n repl_dict = OrderedDict()\n repl_list = []\n\n # iterate variables\n for index, v in enumerate(variables):\n # variable should be set to 0\n if v in self.zero_out:\n x = 0\n else:\n # get variable size\n size = self.game.variables[v].size\n # set value\n # x = values[index] % (2 ** (size * 8))\n x = values[index] % (2 ** (size))\n\n # fill data structures\n repl_dict[v] = str(x)\n repl_list.append(x)\n\n return repl_dict, repl_list", "def fill_variational_params(var_params_dict, params_arr):\r\n for par, val in var_params_dict.items():\r\n if is_variational_input(val):\r\n global_index = int(val[-1]) - 1\r\n var_params_dict[par] = params_arr[global_index]\r\n return var_params_dict", "def mapVars(domains,compcount,scales):\n var2index,index2var,index = {},[],0\n for domin in xrange(len(domains)):\n for comp in xrange(compcount):\n for scale in scales:\n var2index[(domin,comp,scale)] = index\n index += 1\n index2var.append((domin,comp,scale))\n return var2index,index2var", "def _replace_dim(coords, dims, axis, into, dimensionality):\n # TODO can we improve this with assign_coords / swap_dims?\n old_dim = dims[axis]\n\n if dimensionality == 2:\n if into == \"cartesian\":\n new_dim = \"cartesian_axis\"\n new_coord = [\"x\", \"y\"]\n elif into == \"polar\":\n new_dim = \"polar_axis\"\n new_coord = [\"r\", \"phi\"]\n elif dimensionality == 3:\n if into == \"cartesian\":\n new_dim = \"cartesian_axis\"\n new_coord = [\"x\", \"y\", \"z\"]\n elif into == \"spherical\":\n new_dim = \"spherical_axis\"\n new_coord = [\"r\", \"theta\", \"phi\"]\n elif into == \"quaternion\":\n new_dim = \"quaternion_axis\"\n new_coord = [\"w\", \"x\", \"y\", \"z\"]\n\n dims = tuple((d if d != old_dim else new_dim) for d in dims)\n\n coords = {c: coords[c] for c in coords if old_dim not in coords[c].dims}\n coords[new_dim] = new_coord\n\n return coords, dims", "def rename_dimensions(DICT_IN, DICT_OUT):\n if 'x' in DICT_IN.dims:\n DICT_OUT = DICT_IN.rename(dict(x='j',y='i'))\n elif 'ni' in DICT_IN.dims:\n DICT_OUT = DICT_IN.rename(dict(nj='j',ni='i'))\n elif 'nlon' in DICT_IN.dims:\n DICT_OUT = DICT_IN.rename(dict(nlat='j',nlon='i'))\n elif 'longitude' in DICT_IN.dims:\n DICT_OUT = DICT_IN.rename(dict(latitude='j',longitude='i'))\n else: \n DICT_OUT = DICT_IN\n return DICT_OUT", "def set_variables(self, variables, dataset=0, **kwargs):\n #variables.update(kwargs)\n\n if not dataset:\n dataset = ['']\n\n for ds in listify(dataset):\n for (key, val) in variables.items():\n newkey = key + str(ds)\n self.set_variable(newkey, val, **kwargs)", "def _update_definition_variables(self, tag, hed_vars, index):\r\n level = tag.extension.lower()\r\n for var_name in hed_vars:\r\n hed_var = self._type_value_map.get(var_name, None)\r\n if hed_var is None:\r\n hed_var = HedTypeFactors(self.type_tag, var_name, self.total_events)\r\n self._type_value_map[var_name] = hed_var\r\n var_levels = hed_var.levels.get(level, {index: 0})\r\n var_levels[index] = 0\r\n hed_var.levels[level] = var_levels", "def mapVarsSpec(domains,compcount,comp2scale):\n var2index,index2var,index = {},[],0\n for domin in xrange(len(domains)):\n for comp in xrange(compcount):\n var2index[(domin,comp,comp2scale[comp])] = index\n index += 1\n index2var.append((domin,comp,comp2scale[comp]))\n return var2index,index2var", "def 
fixVariables(self,variableIndices,labels):\n if(self.operator=='adder'):\n manip = adder.GraphicalModelManipulator(self)\n elif(self.operator=='multiplier'):\n manip = multiplier.GraphicalModelManipulator(self)\n else:\n raise RuntimeError(\"uknown operator %s\"%self.operator)\n\n v=numpy.require(variableIndices,dtype=index_type)\n l=numpy.require(labels,dtype=label_type)\n\n # fix vars\n manip.fixVariables(v,l)\n # build submodel\n manip.buildModifiedModel()\n # get submodel\n subGm = manip.getModifiedModel()\n # get submodel variable indices\n subGmVis=manip.getModifiedModelVariableIndices()\n return subGm,subGmVis\n \n #pass", "def fill_undefined_dimension_variables(ncdf_grp, dims_vals):\r\n print '-'*50+'\\nPrinting netcdf vars: ', ncdf_grp.variables\r\n for var_name, value_array in dims_vals.iteritems():\r\n if var_name in ncdf_grp.variables:\r\n ncdf_grp.variables[var_name] = value_array\r\n else:\r\n err_msg = 'Variable \"{0}\" declared in Metafile do not match any of the variables in NETCDF file {1}'.format(var_name, ncdf_grp.variables)\r\n raise ValueError(err_msg)", "def build_independent_variable_placeholders(self):\n return {\n var: self.make_placeholder(var.shape) for var in self.independent_variables\n }", "def prepare_arrays(nc, formula_terms, new_shape):\n import dask.array as da\n\n arrays = {}\n for term, var in formula_terms.items():\n var = nc[var]\n if var.ndim == 0:\n arr = var[:]\n else:\n if var.ndim > 2:\n chunks = (1,) + var.shape[1:]\n else:\n chunks = var.shape\n if term == \"sigma\" and var.ndim == 2:\n chunks = var.shape\n if term == \"eta\" and var.ndim == 2:\n chunks = (1,) + var.shape[1:]\n arr = da.from_array(var, chunks=chunks)\n arr = _reshape(arr, new_shape)\n arrays.update({term: arr})\n return arrays", "def separate_coordinate_and_data(variables_dict):\n coordinate_dict = {}\n var_dict = {}\n coord_identifiers = ['plev','elev', 'station','x','y','projection']\n elev_identifier = 'plev'\n plev_identifier = 'elev'\n station_identifier = 'station'\n\n for name, var in variables_dict.iteritems():\n if 'OM__observedProperty' not in var.ncattrs():\n coordinate_dict[name] = var\n else:\n var_dict[name] = var\n\n return (coordinate_dict, var_dict)", "def builddimensions(self):\r\n e = self.experiment # synonym\r\n\r\n # find unique dimension values across variables. 
Dim values could be 0, 5, 5, 5, 2, 666, -74,...\r\n dims = list(np.unique([ var.dim for var in e.variables ])) # np.unique returns sorted values\r\n\r\n # renumber dimension values to be consecutive 0-based\r\n newdims = range(len(dims)) # 0-based consecutive dim values\r\n old2new = dict(zip(dims, newdims)) # maps from old dim values to new ones\r\n for var in e.variables:\r\n var.dim = old2new[var.dim] # overwrite each Variable's old dim value with the new one\r\n\r\n # use newdims to init a list of Dimensions, each with an empty Variables object\r\n self.dimensions = []\r\n for dim in newdims:\r\n d = Dimension(variables=Variables(), dim=dim)\r\n self.dimensions.append(d)\r\n\r\n # now assign each Variable object to the appropriate Dimension object\r\n for var in e.variables:\r\n d = self.dimensions[var.dim] # get the Dimension object\r\n d.variables[var.name] = var # assign the Variable to the Dimension's Variables\r\n d.shuffle = var.shuffle # set the Dimension's shuffle and random flags according to this Variable\r\n d.random = var.random\r\n d.check() # make sure everything is consistent in this Dimension\r", "def _createDimensions(): \n for d in ncin.dimensions:\n dimVar = ncin[d]\n ncout.createDimension(d, dimVar.size)\n data = ncout.createVariable(d, np.dtype('double').char, (d))\n _add_attributes(dimVar, data)\n if d == dimTName:\n data.units = \"Hours since {}-{}-{} {}\".format(year, month, day, timeString)\n if d == dimYName:\n data[:] = dimVar[::-1]\n else:\n data[:] = dimVar[:]", "def initialize_auxiliary_variable(self):\n auxiliary_map = {}\n for sequence_id in self.training_data:\n auxiliary_event_map = {}\n list_length = len(self.training_data[sequence_id])\n for i in range(0, list_length):\n single_event_auxiliary_list = []\n for j in range(-1, i):\n single_event_auxiliary_list.append(1 / (i + 1))\n auxiliary_event_map[i] = single_event_auxiliary_list\n auxiliary_map[sequence_id] = auxiliary_event_map\n return auxiliary_map", "def get_simple_vars(mapping = {},skip=[],custom=False):\r\n # The following are keys that Docassemble uses that we never want to extract from the answer set\r\n keys_to_ignore = ['_internal','url_args','PY2','string_types','nav','__warningregistry__'] + skip\r\n\r\n if custom:\r\n interview_state = custom\r\n else:\r\n interview_state = all_variables(simplify=False)\r\n interview_state = {k:v for k, v in interview_state.items() if k not in keys_to_ignore}\r\n\r\n simplified_vars = {}\r\n\r\n for key, value in interview_state.items():\r\n if isinstance(value, Individual):\r\n add_individual(simplified_vars,key,value)\r\n elif isinstance(value, Person):\r\n add_person(simplified_vars,key,value)\r\n elif isinstance(value,DADict):\r\n add_dict(simplified_vars,key, value)\r\n elif isinstance(value, DAList):\r\n simplified_vars[key] = comma_list(value)\r\n elif isinstance(value, DADateTime):\r\n simplified_vars[key] = value.format_date(format='yyyy-MM-dd')\r\n elif isinstance(value,DAObject):\r\n all_attributes = set(value.__dict__.keys()) - {'has_nonrandom_instance_name', 'instanceName', 'attrList', 'location'}\r\n for attribute in all_attributes:\r\n if isinstance(getattr(value,attribute), DADateTime):\r\n simplified_vars[key + '.' + attribute ] = getattr(value,attribute).format_date(format='yyyy-MM-dd')\r\n else:\r\n simplified_vars[key + '.' 
+ attribute] = str(getattr(value,attribute))\r\n # Don't transform numbers\r\n elif isinstance(value,int) or isinstance(value,float):\r\n simplified_vars[key] = value\r\n # Send Decimal values as floating point\r\n elif isinstance(value,Decimal):\r\n simplified_vars[key] = float(value)\r\n # Everything else gets turned into a string, including True/False values\r\n else:\r\n simplified_vars[key] = str(value)\r\n\r\n # Map the values to new column names if the user provided a mapping\r\n simplified = {}\r\n if len(mapping) > 0:\r\n for name, value in simplified_vars.items():\r\n if name in mapping:\r\n simplified[mapping[name]] = value\r\n else:\r\n simplified[name] = value\r\n return simplified\r\n else:\r\n return simplified_vars" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Normalize the indexes/indexers used for reindexing or alignment. Return dictionaries of xarray Index objects and coordinate variables such that we can group matching indexes based on the dictionary keys.
def _normalize_indexes(
    self,
    indexes: Mapping[Any, Any],
) -> tuple[NormalizedIndexes, NormalizedIndexVars]:
    if isinstance(indexes, Indexes):
        xr_variables = dict(indexes.variables)
    else:
        xr_variables = {}

    xr_indexes: dict[Hashable, Index] = {}
    for k, idx in indexes.items():
        if not isinstance(idx, Index):
            if getattr(idx, "dims", (k,)) != (k,):
                raise ValueError(
                    f"Indexer has dimensions {idx.dims} that are different "
                    f"from that to be indexed along '{k}'"
                )
            data = as_compatible_data(idx)
            pd_idx = safe_cast_to_index(data)
            pd_idx.name = k
            if isinstance(pd_idx, pd.MultiIndex):
                idx = PandasMultiIndex(pd_idx, k)
            else:
                idx = PandasIndex(pd_idx, k, coord_dtype=data.dtype)
        xr_variables.update(idx.create_variables())
        xr_indexes[k] = idx

    normalized_indexes = {}
    normalized_index_vars = {}
    for idx, index_vars in Indexes(xr_indexes, xr_variables).group_by_index():
        coord_names_and_dims = []
        all_dims: set[Hashable] = set()

        for name, var in index_vars.items():
            dims = var.dims
            coord_names_and_dims.append((name, dims))
            all_dims.update(dims)

        exclude_dims = all_dims & self.exclude_dims
        if exclude_dims == all_dims:
            continue
        elif exclude_dims:
            excl_dims_str = ", ".join(str(d) for d in exclude_dims)
            incl_dims_str = ", ".join(str(d) for d in all_dims - exclude_dims)
            raise ValueError(
                f"cannot exclude dimension(s) {excl_dims_str} from alignment because "
                "these are used by an index together with non-excluded dimensions "
                f"{incl_dims_str}"
            )

        key = (tuple(coord_names_and_dims), type(idx))
        normalized_indexes[key] = idx
        normalized_index_vars[key] = index_vars

    return normalized_indexes, normalized_index_vars
[ "def align_indexes(self) -> None:\n\n aligned_indexes = {}\n aligned_index_vars = {}\n reindex = {}\n new_indexes = {}\n new_index_vars = {}\n\n for key, matching_indexes in self.all_indexes.items():\n matching_index_vars = self.all_index_vars[key]\n dims = {d for coord in matching_index_vars[0].values() for d in coord.dims}\n index_cls = key[1]\n\n if self.join == \"override\":\n joined_index = matching_indexes[0]\n joined_index_vars = matching_index_vars[0]\n need_reindex = False\n elif key in self.indexes:\n joined_index = self.indexes[key]\n joined_index_vars = self.index_vars[key]\n cmp_indexes = list(\n zip(\n [joined_index] + matching_indexes,\n [joined_index_vars] + matching_index_vars,\n )\n )\n need_reindex = self._need_reindex(dims, cmp_indexes)\n else:\n if len(matching_indexes) > 1:\n need_reindex = self._need_reindex(\n dims,\n list(zip(matching_indexes, matching_index_vars)),\n )\n else:\n need_reindex = False\n if need_reindex:\n if self.join == \"exact\":\n raise ValueError(\n \"cannot align objects with join='exact' where \"\n \"index/labels/sizes are not equal along \"\n \"these coordinates (dimensions): \"\n + \", \".join(f\"{name!r} {dims!r}\" for name, dims in key[0])\n )\n joiner = self._get_index_joiner(index_cls)\n joined_index = joiner(matching_indexes)\n if self.join == \"left\":\n joined_index_vars = matching_index_vars[0]\n elif self.join == \"right\":\n joined_index_vars = matching_index_vars[-1]\n else:\n joined_index_vars = joined_index.create_variables()\n else:\n joined_index = matching_indexes[0]\n joined_index_vars = matching_index_vars[0]\n\n reindex[key] = need_reindex\n aligned_indexes[key] = joined_index\n aligned_index_vars[key] = joined_index_vars\n\n for name, var in joined_index_vars.items():\n new_indexes[name] = joined_index\n new_index_vars[name] = var\n\n # Explicitly provided indexes that are not found in objects to align\n # may relate to unindexed dimensions so we add them too\n for key, idx in self.indexes.items():\n if key not in aligned_indexes:\n index_vars = self.index_vars[key]\n reindex[key] = False\n aligned_indexes[key] = idx\n aligned_index_vars[key] = index_vars\n for name, var in index_vars.items():\n new_indexes[name] = idx\n new_index_vars[name] = var\n\n self.aligned_indexes = aligned_indexes\n self.aligned_index_vars = aligned_index_vars\n self.reindex = reindex\n self.new_indexes = Indexes(new_indexes, new_index_vars)", "def _orig_index(self):\n\n orig_index = [np.array(i, dtype=self.dataset.index.dtype) for i in\n groupby_unsorted(zip(self._groupindex,\n self.dataset.index.to_numpy()),\n key=itemgetter(0),\n get=itemgetter(1)).values()]\n return orig_index", "def load_indexers():\n global WORDS_SET, W2I, TAGS_SET, T2I, CHARS_SET, C2I, PREFIX_SIZE, SUFFIX_SIZE, P2I, S2I\n global I2W, I2T, I2C, P2I, S2I\n W2I = {word : i for i, word in enumerate(WORDS_SET)}\n I2W = {i : word for word, i in W2I.iteritems()}\n T2I = {tag : i for i, tag in enumerate(TAGS_SET)}\n I2T = {i : word for word, i in T2I.iteritems()}\n C2I = {tag : i for i, tag in enumerate(CHARS_SET)}\n I2C = {i : word for word, i in C2I.iteritems()}\n\n # initialize prefixes and suffixes\n prefixes = {word[:PREFIX_SIZE] for word in WORDS_SET}\n suffixes = {word[-SUFFIX_SIZE:] for word in WORDS_SET}\n P2I = {word[:PREFIX_SIZE]:i for i, word in enumerate(prefixes)}\n S2I = {word[-SUFFIX_SIZE:]:i for i, word in enumerate(suffixes)}", "def _compact_indexes(self):\n for index in self.indexes:\n self.compact_index(index)", "def calculate_indices(\n ds,\n index=None,\n 
collection=None,\n satellite_mission=None,\n custom_varname=None,\n normalise=True,\n drop=False,\n deep_copy=True,\n):\n\n # Set ds equal to a copy of itself in order to prevent the function\n # from editing the input dataset. This is to prevent unexpected\n # behaviour though it uses twice as much memory.\n if deep_copy:\n ds = ds.copy(deep=True)\n\n # Capture input band names in order to drop these if drop=True\n if drop:\n bands_to_drop = list(ds.data_vars)\n print(f\"Dropping bands {bands_to_drop}\")\n\n # Dictionary containing remote sensing index band recipes\n index_dict = {\n # Normalised Difference Vegation Index, Rouse 1973\n \"NDVI\": lambda ds: (ds.nir - ds.red) / (ds.nir + ds.red),\n # Enhanced Vegetation Index, Huete 2002\n \"EVI\": lambda ds: (\n (2.5 * (ds.nir - ds.red)) / (ds.nir + 6 * ds.red - 7.5 * ds.blue + 1)\n ),\n # Leaf Area Index, Boegh 2002\n \"LAI\": lambda ds: (\n 3.618\n * ((2.5 * (ds.nir - ds.red)) / (ds.nir + 6 * ds.red - 7.5 * ds.blue + 1))\n - 0.118\n ),\n # Soil Adjusted Vegetation Index, Huete 1988\n \"SAVI\": lambda ds: ((1.5 * (ds.nir - ds.red)) / (ds.nir + ds.red + 0.5)),\n # Mod. Soil Adjusted Vegetation Index, Qi et al. 1994\n \"MSAVI\": lambda ds: (\n (2 * ds.nir + 1 - ((2 * ds.nir + 1) ** 2 - 8 * (ds.nir - ds.red)) ** 0.5)\n / 2\n ),\n # Normalised Difference Moisture Index, Gao 1996\n \"NDMI\": lambda ds: (ds.nir - ds.swir_1) / (ds.nir + ds.swir_1),\n # Normalised Burn Ratio, Lopez Garcia 1991\n \"NBR\": lambda ds: (ds.nir - ds.swir_2) / (ds.nir + ds.swir_2),\n # Burn Area Index, Martin 1998\n \"BAI\": lambda ds: (1.0 / ((0.10 - ds.red) ** 2 + (0.06 - ds.nir) ** 2)),\n # Normalised Difference Chlorophyll Index,\n # (Mishra & Mishra, 2012)\n \"NDCI\": lambda ds: (ds.red_edge_1 - ds.red) / (ds.red_edge_1 + ds.red),\n # Normalised Difference Snow Index, Hall 1995\n \"NDSI\": lambda ds: (ds.green - ds.swir_1) / (ds.green + ds.swir_1),\n # Normalised Difference Water Index, McFeeters 1996\n \"NDWI\": lambda ds: (ds.green - ds.nir) / (ds.green + ds.nir),\n # Modified Normalised Difference Water Index, Xu 2006\n \"MNDWI\": lambda ds: (ds.green - ds.swir_1) / (ds.green + ds.swir_1),\n # Normalised Difference Built-Up Index, Zha 2003\n \"NDBI\": lambda ds: (ds.swir_1 - ds.nir) / (ds.swir_1 + ds.nir),\n # Built-Up Index, He et al. 2010\n \"BUI\": lambda ds: ((ds.swir_1 - ds.nir) / (ds.swir_1 + ds.nir))\n - ((ds.nir - ds.red) / (ds.nir + ds.red)),\n # Built-up Area Extraction Index, Bouzekri et al. 2015\n \"BAEI\": lambda ds: (ds.red + 0.3) / (ds.green + ds.swir_1),\n # New Built-up Index, Jieli et al. 2010\n \"NBI\": lambda ds: (ds.swir_1 + ds.red) / ds.nir,\n # Bare Soil Index, Rikimaru et al. 
2002\n \"BSI\": lambda ds: ((ds.swir_1 + ds.red) - (ds.nir + ds.blue))\n / ((ds.swir_1 + ds.red) + (ds.nir + ds.blue)),\n # Automated Water Extraction Index (no shadows), Feyisa 2014\n \"AWEI_ns\": lambda ds: (\n 4 * (ds.green - ds.swir_1) - (0.25 * ds.nir * +2.75 * ds.swir_2)\n ),\n # Automated Water Extraction Index (shadows), Feyisa 2014\n \"AWEI_sh\": lambda ds: (\n ds.blue + 2.5 * ds.green - 1.5 * (ds.nir + ds.swir_1) - 0.25 * ds.swir_2\n ),\n # Water Index, Fisher 2016\n \"WI\": lambda ds: (\n 1.7204\n + 171 * ds.green\n + 3 * ds.red\n - 70 * ds.nir\n - 45 * ds.swir_1\n - 71 * ds.swir_2\n ),\n # Tasseled Cap Wetness, Crist 1985\n \"TCW\": lambda ds: (\n 0.0315 * ds.blue\n + 0.2021 * ds.green\n + 0.3102 * ds.red\n + 0.1594 * ds.nir\n + -0.6806 * ds.swir_1\n + -0.6109 * ds.swir_2\n ),\n # Tasseled Cap Greeness, Crist 1985\n \"TCG\": lambda ds: (\n -0.1603 * ds.blue\n + -0.2819 * ds.green\n + -0.4934 * ds.red\n + 0.7940 * ds.nir\n + -0.0002 * ds.swir_1\n + -0.1446 * ds.swir_2\n ),\n # Tasseled Cap Brightness, Crist 1985\n \"TCB\": lambda ds: (\n 0.2043 * ds.blue\n + 0.4158 * ds.green\n + 0.5524 * ds.red\n + 0.5741 * ds.nir\n + 0.3124 * ds.swir_1\n + -0.2303 * ds.swir_2\n ),\n # Clay Minerals Ratio, Drury 1987\n \"CMR\": lambda ds: (ds.swir_1 / ds.swir_2),\n # Ferrous Minerals Ratio, Segal 1982\n \"FMR\": lambda ds: (ds.swir_1 / ds.nir),\n # Iron Oxide Ratio, Segal 1982\n \"IOR\": lambda ds: (ds.red / ds.blue),\n # Normalized Difference Turbidity Index, Lacaux, J.P. et al. 2007\n \"NDTI\": lambda ds: (ds.red - ds.green) / (ds.red + ds.green),\n # Modified Bare Soil Index, Nguyen et al. 2021\n \"MBI\": lambda ds: ((ds.swir_1 - ds.swir_2 - ds.nir) / (ds.swir_1 + ds.swir_2 + ds.nir)) + 0.5,\n }\n \n # Enhanced Normalised Difference Impervious Surfaces Index, Chen et al. 
2019\n def mndwi(ds):\n return (ds.green - ds.swir_1) / (ds.green + ds.swir_1)\n def swir_diff(ds):\n return ds.swir_1/ds.swir_2\n def alpha(ds):\n return (2*(np.mean(ds.blue)))/(np.mean(swir_diff(ds)) + np.mean(mndwi(ds)**2))\n def ENDISI(ds):\n m = mndwi(ds)\n s = swir_diff(ds)\n a = alpha(ds)\n return (ds.blue - (a)*(s + m**2))/(ds.blue + (a)*(s + m**2))\n \n index_dict[\"ENDISI\"] = ENDISI\n \n ## Artificial Surface Index, Yongquan Zhao & Zhe Zhu 2022\n def af(ds):\n AF = (ds.nir - ds.blue) / (ds.nir + ds.blue)\n AF_norm = (AF - AF.min(dim=[\"y\",\"x\"]))/(AF.max(dim=[\"y\",\"x\"]) - AF.min(dim=[\"y\",\"x\"]))\n return AF_norm\n def ndvi(ds):\n return (ds.nir - ds.red) / (ds.nir + ds.red)\n def msavi(ds):\n return ((2 * ds.nir + 1 - ((2 * ds.nir + 1) ** 2 - 8 * (ds.nir - ds.red)) ** 0.5) / 2 )\n def vsf(ds):\n NDVI = ndvi(ds)\n MSAVI = msavi(ds)\n VSF = 1 - NDVI * MSAVI \n VSF_norm = (VSF - VSF.min(dim=[\"y\",\"x\"]))/(VSF.max(dim=[\"y\",\"x\"]) - VSF.min(dim=[\"y\",\"x\"]))\n return VSF_norm\n def mbi(ds):\n return ((ds.swir_1 - ds.swir_2 - ds.nir) / (ds.swir_1 + ds.swir_2 + ds.nir)) + 0.5\n def embi(ds):\n MBI = mbi(ds)\n MNDWI = mndwi(ds)\n return (MBI - MNDWI - 0.5) / (MBI + MNDWI + 1.5)\n def ssf(ds):\n EMBI = embi(ds)\n SSF = 1 - EMBI\n SSF_norm = (SSF - SSF.min(dim=[\"y\",\"x\"]))/(SSF.max(dim=[\"y\",\"x\"]) - SSF.min(dim=[\"y\",\"x\"]))\n return SSF_norm\n # Overall modulation using the Modulation Factor (MF).\n def mf(ds):\n MF = ((ds.blue + ds.green) - (ds.nir + ds.swir_1)) / ((ds.blue + ds.green) + (ds.nir + ds.swir_1))\n MF_norm = (MF - MF.min(dim=[\"y\",\"x\"]))/(MF.max(dim=[\"y\",\"x\"]) - MF.min(dim=[\"y\",\"x\"]))\n return MF_norm\n def ASI(ds):\n AF = af(ds)\n VSF = vsf(ds)\n SSF = ssf(ds)\n MF = mf(ds)\n return AF * VSF * SSF * MF\n \n index_dict[\"ASI\"] = ASI\n \n # If index supplied is not a list, convert to list. This allows us to\n # iterate through either multiple or single indices in the loop below\n indices = index if isinstance(index, list) else [index]\n\n # calculate for each index in the list of indices supplied (indexes)\n for index in indices:\n\n # Select an index function from the dictionary\n index_func = index_dict.get(str(index))\n\n # If no index is provided or if no function is returned due to an\n # invalid option being provided, raise an exception informing user to\n # choose from the list of valid options\n if index is None:\n\n raise ValueError(\n f\"No remote sensing `index` was provided. Please \"\n \"refer to the function \\ndocumentation for a full \"\n \"list of valid options for `index` (e.g. 'NDVI')\"\n )\n\n elif (\n index\n in [\n \"WI\",\n \"BAEI\",\n \"AWEI_ns\",\n \"AWEI_sh\",\n \"EVI\",\n \"LAI\",\n \"SAVI\",\n \"MSAVI\",\n ]\n and not normalise\n ):\n\n warnings.warn(\n f\"\\nA coefficient-based index ('{index}') normally \"\n \"applied to surface reflectance values in the \\n\"\n \"0.0-1.0 range was applied to values in the 0-10000 \"\n \"range. This can produce unexpected results; \\nif \"\n \"required, resolve this by setting `normalise=True`\"\n )\n\n elif index_func is None:\n\n raise ValueError(\n f\"The selected index '{index}' is not one of the \"\n \"valid remote sensing index options. \\nPlease \"\n \"refer to the function documentation for a full \"\n \"list of valid options for `index`\"\n )\n \n # Deprecation warning if `collection` is specified instead of `satellite_mission`.\n if collection is not None:\n warnings.warn('`collection` was deprecated in version 0.1.7. 
Use `satelite_mission` instead.', \n DeprecationWarning, \n stacklevel=2)\n # Map the collection values to the valid satellite_mission values.\n if collection == \"c2\":\n satellite_mission = \"ls\"\n elif collection == \"s2\":\n satellite_mission = \"s2\"\n # Raise error if no valid collection name is provided:\n else:\n raise ValueError(\n f\"'{collection}' is not a valid option for \"\n \"`collection`. Please specify either \\n\"\n \"'c2' or 's2'.\")\n\n \n # Rename bands to a consistent format if depending on what satellite mission\n # is specified in `satellite_mission`. This allows the same index calculations\n # to be applied to all satellite missions. If no satellite mission was provided,\n # raise an exception.\n if satellite_mission is None:\n\n raise ValueError(\n \"No `satellite_mission` was provided. Please specify \"\n \"either 'ls' or 's2' to ensure the \\nfunction \"\n \"calculates indices using the correct spectral \"\n \"bands.\"\n )\n \n elif satellite_mission == \"ls\":\n sr_max = 1.0\n # Dictionary mapping full data names to simpler alias names\n # This only applies to properly-scaled \"ls\" data i.e. from\n # the Landsat geomedians. calculate_indices will not show \n # correct output for raw (unscaled) Landsat data (i.e. default\n # outputs from dc.load)\n bandnames_dict = {\n \"SR_B1\": \"blue\",\n \"SR_B2\": \"green\",\n \"SR_B3\": \"red\",\n \"SR_B4\": \"nir\",\n \"SR_B5\": \"swir_1\",\n \"SR_B7\": \"swir_2\",\n }\n \n # Rename bands in dataset to use simple names (e.g. 'red')\n bands_to_rename = {\n a: b for a, b in bandnames_dict.items() if a in ds.variables\n }\n\n elif satellite_mission == \"s2\":\n sr_max = 10000\n # Dictionary mapping full data names to simpler alias names\n bandnames_dict = {\n \"nir_1\": \"nir\",\n \"B02\": \"blue\",\n \"B03\": \"green\",\n \"B04\": \"red\",\n \"B05\": \"red_edge_1\",\n \"B06\": \"red_edge_2\",\n \"B07\": \"red_edge_3\",\n \"B08\": \"nir\",\n \"B11\": \"swir_1\",\n \"B12\": \"swir_2\",\n }\n\n # Rename bands in dataset to use simple names (e.g. 'red')\n bands_to_rename = {\n a: b for a, b in bandnames_dict.items() if a in ds.variables\n }\n\n # Raise error if no valid satellite_mission name is provided:\n else:\n raise ValueError(\n f\"'{satellite_mission}' is not a valid option for \"\n \"`satellite_mission`. Please specify either \\n\"\n \"'ls' or 's2'\"\n )\n\n # Apply index function\n try:\n # If normalised=True, divide data by 10,000 before applying func\n mult = sr_max if normalise else 1.0\n index_array = index_func(ds.rename(bands_to_rename) / mult)\n\n except AttributeError:\n raise ValueError(\n f\"Please verify that all bands required to \"\n f\"compute {index} are present in `ds`.\"\n )\n\n # Add as a new variable in dataset\n output_band_name = custom_varname if custom_varname else index\n ds[output_band_name] = index_array\n\n # Once all indexes are calculated, drop input bands if drop=True\n if drop:\n ds = ds.drop(bands_to_drop)\n\n # Return input dataset with added water index variable\n return ds", "def _build_index_specs(cls, meta_indexes):\n geo_indices = cls._geo_indices()\n unique_indices = cls._unique_with_indexes()\n index_specs = [cls._build_index_spec(spec) for spec in meta_indexes]\n\n def merge_index_specs(index_specs, indices):\n \"\"\"Helper method for merging index specs.\"\"\"\n if not indices:\n return index_specs\n\n # Create a map of index fields to index spec. 
We're converting\n # the fields from a list to a tuple so that it's hashable.\n spec_fields = {tuple(index[\"fields\"]): index for index in index_specs}\n\n # For each new index, if there's an existing index with the same\n # fields list, update the existing spec with all data from the\n # new spec.\n for new_index in indices:\n candidate = spec_fields.get(tuple(new_index[\"fields\"]))\n if candidate is None:\n index_specs.append(new_index)\n else:\n candidate.update(new_index)\n\n return index_specs\n\n # Merge geo indexes and unique_with indexes into the meta index specs.\n index_specs = merge_index_specs(index_specs, geo_indices)\n index_specs = merge_index_specs(index_specs, unique_indices)\n return index_specs", "def re_index_types(self):\n\n for (index, atom_type) in enumerate(self.atom_types, 1):\n atom_type.index = index\n\n for (index, bond_type) in enumerate(self.bond_types, 1):\n bond_type.index = index\n\n for (index, angle_type) in enumerate(self.angle_types, 1):\n angle_type.index = index\n\n index = 1\n for dihedral_type in self.dihedral_types:\n if isinstance(dihedral_type.index, list):\n for i in range(len(dihedral_type.index)):\n dihedral_type.index[i] = index\n index += 1\n else:\n dihedral_type.index = index\n index += 1\n\n for (index, improper_type) in enumerate(self.improper_types, 1):\n improper_type.index = index", "def index(self):\n # get all unique values of each group in _orig_index\n return np.concatenate([list(dict.fromkeys(i))\n for i in self._orig_index])", "def setupIndexes( self, reindex=None, REQUEST=None ):\n reindexed = []\n\n # Setup new indexes\n for item in self.enumerateIndexes():\n index, typ = item[0:2]\n extra = len(item) == 3 and item[2]\n if index not in self.indexes():\n self.addIndex( index, typ, extra )\n reindexed.append( index )\n\n if reindex and reindexed:\n for index in reindexed:\n try:\n self.reindexIndex( index, REQUEST=REQUEST )\n except:\n raise\n LOG('AttributesIndex.setupIndexes', INFO, \"= Index: %s reindexed\" % index)", "def get_indexes(docs):\n indexes = {}\n attr = ['Brand', 'OperatingSystem', 'Manufacturer', 'ProductGroup']\n\n for asin in docs:\n doc = docs[asin]\n for a in attr:\n if doc.has_key(a):\n val = doc[a]\n if indexes.has_key(a) is False:\n indexes[a] = {}\n if indexes[a].has_key(val) is False:\n indexes[a][val] = {}\n indexes[a][val][asin] = doc\n return indexes", "def _re_number(self):\n new_dataset_indices = []\n for g, graph in enumerate(self.graphs):\n graph._force_index(g)\n for s, graph_set in enumerate(graph.sets):\n graph_set._force_index(s)\n new_dataset_indices.append((g,s))\n for i, dataset in enumerate(self.datasets):\n dataset._force_index(*new_dataset_indices[i])", "def reindexObject(idxs=[]):", "def inverse_index(records):\n r_index = dict()\n for i, ig in enumerate(records):\n for vcall in ig.setV or ():\n r_index.setdefault(vcall, []).append(i)\n for jcall in ig.setJ or ():\n r_index.setdefault(jcall, []).append(i)\n return r_index", "def merge_indexes(index_files):\n index = {}\n for f in index_files:\n print f\n part_index = pickle.load(file(f))\n index.update(part_index)\n\n return index", "def assert_no_index_conflict(self) -> None:\n matching_keys = set(self.all_indexes) | set(self.indexes)\n\n coord_count: dict[Hashable, int] = defaultdict(int)\n dim_count: dict[Hashable, int] = defaultdict(int)\n for coord_names_dims, _ in matching_keys:\n dims_set: set[Hashable] = set()\n for name, dims in coord_names_dims:\n coord_count[name] += 1\n dims_set.update(dims)\n for dim in dims_set:\n 
dim_count[dim] += 1\n\n for count, msg in [(coord_count, \"coordinates\"), (dim_count, \"dimensions\")]:\n dup = {k: v for k, v in count.items() if v > 1}\n if dup:\n items_msg = \", \".join(\n f\"{k!r} ({v} conflicting indexes)\" for k, v in dup.items()\n )\n raise ValueError(\n \"cannot re-index or align objects with conflicting indexes found for \"\n f\"the following {msg}: {items_msg}\\n\"\n \"Conflicting indexes may occur when\\n\"\n \"- they relate to different sets of coordinate and/or dimension names\\n\"\n \"- they don't have the same type\\n\"\n \"- they may be used to reindex data along common dimensions\"\n )", "def transform_to_indices(self, data):\n\t\ttransformed_data = {\"ids\": [],\n\t\t\t\t\t\t\t\"premises\": [],\n\t\t\t\t\t\t\t\"hypotheses\": [],\n\t\t\t\t\t\t\t\"labels\": []}\n\n\t\tfor i in range(self.n):\n\t\t\ttransformed_data[\"ids\"].append(data[\"ids\"][i])\n\n\t\t\ttransformed_data[\"labels\"].append(self.labeldict[data[\"labels\"][i]])\n\n\t\t\tpremises_indices = self.words_to_indices(data[\"premises\"][i])\n\t\t\ttransformed_data[\"premises\"].append(premises_indices)\n\n\t\t\thypotheses_indices = self.words_to_indices(data[\"hypotheses\"][i])\n\t\t\ttransformed_data[\"hypotheses\"].append(hypotheses_indices)\n\n\t\treturn transformed_data", "def returnIteratorIndexesFromIndex(self, listOfIndexes):\n coordinates = {}\n for cnt, key in enumerate(self.gridContainer['dimensionNames']):\n coordinates[key] = listOfIndexes[cnt]\n\n return coordinates", "def _reverse_indexer(self) -> dict[Hashable, npt.NDArray[np.intp]]:\n categories = self.categories\n r, counts = libalgos.groupsort_indexer(\n ensure_platform_int(self.codes), categories.size\n )\n counts = ensure_int64(counts).cumsum()\n _result = (r[start:end] for start, end in zip(counts, counts[1:]))\n return dict(zip(categories, _result))", "def get_group_indexes(indexes: Tensor) -> List[Tensor]:\n\n res: dict = {}\n for i, _id in enumerate(indexes):\n _id = _id.item()\n if _id in res:\n res[_id] += [i]\n else:\n res[_id] = [i]\n\n return [tensor(x, dtype=torch.long) for x in res.values()]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
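Aside: the record above carries the source of a band-index calculator (NDVI, MNDWI, ENDISI, ASI and friends). As a minimal, hypothetical illustration of the kind of arithmetic it performs (not part of the dataset and not that tool's actual API), the NDVI formula can be evaluated on a tiny synthetic xarray Dataset that already uses the simple band aliases the code expects:

import numpy as np
import xarray as xr

# Tiny synthetic dataset with the renamed band aliases ("red", "nir")
ds = xr.Dataset({
    "red": ("y", np.array([0.1, 0.2, 0.3])),
    "nir": ("y", np.array([0.5, 0.5, 0.5])),
})

# NDVI = (NIR - Red) / (NIR + Red)
ndvi = (ds.nir - ds.red) / (ds.nir + ds.red)
print(ndvi.values)  # roughly [0.667, 0.429, 0.25]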
Check for uniqueness of both coordinate and dimension names across all sets of matching indexes. We need to make sure that all indexes used for reindexing or alignment are fully compatible and do not conflict with each other.
def assert_no_index_conflict(self) -> None: matching_keys = set(self.all_indexes) | set(self.indexes) coord_count: dict[Hashable, int] = defaultdict(int) dim_count: dict[Hashable, int] = defaultdict(int) for coord_names_dims, _ in matching_keys: dims_set: set[Hashable] = set() for name, dims in coord_names_dims: coord_count[name] += 1 dims_set.update(dims) for dim in dims_set: dim_count[dim] += 1 for count, msg in [(coord_count, "coordinates"), (dim_count, "dimensions")]: dup = {k: v for k, v in count.items() if v > 1} if dup: items_msg = ", ".join( f"{k!r} ({v} conflicting indexes)" for k, v in dup.items() ) raise ValueError( "cannot re-index or align objects with conflicting indexes found for " f"the following {msg}: {items_msg}\n" "Conflicting indexes may occur when\n" "- they relate to different sets of coordinate and/or dimension names\n" "- they don't have the same type\n" "- they may be used to reindex data along common dimensions" )
[ "def _need_reindex(self, dims, cmp_indexes) -> bool:\n if not indexes_all_equal(cmp_indexes):\n # always reindex when matching indexes are not equal\n return True\n\n unindexed_dims_sizes = {}\n for dim in dims:\n if dim in self.unindexed_dim_sizes:\n sizes = self.unindexed_dim_sizes[dim]\n if len(sizes) > 1:\n # reindex if different sizes are found for unindexed dims\n return True\n else:\n unindexed_dims_sizes[dim] = next(iter(sizes))\n\n if unindexed_dims_sizes:\n indexed_dims_sizes = {}\n for cmp in cmp_indexes:\n index_vars = cmp[1]\n for var in index_vars.values():\n indexed_dims_sizes.update(var.sizes)\n\n for dim, size in unindexed_dims_sizes.items():\n if indexed_dims_sizes.get(dim, -1) != size:\n # reindex if unindexed dimension size doesn't match\n return True\n\n return False", "def check_uniqueness(self):\n for dset_path, incomings in self.paths.items():\n incoming_filenames = [incoming['filename'] for incoming in incomings]\n duplicates = [incoming['is_duplicate'] for incoming in incomings]\n latests = [incoming['latest'] for incoming in incomings]\n roots = [incoming['dset_root'] for incoming in incomings]\n assert latests.count(latests[0]) == len(latests)\n latest_version = latests[0]\n assert roots.count(roots[0]) == len(roots)\n dset_root = roots[0]\n latest_filenames = list()\n for _, _, filenames in os.walk(os.path.join(dset_root, latest_version)):\n latest_filenames.extend(filenames)\n # An upgrade version is different if it contains at least one file with is_duplicate = False\n # And it has the same number of files than the \"latest\" version\n if all(duplicates) and set(latest_filenames) == set(incoming_filenames):\n raise DuplicatedDataset(dset_path, latest_version)", "def checkDupplicates(master: List[ndarray], names: List[str] = None) -> None:\n \n if (names is None) or (len(names) != len(master)):\n try:\n len(names) != len(master)\n print(\"Given names were not enough. 
Using position in the list as name instead.\")\n except TypeError:\n pass\n names = np.char.array(['catalog nb ']*len(master)) + np.char.array(np.array(range(len(master)), dtype='str'))\n \n for catalog, nameCat in zip(master, names):\n cnt = True\n for ra, dec, nb in zip(catalog['RA'], catalog['DEC'], range(catalog['RA'].shape[0])):\n \n where1 = np.where(catalog['RA']==ra)[0]\n where2 = np.where(catalog['DEC']==dec)[0]\n \n if (len(where1)>1) and (len(where2)>1):\n \n flag = True\n for w in where2:\n \n if flag and (w in where1):\n print(\"RA =\", ra, \"deg and DEC =\", dec, \"deg galaxy (line \" + str(nb) + \") is present more than once in catalog\", nameCat)\n flag = False\n cnt = False\n if cnt:\n print(\"All the galaxies are only listed once in the catalog\", nameCat) \n return", "def check_index_consistency(self):\n dfs = [self._y, self._X_extra, self._X_extra_base, self._X_extra_unenc,\n self._X_select, self._X_select_base, self._X_select_unenc]\n\n indexes = [df.index for df in dfs if df is not None]\n\n for i in range(len(indexes)-1):\n idx1 = indexes[i]\n idx2 = indexes[i+1]\n assert idx1.equals(idx2)", "def _validate_disjoint_sets(self):\n for old_indices in self._old_system_exceptions.keys():\n hybrid_indices = (self._old_to_hybrid_map[old_indices[0]],\n self._old_to_hybrid_map[old_indices[1]])\n old_env_intersection = set(old_indices).intersection(\n self._atom_classes['environment_atoms'])\n if old_env_intersection:\n if set(old_indices).intersection(\n self._atom_classes['unique_old_atoms']\n ):\n errmsg = (f\"old index exceptions {old_indices} include \"\n \"unique old and environment atoms, which is \"\n \"disallowed\")\n raise AssertionError(errmsg)\n\n for new_indices in self._new_system_exceptions.keys():\n hybrid_indices = (self._new_to_hybrid_map[new_indices[0]],\n self._new_to_hybrid_map[new_indices[1]])\n new_env_intersection = set(hybrid_indices).intersection(\n self._atom_classes['environment_atoms'])\n if new_env_intersection:\n if set(hybrid_indices).intersection(\n self._atom_classes['unique_new_atoms']\n ):\n errmsg = (f\"new index exceptions {new_indices} include \"\n \"unique new and environment atoms, which is \"\n \"dissallowed\")\n raise AssertionError", "def __has_conflicting_node_names(self):\n # check length of sets to determine if overlap exists\n return \\\n len({node.get_name() for node in self.get_left_nodeset().union(self.get_right_nodeset())}) \\\n != len(self.get_left_nodeset()) + len(self.get_right_nodeset())", "def __validate_dim(self, ind, name):\n if not isinstance(ind, int):\n raise TypeError('Dimension must be an integer')\n if (0 > ind) or (ind >= self.ndim):\n raise IndexError('Dimension must be an integer between 0 and {}'\n ''.format(self.ndim-1))\n for key, dim in self._axes.items():\n if key != ind:\n if name == dim.name:\n raise ValueError('name: {} already used, but must be unique'.format(name))", "def check_uniq():\n global GLOBAL_MAPPINGS\n global SHARED_MEM\n\n # Iterate over all the global mappings\n for mapping in GLOBAL_MAPPINGS:\n if SHARED_MEM == mapping:\n return False\n return True", "def _check_duplicated_columns(prep_cols, sample_cols):\n prep_cols.extend(sample_cols)\n dups = set(duplicates(prep_cols))\n if dups:\n raise qdb.exceptions.QiitaDBColumnError(\n 'Duplicated column names in the sample and prep info '\n 'files: %s. 
You need to delete that duplicated field' %\n ','.join(dups))", "def ensure_indicies(self):\n # Search indicies for materials\n self.materials.ensure_index(self.materials.key, unique=True)\n self.materials.ensure_index(self.materials.lu_field)\n self.materials.ensure_index(\"chemsys\")\n\n # Search indicies for thermo\n self.thermo.ensure_index(self.thermo.key, unique=True)\n self.thermo.ensure_index(self.thermo.lu_field)\n self.thermo.ensure_index(\"chemsys\")", "def unique_names_check(name_list: Optional[List[str]]):\n if name_list is None:\n return\n\n # Name uniqueness checks\n names = set()\n for name in name_list:\n if name in names:\n logging.warning(\n \"Name resolution has found more than one data loader having the same name !\\n\"\n \"In such cases, logs will nor be properly generated. \"\n \"Please rename the item to have unique names.\\n\"\n f\"Resolved name : {name}\"\n )\n else:\n names.add(name) # we need just hash key check, value is just a placeholder", "def check_unique(filtered_mutants, single_mutants):\r\n # for each normal-filtered mutant test pool,\r\n # remove any samples that were uniquely identified.\r\n # if multiple samples still remain, they cannot be uniquely identified.\r\n # could report the ambiguous sample_ids and call the successful samples,\r\n # but the instructions say that ALL samples must be mapped uniquely.\r\n for test_set in filtered_mutants:\r\n test_set = test_set - single_mutants\r\n if len(test_set) > 1:\r\n return False\r\n # return true if check passes\r\n return True", "def check_that_df_index_is_unique(df):\n\n if len(set(df.index))!=len(df): raise ValueError(\"df index should be unique\")", "def _normalize_indexes(\n self,\n indexes: Mapping[Any, Any],\n ) -> tuple[NormalizedIndexes, NormalizedIndexVars]:\n if isinstance(indexes, Indexes):\n xr_variables = dict(indexes.variables)\n else:\n xr_variables = {}\n\n xr_indexes: dict[Hashable, Index] = {}\n for k, idx in indexes.items():\n if not isinstance(idx, Index):\n if getattr(idx, \"dims\", (k,)) != (k,):\n raise ValueError(\n f\"Indexer has dimensions {idx.dims} that are different \"\n f\"from that to be indexed along '{k}'\"\n )\n data = as_compatible_data(idx)\n pd_idx = safe_cast_to_index(data)\n pd_idx.name = k\n if isinstance(pd_idx, pd.MultiIndex):\n idx = PandasMultiIndex(pd_idx, k)\n else:\n idx = PandasIndex(pd_idx, k, coord_dtype=data.dtype)\n xr_variables.update(idx.create_variables())\n xr_indexes[k] = idx\n\n normalized_indexes = {}\n normalized_index_vars = {}\n for idx, index_vars in Indexes(xr_indexes, xr_variables).group_by_index():\n coord_names_and_dims = []\n all_dims: set[Hashable] = set()\n\n for name, var in index_vars.items():\n dims = var.dims\n coord_names_and_dims.append((name, dims))\n all_dims.update(dims)\n\n exclude_dims = all_dims & self.exclude_dims\n if exclude_dims == all_dims:\n continue\n elif exclude_dims:\n excl_dims_str = \", \".join(str(d) for d in exclude_dims)\n incl_dims_str = \", \".join(str(d) for d in all_dims - exclude_dims)\n raise ValueError(\n f\"cannot exclude dimension(s) {excl_dims_str} from alignment because \"\n \"these are used by an index together with non-excluded dimensions \"\n f\"{incl_dims_str}\"\n )\n\n key = (tuple(coord_names_and_dims), type(idx))\n normalized_indexes[key] = idx\n normalized_index_vars[key] = index_vars\n\n return normalized_indexes, normalized_index_vars", "def check_missing_data(self, ds):\n\n ret_val = []\n \n \n name_list = ds.dataset.variables.keys()\n dim_list = ds.dataset.dimensions.keys()\n\n\n for 
name, var in ds.dataset.variables.iteritems():\n if hasattr(var,'coordinates'):\n aux_index_dict = {}\n dim_index_dict = {}\n reasoning = []\n valid = False\n aux_valid = False\n \n if hasattr(var, '_FillValue'):\n for coordinate in getattr(var, 'coordinates', '').split(\" \"):\n indices = []\n if coordinate in name_list and coordinate not in dim_list:\n try:\n indices = np.where(ds.dataset.variables[coordinate] == var._FillValue).tolist()\n except:\n indices = np.where(ds.dataset.variables[coordinate] == var._FillValue)[0].tolist()\n \n dim_index_dict[name+'-'+coordinate] = indices\n aux_index_dict[name+'-'+coordinate] = indices\n \n elif coordinate in name_list and coordinate in dim_list:\n try:\n indices = np.where(ds.dataset.variables[coordinate] == var._FillValue).tolist()\n except:\n indices = np.where(ds.dataset.variables[coordinate] == var._FillValue)[0].tolist()\n dim_index_dict[name+'-'+coordinate] = indices\n else:\n dim_index_dict[name+'-'+coordinate] = []\n \n \n #Check to see that all coordinate variable mising data locations are the same\n aux_index_list = []\n for each in aux_index_dict:\n aux_index_list.append(aux_index_dict[each])\n if aux_index_list != []: \n aux_valid = all(x == aux_index_list[0] for x in aux_index_list)\n else: \n aux_valid = True\n \n #Check to see that all auxilliary coordinate variable missing data appears in the coordinate variables\n dim_index_list = []\n for each in dim_index_dict:\n dim_index_list.append(dim_index_dict[each]) \n if dim_index_list != []:\n valid = all(x == dim_index_list[0] for x in dim_index_list)\n else:\n valid = True\n \n \n if aux_valid == False:\n reasoning.append('The auxillary coordinates do not have the same missing data locations')\n if valid == False:\n reasoning.append('The coordinate variables do not have the same missing data locations as the auxillary coordinates')\n \n #Check to see that all coordinate variable mising data is reflceted in the dataset\n valid_missing = True\n count = 0\n \n if hasattr(var, '_FillValue'):\n try:\n x_indices = np.where(var==var._FillValue).tolist()\n except:\n x_indices = np.where(var==var._FillValue)[0].tolist()\n \n for coordinate in var.coordinates.split(\" \"):\n coordinate_ind_list = dim_index_dict[name+'-'+coordinate]\n valid_missing = all(each in x_indices for each in coordinate_ind_list)\n \n if valid_missing == False:\n reasoning.append('The data does not have the same missing data locations as the coordinates')\n \n \n result = Result(BaseCheck.MEDIUM, \\\n valid and aux_valid and valid_missing, \\\n ('var', name, 'missing_data'), \\\n reasoning)\n ret_val.append(result)\n return ret_val", "def _validate_data_sources(data_sources: List[DataSource]):\n ds_names = set()\n for ds in data_sources:\n case_insensitive_ds_name = ds.name.lower()\n if case_insensitive_ds_name in ds_names:\n raise DataSourceRepeatNamesException(case_insensitive_ds_name)\n else:\n ds_names.add(case_insensitive_ds_name)", "def test_neoxargs_duplicates(self):\n self.assertTrue(NeoXArgs.validate_keys())", "def ensure_indicies(self):\n\n # Search index for materials\n self.materials.ensure_index(self.materials.key, unique=True)\n self.materials.ensure_index(\"task_ids\")\n\n # Search index for dielectric\n self.dielectric.ensure_index(self.dielectric.key, unique=True)\n self.dielectric.ensure_index(\"task_ids\")", "def check_unique(items, type):\n key_val_set = set()\n for id, item in items.items():\n id_key, keys = UNIQUE_KEY_SETS[type]\n t = tuple(item[key] for key in keys)\n\n # Check that it is not 
already added\n if t in key_val_set:\n raise ValueError(\"Key combination {}:{} is included multiple \"\n \"times in the {} sheet. \"\n \"ABORTING.\".format(keys, t, type))\n key_val_set.add(t)\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
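Aside: a standalone sketch of the duplicate-counting idea used by the record above. The helper name and its input format are assumptions for illustration only, not xarray's internal API:

from collections import defaultdict

def check_no_duplicate_names(index_coord_names):
    # Count how many indexes claim each coordinate name and
    # fail if any name is claimed by more than one index.
    counts = defaultdict(int)
    for names in index_coord_names:
        for name in names:
            counts[name] += 1
    dup = {k: v for k, v in counts.items() if v > 1}
    if dup:
        raise ValueError(f"conflicting indexes found for coordinates: {dup}")

check_no_duplicate_names([("x",), ("y", "z")])    # passes silently
# check_no_duplicate_names([("x",), ("x", "y")])  # would raise ValueError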
Whether or not we need to reindex variables for a set of matching indexes.
def _need_reindex(self, dims, cmp_indexes) -> bool: if not indexes_all_equal(cmp_indexes): # always reindex when matching indexes are not equal return True unindexed_dims_sizes = {} for dim in dims: if dim in self.unindexed_dim_sizes: sizes = self.unindexed_dim_sizes[dim] if len(sizes) > 1: # reindex if different sizes are found for unindexed dims return True else: unindexed_dims_sizes[dim] = next(iter(sizes)) if unindexed_dims_sizes: indexed_dims_sizes = {} for cmp in cmp_indexes: index_vars = cmp[1] for var in index_vars.values(): indexed_dims_sizes.update(var.sizes) for dim, size in unindexed_dims_sizes.items(): if indexed_dims_sizes.get(dim, -1) != size: # reindex if unindexed dimension size doesn't match return True return False
[ "def check_index_consistency(self):\n dfs = [self._y, self._X_extra, self._X_extra_base, self._X_extra_unenc,\n self._X_select, self._X_select_base, self._X_select_unenc]\n\n indexes = [df.index for df in dfs if df is not None]\n\n for i in range(len(indexes)-1):\n idx1 = indexes[i]\n idx2 = indexes[i+1]\n assert idx1.equals(idx2)", "def precheck(self)->bool:\n flag = True\n if len(self.index2docs) <= 0:\n flag = False\n if len(self.qwords) == 0:\n flag = False\n if max([len(self.index2docs[idx]) for idx in INDEX_IDS]) == 0:\n flag = False\n return flag # len(self.index2docs) > 0", "def has_indexes(self):\r\n if self._has_indexes is None:\r\n self._has_indexes = False\r\n for _, vernaux_iter in self.iter_versions():\r\n for vernaux in vernaux_iter:\r\n if vernaux['vna_other']:\r\n self._has_indexes = True\r\n break\r\n\r\n return self._has_indexes", "def areIndexArraysMatched(self) -> \"SbBool\":\n return _coin.SoReorganizeAction_areIndexArraysMatched(self)", "def assert_no_index_conflict(self) -> None:\n matching_keys = set(self.all_indexes) | set(self.indexes)\n\n coord_count: dict[Hashable, int] = defaultdict(int)\n dim_count: dict[Hashable, int] = defaultdict(int)\n for coord_names_dims, _ in matching_keys:\n dims_set: set[Hashable] = set()\n for name, dims in coord_names_dims:\n coord_count[name] += 1\n dims_set.update(dims)\n for dim in dims_set:\n dim_count[dim] += 1\n\n for count, msg in [(coord_count, \"coordinates\"), (dim_count, \"dimensions\")]:\n dup = {k: v for k, v in count.items() if v > 1}\n if dup:\n items_msg = \", \".join(\n f\"{k!r} ({v} conflicting indexes)\" for k, v in dup.items()\n )\n raise ValueError(\n \"cannot re-index or align objects with conflicting indexes found for \"\n f\"the following {msg}: {items_msg}\\n\"\n \"Conflicting indexes may occur when\\n\"\n \"- they relate to different sets of coordinate and/or dimension names\\n\"\n \"- they don't have the same type\\n\"\n \"- they may be used to reindex data along common dimensions\"\n )", "def _has_indices(self, target_map):\n if self._enrichment_schema_version == \"0.1\":\n if \"top_level\" in target_map:\n return False\n if (\"indices\" in target_map and \"evaluate\" not in target_map) or \"indices_from\" in target_map:\n return True\n else:\n if \"indices\" in target_map or \"indices_from\" in target_map:\n return True\n\n if \"$index\" not in str(target_map['value']):\n return False\n\n source_table = self._get_first_table_reference(str(target_map['value']))\n if source_table:\n if source_table in self._oid_maps:\n return True\n\n return False", "def _indexes_valid(self):\n return self.input_index in range(self.num_inputs) and self.output_index in range(self.num_outputs)", "def testWalkReuseIndexes(self):\n results = [(idx, self.manager.snimpyReuseIndexValue[idx])\n for idx in self.manager.snimpyReuseIndexValue]\n self.assertEqual(results,\n [((\"end of row1\", 4), 1785),\n ((\"end of row1\", 5), 2458)])", "def allEqualThisIndex(dict_of_arrays, **fixed_vars):\n # base index is a boolean vector, everywhere true\n first_array = dict_of_arrays[list(dict_of_arrays.keys())[0]]\n index = np.ones_like(first_array, dtype=np.bool_)\n for var_name, var_val in fixed_vars.items():\n index = index & (np.asarray(dict_of_arrays[var_name]) == var_val)\n return index", "def reindex_variables(\n variables: Mapping[Any, Variable],\n dim_pos_indexers: Mapping[Any, Any],\n copy: bool = True,\n fill_value: Any = dtypes.NA,\n sparse: bool = False,\n) -> dict[Hashable, Variable]:\n new_variables = {}\n dim_sizes = 
calculate_dimensions(variables)\n\n masked_dims = set()\n unchanged_dims = set()\n for dim, indxr in dim_pos_indexers.items():\n # Negative values in dim_pos_indexers mean values missing in the new index\n # See ``Index.reindex_like``.\n if (indxr < 0).any():\n masked_dims.add(dim)\n elif np.array_equal(indxr, np.arange(dim_sizes.get(dim, 0))):\n unchanged_dims.add(dim)\n\n for name, var in variables.items():\n if isinstance(fill_value, dict):\n fill_value_ = fill_value.get(name, dtypes.NA)\n else:\n fill_value_ = fill_value\n\n if sparse:\n var = var._as_sparse(fill_value=fill_value_)\n indxr = tuple(\n slice(None) if d in unchanged_dims else dim_pos_indexers.get(d, slice(None))\n for d in var.dims\n )\n needs_masking = any(d in masked_dims for d in var.dims)\n\n if needs_masking:\n new_var = var._getitem_with_mask(indxr, fill_value=fill_value_)\n elif all(is_full_slice(k) for k in indxr):\n # no reindexing necessary\n # here we need to manually deal with copying data, since\n # we neither created a new ndarray nor used fancy indexing\n new_var = var.copy(deep=copy)\n else:\n new_var = var[indxr]\n\n new_variables[name] = new_var\n\n return new_variables", "def generate_index_terms(self):\n\n return False", "def contains_vars(self, variables):\n for variable in variables:\n if variable not in self._map:\n return False\n\n return True", "def has_multiindex(self):\n if self.has_materialized_index:\n return isinstance(self.index, MultiIndex)\n return self._index_cols is not None and len(self._index_cols) > 1", "def indexes_intersect(self, indexes_set: Set[int]) -> bool:\n return bool(self.ambiguity_indexes.intersection(indexes_set))", "def testWalkTableWithReuseIndexes(self):\n results = [(idx, self.manager.snimpyReuseIndexValue[idx])\n for idx in self.manager.snimpyReuseIndexTable]\n self.assertEqual(results,\n [((\"end of row1\", 4), 1785),\n ((\"end of row1\", 5), 2458)])", "def needIndices(self) -> \"SbBool\":\n return _coin.SoTextureCoordinateBundle_needIndices(self)", "def reindex_data_objects(self):\n count = 0\n for defn in self.data_definition:\n # force the setup in the catalog\n defn.setup_catalog(self, True)\n for object in self.aq_explicit.objectValues():\n if getattr(object, 'isDataObject', False):\n object.reindex_object()\n count += 1\n \n return count", "def consistent_with(self, assignment, sub_variables):\n for sub_variable in sub_variables:\n if assignment.get_value(sub_variable) is None:\n return False\n\n if self._map.get(sub_variable, None) is None:\n return False\n\n if assignment.get_value(sub_variable) != self._map[sub_variable]:\n return False\n\n return True", "def is_indexed(self) -> bool:\n return self._is_indexed" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
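Aside: the test in the record above reduces to "reindex as soon as the matching indexes are not all equal, or an unindexed dimension size disagrees". A standalone sketch of the first half of that check with plain pandas indexes (names and inputs are illustrative assumptions, not xarray internals):

import pandas as pd

def need_reindex(matching_indexes):
    # Reindexing is required as soon as any two matching indexes differ.
    first = matching_indexes[0]
    return not all(first.equals(other) for other in matching_indexes[1:])

print(need_reindex([pd.Index([1, 2, 3]), pd.Index([1, 2, 3])]))  # False
print(need_reindex([pd.Index([1, 2, 3]), pd.Index([1, 2, 4])]))  # True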
Compute all aligned indexes and their corresponding coordinate variables.
def align_indexes(self) -> None: aligned_indexes = {} aligned_index_vars = {} reindex = {} new_indexes = {} new_index_vars = {} for key, matching_indexes in self.all_indexes.items(): matching_index_vars = self.all_index_vars[key] dims = {d for coord in matching_index_vars[0].values() for d in coord.dims} index_cls = key[1] if self.join == "override": joined_index = matching_indexes[0] joined_index_vars = matching_index_vars[0] need_reindex = False elif key in self.indexes: joined_index = self.indexes[key] joined_index_vars = self.index_vars[key] cmp_indexes = list( zip( [joined_index] + matching_indexes, [joined_index_vars] + matching_index_vars, ) ) need_reindex = self._need_reindex(dims, cmp_indexes) else: if len(matching_indexes) > 1: need_reindex = self._need_reindex( dims, list(zip(matching_indexes, matching_index_vars)), ) else: need_reindex = False if need_reindex: if self.join == "exact": raise ValueError( "cannot align objects with join='exact' where " "index/labels/sizes are not equal along " "these coordinates (dimensions): " + ", ".join(f"{name!r} {dims!r}" for name, dims in key[0]) ) joiner = self._get_index_joiner(index_cls) joined_index = joiner(matching_indexes) if self.join == "left": joined_index_vars = matching_index_vars[0] elif self.join == "right": joined_index_vars = matching_index_vars[-1] else: joined_index_vars = joined_index.create_variables() else: joined_index = matching_indexes[0] joined_index_vars = matching_index_vars[0] reindex[key] = need_reindex aligned_indexes[key] = joined_index aligned_index_vars[key] = joined_index_vars for name, var in joined_index_vars.items(): new_indexes[name] = joined_index new_index_vars[name] = var # Explicitly provided indexes that are not found in objects to align # may relate to unindexed dimensions so we add them too for key, idx in self.indexes.items(): if key not in aligned_indexes: index_vars = self.index_vars[key] reindex[key] = False aligned_indexes[key] = idx aligned_index_vars[key] = index_vars for name, var in index_vars.items(): new_indexes[name] = idx new_index_vars[name] = var self.aligned_indexes = aligned_indexes self.aligned_index_vars = aligned_index_vars self.reindex = reindex self.new_indexes = Indexes(new_indexes, new_index_vars)
[ "def computeAllIndex(self):\n indexes = self.klucb_vect(self.rewards / self.pulls, self.c * np.log(self.t_for_each_arm) / self.pulls, self.tolerance)\n indexes[self.pulls < 1] = float('+inf')\n self.index[:] = indexes", "def calc_indices(self, x, y):\n i1, i2 = self.calc_fractional_indices(x, y)\n # Use np.round to ensure that returned type is numpy array or scalar.\n nx = np.round(i1).astype(int)\n ny = np.round(i2).astype(int)\n return nx, ny", "def _update_indexes(self):\n ntemp = 0\n ntarg = 0\n for pos in self.positions:\n if pos.temp!='-':\n ntemp+=1\n if pos.targ!='-':\n ntarg+=1\n pos.ntemp = ntemp\n pos.ntarg = ntarg", "def _normalize_indexes(\n self,\n indexes: Mapping[Any, Any],\n ) -> tuple[NormalizedIndexes, NormalizedIndexVars]:\n if isinstance(indexes, Indexes):\n xr_variables = dict(indexes.variables)\n else:\n xr_variables = {}\n\n xr_indexes: dict[Hashable, Index] = {}\n for k, idx in indexes.items():\n if not isinstance(idx, Index):\n if getattr(idx, \"dims\", (k,)) != (k,):\n raise ValueError(\n f\"Indexer has dimensions {idx.dims} that are different \"\n f\"from that to be indexed along '{k}'\"\n )\n data = as_compatible_data(idx)\n pd_idx = safe_cast_to_index(data)\n pd_idx.name = k\n if isinstance(pd_idx, pd.MultiIndex):\n idx = PandasMultiIndex(pd_idx, k)\n else:\n idx = PandasIndex(pd_idx, k, coord_dtype=data.dtype)\n xr_variables.update(idx.create_variables())\n xr_indexes[k] = idx\n\n normalized_indexes = {}\n normalized_index_vars = {}\n for idx, index_vars in Indexes(xr_indexes, xr_variables).group_by_index():\n coord_names_and_dims = []\n all_dims: set[Hashable] = set()\n\n for name, var in index_vars.items():\n dims = var.dims\n coord_names_and_dims.append((name, dims))\n all_dims.update(dims)\n\n exclude_dims = all_dims & self.exclude_dims\n if exclude_dims == all_dims:\n continue\n elif exclude_dims:\n excl_dims_str = \", \".join(str(d) for d in exclude_dims)\n incl_dims_str = \", \".join(str(d) for d in all_dims - exclude_dims)\n raise ValueError(\n f\"cannot exclude dimension(s) {excl_dims_str} from alignment because \"\n \"these are used by an index together with non-excluded dimensions \"\n f\"{incl_dims_str}\"\n )\n\n key = (tuple(coord_names_and_dims), type(idx))\n normalized_indexes[key] = idx\n normalized_index_vars[key] = index_vars\n\n return normalized_indexes, normalized_index_vars", "def indices(self):\n nx, ny, nz = self.shape()\n return [(ix,iy,iz) for ix in range(nx) for iy in range(ny) for iz in range(nz)]", "def _idxs_to_calculate(self) -> Iterator:\n\n for row_idx in range(self._n_rows):\n\n if row_idx not in self._calculated_rows:\n self._calculated_rows.append(row_idx)\n\n atom_idx = row_idx // 3\n component = row_idx % 3 # 0: x, 1: y, 2: z\n\n yield atom_idx, component\n\n return", "def neighbor_indices(self):", "def indexes_and_lengths(self) -> Iterable[Tuple[int, int]]:\n raise NotImplementedError", "def elements22Dindexes(self, items_idx):\n M_elements = list(it.product(*[items_idx, items_idx]))\n i = [item[0] for item in M_elements]\n j = [item[1] for item in M_elements]\n Ndim = len(set(i))\n return (i,j, Ndim)", "def indices_per_axis(self):\n return self.__indices_per_axis", "def extract_indices(self):\r\n with open(self.path_to_idx) as idx_file:\r\n list_of_lines = idx_file.readlines()\r\n\r\n if len(list_of_lines) > 0:\r\n if \"Positions of Chroms:\" in list_of_lines[0]:\r\n list_of_lines = list_of_lines[1:]\r\n for list_item in list_of_lines:\r\n attributes = list_item.rstrip(';\\n').split(':')\r\n 
self.indices[attributes[0]] = attributes[1].replace(' ', '')", "def elements22Dindexes( items_idx ):\n M_elements = list(it.product(*[items_idx, items_idx])) #returns a list of all the combinations of the given arrays\n i = np.array([item[0] for item in M_elements])\n j = np.array([item[1] for item in M_elements])\n Ndim = len(set(i))\n return (i, j, Ndim)", "def getNormalIndices(*args, **kwargs):\n \n pass", "def indices():\n return [1.0, 3.0, 1.0, 3.0, 1.0]", "def _idx2coord(indices: Iterable) -> tuple:\n return tuple([int(x1) - x2 for x1, x2 in zip(indices, center_offset)])", "def computeCindices(self):\n\n self.surf_index_C = PUBSlib.computesurfindices(self.nsurf, self.nedge, self.ngroup, self.surf_edge, self.edge_group, self.group_m)\n self.edge_index_C = PUBSlib.computeedgeindices(self.nedge, self.ngroup, self.edge_group, self.group_m)\n self.nC = self.nvert\n self.nC += self.edge_index_C[-1,1]\n self.nC += self.surf_index_C[-1,1]\n\n if self.printInfo:\n print '# Control points =',self.nC", "def indices(self):\n return self.search_for_indices()", "def indexes(self) -> Iterable[int]:\n raise NotImplementedError", "def _get_pool_indices(self):\n indices = []\n for i in range(self.output_size[1]):\n for j in range(self.output_size[0]):\n current_block_indices = []\n for row_indx in range(self.kernel_size[0]):\n for col_indx in range(self.kernel_size[1]):\n current_block_indices.append(\n square_indx_to_flat(\n i * self.stride_size[0] + row_indx,\n j * self.stride_size[1] + col_indx,\n self.image_w_h,\n )\n )\n indices.append(current_block_indices)\n return indices" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
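Aside: align_indexes above is the internal joining step; the user-facing entry point is xarray.align, which the next record documents. A short usage example of that public function, showing how join="inner" and join="outer" behave on two DataArrays with partially overlapping labels:

import xarray as xr

a = xr.DataArray([1, 2, 3], dims="x", coords={"x": [10, 20, 30]})
b = xr.DataArray([4, 5, 6], dims="x", coords={"x": [20, 30, 40]})

# join="inner" keeps only the labels shared by both objects
a2, b2 = xr.align(a, b, join="inner")
print(a2.x.values)  # [20 30]

# join="outer" takes the union of labels and fills the gaps with NaN
a3, b3 = xr.align(a, b, join="outer")
print(a3.values)    # [ 1.  2.  3. nan]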
Given any number of Dataset and/or DataArray objects, returns new objects with aligned indexes and dimension sizes. Arrays from the aligned objects are suitable as input to mathematical operators, because along each dimension they have the same index and size. Missing values (if ``join != 'inner'``) are filled with ``fill_value``. The default fill value is NaN.
def align( *objects: T_Alignable, join: JoinOptions = "inner", copy: bool = True, indexes=None, exclude=frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Alignable, ...]: aligner = Aligner( objects, join=join, copy=copy, indexes=indexes, exclude_dims=exclude, fill_value=fill_value, ) aligner.align() return aligner.results
[ "def deep_align(\n objects: Iterable[Any],\n join: JoinOptions = \"inner\",\n copy=True,\n indexes=None,\n exclude=frozenset(),\n raise_on_invalid=True,\n fill_value=dtypes.NA,\n):\n from xarray.core.coordinates import Coordinates\n from xarray.core.dataarray import DataArray\n from xarray.core.dataset import Dataset\n\n if indexes is None:\n indexes = {}\n\n def is_alignable(obj):\n return isinstance(obj, (Coordinates, DataArray, Dataset))\n\n positions = []\n keys = []\n out = []\n targets = []\n no_key = object()\n not_replaced = object()\n for position, variables in enumerate(objects):\n if is_alignable(variables):\n positions.append(position)\n keys.append(no_key)\n targets.append(variables)\n out.append(not_replaced)\n elif is_dict_like(variables):\n current_out = {}\n for k, v in variables.items():\n if is_alignable(v) and k not in indexes:\n # Skip variables in indexes for alignment, because these\n # should to be overwritten instead:\n # https://github.com/pydata/xarray/issues/725\n # https://github.com/pydata/xarray/issues/3377\n # TODO(shoyer): doing this here feels super-hacky -- can we\n # move it explicitly into merge instead?\n positions.append(position)\n keys.append(k)\n targets.append(v)\n current_out[k] = not_replaced\n else:\n current_out[k] = v\n out.append(current_out)\n elif raise_on_invalid:\n raise ValueError(\n \"object to align is neither an xarray.Dataset, \"\n \"an xarray.DataArray nor a dictionary: {!r}\".format(variables)\n )\n else:\n out.append(variables)\n\n aligned = align(\n *targets,\n join=join,\n copy=copy,\n indexes=indexes,\n exclude=exclude,\n fill_value=fill_value,\n )\n\n for position, key, aligned_obj in zip(positions, keys, aligned):\n if key is no_key:\n out[position] = aligned_obj\n else:\n out[position][key] = aligned_obj # type: ignore[index] # maybe someone can fix this?\n\n return out", "def align_indexes(self) -> None:\n\n aligned_indexes = {}\n aligned_index_vars = {}\n reindex = {}\n new_indexes = {}\n new_index_vars = {}\n\n for key, matching_indexes in self.all_indexes.items():\n matching_index_vars = self.all_index_vars[key]\n dims = {d for coord in matching_index_vars[0].values() for d in coord.dims}\n index_cls = key[1]\n\n if self.join == \"override\":\n joined_index = matching_indexes[0]\n joined_index_vars = matching_index_vars[0]\n need_reindex = False\n elif key in self.indexes:\n joined_index = self.indexes[key]\n joined_index_vars = self.index_vars[key]\n cmp_indexes = list(\n zip(\n [joined_index] + matching_indexes,\n [joined_index_vars] + matching_index_vars,\n )\n )\n need_reindex = self._need_reindex(dims, cmp_indexes)\n else:\n if len(matching_indexes) > 1:\n need_reindex = self._need_reindex(\n dims,\n list(zip(matching_indexes, matching_index_vars)),\n )\n else:\n need_reindex = False\n if need_reindex:\n if self.join == \"exact\":\n raise ValueError(\n \"cannot align objects with join='exact' where \"\n \"index/labels/sizes are not equal along \"\n \"these coordinates (dimensions): \"\n + \", \".join(f\"{name!r} {dims!r}\" for name, dims in key[0])\n )\n joiner = self._get_index_joiner(index_cls)\n joined_index = joiner(matching_indexes)\n if self.join == \"left\":\n joined_index_vars = matching_index_vars[0]\n elif self.join == \"right\":\n joined_index_vars = matching_index_vars[-1]\n else:\n joined_index_vars = joined_index.create_variables()\n else:\n joined_index = matching_indexes[0]\n joined_index_vars = matching_index_vars[0]\n\n reindex[key] = need_reindex\n aligned_indexes[key] = joined_index\n 
aligned_index_vars[key] = joined_index_vars\n\n for name, var in joined_index_vars.items():\n new_indexes[name] = joined_index\n new_index_vars[name] = var\n\n # Explicitly provided indexes that are not found in objects to align\n # may relate to unindexed dimensions so we add them too\n for key, idx in self.indexes.items():\n if key not in aligned_indexes:\n index_vars = self.index_vars[key]\n reindex[key] = False\n aligned_indexes[key] = idx\n aligned_index_vars[key] = index_vars\n for name, var in index_vars.items():\n new_indexes[name] = idx\n new_index_vars[name] = var\n\n self.aligned_indexes = aligned_indexes\n self.aligned_index_vars = aligned_index_vars\n self.reindex = reindex\n self.new_indexes = Indexes(new_indexes, new_index_vars)", "def vstack(arrays, join_type='inner', col_name_map=None):\n # Store user-provided col_name_map until the end\n _col_name_map = col_name_map\n\n # Input validation\n if join_type not in ('inner', 'exact', 'outer'):\n raise ValueError(\"`join_type` arg must be one of 'inner', 'exact' or 'outer'\")\n\n _check_for_sequence_of_structured_arrays(arrays)\n\n # Trivial case of one input array\n if len(arrays) == 1:\n return arrays[0]\n\n # Start by assuming an outer match where all names go to output\n names = set(chain(*[arr.dtype.names for arr in arrays]))\n col_name_map = get_col_name_map(arrays, names)\n\n # If require_match is True then the output must have exactly the same\n # number of columns as each input array\n if join_type == 'exact':\n for names in six.itervalues(col_name_map):\n if any(x is None for x in names):\n raise TableMergeError('Inconsistent columns in input arrays '\n \"(use 'inner' or 'outer' join_type to \"\n \"allow non-matching columns)\")\n join_type = 'outer'\n\n # For an inner join, keep only columns where all input arrays have that column\n if join_type == 'inner':\n col_name_map = OrderedDict((name, in_names) for name, in_names in six.iteritems(col_name_map)\n if all(x is not None for x in in_names))\n if len(col_name_map) == 0:\n raise TableMergeError('Input arrays have no columns in common')\n\n # If there are any output columns where one or more input arrays are missing\n # then the output must be masked. If any input arrays are masked then\n # output is masked.\n masked = any(isinstance(arr, ma.MaskedArray) for arr in arrays)\n for names in six.itervalues(col_name_map):\n if any(x is None for x in names):\n masked = True\n break\n\n lens = [len(arr) for arr in arrays]\n n_rows = sum(lens)\n out_descrs = get_descrs(arrays, col_name_map)\n if masked:\n # Make a masked array with all values initially masked. 
Note\n # that setting an array value automatically unmasks it.\n # See comment in hstack for heritage of this code.\n out = ma.masked_array(np.zeros(n_rows, out_descrs),\n mask=np.ones(n_rows, ma.make_mask_descr(out_descrs)))\n else:\n out = np.empty(n_rows, dtype=out_descrs)\n\n for out_name, in_names in six.iteritems(col_name_map):\n idx0 = 0\n for name, array in izip(in_names, arrays):\n idx1 = idx0 + len(array)\n if name in array.dtype.names:\n out[out_name][idx0:idx1] = array[name]\n idx0 = idx1\n\n # If col_name_map supplied as a dict input, then update.\n if isinstance(_col_name_map, collections.Mapping):\n _col_name_map.update(col_name_map)\n\n return out", "def join(left, right, how, left_on, right_on, suffixes=('_x', '_y')):\n left, left_on = check_consistent(\n left, \n col_names=left_on, \n M_argument_name='left',\n col_names_argument_name='left_on')\n right, right_on = check_consistent(\n right, \n col_names=right_on,\n M_argument_name='right',\n col_names_argument_name='right_on')\n\n # left_on and right_on can both be strings or lists\n if isinstance(left_on, basestring):\n left_on = [left_on]\n if isinstance(right_on, basestring):\n right_on = [right_on]\n\n # assemble dtype for the merged array\n # Rules for naming columns in the new table, as inferred from Pandas:\n # 1. If a joined on column has the same name in both tables, it appears\n # in the joined table once under that name (no suffix)\n # 2. Otherwise, every column from each table will appear in the joined\n # table, whether they are joined on or not. If both tables share a \n # column name, the name will appear twice with suffixes. If a column\n # name appears only in one table, it will appear without a suffix.\n frozenset_left_on = frozenset(left_on)\n frozenset_right_on = frozenset(right_on)\n frozenset_shared_on = frozenset_left_on.intersection(frozenset_right_on)\n shared_on = list(frozenset_shared_on)\n # get arrays without shared join columns\n left_names = left.dtype.names\n right_names = right.dtype.names\n frozenset_left_names = frozenset(left.dtype.names).difference(\n frozenset_shared_on)\n left_names = list(frozenset_left_names)\n frozenset_right_names = frozenset(right.dtype.names).difference(\n frozenset_shared_on)\n right_names = list(frozenset_right_names)\n left_no_idx = left[left_names]\n right_no_idx = right[right_names]\n left_names_w_suffix = [col_name + suffixes[0] if \n col_name in frozenset_right_names else\n col_name for \n col_name in left_names]\n right_names_w_suffix = [col_name + suffixes[1] if \n col_name in frozenset_left_names else\n col_name for \n col_name in right_names]\n col_names = (left_names_w_suffix + shared_on + right_names_w_suffix)\n col_dtypes = ([left[left_col].dtype for left_col in left_names] +\n [left[shared_on_col].dtype for shared_on_col in shared_on] +\n [right[right_col].dtype for right_col in right_names])\n take_all_right_rows = how in ('outer', 'right')\n take_all_left_rows = how in ('outer', 'left')\n # data to fill in if we're doing an outer join and one of the sides is\n # missing\n left_fill = tuple([__fill_by_descr(dtype) for _, dtype in \n left_no_idx.dtype.descr])\n right_fill = tuple([__fill_by_descr(dtype) for _, dtype in \n right_no_idx.dtype.descr])\n\n # Make a hash of the first join column in the left table\n left_col = left[left_on[0]]\n hashed_col = {}\n for left_idx, left_cell in enumerate(left_col):\n try:\n rows = hashed_col[left_cell]\n except KeyError:\n rows = []\n hashed_col[left_cell] = rows\n rows.append(left_idx)\n\n # Pick out 
columns that we will be joining on beyond the 0th\n extra_left_cols = [left[left_on_name] for left_on_name in left_on[1:]]\n extra_right_cols = [right[right_on_name] for right_on_name in right_on[1:]]\n extra_contraint_cols = zip(extra_left_cols, extra_right_cols)\n\n rows_new_table = []\n right_col = right[right_on[0]]\n # keep track of used left rows so we can include all the rows if we're\n # doing a left or outer join\n left_rows_used = set()\n # Iterate through every row in the right table\n for right_idx, right_cell in enumerate(right_col):\n has_match = False\n # See if we have matches from the hashed col of the left table\n try:\n left_matches = hashed_col[right_cell]\n \n for left_idx in left_matches:\n # If all the constraints are met, we have a match\n if all([extra_left_col[left_idx] == extra_right_col[right_idx] \n for extra_left_col, extra_right_col in \n extra_contraint_cols]):\n has_match = True\n rows_new_table.append(\n tuple(left_no_idx[left_idx]) + \n tuple([left[shared_on_col][left_idx] \n for shared_on_col in shared_on]) +\n tuple(right_no_idx[right_idx]))\n left_rows_used.add(left_idx) \n # No match found for this right row\n except KeyError:\n pass \n # If we're doing a right or outer join and we didn't find a match, add\n # this row from the right table, filled with type-appropriate versions\n # of NULL from the left table\n if (not has_match) and take_all_right_rows:\n rows_new_table.append(left_fill + \n tuple([right[shared_on_col][right_idx] for shared_on_col in\n shared_on]) + \n tuple(right_no_idx[right_idx]))\n\n # if we're doing a left or outer join, we have to add all rows from the \n # left table, using type-appropriate versions of NULL for the right table\n if take_all_left_rows: \n left_rows_unused = [i for i in xrange(len(left)) if i not in \n left_rows_used]\n for unused_left_idx in left_rows_unused:\n rows_new_table.append(\n tuple(left_no_idx[unused_left_idx]) +\n tuple([left[shared_on_col][unused_left_idx] \n for shared_on_col in shared_on]) +\n right_fill)\n\n return np.array(rows_new_table, dtype={'names': col_names, \n 'formats': col_dtypes})", "def hstack(arrays, join_type='exact', uniq_col_name='{col_name}_{table_name}',\n table_names=None, col_name_map=None):\n # Store user-provided col_name_map until the end\n _col_name_map = col_name_map\n\n # Input validation\n if join_type not in ('inner', 'exact', 'outer'):\n raise ValueError(\"join_type arg must be either 'inner', 'exact' or 'outer'\")\n _check_for_sequence_of_structured_arrays(arrays)\n\n if table_names is None:\n table_names = ['{0}'.format(ii + 1) for ii in range(len(arrays))]\n if len(arrays) != len(table_names):\n raise ValueError('Number of arrays must match number of table_names')\n\n # Trivial case of one input arrays\n if len(arrays) == 1:\n return arrays[0]\n\n col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names)\n\n # If require_match is True then all input arrays must have the same length\n arr_lens = [len(arr) for arr in arrays]\n if join_type == 'exact':\n if len(set(arr_lens)) > 1:\n raise TableMergeError(\"Inconsistent number of rows in input arrays \"\n \"(use 'inner' or 'outer' join_type to allow \"\n \"non-matching rows)\")\n join_type = 'outer'\n\n # For an inner join, keep only columns where all input arrays have that column\n if join_type == 'inner':\n min_arr_len = min(arr_lens)\n arrays = [arr[:min_arr_len] for arr in arrays]\n arr_lens = [min_arr_len for arr in arrays]\n\n # If there are any output rows where one or more input arrays are 
missing\n # then the output must be masked. If any input arrays are masked then\n # output is masked.\n masked = (any(isinstance(arr, ma.MaskedArray) for arr in arrays) or\n len(set(arr_lens)) > 1)\n\n n_rows = max(arr_lens)\n out_descrs = get_descrs(arrays, col_name_map)\n if masked:\n # Adapted from ma.all_masked() code. Here the array is filled with\n # zeros instead of empty. This avoids the bug reported here:\n # https://github.com/numpy/numpy/issues/3276\n out = ma.masked_array(np.zeros(n_rows, out_descrs),\n mask=np.ones(n_rows, ma.make_mask_descr(out_descrs)))\n else:\n out = np.empty(n_rows, dtype=out_descrs)\n\n for out_name, in_names in six.iteritems(col_name_map):\n for name, array, arr_len in izip(in_names, arrays, arr_lens):\n if name is not None:\n out[out_name][:arr_len] = array[name]\n\n # If col_name_map supplied as a dict input, then update.\n if isinstance(_col_name_map, collections.Mapping):\n _col_name_map.update(col_name_map)\n\n return out", "def force_align(x1, x2, mode='fill'):\n x1, x1_targets, x1_lens = x1\n x2, x2_targets, x2_lens = x2\n x1_new = []\n x1_targets_new = []\n x2_new = []\n x2_targets_new = []\n x1_curr_idx = 0\n x2_curr_idx = 0\n for i, l1 in enumerate(x1_lens):\n l2 = x2_lens[i]\n difference = l1 - l2\n if mode == 'fill':\n if difference < 0:\n '''fill x1 with difference'''\n difference = abs(difference)\n for j in range(l1):\n x1_new.append(x1[x1_curr_idx + j])\n x1_targets_new.append(x1_targets[x1_curr_idx + j])\n '''[0][1][2][3]'''\n last_element = x1[x1_curr_idx + l1 - 1]\n last_element_target = x1_targets[x1_curr_idx + l1 - 1]\n for j in range(difference):\n x1_new.append(np.copy(last_element))\n x1_targets_new.append(np.copy(last_element_target))\n x1_lens[i] = l1 + difference # update the lens\n for j in range(l2):\n x2_new.append(x2[x2_curr_idx + j])\n x2_targets_new.append(x2_targets[x2_curr_idx + j])\n else:\n '''fill x2 with difference'''\n for j in range(l2):\n x2_new.append(x2[x2_curr_idx + j])\n x2_targets_new.append(x2_targets[x2_curr_idx + j])\n '''[0][1][2][3]'''\n last_element = x2[x2_curr_idx + l1 - 1]\n last_element_target = x2_targets[x2_curr_idx + l2 - 1]\n for j in range(difference):\n x2_new.append(np.copy(last_element))\n x2_targets_new.append(np.copy(last_element_target))\n x2_lens[i] = l2 + difference # update the lens\n for j in range(l1):\n x1_new.append(x1[x1_curr_idx + j])\n x1_targets_new.append(x1_targets[x1_curr_idx + j])\n x1_curr_idx += l1\n x2_curr_idx += l2\n # TODO: discard mode\n return (np.array(x1_new), np.array(x1_targets_new), x1_lens), (np.array(x2_new), np.array(x2_targets_new), x2_lens)", "def join(self, other_datasets: Iterable[DatasetBase]) -> None:\n if not all(isinstance(d, JsonIndexDataset) for d in other_datasets):\n raise ValueError(\"This function can only join a list of JsonIndexDataset\")\n # pyre-ignore[16]\n self.frame_annots.extend([fa for d in other_datasets for fa in d.frame_annots])\n # pyre-ignore[16]\n self.seq_annots.update(\n # https://gist.github.com/treyhunner/f35292e676efa0be1728\n functools.reduce(\n lambda a, b: {**a, **b},\n # pyre-ignore[16]\n [d.seq_annots for d in other_datasets],\n )\n )\n all_eval_batches = [\n self.eval_batches,\n *[d.eval_batches for d in other_datasets], # pyre-ignore[16]\n ]\n if not (\n all(ba is None for ba in all_eval_batches)\n or all(ba is not None for ba in all_eval_batches)\n ):\n raise ValueError(\n \"When joining datasets, either all joined datasets have to have their\"\n \" eval_batches defined, or all should have their eval batches 
undefined.\"\n )\n if self.eval_batches is not None:\n self.eval_batches = sum(all_eval_batches, [])\n self._invalidate_indexes(filter_seq_annots=True)", "def test_join_different_coords_inputs_unchanged(self):\n\n # get the ddis\n orig, cppy = self.helper_get_joinable_ddis(deep_copy_both=True, cppy_change_times=True)\n\n # set an extra attribute on vis0 (orig)\n orig.attrs['testing_extra_attr'] = 'foo'\n\n # do the merge\n join = cngi.vis.ddijoin(orig, cppy)\n\n # did the attribute get carried over\n self.assertTrue('testing_extra_attr' in orig.attrs, \"vis0 should have an attribute \\\"testing_extra_attr\\\"\")\n self.assertEqual(orig.testing_extra_attr, 'foo', \"vis0 should have an attribute \\\"testing_extra_attr\\\" with the value \\\"foo\\\"\")\n self.assertFalse('testing_extra_attr' in cppy.attrs, \"vis1 should NOT have an attribute \\\"testing_extra_attr\\\"\")\n self.assertTrue('testing_extra_attr' in join.attrs, \"join should have an attribute \\\"testing_extra_attr\\\"\")\n self.assertEqual(join.testing_extra_attr, 'foo', \"join should have an attribute \\\"testing_extra_attr\\\" with the value \\\"foo\\\"\")", "def test_merge_arrays_attrs_variables(\n self, combine_attrs, attrs1, attrs2, expected_attrs, expect_exception\n ):\n data1 = xr.Dataset(\n {\"var1\": (\"dim1\", [], attrs1)}, coords={\"dim1\": (\"dim1\", [], attrs1)}\n )\n data2 = xr.Dataset(\n {\"var1\": (\"dim1\", [], attrs2)}, coords={\"dim1\": (\"dim1\", [], attrs2)}\n )\n\n if expect_exception:\n with pytest.raises(MergeError, match=\"combine_attrs\"):\n actual = xr.merge([data1, data2], combine_attrs=combine_attrs)\n else:\n actual = xr.merge([data1, data2], combine_attrs=combine_attrs)\n expected = xr.Dataset(\n {\"var1\": (\"dim1\", [], expected_attrs)},\n coords={\"dim1\": (\"dim1\", [], expected_attrs)},\n )\n\n assert_identical(actual, expected)", "def interweave(arrays: Sequence[np.ndarray]) -> np.ndarray:\n shape = list(arrays[0].shape)\n shape[0] = sum(x.shape[0] for x in arrays)\n result = np.empty(shape, dtype=arrays[0].dtype)\n n = len(arrays)\n for i, arr in enumerate(arrays):\n result[i::n] = arr\n return result", "def join_dataframes(main_df, df, id_name):\n cur_df = main_df.copy()\n \n shape = []\n for ind in df.index:\n shape.append(cur_df[main_df[id_name] == ind].shape[0])\n \n for i in df.columns:\n cur_df[i] = np.repeat(df[i], shape).as_matrix()\n return cur_df", "def interleave_attributes(attributes):\n stride = 0\n max_size = 0\n for attribute in attributes:\n stride = _align(stride, attribute.align)\n attribute.offset = stride\n stride += attribute.size\n max_size = max(max_size, attribute.size)\n stride = _align(stride, max_size)\n for attribute in attributes:\n attribute.stride = stride", "def test_03_01_align_similarly(self):\n np.random.seed(0)\n shape = (53, 62)\n i, j = np.mgrid[0 : shape[0], 0 : shape[1]]\n for offset in ((3, 5), (-3, 5), (3, -5), (-3, -5)):\n image1 = np.random.randint(0, 10, size=shape).astype(float) / 10.0\n image1[\n np.sqrt(((i - shape[0] / 2) ** 2 + (j - shape[1] / 2) ** 2)) < 20\n ] = 0.5\n si1, si2 = self.slice_helper(offset[0], image1.shape[0])\n sj1, sj2 = self.slice_helper(offset[1], image1.shape[1])\n image2 = np.zeros(image1.shape)\n image2 = image1[\n (i + shape[0] - offset[0]) % shape[0],\n (j + shape[1] - offset[1]) % shape[1],\n ]\n image2 += (np.random.uniform(size=shape) - 0.5) * 0.1 * np.std(image2)\n image3 = (i * 100 + j).astype(np.float32) / 10000\n workspace, module = self.make_workspace(\n (image1, image2, image3), (None, None, None)\n )\n 
assert isinstance(module, Align)\n module.alignment_method.value = M_CROSS_CORRELATION\n module.crop_mode.value = C_PAD\n module.additional_images[0].align_choice.value = A_SIMILARLY\n module.run(workspace)\n output = workspace.image_set.get_image(\"Aligned2\")\n m = workspace.measurements\n columns = module.get_measurement_columns(workspace.pipeline)\n assert len(columns) == 6\n align_measurements = [\n x for x in m.get_feature_names(\"Image\") if x.startswith(\"Align\")\n ]\n assert len(align_measurements) == 6\n assert isinstance(m, Measurements)\n off_i0 = -m.get_current_image_measurement(\"Align_Yshift_Aligned0\")\n off_j0 = -m.get_current_image_measurement(\"Align_Xshift_Aligned0\")\n off_i1 = -m.get_current_image_measurement(\"Align_Yshift_Aligned1\")\n off_j1 = -m.get_current_image_measurement(\"Align_Xshift_Aligned1\")\n off_i2 = -m.get_current_image_measurement(\"Align_Yshift_Aligned2\")\n off_j2 = -m.get_current_image_measurement(\"Align_Xshift_Aligned2\")\n assert off_i0 - off_i1 == offset[0]\n assert off_j0 - off_j1 == offset[1]\n assert off_i0 - off_i2 == offset[0]\n assert off_j0 - off_j2 == offset[1]\n\n i_slice = self.single_slice_helper(off_i2, shape[0])\n j_slice = self.single_slice_helper(off_j2, shape[1])\n np.testing.assert_almost_equal(output.pixel_data[i_slice, j_slice], image3)", "def cat(\n self,\n others=None,\n sep: str | None = None,\n na_rep=None,\n join: AlignJoin = \"left\",\n ) -> str | Series | Index:\n # TODO: dispatch\n from pandas import (\n Index,\n Series,\n concat,\n )\n\n if isinstance(others, str):\n raise ValueError(\"Did you mean to supply a `sep` keyword?\")\n if sep is None:\n sep = \"\"\n\n if isinstance(self._orig, ABCIndex):\n data = Series(self._orig, index=self._orig, dtype=self._orig.dtype)\n else: # Series\n data = self._orig\n\n # concatenate Series/Index with itself if no \"others\"\n if others is None:\n # error: Incompatible types in assignment (expression has type\n # \"ndarray\", variable has type \"Series\")\n data = ensure_object(data) # type: ignore[assignment]\n na_mask = isna(data)\n if na_rep is None and na_mask.any():\n return sep.join(data[~na_mask])\n elif na_rep is not None and na_mask.any():\n return sep.join(np.where(na_mask, na_rep, data))\n else:\n return sep.join(data)\n\n try:\n # turn anything in \"others\" into lists of Series\n others = self._get_series_list(others)\n except ValueError as err: # do not catch TypeError raised by _get_series_list\n raise ValueError(\n \"If `others` contains arrays or lists (or other \"\n \"list-likes without an index), these must all be \"\n \"of the same length as the calling Series/Index.\"\n ) from err\n\n # align if required\n if any(not data.index.equals(x.index) for x in others):\n # Need to add keys for uniqueness in case of duplicate columns\n others = concat(\n others,\n axis=1,\n join=(join if join == \"inner\" else \"outer\"),\n keys=range(len(others)),\n sort=False,\n copy=False,\n )\n data, others = data.align(others, join=join)\n others = [others[x] for x in others] # again list of Series\n\n all_cols = [ensure_object(x) for x in [data] + others]\n na_masks = np.array([isna(x) for x in all_cols])\n union_mask = np.logical_or.reduce(na_masks, axis=0)\n\n if na_rep is None and union_mask.any():\n # no na_rep means NaNs for all rows where any column has a NaN\n # only necessary if there are actually any NaNs\n result = np.empty(len(data), dtype=object)\n np.putmask(result, union_mask, np.nan)\n\n not_masked = ~union_mask\n result[not_masked] = cat_safe([x[not_masked] for 
x in all_cols], sep)\n elif na_rep is not None and union_mask.any():\n # fill NaNs with na_rep in case there are actually any NaNs\n all_cols = [\n np.where(nm, na_rep, col) for nm, col in zip(na_masks, all_cols)\n ]\n result = cat_safe(all_cols, sep)\n else:\n # no NaNs - can just concatenate\n result = cat_safe(all_cols, sep)\n\n out: Index | Series\n if isinstance(self._orig, ABCIndex):\n # add dtype for case that result is all-NA\n\n out = Index(result, dtype=object, name=self._orig.name)\n else: # Series\n if isinstance(self._orig.dtype, CategoricalDtype):\n # We need to infer the new categories.\n dtype = None\n else:\n dtype = self._orig.dtype\n res_ser = Series(\n result, dtype=dtype, index=data.index, name=self._orig.name, copy=False\n )\n out = res_ser.__finalize__(self._orig, method=\"str_cat\")\n return out", "def align_index(standard, raw_data, *, axis='both'):\n if axis is 'both':\n aligned_data = raw_data.reindex(major_axis=standard.index, minor_axis=standard.columns)\n elif axis is 'major':\n aligned_data = raw_data.reindex(major_axis=standard.index)\n elif axis is 'minor':\n aligned_data = raw_data.reindex(minor_axis=standard.columns)\n\n return aligned_data", "def TableIterInnerJoin( tableIters, cols, suffixes = None, blanks = None, concat = True ):\n\n @TableMaker\n def TableIterInnerJoinAux( tableIters, cols, headings, blanks ):\n\n\n yield headings if concat else [ 'h_%d' % i for i in range( len( tableIters ) ) ]\n\n headingLens = [ len( ti.headings ) for ti in tableIters ]\n assert sum( headingLens ) == len( headings )\n blanks = [ blank if blank is None or hasattr(blank,'__len__') else (blank,)*headingLen\n for blank, headingLen in zip(blanks, headingLens) ]\n \n prevKey = None\n for k, g in itertools.groupby( itermerge( iters = tableIters, keys = cols, ids = True,\n includeKeys = True ),\n key = operator.itemgetter( 0 ) ):\n\n # check that the keys are sorted in strictly increasing order -- important for correct operation of join \n if not( prevKey==None or k > prevKey ):\n logging.info( 'prevKey=' + str(prevKey) + ' key=' + str(k) )\n assert prevKey==None or k > prevKey\n prevKey = k\n\n records = tuple( g )\n\n origins = [ r[1][0] for r in records ]\n if not is_sorted( origins, strict = True ):\n print('records are ', records)\n print('origins are ', origins)\n assert is_sorted( origins, strict = True )\n\n recordsList = [ None ] * len( tableIters )\n positionFilled = [ False ] * len( tableIters )\n for r in records:\n recordsList[ r[1][0] ] = r[1][1]\n positionFilled[ r[1][0] ] = True\n \n for i in range( len( tableIters ) ):\n if not positionFilled[ i ] and blanks[ i ] != None:\n recordsList[ i ] = blanks[ i ]\n positionFilled[ i ] = True\n\n if all( positionFilled ):\n rec = list(map( tuple, recordsList ))\n assert list(map( len, rec )) == headingLens\n if concat: rec = reduce( operator.concat, rec )\n yield rec\n\n tableIters = tuple( tableIters )\n if blanks == None: blanks = (None,) * len( tableIters )\n if suffixes == None: suffixes = [ '_%d' % i for i in range( len( tableIters ) ) ]\n\n assert all([ not hasattr(blank,'__len__') or len( blank ) == len( tableIter.headings )\n for tableIter, blank in zip( tableIters, blanks ) ])\n\n assert len( suffixes ) == len( tableIters )\n logging.info( 'suffixes are ' + str(suffixes) )\n\n sharedColNames = set( [] )\n allColNames = set( [] )\n for tableIter in tableIters:\n for heading in tableIter.headings:\n ( sharedColNames if heading in allColNames else allColNames ).add( heading )\n\n logging.info( 
'sharedColNames=%s' % sharedColNames )\n \n newHeadings = [ n if n not in sharedColNames else n + sfx for tableIter, sfx in zip( tableIters, suffixes )\n for n in tableIter.headings ]\n\n logging.info( 'newHeadings are: %s' % newHeadings )\n\n return TableIterInnerJoinAux( tableIters = tableIters, cols = cols, headings = newHeadings, blanks = blanks )", "def pad_and_stack_arrays(list_of_arrays, align_pt=None):\n if align_pt is None: align_pt = np.zeros([len(list_of_arrays), list_of_arrays[0].ndim])\n far_corner = np.array([x.shape for x in list_of_arrays])\n leftpad = align_pt.max(0) - align_pt\n far_corner = far_corner + leftpad\n rightpad = far_corner.max(0) - far_corner\n def f(_r):\n x,lp,rp = _r\n p = np.stack([lp,rp],axis=1)\n r = np.pad(x,pad_width=p)\n return r\n res = np.stack([f(_r) for _r in zip(list_of_arrays,leftpad,rightpad)])\n return res", "def join(left, right, keys=None, join_type='inner',\n uniq_col_name='{col_name}_{table_name}',\n table_names=['1', '2'],\n col_name_map=None):\n # Store user-provided col_name_map until the end\n _col_name_map = col_name_map\n\n if join_type not in ('inner', 'outer', 'left', 'right'):\n raise ValueError(\"The 'join_type' argument should be in 'inner', \"\n \"'outer', 'left' or 'right' (got '{0}' instead)\".\n format(join_type))\n\n # If we have a single key, put it in a tuple\n if keys is None:\n keys = tuple(name for name in left.dtype.names if name in right.dtype.names)\n if len(keys) == 0:\n raise TableMergeError('No keys in common between left and right tables')\n elif isinstance(keys, six.string_types):\n keys = (keys,)\n\n # Check the key columns\n for arr, arr_label in ((left, 'Left'), (right, 'Right')):\n for name in keys:\n if name not in arr.dtype.names:\n raise TableMergeError('{0} table does not have key column {1!r}'\n .format(arr_label, name))\n if hasattr(arr[name], 'mask') and np.any(arr[name].mask):\n raise TableMergeError('{0} key column {1!r} has missing values'\n .format(arr_label, name))\n\n # Make sure we work with ravelled arrays\n left = left.ravel()\n right = right.ravel()\n len_left, len_right = len(left), len(right)\n left_names, right_names = left.dtype.names, right.dtype.names\n\n # Joined array dtype as a list of descr (name, type_str, shape) tuples\n col_name_map = get_col_name_map([left, right], keys, uniq_col_name, table_names)\n out_descrs = get_descrs([left, right], col_name_map)\n\n # Make an array with just the key columns\n out_keys_dtype = [descr for descr in out_descrs if descr[0] in keys]\n out_keys = np.empty(len_left + len_right, dtype=out_keys_dtype)\n for key in keys:\n out_keys[key][:len_left] = left[key]\n out_keys[key][len_left:] = right[key]\n idx_sort = out_keys.argsort(order=keys)\n out_keys = out_keys[idx_sort]\n\n # Get all keys\n diffs = np.concatenate(([True], out_keys[1:] != out_keys[:-1], [True]))\n idxs = np.flatnonzero(diffs)\n\n # Main inner loop in Cython to compute the cartesion product\n # indices for the given join type\n int_join_type = {'inner': 0, 'outer': 1, 'left': 2, 'right': 3}[join_type]\n masked, n_out, left_out, left_mask, right_out, right_mask = \\\n _np_utils.join_inner(idxs, idx_sort, len_left, int_join_type)\n\n # If either of the inputs are masked then the output is masked\n if any(isinstance(array, ma.MaskedArray) for array in (left, right)):\n masked = True\n\n if masked:\n out = ma.empty(n_out, dtype=out_descrs)\n else:\n out = np.empty(n_out, dtype=out_descrs)\n\n # If either input array was zero length then stub a new version\n # with one row. 
In this case the corresponding left_out or right_out\n # will contain all zeros with mask set to true. This allows the\n # take(*_out) method calls to work as expected.\n if len(left) == 0:\n left = left.__class__(1, dtype=left.dtype)\n if len(right) == 0:\n right = right.__class__(1, dtype=right.dtype)\n\n for out_name, left_right_names in six.iteritems(col_name_map):\n left_name, right_name = left_right_names\n\n if left_name and right_name: # this is a key which comes from left and right\n out[out_name] = np.where(right_mask,\n left[left_name].take(left_out),\n right[right_name].take(right_out))\n continue\n elif left_name: # out_name came from the left table\n name, array, array_out, array_mask = left_name, left, left_out, left_mask\n elif right_name:\n name, array, array_out, array_mask = right_name, right, right_out, right_mask\n else:\n raise TableMergeError('Unexpected column names (maybe one is \"\"?)')\n out[out_name] = array[name].take(array_out, axis=0)\n if masked:\n if isinstance(array, ma.MaskedArray):\n array_mask = array_mask | array[name].mask.take(array_out)\n out[out_name].mask = array_mask\n\n # If col_name_map supplied as a dict input, then update.\n if isinstance(_col_name_map, collections.Mapping):\n _col_name_map.update(col_name_map)\n\n return out", "def multistream_force_align(orig_streams, mode='fill'):\n INPUT_IDX = 0\n TARGET_IDX = 1\n LEN_IDX = 2\n new_streams = [([], [], s[2]) for s in orig_streams]\n curr_idxs = [0]*len(orig_streams)\n inputs, targets, input_lens = extract_stream_elements(orig_streams)\n # for each sequence\n for i, l1 in enumerate(input_lens[0]):\n # compute the lens, find stream with longest length\n lens = [input_len_vec[i] for input_len_vec in input_lens]\n max_idx = np.argmax(lens)\n # compute the number of copies to generate\n copies_to_make = [input_lens[max_idx][i] - l[i] for l in input_lens]\n # for each stream, append the original stream and copies to make to new stream\n for j in range(len(orig_streams)):\n input_vec = inputs[j]\n target_vec = targets[j]\n l = lens[j] # length of sequence for current stream\n for k in range(l):\n new_streams[j][INPUT_IDX].append(input_vec[curr_idxs[j] + k])\n new_streams[j][TARGET_IDX].append(target_vec[curr_idxs[j] + k])\n copies = copies_to_make[j]\n # make copies to fill shorter streams\n for k in range(copies):\n last_element = input_vec[curr_idxs[j] + l - 1]\n last_element_target = target_vec[curr_idxs[j] + l - 1]\n new_streams[j][INPUT_IDX].append(np.copy(last_element))\n new_streams[j][TARGET_IDX].append(np.copy(last_element_target))\n new_streams[j][LEN_IDX][i] = l + copies\n curr_idxs[j] += l\n # convert the lists to numpy arrays\n new_streams = [(np.array(x[INPUT_IDX]), np.array(x[TARGET_IDX]), x[LEN_IDX]) for x in new_streams]\n return new_streams" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Align objects for merging, recursing into dictionary values. This function is not public API.
def deep_align( objects: Iterable[Any], join: JoinOptions = "inner", copy=True, indexes=None, exclude=frozenset(), raise_on_invalid=True, fill_value=dtypes.NA, ): from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset if indexes is None: indexes = {} def is_alignable(obj): return isinstance(obj, (Coordinates, DataArray, Dataset)) positions = [] keys = [] out = [] targets = [] no_key = object() not_replaced = object() for position, variables in enumerate(objects): if is_alignable(variables): positions.append(position) keys.append(no_key) targets.append(variables) out.append(not_replaced) elif is_dict_like(variables): current_out = {} for k, v in variables.items(): if is_alignable(v) and k not in indexes: # Skip variables in indexes for alignment, because these # should to be overwritten instead: # https://github.com/pydata/xarray/issues/725 # https://github.com/pydata/xarray/issues/3377 # TODO(shoyer): doing this here feels super-hacky -- can we # move it explicitly into merge instead? positions.append(position) keys.append(k) targets.append(v) current_out[k] = not_replaced else: current_out[k] = v out.append(current_out) elif raise_on_invalid: raise ValueError( "object to align is neither an xarray.Dataset, " "an xarray.DataArray nor a dictionary: {!r}".format(variables) ) else: out.append(variables) aligned = align( *targets, join=join, copy=copy, indexes=indexes, exclude=exclude, fill_value=fill_value, ) for position, key, aligned_obj in zip(positions, keys, aligned): if key is no_key: out[position] = aligned_obj else: out[position][key] = aligned_obj # type: ignore[index] # maybe someone can fix this? return out
[ "def _merge_dicts(self, x, y):\n z = x.copy() # start with x's keys and values\n z.update(y) # modifies z with y's keys and values & returns None\n\n return z", "def _merge_asized(base: Asized, other: Asized, level: int = 0) -> None:\n base.size += other.size\n base.flat += other.flat\n if level > 0:\n base.name = _ref2key(base)\n # Add refs from other to base. Any new refs are appended.\n base.refs = list(base.refs) # we may need to append items\n refs = {}\n for ref in base.refs:\n refs[_ref2key(ref)] = ref\n for ref in other.refs:\n key = _ref2key(ref)\n if key in refs:\n _merge_asized(refs[key], ref, level=level + 1)\n else:\n # Don't modify existing Asized instances => deepcopy\n base.refs.append(deepcopy(ref))\n base.refs[-1].name = key", "def _merge_page(self, a, b):\n for k in b:\n val = b[k]\n if k in a:\n if isinstance(val, dict):\n self._merge_page(a[k], val)\n elif isinstance(val, list):\n a[k] = a[k] + val\n else:\n a[k] = val\n else:\n a[k] = val", "def merge_doc(doc1, doc2):\n for k, v in doc2.iteritems():\n if isinstance(v, collections.Mapping):\n doc1[k] = merge_doc(doc1.get(k, {}), v)\n else:\n doc1[k] = v\n return doc1", "def AlignObj(self,obj1,obj2):\n resultlist=self.yasara.AlignObj(obj1,obj2,method='MOTIF')\n return resultlist[0]", "def merge(a, b):\n 'Taken from http://www.xormedia.com/recursively-merge-dictionaries-in-python/'\n #FIXME: deepcopy might not be needed\n from copy import deepcopy\n if not isinstance(b, dict):\n return b\n result = deepcopy(a)\n for k, v in b.iteritems():\n if k in result and isinstance(result[k], dict):\n result[k] = merge(result[k], v)\n else:\n result[k] = deepcopy(v)\n return result", "def merge(first, second, _recurse=0):\n if not isinstance(second, dict):\n return second\n result = deepcopy(first)\n for key, value in second.items():\n if key in result and isinstance(result[key], dict):\n if _recurse > 10: # Max 10 dicts deep\n result[key] = None\n else:\n result[key] = merge(result[key], value, _recurse=_recurse+1)\n else:\n result[key] = deepcopy(value)\n return result", "def merge(self):", "def _merge_config_tree(self, a, b):\n for key, value in b.items():\n # if key is in both a and b and both values are dictionary then merge it otherwise override it\n if key in list(a.items()) and isinstance(a[key], ConfigTree) and isinstance(a[key], ConfigTree):\n self._merge_dict(a[key], b[key])\n else:\n a[key] = value\n\n return a", "def merge(self, tree):\n pass", "def merge(\n *structures: Mapping[str, Mapping[str, Any]]\n) -> Mapping[str, Mapping[str, Any]]:\n out = collections.defaultdict(dict)\n for structure in structures:\n for module_name, name, value in traverse(structure):\n out[module_name][name] = value\n return data_structures.to_haiku_dict(out)", "def _merge_objects(tref: float, merged: Asized, obj: 'TrackedObject') -> None:\n size = None\n for (timestamp, tsize) in obj.snapshots:\n if timestamp == tref:\n size = tsize\n if size:\n _merge_asized(merged, size)", "def merge_all_dicts_with_dict(dict_of_dicts, dict_to_merge, mergetype=MERGETYPE):\n for k, v in dict_of_dicts.iteritems():\n dict_of_dicts[k] = merge_dict_as_type(v, dict_to_merge, mergetype)\n\n return dict_of_dicts", "def _merge_results(self):\n\t\tnew_dict = {}\n\t\tfor it in self.data:\n\t\t\tnew_dict.update(it)\n\n\t\tfor k,v in new_dict.items():\n\t\t\tfor kk, vv in v.time_stamps.items():\n\t\t\t\tfor kkk,vvv in vv.items():\n\t\t\t\t\tnew_dict[k].time_stamps[kk][kkk] = vvv - self.HB_config['time_ref']\n\n\t\tself.data = new_dict", "def expand_dict(doc, path, 
includes, current, cls=dict):\n cp = cls()\n # first merge any includes includes into cp\n templates: List[Mapping] = []\n assert isinstance(current, Mapping), current\n for (key, value) in current.items():\n if not isinstance(key, str):\n cp[key] = value\n continue\n if key.startswith(\"+\"):\n if key == mergeStrategyKey:\n cp[key] = value\n continue\n mergeKey = parse_merge_key(key)\n if not mergeKey:\n cp[key] = value\n continue\n foundTemplate = has_template(doc, mergeKey, value, path, cls)\n if not foundTemplate:\n includes.setdefault(path, []).append(_MissingInclude(mergeKey, value))\n cp[key] = value\n continue\n includes.setdefault(path, []).append((mergeKey, value))\n template = get_template(doc, mergeKey, value, path, cls, includes)\n if isinstance(template, Mapping):\n templates.append(template)\n elif mergeKey.include and template is None:\n continue # include path not found\n else:\n if len(current) > 1: # XXX include merge directive keys in count\n raise UnfurlError(\n f\"can not merge {mergeKey} with non-map value of type {type(template)}: {template}\"\n )\n else:\n return template # current dict is replaced with a value\n # elif key.startswith(\"q+\"):\n # cp[key[2:]] = value\n elif isinstance(value, Mapping):\n cp[key] = expand_dict(doc, path + (key,), includes, value, cls)\n elif isinstance(value, list):\n cp[key] = list(expand_list(doc, path + (key,), includes, value, cls))\n else:\n cp[key] = value\n\n if templates:\n accum = templates.pop(0)\n templates.append(cp)\n while templates:\n cls = getattr(templates[0], \"mapCtor\", cls)\n accum = merge_dicts(accum, templates.pop(0), cls)\n return accum\n else:\n return cp\n # e,g, merge_dicts(merge_dicts(a, b), cp)\n # return includes, reduce(lambda accum, next: merge_dicts(accum, next, cls), templates, {}), cp", "def dict_merge(base, addition):\n if not isinstance(base, dict) or not isinstance(addition, dict):\n raise TypeError(\"dict_merge only works with dicts.\")\n\n new_base = deepcopy(base)\n for key, value in addition.items():\n # If the value is a dict, need to merge those\n if isinstance(value, dict):\n new_base[key] = dict_merge(new_base.get(key, {}), value)\n # Otherwise, if the key is not in base, add it\n elif key not in new_base.keys():\n new_base[key] = value\n\n return new_base", "def nested_merge(map_1: MutableMapping, map_2: MutableMapping) -> MutableMapping:\n new = copy(map_1)\n for key, value in map_2.items():\n if key in map_1 and isinstance(value, MutableMapping):\n new[key] = nested_merge(map_1[key], value)\n else:\n new[key] = value\n\n return new", "def align_indexes(self) -> None:\n\n aligned_indexes = {}\n aligned_index_vars = {}\n reindex = {}\n new_indexes = {}\n new_index_vars = {}\n\n for key, matching_indexes in self.all_indexes.items():\n matching_index_vars = self.all_index_vars[key]\n dims = {d for coord in matching_index_vars[0].values() for d in coord.dims}\n index_cls = key[1]\n\n if self.join == \"override\":\n joined_index = matching_indexes[0]\n joined_index_vars = matching_index_vars[0]\n need_reindex = False\n elif key in self.indexes:\n joined_index = self.indexes[key]\n joined_index_vars = self.index_vars[key]\n cmp_indexes = list(\n zip(\n [joined_index] + matching_indexes,\n [joined_index_vars] + matching_index_vars,\n )\n )\n need_reindex = self._need_reindex(dims, cmp_indexes)\n else:\n if len(matching_indexes) > 1:\n need_reindex = self._need_reindex(\n dims,\n list(zip(matching_indexes, matching_index_vars)),\n )\n else:\n need_reindex = False\n if need_reindex:\n if 
self.join == \"exact\":\n raise ValueError(\n \"cannot align objects with join='exact' where \"\n \"index/labels/sizes are not equal along \"\n \"these coordinates (dimensions): \"\n + \", \".join(f\"{name!r} {dims!r}\" for name, dims in key[0])\n )\n joiner = self._get_index_joiner(index_cls)\n joined_index = joiner(matching_indexes)\n if self.join == \"left\":\n joined_index_vars = matching_index_vars[0]\n elif self.join == \"right\":\n joined_index_vars = matching_index_vars[-1]\n else:\n joined_index_vars = joined_index.create_variables()\n else:\n joined_index = matching_indexes[0]\n joined_index_vars = matching_index_vars[0]\n\n reindex[key] = need_reindex\n aligned_indexes[key] = joined_index\n aligned_index_vars[key] = joined_index_vars\n\n for name, var in joined_index_vars.items():\n new_indexes[name] = joined_index\n new_index_vars[name] = var\n\n # Explicitly provided indexes that are not found in objects to align\n # may relate to unindexed dimensions so we add them too\n for key, idx in self.indexes.items():\n if key not in aligned_indexes:\n index_vars = self.index_vars[key]\n reindex[key] = False\n aligned_indexes[key] = idx\n aligned_index_vars[key] = index_vars\n for name, var in index_vars.items():\n new_indexes[name] = idx\n new_index_vars[name] = var\n\n self.aligned_indexes = aligned_indexes\n self.aligned_index_vars = aligned_index_vars\n self.reindex = reindex\n self.new_indexes = Indexes(new_indexes, new_index_vars)", "def merge_dictionaries(dicts):\n merge_dict = copy.deepcopy(dicts[0])\n for dict in dicts[1:]:\n merge_dict = merge_pair_of_dictionaries(merge_dict, dict)\n\n return merge_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the channel_order_no of this ChannelOrderRequest.
def channel_order_no(self, channel_order_no): if self.local_vars_configuration.client_side_validation and channel_order_no is None: # noqa: E501 raise ValueError("Invalid value for `channel_order_no`, must not be `None`") # noqa: E501 if (self.local_vars_configuration.client_side_validation and channel_order_no is not None and len(channel_order_no) > 60): raise ValueError("Invalid value for `channel_order_no`, length must be less than or equal to `60`") # noqa: E501 if (self.local_vars_configuration.client_side_validation and channel_order_no is not None and len(channel_order_no) < 0): raise ValueError("Invalid value for `channel_order_no`, length must be greater than or equal to `0`") # noqa: E501 self._channel_order_no = channel_order_no
[ "def customer_order_number(self, customer_order_number):\n\n self._customer_order_number = customer_order_number", "def vendor_order_id(self, vendor_order_id):\n\n self._vendor_order_id = vendor_order_id", "def set_order(self, order_key: str) -> None:\n if order_key not in self.orders:\n raise exceptions.CommandError(\n \"Unknown flow order: %s\" % order_key\n )\n order_key = self.orders[order_key]\n self.order_key = order_key\n newview = sortedcontainers.SortedListWithKey(key=order_key)\n newview.update(self._view)\n self._view = newview", "def channel_customer_no(self, channel_customer_no):\n if (self.local_vars_configuration.client_side_validation and\n channel_customer_no is not None and len(channel_customer_no) > 50):\n raise ValueError(\"Invalid value for `channel_customer_no`, length must be less than or equal to `50`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n channel_customer_no is not None and len(channel_customer_no) < 0):\n raise ValueError(\"Invalid value for `channel_customer_no`, length must be greater than or equal to `0`\") # noqa: E501\n\n self._channel_customer_no = channel_customer_no", "def setOrder(order):\n ierr = c_int()\n lib.gmshModelMeshSetOrder(\n c_int(order),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshSetOrder returned non-zero error code: \",\n ierr.value)", "def set_module_order(self, order):\n with self.order_lock:\n self.module_order.set(order)\n self._listeners.notify(\"order\")\n self._listeners.notify(\"dependency\")", "def test_set_order_num(self):\n\n self.bond.set_order_num(3)\n self.assertEqual(self.bond.get_order_str(), 'T')", "def send_order(self, order):\n\n # Takes the additional action of adding an order that is about to be sent\n # to a dictionary that keeps track of objects using their reference.\n\n if order.ref is None:\n order.ref = self._increment_counter(order.market.item, \"n\")\n self._orders_waiting_ackn[order.market.item][order.ref] = order\n super().send_order(order)", "def replace_order(self,\n order_specifier: OrderSpecifier = sentinel,\n order_request: OrderRequest = sentinel):\n pass", "def byte_order(self, byte_order):\n self._byte_order = byte_order", "def setZChannel(self, channel: int):\n self.axes[self.Axis.kZ] = channel", "def set_rank_order(order):\n global RANK_ORDER\n RANK_ORDER = order", "def create_order(self, order):\n return self.post(cc_urls['order'], {'order': json.dumps(order)})", "def originator_order_id(self, originator_order_id):\n\n self._originator_order_id = originator_order_id", "def add_order(self, order):\n neworder = copy.copy(order)\n self._validate_order(order)\n\n entrust_no = self._get_next_entrust_no()\n neworder.entrust_no = entrust_no\n\n self.__orders[entrust_no] = neworder\n return entrust_no", "async def cancel_order(ctx, symbol, order_id, orig_client_order_id, new_client_order_id, recv_window):\n if order_id is None and orig_client_order_id is None:\n ctx.log('Either --order_id (-oid) or --orig_client_order_id (-ocoid) must be sent.')\n return\n\n payload = {\n 'symbol': symbol,\n 'recvWindow': recv_window,\n 'timestamp': get_timestamp()\n }\n\n builder = CancelOrderBuilder(endpoint='api/v3/order', payload=payload, method='DELETE') \\\n .add_optional_params_to_payload(order_id=order_id,\n orig_client_order_id=orig_client_order_id,\n new_client_order_id=new_client_order_id) \\\n .set_security()\n\n await builder.send_http_req()\n\n builder.handle_response().generate_output()", "def generate_order_number(self, cart):\n 
self.no_order = ORDER_BASE + cart.id\n return str(self.no_order)", "def order_date(self, order_date):\n if self.local_vars_configuration.client_side_validation and order_date is None: # noqa: E501\n raise ValueError(\"Invalid value for `order_date`, must not be `None`\") # noqa: E501\n\n self._order_date = order_date", "def send_set_channel_mode_to(self, channel_name, mode):\n\t\tcommand = \"MODE #%s %s\" % (channel_name, mode)\n\t\tself.send_command_to_server(command)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the is_business_order of this ChannelOrderRequest.
def is_business_order(self, is_business_order): self._is_business_order = is_business_order
[ "def business_info(self, business_info):\n\n self._business_info = business_info", "def business_name(self, business_name: str):\n if business_name is None:\n raise ValueError(\"Invalid value for `business_name`, must not be `None`\") # noqa: E501\n\n self._business_name = business_name", "def business_objects(self, business_objects):\n\n self._business_objects = business_objects", "def business_group(self, value):\n\n self.business_group_id = self.get_bg_id(value)\n self._business_group = value", "def is_buy_order(self, is_buy_order):\n if is_buy_order is None:\n raise ValueError(\"Invalid value for `is_buy_order`, must not be `None`\") # noqa: E501\n\n self._is_buy_order = is_buy_order", "def is_bill(self, is_bill):\n\n self._is_bill = is_bill", "def SetBusType(self):\n self.cal_type = \"BUS\"", "def is_buying(self, is_buying):\n\n self._is_buying = is_buying", "def _set_business_state_changes(self, business: dict):\n state_filings = []\n # Any filings like restoration, liquidation etc. that changes the state must be included here\n for filing in Filing.get_filings_by_types(self._business.id, ['dissolution', 'restorationApplication',\n 'dissolved', 'restoration',\n 'voluntaryDissolution',\n 'Involuntary Dissolution',\n 'voluntaryLiquidation', 'putBackOn',\n 'continuationOut']):\n state_filings.append(self._format_state_filing(filing))\n business['stateFilings'] = state_filings", "def set_is_single_bill_payment(self, is_single_bill_payment):\n self.is_single_bill_payment = is_single_bill_payment", "def delivered_on_b(self, delivered_on_b):\n\n self._delivered_on_b = delivered_on_b", "def setBold(self, isBold):\n\t\tself._isBold = isBold", "def marketing_state(self, marketing_state):\n\n self._marketing_state = marketing_state", "def b_is_taxable(self, b_is_taxable: bool):\n\n self._b_is_taxable = b_is_taxable", "def is_all(self, is_all):\n if is_all is None:\n raise ValueError(\"Invalid value for `is_all`, must not be `None`\") # noqa: E501\n\n self._is_all = is_all", "async def put_bool( # pylint: disable=inconsistent-return-statements\n self, complex_body: _models.BooleanWrapper, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:", "def is_external_build(self, is_external_build):\n\n self._is_external_build = is_external_build", "def _set_description(self, business: dict):\n legal_type = self._business.legal_type\n corp_type = CorpType.find_by_id(legal_type)\n business['entityDescription'] = corp_type.full_desc\n act = {\n Business.LegalTypes.COOP.value: 'Cooperative Association Act',\n Business.LegalTypes.SOLE_PROP.value: 'Partnership Act',\n Business.LegalTypes.PARTNERSHIP.value: 'Partnership Act'\n } # This could be the legislation column from CorpType. Yet to discuss.\n business['entityAct'] = act.get(legal_type, 'Business Corporations Act')\n\n business['business']['coopType'] = BusinessDocument.CP_TYPE_DESCRIPTION[self._business.association_type]\\\n if self._business.association_type else 'Not Available'", "def delivery_frequency(self, delivery_frequency):\n\n self._delivery_frequency = delivery_frequency", "def set_cancelled(self):\n self.cancelled = True\n self.save()\n print(\"Order %s is cancelled\" % self.id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the key_is_merchant_product_no of this ChannelOrderRequest.
def key_is_merchant_product_no(self, key_is_merchant_product_no): self._key_is_merchant_product_no = key_is_merchant_product_no
[ "def set_product_id(self, **kwargs):\n if self.is_quicklook():\n self._product_id = f'{self._obs_id}.quicklook'\n else:\n self._product_id = f'{self._obs_id}.continuum_imaging'", "def job_product_id(self, job_product_id):\n\n self._job_product_id = job_product_id", "def vendor_order_id(self, vendor_order_id):\n\n self._vendor_order_id = vendor_order_id", "def set_management_key(\n self,\n key_type: MANAGEMENT_KEY_TYPE,\n management_key: bytes,\n require_touch: bool = False,\n ) -> None:\n key_type = MANAGEMENT_KEY_TYPE(key_type)\n logger.debug(f\"Setting management key of type: {key_type}\")\n\n if key_type != MANAGEMENT_KEY_TYPE.TDES:\n require_version(self.version, (5, 4, 0))\n if len(management_key) != key_type.key_len:\n raise ValueError(\"Management key must be %d bytes\" % key_type.key_len)\n\n self.protocol.send_apdu(\n 0,\n INS_SET_MGMKEY,\n 0xFF,\n 0xFE if require_touch else 0xFF,\n int2bytes(key_type) + Tlv(SLOT_CARD_MANAGEMENT, management_key),\n )\n logger.info(\"Management key set\")", "def setIsQuantity(self, flag):\n self.info['isQuantity'] = flag", "def customer_order_number(self, customer_order_number):\n\n self._customer_order_number = customer_order_number", "def goods_receipt_id(self, goods_receipt_id):\n\n self._goods_receipt_id = goods_receipt_id", "def set_external_product_ID(self, api_product, external_product_id):\n return _SetExternalProductID(self).call(api_product, external_product_id)", "def set_quantity(self, product, quantity, options=[]):\n quantity = int(quantity)\n if quantity < 0:\n raise ValueError('Quantity must be positive when updating cart')\n item_index = self.__index__(product, options)\n if item_index != -1:\n self._items_list[item_index].quantity = quantity\n if self._items_list[item_index].quantity < 1:\n del self._items_list[item_index]\n self.update_session()", "def buy_order_id(self, buy_order_id):\n\n self._buy_order_id = buy_order_id", "def product_sale_date(self, product_sale_date):\n\n self._product_sale_date = product_sale_date", "def set_number_of_products(self, number_of_products):\n self.number_of_products = number_of_products", "def i_product_item_id(self, i_product_item_id: int):\n\n self._i_product_item_id = i_product_item_id", "def product_sku(self, product_sku: str):\n if product_sku is None:\n raise ValueError(\"Invalid value for `product_sku`, must not be `None`\")\n\n self._product_sku = product_sku", "def set_ConsumerKey(self, value):\n super(UpdateAccountSettingsInputSet, self)._set_input('ConsumerKey', value)", "def card_verification_number_token(self, card_verification_number_token):\n\n self._card_verification_number_token = card_verification_number_token", "def product_id(self, product_id):\n if product_id is None:\n raise ValueError(\"Invalid value for `product_id`, must not be `None`\") # noqa: E501\n\n self._product_id = product_id", "def product_type(self, product_type):\n if product_type is not None and len(product_type) > 10:\n raise ValueError(\"Invalid value for `product_type`, length must be less than or equal to `10`\") # noqa: E501\n\n self._product_type = product_type", "def product_code_collect(self, product_code_collect):\n\n self._product_code_collect = product_code_collect" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the company_registration_no of this ChannelOrderRequest.
def company_registration_no(self, company_registration_no): if (self.local_vars_configuration.client_side_validation and company_registration_no is not None and len(company_registration_no) > 50): raise ValueError("Invalid value for `company_registration_no`, length must be less than or equal to `50`") # noqa: E501 if (self.local_vars_configuration.client_side_validation and company_registration_no is not None and len(company_registration_no) < 0): raise ValueError("Invalid value for `company_registration_no`, length must be greater than or equal to `0`") # noqa: E501 self._company_registration_no = company_registration_no
[ "def channel_order_no(self, channel_order_no):\n if self.local_vars_configuration.client_side_validation and channel_order_no is None: # noqa: E501\n raise ValueError(\"Invalid value for `channel_order_no`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n channel_order_no is not None and len(channel_order_no) > 60):\n raise ValueError(\"Invalid value for `channel_order_no`, length must be less than or equal to `60`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n channel_order_no is not None and len(channel_order_no) < 0):\n raise ValueError(\"Invalid value for `channel_order_no`, length must be greater than or equal to `0`\") # noqa: E501\n\n self._channel_order_no = channel_order_no", "def channel_customer_no(self, channel_customer_no):\n if (self.local_vars_configuration.client_side_validation and\n channel_customer_no is not None and len(channel_customer_no) > 50):\n raise ValueError(\"Invalid value for `channel_customer_no`, length must be less than or equal to `50`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n channel_customer_no is not None and len(channel_customer_no) < 0):\n raise ValueError(\"Invalid value for `channel_customer_no`, length must be greater than or equal to `0`\") # noqa: E501\n\n self._channel_customer_no = channel_customer_no", "def set_Company(self, value):\n super(GetNewsInputSet, self)._set_input('Company', value)", "def company_logo(self, company_logo):\n\n self._company_logo = company_logo", "def external_company(self, external_company):\n\n self._external_company = external_company", "def company_chcek(self):\n ids = []\n for company in self.env['res.company'].search([('id', '!=', self.company_id.id)]):\n ids.append(company.id)\n return {\n 'domain': {\n 'new_company_id': [('id', 'in', ids)]\n }\n }", "def customer_order_number(self, customer_order_number):\n\n self._customer_order_number = customer_order_number", "def chrono_identifier(self, chrono_identifier):\n\n self._chrono_identifier = chrono_identifier", "def code_no(self, code_no):\n\n self._code_no = code_no", "def set_delivery_customer(self, value):\n (self.driver.find_element(*ProjectFormLoc.FIELD_DELIVERY_CUSTOMER).\n send_keys(value))", "def setCNonce(self, cNonce):\n self[Header.PARAM_CNONCE] = cNonce", "def company_list(self, company_list):\n\n self._company_list = company_list", "def customer_code(self, customer_code: str):\n\n self._customer_code = customer_code", "def _prep_contact(self, c):\n c['update_token'] = self._next_update_token()\n return c", "def get_vehicle_registration_number(self):\n\n return self.car.get_registration_number()", "def clan_rank(self, clan_rank):\n\n self._clan_rank = clan_rank", "def numero_licenceffa(self, numero_licenceffa):\n\n self._numero_licenceffa = numero_licenceffa", "def delivery_frequency(self, delivery_frequency):\n\n self._delivery_frequency = delivery_frequency", "def set_indirect_salesforce_number(self, value):\n (self.driver.find_element\n (*ProjectFormLoc.FIELD_INDIRECT_SALESFORCE_NUMBER)).send_keys(value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the vat_no of this ChannelOrderRequest.
def vat_no(self, vat_no): if (self.local_vars_configuration.client_side_validation and vat_no is not None and len(vat_no) > 50): raise ValueError("Invalid value for `vat_no`, length must be less than or equal to `50`") # noqa: E501 if (self.local_vars_configuration.client_side_validation and vat_no is not None and len(vat_no) < 0): raise ValueError("Invalid value for `vat_no`, length must be greater than or equal to `0`") # noqa: E501 self._vat_no = vat_no
[ "def vat_details(self, vat_details):\n\n self._vat_details = vat_details", "def setKnotInV(*args, **kwargs):\n \n pass", "def ucapv(self, ucapv):\n\n self._ucapv = ucapv", "def setNonce(self, nonce):\n self[Header.PARAM_NONCE] = nonce", "def vif_uuid(self, vif_uuid):\n\n self._vif_uuid = vif_uuid", "def set_VCard(self, value):\n super(SendMessageInputSet, self)._set_input('VCard', value)", "def vendor_order_id(self, vendor_order_id):\n\n self._vendor_order_id = vendor_order_id", "def setNonce(self, nonce: int):\n self.blockHeader.nonce = nonce", "def setNum(self, num: 'int const') -> \"void\":\n return _coin.SoMField_setNum(self, num)", "def sent_at(self, sent_at):\n if self.local_vars_configuration.client_side_validation and sent_at is None: # noqa: E501\n raise ValueError(\"Invalid value for `sent_at`, must not be `None`\") # noqa: E501\n\n self._sent_at = sent_at", "def set_line_ver(self, line, ver):\n self._set_line_ver(line, ver)", "def verdi(self, verdi):\n\n self._verdi = verdi", "def customer_order_number(self, customer_order_number):\n\n self._customer_order_number = customer_order_number", "def channel_order_no(self, channel_order_no):\n if self.local_vars_configuration.client_side_validation and channel_order_no is None: # noqa: E501\n raise ValueError(\"Invalid value for `channel_order_no`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n channel_order_no is not None and len(channel_order_no) > 60):\n raise ValueError(\"Invalid value for `channel_order_no`, length must be less than or equal to `60`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n channel_order_no is not None and len(channel_order_no) < 0):\n raise ValueError(\"Invalid value for `channel_order_no`, length must be greater than or equal to `0`\") # noqa: E501\n\n self._channel_order_no = channel_order_no", "def set_version(self, ver):\n self._ver = ver", "def setKnotsInV(*args, **kwargs):\n \n pass", "def set_n(self, value):\n self._n = value", "def set_line_num(self, line, num):\n self._set_line_num(line, num)", "def set_number(self, number:int):\n self.number = number #set number, let this card to be..)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }